426 files changed, 21425 insertions(+), 4682 deletions(-)
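A recurring conversion in the hunks below (drivers/atm, chelsio chtls, hsi, infiniband/hfi1, ipoib) replaces direct access to skb_frag_t fields (frag->page.p, frag->page_offset, frag->size) with the accessor helpers, so the fragment representation can change without touching every driver. A minimal before/after sketch of the pattern; it is illustrative only and uses no helpers beyond those visible in the hunks themselves:

    /* Illustrative sketch of the skb_frag accessor conversion; the old
     * field accesses are shown in the comments.
     */
    #include <linux/skbuff.h>

    static void frag_walk_example(const struct sk_buff *skb)
    {
            const skb_frag_t *frag;
            struct page *page;
            unsigned int off, len;
            int i;

            for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                    frag = &skb_shinfo(skb)->frags[i];
                    page = skb_frag_page(frag);  /* was: frag->page.p */
                    off = skb_frag_off(frag);    /* was: frag->page_offset */
                    len = skb_frag_size(frag);   /* was: frag->size */
                    (void)page; (void)off; (void)len;
            }
    }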
diff --git a/Documentation/PCI/pci-error-recovery.rst b/Documentation/PCI/pci-error-recovery.rst index e5d450df06b4..13beee23cb04 100644 --- a/Documentation/PCI/pci-error-recovery.rst +++ b/Documentation/PCI/pci-error-recovery.rst @@ -421,7 +421,6 @@ That is, the recovery API only requires that: - drivers/net/ixgbe - drivers/net/cxgb3 - drivers/net/s2io.c - - drivers/net/qlge The End ------- diff --git a/Documentation/devicetree/bindings/net/aspeed,ast2600-mdio.yaml b/Documentation/devicetree/bindings/net/aspeed,ast2600-mdio.yaml new file mode 100644 index 000000000000..71808e78a495 --- /dev/null +++ b/Documentation/devicetree/bindings/net/aspeed,ast2600-mdio.yaml @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/aspeed,ast2600-mdio.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ASPEED AST2600 MDIO Controller + +maintainers: + - Andrew Jeffery <andrew@aj.id.au> + +description: |+ + The ASPEED AST2600 MDIO controller is the third iteration of ASPEED's MDIO + bus register interface, this time also separating out the controller from the + MAC. + +allOf: + - $ref: "mdio.yaml#" + +properties: + compatible: + const: aspeed,ast2600-mdio + reg: + maxItems: 1 + description: The register range of the MDIO controller instance + +required: + - compatible + - reg + - "#address-cells" + - "#size-cells" + +examples: + - | + mdio0: mdio@1e650000 { + compatible = "aspeed,ast2600-mdio"; + reg = <0x1e650000 0x8>; + #address-cells = <1>; + #size-cells = <0>; + + ethphy0: ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0>; + }; + }; diff --git a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt index bc77477c6878..94c0f8bf4deb 100644 --- a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt +++ b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt @@ -32,6 +32,15 @@ Optional properties: ack_gpr is the gpr register offset of CAN stop acknowledge. ack_bit is the bit offset of CAN stop acknowledge. +- fsl,clk-source: Select the clock source to the CAN Protocol Engine (PE). + It is SoC implementation dependent; refer to the SoC reference + manual (RM) for the detailed definition. If this property is not set + in the device tree node, the driver selects clock source 1 by default. + 0: clock source 0 (oscillator clock) + 1: clock source 1 (peripheral clock) + +- wakeup-source: enable CAN remote wakeup + Example: can@1c000 { @@ -40,4 +49,5 @@ Example: interrupts = <48 0x2>; interrupt-parent = <&mpic>; clock-frequency = <200000000>; // filled in by bootloader + fsl,clk-source = <0>; // select clock source 0 for PE }; diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt new file mode 100644 index 000000000000..c388f7d9feb1 --- /dev/null +++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt @@ -0,0 +1,37 @@ +Texas Instruments TCAN4x5x CAN Controller +================================================ + +This file provides device node information for the TCAN4x5x CAN controller interface. + +Required properties: + - compatible: "ti,tcan4x5x" + - reg: 0 + - #address-cells: 1 + - #size-cells: 0 + - spi-max-frequency: Maximum frequency of the SPI bus the chip can + operate at; it should be less than or equal to 18 MHz. + - data-ready-gpios: Interrupt GPIO for data and error reporting. + - device-wake-gpios: Wake up GPIO to wake up the TCAN device.
+ +See Documentation/devicetree/bindings/net/can/m_can.txt for additional +required property details. + +Optional properties: + - reset-gpios: Hardwired output GPIO. If not defined, a software + reset is used. + - device-state-gpios: Input GPIO that indicates if the device is in + a sleep state or if the device is active. + +Example: +tcan4x5x: tcan4x5x@0 { + compatible = "ti,tcan4x5x"; + reg = <0>; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <10000000>; + bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>; + data-ready-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>; + device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>; + device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>; +}; diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt index 4ac21cef370e..5e8429b6f9ca 100644 --- a/Documentation/devicetree/bindings/net/dsa/ksz.txt +++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt @@ -5,6 +5,9 @@ Required properties: - compatible: For external switch chips, compatible string must be exactly one of the following: + - "microchip,ksz8765" + - "microchip,ksz8794" + - "microchip,ksz8795" - "microchip,ksz9477" - "microchip,ksz9897" - "microchip,ksz9896" diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt index 6f9538974bb9..30c11fea491b 100644 --- a/Documentation/devicetree/bindings/net/dsa/marvell.txt +++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt @@ -22,7 +22,7 @@ which is at a different MDIO base address in different switch families. - "marvell,mv88e6190" : Switch has base address 0x00. Use with models: 6190, 6190X, 6191, 6290, 6390, 6390X - "marvell,mv88e6250" : Switch has base address 0x08 or 0x18. Use with model: - 6250 + 6220, 6250 Required properties: - compatible : Should be one of "marvell,mv88e6085", diff --git a/Documentation/devicetree/bindings/net/fsl-enetc.txt b/Documentation/devicetree/bindings/net/fsl-enetc.txt index 25fc687419db..b7034ccbc1bd 100644 --- a/Documentation/devicetree/bindings/net/fsl-enetc.txt +++ b/Documentation/devicetree/bindings/net/fsl-enetc.txt @@ -11,7 +11,9 @@ Required properties: to parent node bindings. - compatible : Should be "fsl,enetc". -1) The ENETC external port is connected to a MDIO configurable phy: +1. The ENETC external port is connected to an MDIO configurable phy + +1.1. Using the local ENETC Port MDIO interface In this case, the ENETC node should include a "mdio" sub-node that in turn should contain the "ethernet-phy" node describing the @@ -47,8 +49,42 @@ Example: }; }; -2) The ENETC port is an internal port or has a fixed-link external -connection: +1.2. Using the central MDIO PCIe endpoint device + +In this case, the mdio node should be defined as another PCIe +endpoint node, at the same level as the ENETC port nodes. + +Required properties: + +- reg : Specifies PCIe Device Number and Function + Number of the ENETC endpoint device, according + to parent node bindings. +- compatible : Should be "fsl,enetc-mdio". + +The remaining required mdio bus properties are standard; their bindings +are already defined in Documentation/devicetree/bindings/net/mdio.txt.
+ +Example: + + ethernet@0,0 { + compatible = "fsl,enetc"; + reg = <0x000000 0 0 0 0>; + phy-handle = <&sgmii_phy0>; + phy-connection-type = "sgmii"; + }; + + mdio@0,3 { + compatible = "fsl,enetc-mdio"; + reg = <0x000300 0 0 0 0>; + #address-cells = <1>; + #size-cells = <0>; + sgmii_phy0: ethernet-phy@2 { + reg = <0x2>; + }; + }; + +2. The ENETC port is an internal port or has a fixed-link external +connection In this case, the ENETC port node defines a fixed link connection, as specified by Documentation/devicetree/bindings/net/fixed-link.txt. diff --git a/MAINTAINERS b/MAINTAINERS index 47800d32cfbc..e352550a6895 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3635,9 +3635,12 @@ S: Maintained F: Documentation/devicetree/bindings/net/can/ F: drivers/net/can/ F: include/linux/can/dev.h +F: include/linux/can/led.h +F: include/linux/can/rx-offload.h F: include/linux/can/platform/ F: include/uapi/linux/can/error.h F: include/uapi/linux/can/netlink.h +F: include/uapi/linux/can/vxcan.h CAN NETWORK LAYER M: Oliver Hartkopp <socketcan@hartkopp.net> @@ -3650,6 +3653,8 @@ S: Maintained F: Documentation/networking/can.rst F: net/can/ F: include/linux/can/core.h +F: include/linux/can/skb.h +F: include/net/netns/can.h F: include/uapi/linux/can.h F: include/uapi/linux/can/bcm.h F: include/uapi/linux/can/raw.h @@ -11322,7 +11327,6 @@ F: include/net/nfc/ F: include/uapi/linux/nfc.h F: drivers/nfc/ F: include/linux/platform_data/nfcmrvl.h -F: include/linux/platform_data/nxp-nci.h F: Documentation/devicetree/bindings/net/nfc/ NFS, SUNRPC, AND LOCKD CLIENTS @@ -13217,7 +13221,7 @@ M: Manish Chopra <manishc@marvell.com> M: GR-Linux-NIC-Dev@marvell.com L: netdev@vger.kernel.org S: Supported -F: drivers/net/ethernet/qlogic/qlge/ +F: drivers/staging/qlge/ QM1D1B0004 MEDIA DRIVER M: Akihiro Tsukada <tskd08@gmail.com> diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts index de6ef39f3118..663c4b728c07 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts @@ -85,6 +85,26 @@ system-clock-frequency = <25000000>; }; }; + + mdio-mux { + compatible = "mdio-mux-multiplexer"; + mux-controls = <&mux 0>; + mdio-parent-bus = <&enetc_mdio_pf3>; + #address-cells=<1>; + #size-cells = <0>; + + /* on-board RGMII PHY */ + mdio@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + qds_phy1: ethernet-phy@5 { + /* Atheros 8035 */ + reg = <5>; + }; + }; + }; }; &duart0 { @@ -164,6 +184,26 @@ }; }; }; + + fpga@66 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,ls1028aqds-fpga", "fsl,fpga-qixis-i2c", + "simple-mfd"; + reg = <0x66>; + + mux: mux-controller { + compatible = "reg-mux"; + #mux-control-cells = <1>; + mux-reg-masks = <0x54 0xf0>; /* 0: reg 0x54, bits 7:4 */ + }; + }; + +}; + +&enetc_port1 { + phy-handle = <&qds_phy1>; + phy-connection-type = "rgmii-id"; }; &sai1 { diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi index 7975519b4f56..de71153fda00 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi @@ -536,6 +536,12 @@ compatible = "fsl,enetc"; reg = <0x000100 0 0 0 0>; }; + enetc_mdio_pf3: mdio@0,3 { + compatible = "fsl,enetc-mdio"; + reg = <0x000300 0 0 0 0>; + #address-cells = <1>; + #size-cells = <0>; + }; ethernet@0,4 { compatible = "fsl,enetc-ptp"; reg = <0x000400 0 0 0 0>; diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 
79b718430cd1..b23d1e4bad33 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -1136,7 +1136,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */ else put_dma(tx->index,eni_dev->dma,&j,(unsigned long) skb_frag_page(&skb_shinfo(skb)->frags[i]) + - skb_shinfo(skb)->frags[i].page_offset, + skb_frag_off(&skb_shinfo(skb)->frags[i]), skb_frag_size(&skb_shinfo(skb)->frags[i])); } if (skb->len & 3) { diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 211607986134..70b00ae4ec38 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -2580,10 +2580,9 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) slot = 0; } - tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, - (void *) page_address(frag->page) + frag->page_offset, - frag->size, DMA_TO_DEVICE); - tpd->iovec[slot].len = frag->size; + tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev, + frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); + tpd->iovec[slot].len = skb_frag_size(frag); ++slot; } diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 5c4c6eeb505c..c32f7dd9879a 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -516,9 +516,8 @@ struct geos_gpio_attr { static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct pci_dev *pdev = to_pci_dev(dev); struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr); - struct solos_card *card = pci_get_drvdata(pdev); + struct solos_card *card = dev_get_drvdata(dev); uint32_t data32; if (count != 1 && (count != 2 || buf[1] != '\n')) @@ -542,9 +541,8 @@ static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct pci_dev *pdev = to_pci_dev(dev); struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr); - struct solos_card *card = pci_get_drvdata(pdev); + struct solos_card *card = dev_get_drvdata(dev); uint32_t data32; data32 = ioread32(card->config_regs + GPIO_STATUS); @@ -556,9 +554,8 @@ static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr, static ssize_t hardware_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct pci_dev *pdev = to_pci_dev(dev); struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr); - struct solos_card *card = pci_get_drvdata(pdev); + struct solos_card *card = dev_get_drvdata(dev); uint32_t data32; data32 = ioread32(card->config_regs + GPIO_STATUS); diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 551bca6fef24..c70cb5f272cf 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -1134,7 +1134,9 @@ copy: } /* Update the skb. 
*/ if (merge) { - skb_shinfo(skb)->frags[i - 1].size += copy; + skb_frag_size_add( + &skb_shinfo(skb)->frags[i - 1], + copy); } else { skb_fill_page_desc(skb, i, page, off, copy); if (off + copy < pg_size) { @@ -1247,7 +1249,7 @@ new_buf: i = skb_shinfo(skb)->nr_frags; if (skb_can_coalesce(skb, i, page, offset)) { - skb_shinfo(skb)->frags[i - 1].size += copy; + skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); } else if (i < MAX_SKB_FRAGS) { get_page(page); skb_fill_page_desc(skb, i, page, offset, copy); diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index 9aeed98b87a1..0253e76f1df2 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ b/drivers/hsi/clients/ssi_protocol.c @@ -181,7 +181,8 @@ static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg) sg = sg_next(sg); BUG_ON(!sg); frag = &skb_shinfo(skb)->frags[i]; - sg_set_page(sg, frag->page.p, frag->size, frag->page_offset); + sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag), + skb_frag_off(frag)); } } diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c index af1b1ffcb38e..7d90b900131b 100644 --- a/drivers/infiniband/hw/hfi1/vnic_sdma.c +++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c @@ -102,13 +102,13 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde, goto bail_txadd; for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) { - struct skb_frag_struct *frag = &skb_shinfo(tx->skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i]; /* combine physically continuous fragments later? */ ret = sdma_txadd_page(sde->dd, &tx->txreq, skb_frag_page(frag), - frag->page_offset, + skb_frag_off(frag), skb_frag_size(frag)); if (unlikely(ret)) goto bail_txadd; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 78fa777c87b1..c332b4761816 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -293,7 +293,8 @@ int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req) const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; mapping[i + off] = ib_dma_map_page(ca, skb_frag_page(frag), - frag->page_offset, skb_frag_size(frag), + skb_frag_off(frag), + skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(ib_dma_mapping_error(ca, mapping[i + off]))) goto partial_error; diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index ab585900a057..17c166cc8482 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -120,6 +120,19 @@ config CAN_JANZ_ICAN3 This driver can also be built as a module. If so, the module will be called janz-ican3.ko. +config CAN_KVASER_PCIEFD + depends on PCI + tristate "Kvaser PCIe FD cards" + help + This is a driver for the Kvaser PCI Express CAN FD family. 
+ + Supported devices: + Kvaser PCIEcan 4xHS + Kvaser PCIEcan 2xHS v2 + Kvaser PCIEcan HS v2 + Kvaser Mini PCI Express HS v2 + Kvaser Mini PCI Express 2xHS v2 + config CAN_SUN4I tristate "Allwinner A10 CAN controller" depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 44922bf29b6a..22164300122d 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o obj-$(CONFIG_CAN_GRCAN) += grcan.o obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/ obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o +obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd.o obj-$(CONFIG_CAN_MSCAN) += mscan/ obj-$(CONFIG_CAN_M_CAN) += m_can/ obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/ diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index fcec8bcb53d6..dc5695dffc2e 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -24,6 +24,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/regmap.h> @@ -266,6 +267,7 @@ struct flexcan_stop_mode { struct flexcan_priv { struct can_priv can; struct can_rx_offload offload; + struct device *dev; struct flexcan_regs __iomem *regs; struct flexcan_mb __iomem *tx_mb; @@ -273,6 +275,8 @@ struct flexcan_priv { u8 tx_mb_idx; u8 mb_count; u8 mb_size; + u8 clk_src; /* clock source of CAN Protocol Engine */ + u32 reg_ctrl_default; u32 reg_imask1_default; u32 reg_imask2_default; @@ -462,6 +466,27 @@ static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv) priv->write(reg_ctrl, ®s->ctrl); } +static int flexcan_clks_enable(const struct flexcan_priv *priv) +{ + int err; + + err = clk_prepare_enable(priv->clk_ipg); + if (err) + return err; + + err = clk_prepare_enable(priv->clk_per); + if (err) + clk_disable_unprepare(priv->clk_ipg); + + return err; +} + +static void flexcan_clks_disable(const struct flexcan_priv *priv) +{ + clk_disable_unprepare(priv->clk_per); + clk_disable_unprepare(priv->clk_ipg); +} + static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) { if (!priv->reg_xceiver) @@ -588,19 +613,13 @@ static int flexcan_get_berr_counter(const struct net_device *dev, const struct flexcan_priv *priv = netdev_priv(dev); int err; - err = clk_prepare_enable(priv->clk_ipg); - if (err) + err = pm_runtime_get_sync(priv->dev); + if (err < 0) return err; - err = clk_prepare_enable(priv->clk_per); - if (err) - goto out_disable_ipg; - err = __flexcan_get_berr_counter(dev, bec); - clk_disable_unprepare(priv->clk_per); - out_disable_ipg: - clk_disable_unprepare(priv->clk_ipg); + pm_runtime_put(priv->dev); return err; } @@ -1233,17 +1252,13 @@ static int flexcan_open(struct net_device *dev) struct flexcan_priv *priv = netdev_priv(dev); int err; - err = clk_prepare_enable(priv->clk_ipg); - if (err) + err = pm_runtime_get_sync(priv->dev); + if (err < 0) return err; - err = clk_prepare_enable(priv->clk_per); - if (err) - goto out_disable_ipg; - err = open_candev(dev); if (err) - goto out_disable_per; + goto out_runtime_put; err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev); if (err) @@ -1306,10 +1321,8 @@ static int flexcan_open(struct net_device *dev) free_irq(dev->irq, dev); out_close: close_candev(dev); - out_disable_per: - clk_disable_unprepare(priv->clk_per); - out_disable_ipg: - clk_disable_unprepare(priv->clk_ipg); + out_runtime_put: + pm_runtime_put(priv->dev); return 
err; } @@ -1324,10 +1337,9 @@ static int flexcan_close(struct net_device *dev) can_rx_offload_del(&priv->offload); free_irq(dev->irq, dev); - clk_disable_unprepare(priv->clk_per); - clk_disable_unprepare(priv->clk_ipg); close_candev(dev); + pm_runtime_put(priv->dev); can_led_event(dev, CAN_LED_EVENT_STOP); @@ -1367,20 +1379,20 @@ static int register_flexcandev(struct net_device *dev) struct flexcan_regs __iomem *regs = priv->regs; u32 reg, err; - err = clk_prepare_enable(priv->clk_ipg); + err = flexcan_clks_enable(priv); if (err) return err; - err = clk_prepare_enable(priv->clk_per); - if (err) - goto out_disable_ipg; - /* select "bus clock", chip must be disabled */ err = flexcan_chip_disable(priv); if (err) - goto out_disable_per; + goto out_clks_disable; + reg = priv->read(®s->ctrl); - reg |= FLEXCAN_CTRL_CLK_SRC; + if (priv->clk_src) + reg |= FLEXCAN_CTRL_CLK_SRC; + else + reg &= ~FLEXCAN_CTRL_CLK_SRC; priv->write(reg, ®s->ctrl); err = flexcan_chip_enable(priv); @@ -1406,15 +1418,21 @@ static int register_flexcandev(struct net_device *dev) } err = register_candev(dev); + if (err) + goto out_chip_disable; - /* disable core and turn off clocks */ - out_chip_disable: + /* Disable core and let pm_runtime_put() disable the clocks. + * If CONFIG_PM is not enabled, the clocks will stay powered. + */ flexcan_chip_disable(priv); - out_disable_per: - clk_disable_unprepare(priv->clk_per); - out_disable_ipg: - clk_disable_unprepare(priv->clk_ipg); + pm_runtime_put(priv->dev); + return 0; + + out_chip_disable: + flexcan_chip_disable(priv); + out_clks_disable: + flexcan_clks_disable(priv); return err; } @@ -1473,6 +1491,11 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev) device_set_wakeup_capable(&pdev->dev, true); + if (of_property_read_bool(np, "wakeup-source")) + device_set_wakeup_enable(&pdev->dev, true); + + return 0; + out_put_node: of_node_put(gpr_np); return ret; @@ -1508,6 +1531,7 @@ static int flexcan_probe(struct platform_device *pdev) struct clk *clk_ipg = NULL, *clk_per = NULL; struct flexcan_regs __iomem *regs; int err, irq; + u8 clk_src = 1; u32 clock_freq = 0; reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); @@ -1516,9 +1540,12 @@ static int flexcan_probe(struct platform_device *pdev) else if (IS_ERR(reg_xceiver)) reg_xceiver = NULL; - if (pdev->dev.of_node) + if (pdev->dev.of_node) { of_property_read_u32(pdev->dev.of_node, "clock-frequency", &clock_freq); + of_property_read_u8(pdev->dev.of_node, + "fsl,clk-source", &clk_src); + } if (!clock_freq) { clk_ipg = devm_clk_get(&pdev->dev, "ipg"); @@ -1576,6 +1603,7 @@ static int flexcan_probe(struct platform_device *pdev) priv->write = flexcan_write_le; } + priv->dev = &pdev->dev; priv->can.clock.freq = clock_freq; priv->can.bittiming_const = &flexcan_bittiming_const; priv->can.do_set_mode = flexcan_set_mode; @@ -1586,9 +1614,14 @@ static int flexcan_probe(struct platform_device *pdev) priv->regs = regs; priv->clk_ipg = clk_ipg; priv->clk_per = clk_per; + priv->clk_src = clk_src; priv->devtype_data = devtype_data; priv->reg_xceiver = reg_xceiver; + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + err = register_flexcandev(dev); if (err) { dev_err(&pdev->dev, "registering netdev failed\n"); @@ -1615,6 +1648,7 @@ static int flexcan_remove(struct platform_device *pdev) struct net_device *dev = platform_get_drvdata(pdev); unregister_flexcandev(dev); + pm_runtime_disable(&pdev->dev); free_candev(dev); return 0; @@ -1624,7 +1658,7 @@ static int __maybe_unused 
flexcan_suspend(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); - int err; + int err = 0; if (netif_running(dev)) { /* if wakeup is enabled, enter stop mode @@ -1639,20 +1673,22 @@ static int __maybe_unused flexcan_suspend(struct device *device) err = flexcan_chip_disable(priv); if (err) return err; + + err = pm_runtime_force_suspend(device); } netif_stop_queue(dev); netif_device_detach(dev); } priv->can.state = CAN_STATE_SLEEPING; - return 0; + return err; } static int __maybe_unused flexcan_resume(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); - int err; + int err = 0; priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(dev)) { @@ -1661,14 +1697,35 @@ static int __maybe_unused flexcan_resume(struct device *device) if (device_may_wakeup(device)) { disable_irq_wake(dev->irq); } else { - err = flexcan_chip_enable(priv); + err = pm_runtime_force_resume(device); if (err) return err; + + err = flexcan_chip_enable(priv); } } + + return err; +} + +static int __maybe_unused flexcan_runtime_suspend(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct flexcan_priv *priv = netdev_priv(dev); + + flexcan_clks_disable(priv); + return 0; } +static int __maybe_unused flexcan_runtime_resume(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct flexcan_priv *priv = netdev_priv(dev); + + return flexcan_clks_enable(priv); +} + static int __maybe_unused flexcan_noirq_suspend(struct device *device) { struct net_device *dev = dev_get_drvdata(device); @@ -1698,6 +1755,7 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device) static const struct dev_pm_ops flexcan_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(flexcan_suspend, flexcan_resume) + SET_RUNTIME_PM_OPS(flexcan_runtime_suspend, flexcan_runtime_resume, NULL) SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(flexcan_noirq_suspend, flexcan_noirq_resume) }; diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 19d4f52a8f90..a761092e6ac9 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1936,7 +1936,6 @@ static int ican3_probe(struct platform_device *pdev) /* find our IRQ number */ mod->irq = platform_get_irq(pdev, 0); if (mod->irq < 0) { - dev_err(dev, "IRQ line not found\n"); ret = -ENODEV; goto out_free_ndev; } diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c new file mode 100644 index 000000000000..3af747cbbde4 --- /dev/null +++ b/drivers/net/can/kvaser_pciefd.c @@ -0,0 +1,1912 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. 
+ * Parts of this driver are based on the following: + * - Kvaser linux pciefd driver (version 5.25) + * - PEAK linux canfd driver + * - Altera Avalon EPCS flash controller driver + */ + +#include <linux/kernel.h> +#include <linux/version.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/can/dev.h> +#include <linux/timer.h> +#include <linux/netdevice.h> +#include <linux/crc32.h> +#include <linux/iopoll.h> + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Kvaser AB <support@kvaser.com>"); +MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); + +#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd" + +#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000) +#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200)) +#define KVASER_PCIEFD_MAX_ERR_REP 256 +#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17 +#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4 +#define KVASER_PCIEFD_DMA_COUNT 2 + +#define KVASER_PCIEFD_DMA_SIZE (4 * 1024) +#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0) + +#define KVASER_PCIEFD_VENDOR 0x1a07 +#define KVASER_PCIEFD_4HS_ID 0x0d +#define KVASER_PCIEFD_2HS_ID 0x0e +#define KVASER_PCIEFD_HS_ID 0x0f +#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10 +#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11 + +/* PCIe IRQ registers */ +#define KVASER_PCIEFD_IRQ_REG 0x40 +#define KVASER_PCIEFD_IEN_REG 0x50 +/* DMA map */ +#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000 +/* Kvaser KCAN CAN controller registers */ +#define KVASER_PCIEFD_KCAN0_BASE 0x10000 +#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000 +#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100 +#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180 +#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0 +#define KVASER_PCIEFD_KCAN_CMD_REG 0x400 +#define KVASER_PCIEFD_KCAN_IEN_REG 0x408 +#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410 +#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414 +#define KVASER_PCIEFD_KCAN_STAT_REG 0x418 +#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c +#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 +#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 +#define KVASER_PCIEFD_KCAN_PWM_REG 0x430 +/* Loopback control register */ +#define KVASER_PCIEFD_LOOP_REG 0x1f000 +/* System identification and information registers */ +#define KVASER_PCIEFD_SYSID_BASE 0x1f020 +#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8) +#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc) +#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14) +/* Shared receive buffer registers */ +#define KVASER_PCIEFD_SRB_BASE 0x1f200 +#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200) +#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204) +#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c) +#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210) +#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218) +/* EPCS flash controller registers */ +#define KVASER_PCIEFD_SPI_BASE 0x1fc00 +#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE +#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4) +#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8) +#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc) +#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14) + +#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f +#define KVASER_PCIEFD_IRQ_SRB BIT(4) + +#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24 +#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16 +#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1 + +/* Reset DMA buffer 0, 1 and 
FIFO offset */ +#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4) +#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5) +#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0) + +/* DMA packet done, buffer 0 and 1 */ +#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8) +#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9) +/* DMA overflow, buffer 0 and 1 */ +#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10) +#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11) +/* DMA underflow, buffer 0 and 1 */ +#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12) +#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13) + +/* DMA idle */ +#define KVASER_PCIEFD_SRB_STAT_DI BIT(15) +/* DMA support */ +#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24) + +/* DMA Enable */ +#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0) + +/* EPCS flash controller definitions */ +#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024) +#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L) +#define KVASER_PCIEFD_CFG_MAX_PARAMS 256 +#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d +#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24 +#define KVASER_PCIEFD_CFG_SYS_VER 1 +#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130 +#define KVASER_PCIEFD_SPI_TMT BIT(5) +#define KVASER_PCIEFD_SPI_TRDY BIT(6) +#define KVASER_PCIEFD_SPI_RRDY BIT(7) +#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14 +/* Commands for controlling the onboard flash */ +#define KVASER_PCIEFD_FLASH_RES_CMD 0xab +#define KVASER_PCIEFD_FLASH_READ_CMD 0x3 +#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5 + +/* Kvaser KCAN definitions */ +#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29) +#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29) + +#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16 +/* Request status packet */ +#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0) +/* Abort, flush and reset */ +#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1) + +/* Tx FIFO unaligned read */ +#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0) +/* Tx FIFO unaligned end */ +#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1) +/* Bus parameter protection error */ +#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2) +/* FDF bit when controller is in classic mode */ +#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3) +/* Rx FIFO overflow */ +#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5) +/* Abort done */ +#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13) +/* Tx buffer flush done */ +#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14) +/* Tx FIFO overflow */ +#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15) +/* Tx FIFO empty */ +#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16) +/* Transmitter unaligned */ +#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17) + +#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16 + +#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24 +/* Abort request */ +#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7) +/* Idle state. 
Controller in reset mode and no abort or flush pending */ +#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10) +/* Bus off */ +#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11) +/* Reset mode request */ +#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14) +/* Controller in reset mode */ +#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15) +/* Controller got one-shot capability */ +#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16) +/* Controller got CAN FD capability */ +#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19) +#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \ + KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \ + KVASER_PCIEFD_KCAN_STAT_IRM) + +/* Reset mode */ +#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8) +/* Listen only mode */ +#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9) +/* Error packet enable */ +#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12) +/* CAN FD non-ISO */ +#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15) +/* Acknowledgment packet type */ +#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20) +/* Active error flag enable. Clear to force error passive */ +#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23) +/* Classic CAN mode */ +#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31) + +#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13 +#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17 +#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26 + +#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16 + +/* Kvaser KCAN packet types */ +#define KVASER_PCIEFD_PACK_TYPE_DATA 0 +#define KVASER_PCIEFD_PACK_TYPE_ACK 1 +#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2 +#define KVASER_PCIEFD_PACK_TYPE_ERROR 3 +#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4 +#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5 +#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6 +#define KVASER_PCIEFD_PACK_TYPE_STATUS 8 +#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9 + +/* Kvaser KCAN packet common definitions */ +#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff +#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25 +#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28 + +/* Kvaser KCAN TDATA and RDATA first word */ +#define KVASER_PCIEFD_RPACKET_IDE BIT(30) +#define KVASER_PCIEFD_RPACKET_RTR BIT(29) +/* Kvaser KCAN TDATA and RDATA second word */ +#define KVASER_PCIEFD_RPACKET_ESI BIT(13) +#define KVASER_PCIEFD_RPACKET_BRS BIT(14) +#define KVASER_PCIEFD_RPACKET_FDF BIT(15) +#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8 +/* Kvaser KCAN TDATA second word */ +#define KVASER_PCIEFD_TPACKET_SMS BIT(16) +#define KVASER_PCIEFD_TPACKET_AREQ BIT(31) + +/* Kvaser KCAN APACKET */ +#define KVASER_PCIEFD_APACKET_FLU BIT(8) +#define KVASER_PCIEFD_APACKET_CT BIT(9) +#define KVASER_PCIEFD_APACKET_ABL BIT(10) +#define KVASER_PCIEFD_APACKET_NACK BIT(11) + +/* Kvaser KCAN SPACK first word */ +#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8 +#define KVASER_PCIEFD_SPACK_BOFF BIT(16) +#define KVASER_PCIEFD_SPACK_IDET BIT(20) +#define KVASER_PCIEFD_SPACK_IRM BIT(21) +#define KVASER_PCIEFD_SPACK_RMCD BIT(22) +/* Kvaser KCAN SPACK second word */ +#define KVASER_PCIEFD_SPACK_AUTO BIT(21) +#define KVASER_PCIEFD_SPACK_EWLR BIT(23) +#define KVASER_PCIEFD_SPACK_EPLR BIT(24) + +struct kvaser_pciefd; + +struct kvaser_pciefd_can { + struct can_priv can; + struct kvaser_pciefd *kv_pcie; + void __iomem *reg_base; + struct can_berr_counter bec; + u8 cmd_seq; + int err_rep_cnt; + int echo_idx; + spinlock_t lock; /* Locks sensitive registers (e.g. 
MODE) */ + spinlock_t echo_lock; /* Locks the message echo buffer */ + struct timer_list bec_poll_timer; + struct completion start_comp, flush_comp; +}; + +struct kvaser_pciefd { + struct pci_dev *pci; + void __iomem *reg_base; + struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS]; + void *dma_data[KVASER_PCIEFD_DMA_COUNT]; + u8 nr_channels; + u32 freq; + u32 freq_to_ticks_div; +}; + +struct kvaser_pciefd_rx_packet { + u32 header[2]; + u64 timestamp; +}; + +struct kvaser_pciefd_tx_packet { + u32 header[2]; + u8 data[64]; +}; + +static const struct can_bittiming_const kvaser_pciefd_bittiming_const = { + .name = KVASER_PCIEFD_DRV_NAME, + .tseg1_min = 1, + .tseg1_max = 255, + .tseg2_min = 1, + .tseg2_max = 32, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 4096, + .brp_inc = 1, +}; + +struct kvaser_pciefd_cfg_param { + __le32 magic; + __le32 nr; + __le32 len; + u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ]; +}; + +struct kvaser_pciefd_cfg_img { + __le32 version; + __le32 magic; + __le32 crc; + struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS]; +}; + +static struct pci_device_id kvaser_pciefd_id_table[] = { + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), }, + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), }, + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), }, + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), }, + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), }, + { 0,}, +}; +MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table); + +/* Onboard flash memory functions */ +static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk) +{ + u32 res; + int ret; + + ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG, + res, res & msk, 0, 10); + + return ret; +} + +static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx, + u32 tx_len, u8 *rx, u32 rx_len) +{ + int c; + + iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG); + iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG); + ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); + + c = tx_len; + while (c--) { + if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY)) + return -EIO; + + iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG); + + if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY)) + return -EIO; + + ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); + } + + c = rx_len; + while (c-- > 0) { + if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY)) + return -EIO; + + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG); + + if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY)) + return -EIO; + + *rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); + } + + if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT)) + return -EIO; + + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG); + + if (c != -1) { + dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n"); + return -EIO; + } + + return 0; +} + +static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_cfg_img *img) +{ + int offset = KVASER_PCIEFD_CFG_IMG_OFFSET; + int res, crc; + u8 *crc_buff; + + u8 cmd[] = { + KVASER_PCIEFD_FLASH_READ_CMD, + (u8)((offset >> 16) & 0xff), + (u8)((offset >> 8) & 0xff), + (u8)(offset & 0xff) + }; + + res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img, + KVASER_PCIEFD_CFG_IMG_SZ); + if (res) + return res; + + crc_buff = (u8 *)img->params; + + if (le32_to_cpu(img->version) != 
KVASER_PCIEFD_CFG_SYS_VER) { + dev_err(&pcie->pci->dev, + "Config flash corrupted, version number is wrong\n"); + return -ENODEV; + } + + if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) { + dev_err(&pcie->pci->dev, + "Config flash corrupted, magic number is wrong\n"); + return -ENODEV; + } + + crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params)); + if (le32_to_cpu(img->crc) != crc) { + dev_err(&pcie->pci->dev, + "Stored CRC does not match flash image contents\n"); + return -EIO; + } + + return 0; +} + +static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_cfg_img *img) +{ + struct kvaser_pciefd_cfg_param *param; + + param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN]; + memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len)); +} + +static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie) +{ + int res; + struct kvaser_pciefd_cfg_img *img; + + /* Read electronic signature */ + u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0}; + + res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1); + if (res) + return -EIO; + + img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL); + if (!img) + return -ENOMEM; + + if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) { + dev_err(&pcie->pci->dev, + "Flash id is 0x%x instead of expected EPCS16 (0x%x)\n", + cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16); + + res = -ENODEV; + goto image_free; + } + + cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD; + res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1); + if (res) { + goto image_free; + } else if (cmd[0] & 1) { + res = -EIO; + /* No write is ever done, the WIP should never be set */ + dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n"); + goto image_free; + } + + res = kvaser_pciefd_cfg_read_and_verify(pcie, img); + if (res) { + res = -EIO; + goto image_free; + } + + kvaser_pciefd_cfg_read_params(pcie, img); + +image_free: + kfree(img); + return res; +} + +static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can) +{ + u32 cmd; + + cmd = KVASER_PCIEFD_KCAN_CMD_SRQ; + cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; + iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); +} + +static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) { + mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + } + spin_unlock_irqrestore(&can->lock, irq); +} + +static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + spin_unlock_irqrestore(&can->lock, irq); +} + +static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) +{ + u32 msk; + + msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF | + KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD | + KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL | + KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP | + KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD; + + iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + + return 0; +} + +static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + 
spin_lock_irqsave(&can->lock, irq); + + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + if (can->can.ctrlmode & CAN_CTRLMODE_FD) { + mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM; + if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) + mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN; + else + mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; + } else { + mode |= KVASER_PCIEFD_KCAN_MODE_CCM; + mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; + } + + if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + mode |= KVASER_PCIEFD_KCAN_MODE_LOM; + + mode |= KVASER_PCIEFD_KCAN_MODE_EEN; + mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; + /* Use ACK packet type */ + mode &= ~KVASER_PCIEFD_KCAN_MODE_APT; + mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + + spin_unlock_irqrestore(&can->lock, irq); +} + +static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can) +{ + u32 status; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD, + can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + + status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); + if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) { + u32 cmd; + + /* If controller is already idle, run abort, flush and reset */ + cmd = KVASER_PCIEFD_KCAN_CMD_AT; + cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; + iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); + } else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) { + u32 mode; + + /* Put controller in reset mode */ + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + mode |= KVASER_PCIEFD_KCAN_MODE_RM; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + } + + spin_unlock_irqrestore(&can->lock, irq); +} + +static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + del_timer(&can->bec_poll_timer); + + if (!completion_done(&can->flush_comp)) + kvaser_pciefd_start_controller_flush(can); + + if (!wait_for_completion_timeout(&can->flush_comp, + KVASER_PCIEFD_WAIT_TIMEOUT)) { + netdev_err(can->can.dev, "Timeout during bus on flush\n"); + return -ETIMEDOUT; + } + + spin_lock_irqsave(&can->lock, irq); + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD, + can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + spin_unlock_irqrestore(&can->lock, irq); + + if (!wait_for_completion_timeout(&can->start_comp, + KVASER_PCIEFD_WAIT_TIMEOUT)) { + netdev_err(can->can.dev, "Timeout during bus on reset\n"); + return -ETIMEDOUT; + } + /* Reset interrupt handling */ + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + + kvaser_pciefd_set_tx_irq(can); + kvaser_pciefd_setup_controller(can); + + can->can.state = CAN_STATE_ERROR_ACTIVE; + netif_wake_queue(can->can.dev); + can->bec.txerr = 0; + can->bec.rxerr = 0; + can->err_rep_cnt = 0; + + return 0; +} + +static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can) +{ + int top, trigger; + u32 pwm_ctrl; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); + top = (pwm_ctrl >> 
KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff; + + trigger = (100 * top + 50) / 100; + if (trigger < 0) + trigger = 0; + + pwm_ctrl = trigger & 0xff; + pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; + iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); + spin_unlock_irqrestore(&can->lock, irq); +} + +static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can) +{ + int top, trigger; + u32 pwm_ctrl; + unsigned long irq; + + kvaser_pciefd_pwm_stop(can); + spin_lock_irqsave(&can->lock, irq); + + /* Set frequency to 500 kHz */ + top = can->can.clock.freq / (2 * 500000) - 1; + + pwm_ctrl = top & 0xff; + pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; + iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); + + /* Set duty cycle to 95 % */ + trigger = (100 * top - 95 * (top + 1) + 50) / 100; + pwm_ctrl = trigger & 0xff; + pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; + iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); + spin_unlock_irqrestore(&can->lock, irq); +} + +static int kvaser_pciefd_open(struct net_device *netdev) +{ + int err; + struct kvaser_pciefd_can *can = netdev_priv(netdev); + + err = open_candev(netdev); + if (err) + return err; + + err = kvaser_pciefd_bus_on(can); + if (err) + return err; + + return 0; +} + +static int kvaser_pciefd_stop(struct net_device *netdev) +{ + struct kvaser_pciefd_can *can = netdev_priv(netdev); + int ret = 0; + + /* Don't interrupt ongoing flush */ + if (!completion_done(&can->flush_comp)) + kvaser_pciefd_start_controller_flush(can); + + if (!wait_for_completion_timeout(&can->flush_comp, + KVASER_PCIEFD_WAIT_TIMEOUT)) { + netdev_err(can->can.dev, "Timeout during stop\n"); + ret = -ETIMEDOUT; + } else { + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + del_timer(&can->bec_poll_timer); + } + close_candev(netdev); + + return ret; +} + +static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, + struct kvaser_pciefd_can *can, + struct sk_buff *skb) +{ + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + int packet_size; + int seq = can->echo_idx; + + memset(p, 0, sizeof(*p)); + + if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + p->header[1] |= KVASER_PCIEFD_TPACKET_SMS; + + if (cf->can_id & CAN_RTR_FLAG) + p->header[0] |= KVASER_PCIEFD_RPACKET_RTR; + + if (cf->can_id & CAN_EFF_FLAG) + p->header[0] |= KVASER_PCIEFD_RPACKET_IDE; + + p->header[0] |= cf->can_id & CAN_EFF_MASK; + p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT; + p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ; + + if (can_is_canfd_skb(skb)) { + p->header[1] |= KVASER_PCIEFD_RPACKET_FDF; + if (cf->flags & CANFD_BRS) + p->header[1] |= KVASER_PCIEFD_RPACKET_BRS; + if (cf->flags & CANFD_ESI) + p->header[1] |= KVASER_PCIEFD_RPACKET_ESI; + } + + p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK; + + packet_size = cf->len; + memcpy(p->data, cf->data, packet_size); + + return DIV_ROUND_UP(packet_size, 4); +} + +static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct kvaser_pciefd_can *can = netdev_priv(netdev); + unsigned long irq_flags; + struct kvaser_pciefd_tx_packet packet; + int nwords; + u8 count; + + if (can_dropped_invalid_skb(netdev, skb)) + return NETDEV_TX_OK; + + nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb); + + spin_lock_irqsave(&can->echo_lock, irq_flags); + + /* Prepare and save echo skb in internal slot */ + can_put_echo_skb(skb, netdev, can->echo_idx); + + /* Move echo index to the 
next slot */ + can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max; + + /* Write header to fifo */ + iowrite32(packet.header[0], + can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); + iowrite32(packet.header[1], + can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); + + if (nwords) { + u32 data_last = ((u32 *)packet.data)[nwords - 1]; + + /* Write data to fifo, except last word */ + iowrite32_rep(can->reg_base + + KVASER_PCIEFD_KCAN_FIFO_REG, packet.data, + nwords - 1); + /* Write last word to end of fifo */ + __raw_writel(data_last, can->reg_base + + KVASER_PCIEFD_KCAN_FIFO_LAST_REG); + } else { + /* Complete write to fifo */ + __raw_writel(0, can->reg_base + + KVASER_PCIEFD_KCAN_FIFO_LAST_REG); + } + + count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG); + /* No room for a new message, stop the queue until at least one + * successful transmit + */ + if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT || + can->can.echo_skb[can->echo_idx]) + netif_stop_queue(netdev); + + spin_unlock_irqrestore(&can->echo_lock, irq_flags); + + return NETDEV_TX_OK; +} + +static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data) +{ + u32 mode, test, btrn; + unsigned long irq_flags; + int ret; + struct can_bittiming *bt; + + if (data) + bt = &can->can.data_bittiming; + else + bt = &can->can.bittiming; + + btrn = ((bt->phase_seg2 - 1) & 0x1f) << + KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT | + (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) << + KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT | + ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT | + ((bt->brp - 1) & 0x1fff); + + spin_lock_irqsave(&can->lock, irq_flags); + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + + /* Put the circuit in reset mode */ + iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM, + can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + + /* Can only set bittiming if in reset mode */ + ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG, + test, test & KVASER_PCIEFD_KCAN_MODE_RM, + 0, 10); + + if (ret) { + spin_unlock_irqrestore(&can->lock, irq_flags); + return -EBUSY; + } + + if (data) + iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG); + else + iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG); + + /* Restore previous reset mode status */ + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + + spin_unlock_irqrestore(&can->lock, irq_flags); + return 0; +} + +static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev) +{ + return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false); +} + +static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev) +{ + return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true); +} + +static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode) +{ + struct kvaser_pciefd_can *can = netdev_priv(ndev); + int ret = 0; + + switch (mode) { + case CAN_MODE_START: + if (!can->can.restart_ms) + ret = kvaser_pciefd_bus_on(can); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev, + struct can_berr_counter *bec) +{ + struct kvaser_pciefd_can *can = netdev_priv(ndev); + + bec->rxerr = can->bec.rxerr; + bec->txerr = can->bec.txerr; + return 0; +} + +static void kvaser_pciefd_bec_poll_timer(struct timer_list *data) +{ + struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer); + + kvaser_pciefd_enable_err_gen(can); + kvaser_pciefd_request_status(can); + can->err_rep_cnt = 0; +} + +static const struct 
net_device_ops kvaser_pciefd_netdev_ops = { + .ndo_open = kvaser_pciefd_open, + .ndo_stop = kvaser_pciefd_stop, + .ndo_start_xmit = kvaser_pciefd_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) +{ + int i; + + for (i = 0; i < pcie->nr_channels; i++) { + struct net_device *netdev; + struct kvaser_pciefd_can *can; + u32 status, tx_npackets; + + netdev = alloc_candev(sizeof(struct kvaser_pciefd_can), + KVASER_PCIEFD_CAN_TX_MAX_COUNT); + if (!netdev) + return -ENOMEM; + + can = netdev_priv(netdev); + netdev->netdev_ops = &kvaser_pciefd_netdev_ops; + can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE + + i * KVASER_PCIEFD_KCAN_BASE_OFFSET; + + can->kv_pcie = pcie; + can->cmd_seq = 0; + can->err_rep_cnt = 0; + can->bec.txerr = 0; + can->bec.rxerr = 0; + + init_completion(&can->start_comp); + init_completion(&can->flush_comp); + timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, + 0); + + tx_npackets = ioread32(can->reg_base + + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG); + if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) & + 0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) { + dev_err(&pcie->pci->dev, + "Max Tx count is smaller than expected\n"); + + free_candev(netdev); + return -ENODEV; + } + + can->can.clock.freq = pcie->freq; + can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT; + can->echo_idx = 0; + spin_lock_init(&can->echo_lock); + spin_lock_init(&can->lock); + can->can.bittiming_const = &kvaser_pciefd_bittiming_const; + can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const; + + can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming; + can->can.do_set_data_bittiming = + kvaser_pciefd_set_data_bittiming; + + can->can.do_set_mode = kvaser_pciefd_set_mode; + can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter; + + can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_FD | + CAN_CTRLMODE_FD_NON_ISO; + + status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); + if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) { + dev_err(&pcie->pci->dev, + "CAN FD not supported as expected %d\n", i); + + free_candev(netdev); + return -ENODEV; + } + + if (status & KVASER_PCIEFD_KCAN_STAT_CAP) + can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; + + netdev->flags |= IFF_ECHO; + + SET_NETDEV_DEV(netdev, &pcie->pci->dev); + + iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | + KVASER_PCIEFD_KCAN_IRQ_TFD, + can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + + pcie->can[i] = can; + kvaser_pciefd_pwm_start(can); + } + + return 0; +} + +static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie) +{ + int i; + + for (i = 0; i < pcie->nr_channels; i++) { + int err = register_candev(pcie->can[i]->can.dev); + + if (err) { + int j; + + /* Unregister all successfully registered devices. 
*/ + for (j = 0; j < i; j++) + unregister_candev(pcie->can[j]->can.dev); + return err; + } + } + + return 0; +} + +static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie, + dma_addr_t addr, int offset) +{ + u32 word1, word2; + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT; + word2 = addr >> 32; +#else + word1 = addr; + word2 = 0; +#endif + iowrite32(word1, pcie->reg_base + offset); + iowrite32(word2, pcie->reg_base + offset + 4); +} + +static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) +{ + int i; + u32 srb_status; + dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT]; + + /* Disable the DMA */ + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); + for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) { + unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i; + + pcie->dma_data[i] = + dmam_alloc_coherent(&pcie->pci->dev, + KVASER_PCIEFD_DMA_SIZE, + &dma_addr[i], + GFP_KERNEL); + + if (!pcie->dma_data[i] || !dma_addr[i]) { + dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n", + KVASER_PCIEFD_DMA_SIZE); + return -ENOMEM; + } + + kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset); + } + + /* Reset Rx FIFO, and both DMA buffers */ + iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 | + KVASER_PCIEFD_SRB_CMD_RDB1, + pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + + srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG); + if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) { + dev_err(&pcie->pci->dev, "DMA not idle before enabling\n"); + return -EIO; + } + + /* Enable the DMA */ + iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE, + pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); + + return 0; +} + +static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie) +{ + u32 sysid, srb_status, build; + u8 sysid_nr_chan; + int ret; + + ret = kvaser_pciefd_read_cfg(pcie); + if (ret) + return ret; + + sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG); + sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff; + if (pcie->nr_channels != sysid_nr_chan) { + dev_err(&pcie->pci->dev, + "Number of channels does not match: %u vs %u\n", + pcie->nr_channels, + sysid_nr_chan); + return -ENODEV; + } + + if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS) + pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS; + + build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG); + dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n", + (sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff, + sysid & 0xff, + (build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff); + + srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG); + if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) { + dev_err(&pcie->pci->dev, + "Hardware without DMA is not supported\n"); + return -ENODEV; + } + + pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG); + pcie->freq_to_ticks_div = pcie->freq / 1000000; + if (pcie->freq_to_ticks_div == 0) + pcie->freq_to_ticks_div = 1; + + /* Turn off all loopback functionality */ + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG); + return ret; +} + +static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p, + __le32 *data) +{ + struct sk_buff *skb; + struct canfd_frame *cf; + struct can_priv *priv; + struct net_device_stats *stats; + struct skb_shared_hwtstamps *shhwtstamps; + u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + priv = 
&pcie->can[ch_id]->can; + stats = &priv->dev->stats; + + if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) { + skb = alloc_canfd_skb(priv->dev, &cf); + if (!skb) { + stats->rx_dropped++; + return -ENOMEM; + } + + if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS) + cf->flags |= CANFD_BRS; + + if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI) + cf->flags |= CANFD_ESI; + } else { + skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf); + if (!skb) { + stats->rx_dropped++; + return -ENOMEM; + } + } + + cf->can_id = p->header[0] & CAN_EFF_MASK; + if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE) + cf->can_id |= CAN_EFF_FLAG; + + cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT); + + if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, data, cf->len); + + shhwtstamps = skb_hwtstamps(skb); + + shhwtstamps->hwtstamp = + ns_to_ktime(div_u64(p->timestamp * 1000, + pcie->freq_to_ticks_div)); + + stats->rx_bytes += cf->len; + stats->rx_packets++; + + return netif_rx(skb); +} + +static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can, + struct can_frame *cf, + enum can_state new_state, + enum can_state tx_state, + enum can_state rx_state) +{ + can_change_state(can->can.dev, cf, tx_state, rx_state); + + if (new_state == CAN_STATE_BUS_OFF) { + struct net_device *ndev = can->can.dev; + unsigned long irq_flags; + + spin_lock_irqsave(&can->lock, irq_flags); + netif_stop_queue(can->can.dev); + spin_unlock_irqrestore(&can->lock, irq_flags); + + /* Prevent the CAN controller from automatically recovering from bus off */ + if (!can->can.restart_ms) { + kvaser_pciefd_start_controller_flush(can); + can_bus_off(ndev); + } + } +} + +static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p, + struct can_berr_counter *bec, + enum can_state *new_state, + enum can_state *tx_state, + enum can_state *rx_state) +{ + if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF || + p->header[0] & KVASER_PCIEFD_SPACK_IRM) + *new_state = CAN_STATE_BUS_OFF; + else if (bec->txerr >= 255 || bec->rxerr >= 255) + *new_state = CAN_STATE_BUS_OFF; + else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR) + *new_state = CAN_STATE_ERROR_PASSIVE; + else if (bec->txerr >= 128 || bec->rxerr >= 128) + *new_state = CAN_STATE_ERROR_PASSIVE; + else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR) + *new_state = CAN_STATE_ERROR_WARNING; + else if (bec->txerr >= 96 || bec->rxerr >= 96) + *new_state = CAN_STATE_ERROR_WARNING; + else + *new_state = CAN_STATE_ERROR_ACTIVE; + + *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0; + *rx_state = bec->txerr <= bec->rxerr ? 
*new_state : 0; +} + +static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, + struct kvaser_pciefd_rx_packet *p) +{ + struct can_berr_counter bec; + enum can_state old_state, new_state, tx_state, rx_state; + struct net_device *ndev = can->can.dev; + struct sk_buff *skb; + struct can_frame *cf = NULL; + struct skb_shared_hwtstamps *shhwtstamps; + struct net_device_stats *stats = &ndev->stats; + + old_state = can->can.state; + + bec.txerr = p->header[0] & 0xff; + bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff; + + kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, + &rx_state); + + skb = alloc_can_err_skb(ndev, &cf); + + if (new_state != old_state) { + kvaser_pciefd_change_state(can, cf, new_state, tx_state, + rx_state); + + if (old_state == CAN_STATE_BUS_OFF && + new_state == CAN_STATE_ERROR_ACTIVE && + can->can.restart_ms) { + can->can.can_stats.restarts++; + if (skb) + cf->can_id |= CAN_ERR_RESTARTED; + } + } + + can->err_rep_cnt++; + can->can.can_stats.bus_error++; + stats->rx_errors++; + + can->bec.txerr = bec.txerr; + can->bec.rxerr = bec.rxerr; + + if (!skb) { + stats->rx_dropped++; + return -ENOMEM; + } + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = + ns_to_ktime(div_u64(p->timestamp * 1000, + can->kv_pcie->freq_to_ticks_div)); + cf->can_id |= CAN_ERR_BUSERROR; + + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + + netif_rx(skb); + return 0; +} + +static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p) +{ + struct kvaser_pciefd_can *can; + u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + + kvaser_pciefd_rx_error_frame(can, p); + if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP) + /* Do not report more errors, until bec_poll_timer expires */ + kvaser_pciefd_disable_err_gen(can); + /* Start polling the error counters */ + mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); + return 0; +} + +static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can, + struct kvaser_pciefd_rx_packet *p) +{ + struct can_berr_counter bec; + enum can_state old_state, new_state, tx_state, rx_state; + + old_state = can->can.state; + + bec.txerr = p->header[0] & 0xff; + bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff; + + kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, + &rx_state); + + if (new_state != old_state) { + struct net_device *ndev = can->can.dev; + struct sk_buff *skb; + struct can_frame *cf; + struct skb_shared_hwtstamps *shhwtstamps; + + skb = alloc_can_err_skb(ndev, &cf); + if (!skb) { + struct net_device_stats *stats = &ndev->stats; + + stats->rx_dropped++; + return -ENOMEM; + } + + kvaser_pciefd_change_state(can, cf, new_state, tx_state, + rx_state); + + if (old_state == CAN_STATE_BUS_OFF && + new_state == CAN_STATE_ERROR_ACTIVE && + can->can.restart_ms) { + can->can.can_stats.restarts++; + cf->can_id |= CAN_ERR_RESTARTED; + } + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = + ns_to_ktime(div_u64(p->timestamp * 1000, + can->kv_pcie->freq_to_ticks_div)); + + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + + netif_rx(skb); + } + can->bec.txerr = bec.txerr; + can->bec.rxerr = bec.rxerr; + /* Check if we need to poll the error counters */ + if (bec.txerr || bec.rxerr) + mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); + + 
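/* Counters are cached above; polling continues while either one is non-zero */ +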
return 0; +} + +static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p) +{ + struct kvaser_pciefd_can *can; + u8 cmdseq; + u32 status; + u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + + status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); + cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff; + + /* Reset done, start abort and flush */ + if (p->header[0] & KVASER_PCIEFD_SPACK_IRM && + p->header[0] & KVASER_PCIEFD_SPACK_RMCD && + p->header[1] & KVASER_PCIEFD_SPACK_AUTO && + cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) && + status & KVASER_PCIEFD_KCAN_STAT_IDLE) { + u32 cmd; + + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, + can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + cmd = KVASER_PCIEFD_KCAN_CMD_AT; + cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; + iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); + + iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD, + can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET && + p->header[0] & KVASER_PCIEFD_SPACK_IRM && + cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) && + status & KVASER_PCIEFD_KCAN_STAT_IDLE) { + /* Reset detected, send end of flush if no packets are in FIFO */ + u8 count = ioread32(can->reg_base + + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff; + + if (!count) + iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH, + can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG); + } else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) && + cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) { + /* Response to status request received */ + kvaser_pciefd_handle_status_resp(can, p); + if (can->can.state != CAN_STATE_BUS_OFF && + can->can.state != CAN_STATE_ERROR_ACTIVE) { + mod_timer(&can->bec_poll_timer, + KVASER_PCIEFD_BEC_POLL_FREQ); + } + } else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD && + !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) { + /* Reset to bus on detected */ + if (!completion_done(&can->start_comp)) + complete(&can->start_comp); + } + + return 0; +} + +static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p) +{ + struct kvaser_pciefd_can *can; + u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + + /* If this is the last flushed packet, send end of flush */ + if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) { + u8 count = ioread32(can->reg_base + + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff; + + if (count == 0) + iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH, + can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG); + } else { + int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK; + int dlc = can_get_echo_skb(can->can.dev, echo_idx); + struct net_device_stats *stats = &can->can.dev->stats; + + stats->tx_bytes += dlc; + stats->tx_packets++; + + if (netif_queue_stopped(can->can.dev)) + netif_wake_queue(can->can.dev); + } + + return 0; +} + +static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can, + struct kvaser_pciefd_rx_packet *p) +{ + struct sk_buff *skb; + struct net_device_stats *stats = &can->can.dev->stats; + struct can_frame *cf; + + skb = alloc_can_err_skb(can->can.dev, &cf); + + stats->tx_errors++; + if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) { + if (skb) + cf->can_id |= CAN_ERR_LOSTARB; + can->can.can_stats.arbitration_lost++; + } else 
if (skb) { + cf->can_id |= CAN_ERR_ACK; + } + + if (skb) { + cf->can_id |= CAN_ERR_BUSERROR; + stats->rx_bytes += cf->can_dlc; + stats->rx_packets++; + netif_rx(skb); + } else { + stats->rx_dropped++; + netdev_warn(can->can.dev, "No memory left for err_skb\n"); + } +} + +static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p) +{ + struct kvaser_pciefd_can *can; + bool one_shot_fail = false; + u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + /* Ignore control packet ACK */ + if (p->header[0] & KVASER_PCIEFD_APACKET_CT) + return 0; + + if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) { + kvaser_pciefd_handle_nack_packet(can, p); + one_shot_fail = true; + } + + if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) { + netdev_dbg(can->can.dev, "Packet was flushed\n"); + } else { + int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK; + int dlc = can_get_echo_skb(can->can.dev, echo_idx); + u8 count = ioread32(can->reg_base + + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff; + + if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT && + netif_queue_stopped(can->can.dev)) + netif_wake_queue(can->can.dev); + + if (!one_shot_fail) { + struct net_device_stats *stats = &can->can.dev->stats; + + stats->tx_bytes += dlc; + stats->tx_packets++; + } + } + + return 0; +} + +static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p) +{ + struct kvaser_pciefd_can *can; + u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + + if (!completion_done(&can->flush_comp)) + complete(&can->flush_comp); + + return 0; +} + +static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos, + int dma_buf) +{ + __le32 *buffer = pcie->dma_data[dma_buf]; + __le64 timestamp; + struct kvaser_pciefd_rx_packet packet; + struct kvaser_pciefd_rx_packet *p = &packet; + u8 type; + int pos = *start_pos; + int size; + int ret = 0; + + size = le32_to_cpu(buffer[pos++]); + if (!size) { + *start_pos = 0; + return 0; + } + + p->header[0] = le32_to_cpu(buffer[pos++]); + p->header[1] = le32_to_cpu(buffer[pos++]); + + /* Read 64-bit timestamp */ + memcpy(×tamp, &buffer[pos], sizeof(__le64)); + pos += 2; + p->timestamp = le64_to_cpu(timestamp); + + type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf; + switch (type) { + case KVASER_PCIEFD_PACK_TYPE_DATA: + ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]); + if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) { + u8 data_len; + + data_len = can_dlc2len(p->header[1] >> + KVASER_PCIEFD_RPACKET_DLC_SHIFT); + pos += DIV_ROUND_UP(data_len, 4); + } + break; + + case KVASER_PCIEFD_PACK_TYPE_ACK: + ret = kvaser_pciefd_handle_ack_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_STATUS: + ret = kvaser_pciefd_handle_status_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_ERROR: + ret = kvaser_pciefd_handle_error_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK: + ret = kvaser_pciefd_handle_eack_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK: + ret = kvaser_pciefd_handle_eflush_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_ACK_DATA: + case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD: + case KVASER_PCIEFD_PACK_TYPE_TXRQ: + dev_info(&pcie->pci->dev, + "Received unexpected packet type 0x%08X\n", type); + break; + + default: + 
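/* Unknown packet type: its length cannot be determined, so parsing of this buffer has to stop */ +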
dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type); + ret = -EIO; + break; + } + + if (ret) + return ret; + + /* Position does not point to the end of the packet, + * corrupted packet size? + */ + if ((*start_pos + size) != pos) + return -EIO; + + /* Point to the next packet header, if any */ + *start_pos = pos; + + return ret; +} + +static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) +{ + int pos = 0; + int res = 0; + + do { + res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf); + } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE); + + return res; +} + +static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) +{ + u32 irq; + + irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { + kvaser_pciefd_read_buffer(pcie, 0); + /* Reset DMA buffer 0 */ + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, + pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + } + + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { + kvaser_pciefd_read_buffer(pcie, 1); + /* Reset DMA buffer 1 */ + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, + pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + } + + if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || + irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || + irq & KVASER_PCIEFD_SRB_IRQ_DUF0 || + irq & KVASER_PCIEFD_SRB_IRQ_DUF1) + dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); + + iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); + return 0; +} + +static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) +{ + u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF) + netdev_err(can->can.dev, "Tx FIFO overflow\n"); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) { + u8 count = ioread32(can->reg_base + + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff; + + if (count == 0) + iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH, + can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG); + } + + if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP) + netdev_err(can->can.dev, + "Failed to change bittiming while not in reset mode\n"); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC) + netdev_err(can->can.dev, "CAN FD frame in CAN mode\n"); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF) + netdev_err(can->can.dev, "Rx FIFO overflow\n"); + + iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + return 0; +} + +static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) +{ + struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; + u32 board_irq; + int i; + + board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG); + + if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK)) + return IRQ_NONE; + + if (board_irq & KVASER_PCIEFD_IRQ_SRB) + kvaser_pciefd_receive_irq(pcie); + + for (i = 0; i < pcie->nr_channels; i++) { + if (!pcie->can[i]) { + dev_err(&pcie->pci->dev, + "IRQ mask points to unallocated controller\n"); + break; + } + + /* Check that mask matches channel (i) IRQ mask */ + if (board_irq & (1 << i)) + kvaser_pciefd_transmit_irq(pcie->can[i]); + } + + iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG); + return IRQ_HANDLED; +} + +static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) +{ + int i; + struct kvaser_pciefd_can *can; + + for (i = 0; i < pcie->nr_channels; i++) { + can = pcie->can[i]; + if (can) { + iowrite32(0, + can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + kvaser_pciefd_pwm_stop(can); + free_candev(can->can.dev); + } + } +} + +static int kvaser_pciefd_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int err; + struct kvaser_pciefd *pcie; + + pcie = 
devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pci_set_drvdata(pdev, pcie); + pcie->pci = pdev; + + err = pci_enable_device(pdev); + if (err) + return err; + + err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME); + if (err) + goto err_disable_pci; + + pcie->reg_base = pci_iomap(pdev, 0, 0); + if (!pcie->reg_base) { + err = -ENOMEM; + goto err_release_regions; + } + + err = kvaser_pciefd_setup_board(pcie); + if (err) + goto err_pci_iounmap; + + err = kvaser_pciefd_setup_dma(pcie); + if (err) + goto err_pci_iounmap; + + pci_set_master(pdev); + + err = kvaser_pciefd_setup_can_ctrls(pcie); + if (err) + goto err_teardown_can_ctrls; + + iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1, + pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); + + iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 | + KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 | + KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1, + pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG); + + /* Reset IRQ handling, expected to be off beforehand */ + iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK, + pcie->reg_base + KVASER_PCIEFD_IRQ_REG); + iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK, + pcie->reg_base + KVASER_PCIEFD_IEN_REG); + + /* Ready the DMA buffers */ + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, + pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, + pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + + err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler, + IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie); + if (err) + goto err_teardown_can_ctrls; + + err = kvaser_pciefd_reg_candev(pcie); + if (err) + goto err_free_irq; + + return 0; + +err_free_irq: + free_irq(pcie->pci->irq, pcie); + +err_teardown_can_ctrls: + kvaser_pciefd_teardown_can_ctrls(pcie); + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); + pci_clear_master(pdev); + +err_pci_iounmap: + pci_iounmap(pdev, pcie->reg_base); + +err_release_regions: + pci_release_regions(pdev); + +err_disable_pci: + pci_disable_device(pdev); + + return err; +} + +static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie) +{ + struct kvaser_pciefd_can *can; + int i; + + for (i = 0; i < pcie->nr_channels; i++) { + can = pcie->can[i]; + if (can) { + iowrite32(0, + can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + unregister_candev(can->can.dev); + del_timer(&can->bec_poll_timer); + kvaser_pciefd_pwm_stop(can); + free_candev(can->can.dev); + } + } +} + +static void kvaser_pciefd_remove(struct pci_dev *pdev) +{ + struct kvaser_pciefd *pcie = pci_get_drvdata(pdev); + + kvaser_pciefd_remove_all_ctrls(pcie); + + /* Turn off IRQ generation */ + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); + iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK, + pcie->reg_base + KVASER_PCIEFD_IRQ_REG); + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG); + + free_irq(pcie->pci->irq, pcie); + + pci_clear_master(pdev); + pci_iounmap(pdev, pcie->reg_base); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver kvaser_pciefd = { + .name = KVASER_PCIEFD_DRV_NAME, + .id_table = kvaser_pciefd_id_table, + .probe = kvaser_pciefd_probe, + .remove = kvaser_pciefd_remove, +}; + +module_pci_driver(kvaser_pciefd); diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig index ec4b2e117f66..1ff0b7fe81d6 100644 --- a/drivers/net/can/m_can/Kconfig +++ b/drivers/net/can/m_can/Kconfig @@ -1,6 +1,24 @@ # SPDX-License-Identifier: GPL-2.0-only config CAN_M_CAN + tristate "Bosch M_CAN 
support" + ---help--- + Say Y here if you want support for Bosch M_CAN controller framework. + This is common support for devices that embed the Bosch M_CAN IP. + +config CAN_M_CAN_PLATFORM + tristate "Bosch M_CAN support for io-mapped devices" depends on HAS_IOMEM - tristate "Bosch M_CAN devices" + depends on CAN_M_CAN + ---help--- + Say Y here if you want support for IO Mapped Bosch M_CAN controller. + This support is for devices that have the Bosch M_CAN controller + IP embedded into the device and the IP is IO Mapped to the processor. + +config CAN_M_CAN_TCAN4X5X + depends on CAN_M_CAN + depends on REGMAP_SPI + tristate "TCAN4X5X M_CAN device" ---help--- - Say Y here if you want to support for Bosch M_CAN controller. + Say Y here if you want support for Texas Instruments TCAN4x5x + M_CAN controller. This device is a peripherial device that uses the + SPI bus for communication. diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile index 599ae69cb4a1..52a4a6fbe527 100644 --- a/drivers/net/can/m_can/Makefile +++ b/drivers/net/can/m_can/Makefile @@ -4,3 +4,5 @@ # obj-$(CONFIG_CAN_M_CAN) += m_can.o +obj-$(CONFIG_CAN_M_CAN_PLATFORM) += m_can_platform.o +obj-$(CONFIG_CAN_M_CAN_TCAN4X5X) += tcan4x5x.o diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index deb274a19ba0..562c8317e3aa 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1,20 +1,14 @@ -/* - * CAN bus driver for Bosch M_CAN controller - * - * Copyright (C) 2014 Freescale Semiconductor, Inc. - * Dong Aisheng <b29396@freescale.com> - * - * Bosch M_CAN user manual can be obtained from: +// SPDX-License-Identifier: GPL-2.0 +// CAN bus driver for Bosch M_CAN controller +// Copyright (C) 2014 Freescale Semiconductor, Inc. +// Dong Aisheng <b29396@freescale.com> +// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/ + +/* Bosch M_CAN user manual can be obtained from: * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/ * mcan_users_manual_v302.pdf - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
*/ -#include <linux/clk.h> -#include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> @@ -28,11 +22,7 @@ #include <linux/can/dev.h> #include <linux/pinctrl/consumer.h> -/* napi related */ -#define M_CAN_NAPI_WEIGHT 64 - -/* message ram configuration data length */ -#define MRAM_CFG_LEN 8 +#include "m_can.h" /* registers definition */ enum m_can_reg { @@ -86,28 +76,11 @@ enum m_can_reg { M_CAN_TXEFA = 0xf8, }; -/* m_can lec values */ -enum m_can_lec_type { - LEC_NO_ERROR = 0, - LEC_STUFF_ERROR, - LEC_FORM_ERROR, - LEC_ACK_ERROR, - LEC_BIT1_ERROR, - LEC_BIT0_ERROR, - LEC_CRC_ERROR, - LEC_UNUSED, -}; +/* napi related */ +#define M_CAN_NAPI_WEIGHT 64 -enum m_can_mram_cfg { - MRAM_SIDF = 0, - MRAM_XIDF, - MRAM_RXF0, - MRAM_RXF1, - MRAM_RXB, - MRAM_TXE, - MRAM_TXB, - MRAM_CFG_NUM, -}; +/* message ram configuration data length */ +#define MRAM_CFG_LEN 8 /* Core Release Register (CREL) */ #define CREL_REL_SHIFT 28 @@ -347,90 +320,85 @@ enum m_can_mram_cfg { #define TX_EVENT_MM_SHIFT TX_BUF_MM_SHIFT #define TX_EVENT_MM_MASK (0xff << TX_EVENT_MM_SHIFT) -/* address offset and element number for each FIFO/Buffer in the Message RAM */ -struct mram_cfg { - u16 off; - u8 num; -}; - -/* m_can private data structure */ -struct m_can_priv { - struct can_priv can; /* must be the first member */ - struct napi_struct napi; - struct net_device *dev; - struct device *device; - struct clk *hclk; - struct clk *cclk; - void __iomem *base; - u32 irqstatus; - int version; - - /* message ram configuration */ - void __iomem *mram_base; - struct mram_cfg mcfg[MRAM_CFG_NUM]; -}; +static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg) +{ + return cdev->ops->read_reg(cdev, reg); +} -static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg) +static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg, + u32 val) { - return readl(priv->base + reg); + cdev->ops->write_reg(cdev, reg, val); } -static inline void m_can_write(const struct m_can_priv *priv, - enum m_can_reg reg, u32 val) +static u32 m_can_fifo_read(struct m_can_classdev *cdev, + u32 fgi, unsigned int offset) { - writel(val, priv->base + reg); + u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE + + offset; + + return cdev->ops->read_fifo(cdev, addr_offset); } -static inline u32 m_can_fifo_read(const struct m_can_priv *priv, - u32 fgi, unsigned int offset) +static void m_can_fifo_write(struct m_can_classdev *cdev, + u32 fpi, unsigned int offset, u32 val) { - return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off + - fgi * RXF0_ELEMENT_SIZE + offset); + u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE + + offset; + + cdev->ops->write_fifo(cdev, addr_offset, val); } -static inline void m_can_fifo_write(const struct m_can_priv *priv, - u32 fpi, unsigned int offset, u32 val) +static inline void m_can_fifo_write_no_off(struct m_can_classdev *cdev, + u32 fpi, u32 val) { - writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off + - fpi * TXB_ELEMENT_SIZE + offset); + cdev->ops->write_fifo(cdev, fpi, val); } -static inline u32 m_can_txe_fifo_read(const struct m_can_priv *priv, - u32 fgi, - u32 offset) { - return readl(priv->mram_base + priv->mcfg[MRAM_TXE].off + - fgi * TXE_ELEMENT_SIZE + offset); +static u32 m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset) +{ + u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE + + offset; + + return cdev->ops->read_fifo(cdev, addr_offset); } -static inline 
bool m_can_tx_fifo_full(const struct m_can_priv *priv) +static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev) { - return !!(m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQF); + return !!(m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQF); } -static inline void m_can_config_endisable(const struct m_can_priv *priv, - bool enable) +void m_can_config_endisable(struct m_can_classdev *cdev, bool enable) { - u32 cccr = m_can_read(priv, M_CAN_CCCR); + u32 cccr = m_can_read(cdev, M_CAN_CCCR); u32 timeout = 10; u32 val = 0; + /* Clear the Clock stop request if it was set */ + if (cccr & CCCR_CSR) + cccr &= ~CCCR_CSR; + if (enable) { /* enable m_can configuration */ - m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT); + m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT); udelay(5); /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */ - m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); + m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); } else { - m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE)); + m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE)); } /* there's a delay for module initialization */ if (enable) val = CCCR_INIT | CCCR_CCE; - while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) { + while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) { if (timeout == 0) { - netdev_warn(priv->dev, "Failed to init module\n"); + netdev_warn(cdev->net, "Failed to init module\n"); return; } timeout--; @@ -438,21 +406,38 @@ static inline void m_can_config_endisable(const struct m_can_priv *priv, } } -static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv) +static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev) { /* Only interrupt line 0 is used in this driver */ - m_can_write(priv, M_CAN_ILE, ILE_EINT0); + m_can_write(cdev, M_CAN_ILE, ILE_EINT0); } -static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv) +static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev) { - m_can_write(priv, M_CAN_ILE, 0x0); + m_can_write(cdev, M_CAN_ILE, 0x0); +} + +static void m_can_clean(struct net_device *net) +{ + struct m_can_classdev *cdev = netdev_priv(net); + + if (cdev->tx_skb) { + int putidx = 0; + + net->stats.tx_errors++; + if (cdev->version > 30) + putidx = ((m_can_read(cdev, M_CAN_TXFQS) & + TXFQS_TFQPI_MASK) >> TXFQS_TFQPI_SHIFT); + + can_free_echo_skb(cdev->net, putidx); + cdev->tx_skb = NULL; + } } static void m_can_read_fifo(struct net_device *dev, u32 rxfs) { struct net_device_stats *stats = &dev->stats; - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); struct canfd_frame *cf; struct sk_buff *skb; u32 id, fgi, dlc; @@ -460,7 +445,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs) /* calculate the fifo get index for where to read data */ fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_SHIFT; - dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); + dlc = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DLC); if (dlc & RX_BUF_FDF) skb = alloc_canfd_skb(dev, &cf); else @@ -475,7 +460,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs) else cf->len = get_can_dlc((dlc >> 16) & 0x0F); - id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID); + id = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID); if (id & RX_BUF_XTD) cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; else @@ -494,12 +479,12 @@ static void 
m_can_read_fifo(struct net_device *dev, u32 rxfs) for (i = 0; i < cf->len; i += 4) *(u32 *)(cf->data + i) = - m_can_fifo_read(priv, fgi, + m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA(i / 4)); } /* acknowledge rx fifo 0 */ - m_can_write(priv, M_CAN_RXF0A, fgi); + m_can_write(cdev, M_CAN_RXF0A, fgi); stats->rx_packets++; stats->rx_bytes += cf->len; @@ -509,11 +494,11 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs) static int m_can_do_rx_poll(struct net_device *dev, int quota) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); u32 pkts = 0; u32 rxfs; - rxfs = m_can_read(priv, M_CAN_RXF0S); + rxfs = m_can_read(cdev, M_CAN_RXF0S); if (!(rxfs & RXFS_FFL_MASK)) { netdev_dbg(dev, "no messages in fifo0\n"); return 0; @@ -527,7 +512,7 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) quota--; pkts++; - rxfs = m_can_read(priv, M_CAN_RXF0S); + rxfs = m_can_read(cdev, M_CAN_RXF0S); } if (pkts) @@ -562,12 +547,12 @@ static int m_can_handle_lost_msg(struct net_device *dev) static int m_can_handle_lec_err(struct net_device *dev, enum m_can_lec_type lec_type) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; - priv->can.can_stats.bus_error++; + cdev->can.can_stats.bus_error++; stats->rx_errors++; /* propagate the error condition to the CAN stack */ @@ -619,47 +604,51 @@ static int m_can_handle_lec_err(struct net_device *dev, static int __m_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); unsigned int ecr; - ecr = m_can_read(priv, M_CAN_ECR); + ecr = m_can_read(cdev, M_CAN_ECR); bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; bec->txerr = (ecr & ECR_TEC_MASK) >> ECR_TEC_SHIFT; return 0; } -static int m_can_clk_start(struct m_can_priv *priv) +static int m_can_clk_start(struct m_can_classdev *cdev) { int err; - err = pm_runtime_get_sync(priv->device); + if (cdev->pm_clock_support == 0) + return 0; + + err = pm_runtime_get_sync(cdev->dev); if (err < 0) { - pm_runtime_put_noidle(priv->device); + pm_runtime_put_noidle(cdev->dev); return err; } return 0; } -static void m_can_clk_stop(struct m_can_priv *priv) +static void m_can_clk_stop(struct m_can_classdev *cdev) { - pm_runtime_put_sync(priv->device); + if (cdev->pm_clock_support) + pm_runtime_put_sync(cdev->dev); } static int m_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); int err; - err = m_can_clk_start(priv); + err = m_can_clk_start(cdev); if (err) return err; __m_can_get_berr_counter(dev, bec); - m_can_clk_stop(priv); + m_can_clk_stop(cdev); return 0; } @@ -667,7 +656,7 @@ static int m_can_get_berr_counter(const struct net_device *dev, static int m_can_handle_state_change(struct net_device *dev, enum can_state new_state) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; @@ -677,19 +666,19 @@ static int m_can_handle_state_change(struct net_device *dev, switch (new_state) { case CAN_STATE_ERROR_ACTIVE: /* error warning state */ - priv->can.can_stats.error_warning++; - priv->can.state = CAN_STATE_ERROR_WARNING; + cdev->can.can_stats.error_warning++; + 
cdev->can.state = CAN_STATE_ERROR_WARNING; break; case CAN_STATE_ERROR_PASSIVE: /* error passive state */ - priv->can.can_stats.error_passive++; - priv->can.state = CAN_STATE_ERROR_PASSIVE; + cdev->can.can_stats.error_passive++; + cdev->can.state = CAN_STATE_ERROR_PASSIVE; break; case CAN_STATE_BUS_OFF: /* bus-off state */ - priv->can.state = CAN_STATE_BUS_OFF; - m_can_disable_all_interrupts(priv); - priv->can.can_stats.bus_off++; + cdev->can.state = CAN_STATE_BUS_OFF; + m_can_disable_all_interrupts(cdev); + cdev->can.can_stats.bus_off++; can_bus_off(dev); break; default: @@ -716,7 +705,7 @@ static int m_can_handle_state_change(struct net_device *dev, case CAN_STATE_ERROR_PASSIVE: /* error passive state */ cf->can_id |= CAN_ERR_CRTL; - ecr = m_can_read(priv, M_CAN_ECR); + ecr = m_can_read(cdev, M_CAN_ECR); if (ecr & ECR_RP) cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; if (bec.txerr > 127) @@ -741,25 +730,22 @@ static int m_can_handle_state_change(struct net_device *dev, static int m_can_handle_state_errors(struct net_device *dev, u32 psr) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); int work_done = 0; - if ((psr & PSR_EW) && - (priv->can.state != CAN_STATE_ERROR_WARNING)) { + if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) { netdev_dbg(dev, "entered error warning state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_ERROR_WARNING); } - if ((psr & PSR_EP) && - (priv->can.state != CAN_STATE_ERROR_PASSIVE)) { + if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) { netdev_dbg(dev, "entered error passive state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_ERROR_PASSIVE); } - if ((psr & PSR_BO) && - (priv->can.state != CAN_STATE_BUS_OFF)) { + if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) { netdev_dbg(dev, "entered error bus off state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_BUS_OFF); @@ -794,14 +780,14 @@ static inline bool is_lec_err(u32 psr) static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus, u32 psr) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); int work_done = 0; if (irqstatus & IR_RF0L) work_done += m_can_handle_lost_msg(dev); /* handle lec errors on the bus */ - if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && + if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && is_lec_err(psr)) work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED); @@ -811,14 +797,13 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus, return work_done; } -static int m_can_poll(struct napi_struct *napi, int quota) +static int m_can_rx_handler(struct net_device *dev, int quota) { - struct net_device *dev = napi->dev; - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); int work_done = 0; u32 irqstatus, psr; - irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR); + irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR); if (!irqstatus) goto end; @@ -832,18 +817,19 @@ static int m_can_poll(struct napi_struct *napi, int quota) * whether MCAN_ECR.RP = ’1’ and MCAN_ECR.REC = 127. * In this case, reset MCAN_IR.MRAF. No further action is required. 
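+	 * This only applies to M_CAN version 3.1.x and earlier, hence the version check below.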
*/ - if ((priv->version <= 31) && (irqstatus & IR_MRAF) && - (m_can_read(priv, M_CAN_ECR) & ECR_RP)) { + if (cdev->version <= 31 && irqstatus & IR_MRAF && + m_can_read(cdev, M_CAN_ECR) & ECR_RP) { struct can_berr_counter bec; __m_can_get_berr_counter(dev, &bec); if (bec.rxerr == 127) { - m_can_write(priv, M_CAN_IR, IR_MRAF); + m_can_write(cdev, M_CAN_IR, IR_MRAF); irqstatus &= ~IR_MRAF; } } - psr = m_can_read(priv, M_CAN_PSR); + psr = m_can_read(cdev, M_CAN_PSR); + if (irqstatus & IR_ERR_STATE) work_done += m_can_handle_state_errors(dev, psr); @@ -852,13 +838,33 @@ static int m_can_poll(struct napi_struct *napi, int quota) if (irqstatus & IR_RF0N) work_done += m_can_do_rx_poll(dev, (quota - work_done)); +end: + return work_done; +} + +static int m_can_rx_peripheral(struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + + m_can_rx_handler(dev, 1); + + m_can_enable_all_interrupts(cdev); + + return 0; +} +static int m_can_poll(struct napi_struct *napi, int quota) +{ + struct net_device *dev = napi->dev; + struct m_can_classdev *cdev = netdev_priv(dev); + int work_done; + + work_done = m_can_rx_handler(dev, quota); if (work_done < quota) { napi_complete_done(napi, work_done); - m_can_enable_all_interrupts(priv); + m_can_enable_all_interrupts(cdev); } -end: return work_done; } @@ -870,11 +876,11 @@ static void m_can_echo_tx_event(struct net_device *dev) int i = 0; unsigned int msg_mark; - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; /* read tx event fifo status */ - m_can_txefs = m_can_read(priv, M_CAN_TXEFS); + m_can_txefs = m_can_read(cdev, M_CAN_TXEFS); /* Get Tx Event fifo element count */ txe_count = (m_can_txefs & TXEFS_EFFL_MASK) @@ -883,15 +889,15 @@ static void m_can_echo_tx_event(struct net_device *dev) /* Get and process all sent elements */ for (i = 0; i < txe_count; i++) { /* retrieve get index */ - fgi = (m_can_read(priv, M_CAN_TXEFS) & TXEFS_EFGI_MASK) + fgi = (m_can_read(cdev, M_CAN_TXEFS) & TXEFS_EFGI_MASK) >> TXEFS_EFGI_SHIFT; /* get message marker */ - msg_mark = (m_can_txe_fifo_read(priv, fgi, 4) & + msg_mark = (m_can_txe_fifo_read(cdev, fgi, 4) & TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT; /* ack txe element */ - m_can_write(priv, M_CAN_TXEFA, (TXEFA_EFAI_MASK & + m_can_write(cdev, M_CAN_TXEFA, (TXEFA_EFAI_MASK & (fgi << TXEFA_EFAI_SHIFT))); /* update stats */ @@ -903,17 +909,20 @@ static void m_can_echo_tx_event(struct net_device *dev) static irqreturn_t m_can_isr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; u32 ir; - ir = m_can_read(priv, M_CAN_IR); + ir = m_can_read(cdev, M_CAN_IR); if (!ir) return IRQ_NONE; /* ACK all irqs */ if (ir & IR_ALL_INT) - m_can_write(priv, M_CAN_IR, ir); + m_can_write(cdev, M_CAN_IR, ir); + + if (cdev->ops->clear_interrupts) + cdev->ops->clear_interrupts(cdev); /* schedule NAPI in case of * - rx IRQ @@ -921,12 +930,15 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) * - bus error IRQ and bus error reporting */ if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) { - priv->irqstatus = ir; - m_can_disable_all_interrupts(priv); - napi_schedule(&priv->napi); + cdev->irqstatus = ir; + m_can_disable_all_interrupts(cdev); + if (!cdev->is_peripheral) + napi_schedule(&cdev->napi); + else + m_can_rx_peripheral(dev); } - if (priv->version == 30) { + if (cdev->version == 
30) { if (ir & IR_TC) { /* Transmission Complete Interrupt*/ stats->tx_bytes += can_get_echo_skb(dev, 0); @@ -940,7 +952,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) m_can_echo_tx_event(dev); can_led_event(dev, CAN_LED_EVENT_TX); if (netif_queue_stopped(dev) && - !m_can_tx_fifo_full(priv)) + !m_can_tx_fifo_full(cdev)) netif_wake_queue(dev); } } @@ -998,9 +1010,9 @@ static const struct can_bittiming_const m_can_data_bittiming_const_31X = { static int m_can_set_bittiming(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); - const struct can_bittiming *bt = &priv->can.bittiming; - const struct can_bittiming *dbt = &priv->can.data_bittiming; + struct m_can_classdev *cdev = netdev_priv(dev); + const struct can_bittiming *bt = &cdev->can.bittiming; + const struct can_bittiming *dbt = &cdev->can.data_bittiming; u16 brp, sjw, tseg1, tseg2; u32 reg_btp; @@ -1010,9 +1022,9 @@ static int m_can_set_bittiming(struct net_device *dev) tseg2 = bt->phase_seg2 - 1; reg_btp = (brp << NBTP_NBRP_SHIFT) | (sjw << NBTP_NSJW_SHIFT) | (tseg1 << NBTP_NTSEG1_SHIFT) | (tseg2 << NBTP_NTSEG2_SHIFT); - m_can_write(priv, M_CAN_NBTP, reg_btp); + m_can_write(cdev, M_CAN_NBTP, reg_btp); - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { reg_btp = 0; brp = dbt->brp - 1; sjw = dbt->sjw - 1; @@ -1034,7 +1046,7 @@ static int m_can_set_bittiming(struct net_device *dev) /* Equation based on Bosch's M_CAN User Manual's * Transmitter Delay Compensation Section */ - tdco = (priv->can.clock.freq / 1000) * + tdco = (cdev->can.clock.freq / 1000) * ssp / dbt->bitrate; /* Max valid TDCO value is 127 */ @@ -1045,7 +1057,7 @@ static int m_can_set_bittiming(struct net_device *dev) } reg_btp |= DBTP_TDC; - m_can_write(priv, M_CAN_TDCR, + m_can_write(cdev, M_CAN_TDCR, tdco << TDCR_TDCO_SHIFT); } @@ -1054,7 +1066,7 @@ static int m_can_set_bittiming(struct net_device *dev) (tseg1 << DBTP_DTSEG1_SHIFT) | (tseg2 << DBTP_DTSEG2_SHIFT); - m_can_write(priv, M_CAN_DBTP, reg_btp); + m_can_write(cdev, M_CAN_DBTP, reg_btp); } return 0; @@ -1071,63 +1083,63 @@ static int m_can_set_bittiming(struct net_device *dev) */ static void m_can_chip_config(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); u32 cccr, test; - m_can_config_endisable(priv, true); + m_can_config_endisable(cdev, true); /* RX Buffer/FIFO Element Size 64 bytes data field */ - m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES); + m_can_write(cdev, M_CAN_RXESC, M_CAN_RXESC_64BYTES); /* Accept Non-matching Frames Into FIFO 0 */ - m_can_write(priv, M_CAN_GFC, 0x0); + m_can_write(cdev, M_CAN_GFC, 0x0); - if (priv->version == 30) { + if (cdev->version == 30) { /* only support one Tx Buffer currently */ - m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) | - priv->mcfg[MRAM_TXB].off); + m_can_write(cdev, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) | + cdev->mcfg[MRAM_TXB].off); } else { /* TX FIFO is used for newer IP Core versions */ - m_can_write(priv, M_CAN_TXBC, - (priv->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) | - (priv->mcfg[MRAM_TXB].off)); + m_can_write(cdev, M_CAN_TXBC, + (cdev->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) | + (cdev->mcfg[MRAM_TXB].off)); } /* support 64 bytes payload */ - m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES); + m_can_write(cdev, M_CAN_TXESC, TXESC_TBDS_64BYTES); /* TX Event FIFO */ - if (priv->version == 30) { - m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) | - priv->mcfg[MRAM_TXE].off); + if (cdev->version == 30) { + 
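/* As with the single TX buffer above, v3.0.x uses one TX Event FIFO element */ +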
m_can_write(cdev, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) | + cdev->mcfg[MRAM_TXE].off); } else { /* Full TX Event FIFO is used */ - m_can_write(priv, M_CAN_TXEFC, - ((priv->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT) + m_can_write(cdev, M_CAN_TXEFC, + ((cdev->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT) & TXEFC_EFS_MASK) | - priv->mcfg[MRAM_TXE].off); + cdev->mcfg[MRAM_TXE].off); } /* rx fifo configuration, blocking mode, fifo size 1 */ - m_can_write(priv, M_CAN_RXF0C, - (priv->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) | - priv->mcfg[MRAM_RXF0].off); + m_can_write(cdev, M_CAN_RXF0C, + (cdev->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) | + cdev->mcfg[MRAM_RXF0].off); - m_can_write(priv, M_CAN_RXF1C, - (priv->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) | - priv->mcfg[MRAM_RXF1].off); + m_can_write(cdev, M_CAN_RXF1C, + (cdev->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) | + cdev->mcfg[MRAM_RXF1].off); - cccr = m_can_read(priv, M_CAN_CCCR); - test = m_can_read(priv, M_CAN_TEST); + cccr = m_can_read(cdev, M_CAN_CCCR); + test = m_can_read(cdev, M_CAN_TEST); test &= ~TEST_LBCK; - if (priv->version == 30) { + if (cdev->version == 30) { /* Version 3.0.x */ cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | (CCCR_CME_MASK << CCCR_CME_SHIFT)); - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT; } else { @@ -1136,64 +1148,68 @@ static void m_can_chip_config(struct net_device *dev) CCCR_NISO); /* Only 3.2.x has NISO Bit implemented */ - if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) cccr |= CCCR_NISO; - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) cccr |= (CCCR_BRSE | CCCR_FDOE); } /* Loopback Mode */ - if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { + if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { cccr |= CCCR_TEST | CCCR_MON; test |= TEST_LBCK; } /* Enable Monitoring (all versions) */ - if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) cccr |= CCCR_MON; /* Write config */ - m_can_write(priv, M_CAN_CCCR, cccr); - m_can_write(priv, M_CAN_TEST, test); + m_can_write(cdev, M_CAN_CCCR, cccr); + m_can_write(cdev, M_CAN_TEST, test); /* Enable interrupts */ - m_can_write(priv, M_CAN_IR, IR_ALL_INT); - if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) - if (priv->version == 30) - m_can_write(priv, M_CAN_IE, IR_ALL_INT & + m_can_write(cdev, M_CAN_IR, IR_ALL_INT); + if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) + if (cdev->version == 30) + m_can_write(cdev, M_CAN_IE, IR_ALL_INT & ~(IR_ERR_LEC_30X)); else - m_can_write(priv, M_CAN_IE, IR_ALL_INT & + m_can_write(cdev, M_CAN_IE, IR_ALL_INT & ~(IR_ERR_LEC_31X)); else - m_can_write(priv, M_CAN_IE, IR_ALL_INT); + m_can_write(cdev, M_CAN_IE, IR_ALL_INT); /* route all interrupts to INT0 */ - m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0); + m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0); /* set bittiming params */ m_can_set_bittiming(dev); - m_can_config_endisable(priv, false); + m_can_config_endisable(cdev, false); + + if (cdev->ops->init) + cdev->ops->init(cdev); } static void m_can_start(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); /* basic m_can configuration */ m_can_chip_config(dev); - priv->can.state = CAN_STATE_ERROR_ACTIVE; + cdev->can.state = CAN_STATE_ERROR_ACTIVE; - m_can_enable_all_interrupts(priv); + m_can_enable_all_interrupts(cdev); } static int 
m_can_set_mode(struct net_device *dev, enum can_mode mode) { switch (mode) { case CAN_MODE_START: + m_can_clean(dev); m_can_start(dev); netif_wake_queue(dev); break; @@ -1209,20 +1225,17 @@ static int m_can_set_mode(struct net_device *dev, enum can_mode mode) * else it returns the release and step coded as: * return value = 10 * <release> + 1 * <step> */ -static int m_can_check_core_release(void __iomem *m_can_base) +static int m_can_check_core_release(struct m_can_classdev *cdev) { u32 crel_reg; u8 rel; u8 step; int res; - struct m_can_priv temp_priv = { - .base = m_can_base - }; /* Read Core Release Version and split into version number * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1; */ - crel_reg = m_can_read(&temp_priv, M_CAN_CREL); + crel_reg = m_can_read(cdev, M_CAN_CREL); rel = (u8)((crel_reg & CREL_REL_MASK) >> CREL_REL_SHIFT); step = (u8)((crel_reg & CREL_STEP_MASK) >> CREL_STEP_SHIFT); @@ -1240,152 +1253,142 @@ static int m_can_check_core_release(void __iomem *m_can_base) /* Selectable Non ISO support only in version 3.2.x * This function checks if the bit is writable. */ -static bool m_can_niso_supported(const struct m_can_priv *priv) +static bool m_can_niso_supported(struct m_can_classdev *cdev) { - u32 cccr_reg, cccr_poll; - int niso_timeout; + u32 cccr_reg, cccr_poll = 0; + int niso_timeout = -ETIMEDOUT; + int i; - m_can_config_endisable(priv, true); - cccr_reg = m_can_read(priv, M_CAN_CCCR); + m_can_config_endisable(cdev, true); + cccr_reg = m_can_read(cdev, M_CAN_CCCR); cccr_reg |= CCCR_NISO; - m_can_write(priv, M_CAN_CCCR, cccr_reg); + m_can_write(cdev, M_CAN_CCCR, cccr_reg); - niso_timeout = readl_poll_timeout((priv->base + M_CAN_CCCR), cccr_poll, - (cccr_poll == cccr_reg), 0, 10); + for (i = 0; i <= 10; i++) { + cccr_poll = m_can_read(cdev, M_CAN_CCCR); + if (cccr_poll == cccr_reg) { + niso_timeout = 0; + break; + } + + usleep_range(1, 5); + } /* Clear NISO */ cccr_reg &= ~(CCCR_NISO); - m_can_write(priv, M_CAN_CCCR, cccr_reg); + m_can_write(cdev, M_CAN_CCCR, cccr_reg); - m_can_config_endisable(priv, false); + m_can_config_endisable(cdev, false); /* return false if time out (-ETIMEDOUT), else return true */ return !niso_timeout; } -static int m_can_dev_setup(struct platform_device *pdev, struct net_device *dev, - void __iomem *addr) +static int m_can_dev_setup(struct m_can_classdev *m_can_dev) { - struct m_can_priv *priv; + struct net_device *dev = m_can_dev->net; int m_can_version; - m_can_version = m_can_check_core_release(addr); + m_can_version = m_can_check_core_release(m_can_dev); /* return if unsupported version */ if (!m_can_version) { - dev_err(&pdev->dev, "Unsupported version number: %2d", + dev_err(m_can_dev->dev, "Unsupported version number: %2d", m_can_version); return -EINVAL; } - priv = netdev_priv(dev); - netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT); + if (!m_can_dev->is_peripheral) + netif_napi_add(dev, &m_can_dev->napi, + m_can_poll, M_CAN_NAPI_WEIGHT); /* Shared properties of all M_CAN versions */ - priv->version = m_can_version; - priv->dev = dev; - priv->base = addr; - priv->can.do_set_mode = m_can_set_mode; - priv->can.do_get_berr_counter = m_can_get_berr_counter; + m_can_dev->version = m_can_version; + m_can_dev->can.do_set_mode = m_can_set_mode; + m_can_dev->can.do_get_berr_counter = m_can_get_berr_counter; /* Set M_CAN supported operations */ - priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + m_can_dev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | 
CAN_CTRLMODE_FD; /* Set properties depending on M_CAN version */ - switch (priv->version) { + switch (m_can_dev->version) { case 30: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - priv->can.bittiming_const = &m_can_bittiming_const_30X; - priv->can.data_bittiming_const = - &m_can_data_bittiming_const_30X; + m_can_dev->can.bittiming_const = m_can_dev->bit_timing ? + m_can_dev->bit_timing : &m_can_bittiming_const_30X; + + m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ? + m_can_dev->data_timing : + &m_can_data_bittiming_const_30X; break; case 31: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - priv->can.bittiming_const = &m_can_bittiming_const_31X; - priv->can.data_bittiming_const = - &m_can_data_bittiming_const_31X; + m_can_dev->can.bittiming_const = m_can_dev->bit_timing ? + m_can_dev->bit_timing : &m_can_bittiming_const_31X; + + m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ? + m_can_dev->data_timing : + &m_can_data_bittiming_const_31X; break; case 32: - priv->can.bittiming_const = &m_can_bittiming_const_31X; - priv->can.data_bittiming_const = - &m_can_data_bittiming_const_31X; - priv->can.ctrlmode_supported |= (m_can_niso_supported(priv) + m_can_dev->can.bittiming_const = m_can_dev->bit_timing ? + m_can_dev->bit_timing : &m_can_bittiming_const_31X; + + m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ? + m_can_dev->data_timing : + &m_can_data_bittiming_const_31X; + + m_can_dev->can.ctrlmode_supported |= + (m_can_niso_supported(m_can_dev) ? CAN_CTRLMODE_FD_NON_ISO : 0); break; default: - dev_err(&pdev->dev, "Unsupported version number: %2d", - priv->version); + dev_err(m_can_dev->dev, "Unsupported version number: %2d", + m_can_dev->version); return -EINVAL; } - return 0; -} - -static int m_can_open(struct net_device *dev) -{ - struct m_can_priv *priv = netdev_priv(dev); - int err; - - err = m_can_clk_start(priv); - if (err) - return err; - - /* open the can device */ - err = open_candev(dev); - if (err) { - netdev_err(dev, "failed to open can device\n"); - goto exit_disable_clks; - } - - /* register interrupt handler */ - err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name, - dev); - if (err < 0) { - netdev_err(dev, "failed to request interrupt\n"); - goto exit_irq_fail; - } - - /* start the m_can controller */ - m_can_start(dev); - - can_led_event(dev, CAN_LED_EVENT_OPEN); - napi_enable(&priv->napi); - netif_start_queue(dev); + if (m_can_dev->ops->init) + m_can_dev->ops->init(m_can_dev); return 0; - -exit_irq_fail: - close_candev(dev); -exit_disable_clks: - m_can_clk_stop(priv); - return err; } static void m_can_stop(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); /* disable all interrupts */ - m_can_disable_all_interrupts(priv); + m_can_disable_all_interrupts(cdev); /* set the state as STOPPED */ - priv->can.state = CAN_STATE_STOPPED; + cdev->can.state = CAN_STATE_STOPPED; } static int m_can_close(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); netif_stop_queue(dev); - napi_disable(&priv->napi); + + if (!cdev->is_peripheral) + napi_disable(&cdev->napi); + m_can_stop(dev); - m_can_clk_stop(priv); + m_can_clk_stop(cdev); free_irq(dev->irq, dev); + + if (cdev->is_peripheral) { + cdev->tx_skb = NULL; + destroy_workqueue(cdev->tx_wq); + cdev->tx_wq = NULL; + } 
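+	/* The candev itself was opened in m_can_open() and is released last */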
+ close_candev(dev); can_led_event(dev, CAN_LED_EVENT_STOP); @@ -1394,30 +1397,27 @@ static int m_can_close(struct net_device *dev) static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); /*get wrap around for loopback skb index */ - unsigned int wrap = priv->can.echo_skb_max; + unsigned int wrap = cdev->can.echo_skb_max; int next_idx; /* calculate next index */ next_idx = (++putidx >= wrap ? 0 : putidx); /* check if occupied */ - return !!priv->can.echo_skb[next_idx]; + return !!cdev->can.echo_skb[next_idx]; } -static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, - struct net_device *dev) +static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) { - struct m_can_priv *priv = netdev_priv(dev); - struct canfd_frame *cf = (struct canfd_frame *)skb->data; + struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data; + struct net_device *dev = cdev->net; + struct sk_buff *skb = cdev->tx_skb; u32 id, cccr, fdflags; int i; int putidx; - if (can_dropped_invalid_skb(dev, skb)) - return NETDEV_TX_OK; - /* Generate ID field for TX buffer Element */ /* Common to all supported M_CAN versions */ if (cf->can_id & CAN_EFF_FLAG) { @@ -1430,23 +1430,23 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, if (cf->can_id & CAN_RTR_FLAG) id |= TX_BUF_RTR; - if (priv->version == 30) { + if (cdev->version == 30) { netif_stop_queue(dev); /* message ram configuration */ - m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, + m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, id); + m_can_fifo_write(cdev, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16); for (i = 0; i < cf->len; i += 4) - m_can_fifo_write(priv, 0, + m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA(i / 4), *(u32 *)(cf->data + i)); can_put_echo_skb(skb, dev, 0); - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { - cccr = m_can_read(priv, M_CAN_CCCR); + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { + cccr = m_can_read(cdev, M_CAN_CCCR); cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT); if (can_is_canfd_skb(skb)) { if (cf->flags & CANFD_BRS) @@ -1458,28 +1458,35 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, } else { cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT; } - m_can_write(priv, M_CAN_CCCR, cccr); + m_can_write(cdev, M_CAN_CCCR, cccr); } - m_can_write(priv, M_CAN_TXBTIE, 0x1); - m_can_write(priv, M_CAN_TXBAR, 0x1); + m_can_write(cdev, M_CAN_TXBTIE, 0x1); + m_can_write(cdev, M_CAN_TXBAR, 0x1); /* End of xmit function for version 3.0.x */ } else { /* Transmit routine for version >= v3.1.x */ /* Check if FIFO full */ - if (m_can_tx_fifo_full(priv)) { + if (m_can_tx_fifo_full(cdev)) { /* This shouldn't happen */ netif_stop_queue(dev); netdev_warn(dev, "TX queue active although FIFO is full."); - return NETDEV_TX_BUSY; + + if (cdev->is_peripheral) { + kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } else { + return NETDEV_TX_BUSY; + } } /* get put index for frame */ - putidx = ((m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQPI_MASK) + putidx = ((m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQPI_MASK) >> TXFQS_TFQPI_SHIFT); /* Write ID Field to FIFO Element */ - m_can_fifo_write(priv, putidx, M_CAN_FIFO_ID, id); + m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, id); /* get CAN FD configuration of frame */ fdflags = 0; @@ -1494,14 +1501,14 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, * it is used in TX interrupt for * sending the correct echo frame */ - m_can_fifo_write(priv, 
putidx, M_CAN_FIFO_DLC, + m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DLC, ((putidx << TX_BUF_MM_SHIFT) & TX_BUF_MM_MASK) | (can_len2dlc(cf->len) << 16) | fdflags | TX_BUF_EFC); for (i = 0; i < cf->len; i += 4) - m_can_fifo_write(priv, putidx, M_CAN_FIFO_DATA(i / 4), + m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA(i / 4), *(u32 *)(cf->data + i)); /* Push loopback echo. @@ -1510,17 +1517,123 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, can_put_echo_skb(skb, dev, putidx); /* Enable TX FIFO element to start transfer */ - m_can_write(priv, M_CAN_TXBAR, (1 << putidx)); + m_can_write(cdev, M_CAN_TXBAR, (1 << putidx)); /* stop network queue if fifo full */ - if (m_can_tx_fifo_full(priv) || - m_can_next_echo_skb_occupied(dev, putidx)) - netif_stop_queue(dev); + if (m_can_tx_fifo_full(cdev) || + m_can_next_echo_skb_occupied(dev, putidx)) + netif_stop_queue(dev); } return NETDEV_TX_OK; } +static void m_can_tx_work_queue(struct work_struct *ws) +{ + struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev, + tx_work); + + m_can_tx_handler(cdev); + cdev->tx_skb = NULL; +} + +static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; + + if (cdev->is_peripheral) { + if (cdev->tx_skb) { + netdev_err(dev, "hard_xmit called while tx busy\n"); + return NETDEV_TX_BUSY; + } + + if (cdev->can.state == CAN_STATE_BUS_OFF) { + m_can_clean(dev); + } else { + /* Need to stop the queue to avoid numerous requests + * from being sent. Suggested improvement is to create + * a queueing mechanism that will queue the skbs and + * process them in order. + */ + cdev->tx_skb = skb; + netif_stop_queue(cdev->net); + queue_work(cdev->tx_wq, &cdev->tx_work); + } + } else { + cdev->tx_skb = skb; + return m_can_tx_handler(cdev); + } + + return NETDEV_TX_OK; +} + +static int m_can_open(struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + int err; + + err = m_can_clk_start(cdev); + if (err) + return err; + + /* open the can device */ + err = open_candev(dev); + if (err) { + netdev_err(dev, "failed to open can device\n"); + goto exit_disable_clks; + } + + /* register interrupt handler */ + if (cdev->is_peripheral) { + cdev->tx_skb = NULL; + cdev->tx_wq = alloc_workqueue("mcan_wq", + WQ_FREEZABLE | WQ_MEM_RECLAIM, 0); + if (!cdev->tx_wq) { + err = -ENOMEM; + goto out_wq_fail; + } + + INIT_WORK(&cdev->tx_work, m_can_tx_work_queue); + + err = request_threaded_irq(dev->irq, NULL, m_can_isr, + IRQF_ONESHOT | IRQF_TRIGGER_FALLING, + dev->name, dev); + } else { + err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name, + dev); + } + + if (err < 0) { + netdev_err(dev, "failed to request interrupt\n"); + goto exit_irq_fail; + } + + /* start the m_can controller */ + m_can_start(dev); + + can_led_event(dev, CAN_LED_EVENT_OPEN); + + if (!cdev->is_peripheral) + napi_enable(&cdev->napi); + + netif_start_queue(dev); + + return 0; + +exit_irq_fail: + if (cdev->is_peripheral) + destroy_workqueue(cdev->tx_wq); +out_wq_fail: + close_candev(dev); +exit_disable_clks: + m_can_clk_stop(cdev); + return err; +} + static const struct net_device_ops m_can_netdev_ops = { .ndo_open = m_can_open, .ndo_stop = m_can_close, @@ -1536,114 +1649,91 @@ static int register_m_can_dev(struct net_device *dev) return register_candev(dev); } -static void m_can_init_ram(struct m_can_priv *priv) -{ - int end, i, start; - - /* initialize the entire Message RAM in use to avoid 
possible - * ECC/parity checksum errors when reading an uninitialized buffer - */ - start = priv->mcfg[MRAM_SIDF].off; - end = priv->mcfg[MRAM_TXB].off + - priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; - for (i = start; i < end; i += 4) - writel(0x0, priv->mram_base + i); -} - -static void m_can_of_parse_mram(struct m_can_priv *priv, +static void m_can_of_parse_mram(struct m_can_classdev *cdev, const u32 *mram_config_vals) { - priv->mcfg[MRAM_SIDF].off = mram_config_vals[0]; - priv->mcfg[MRAM_SIDF].num = mram_config_vals[1]; - priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off + - priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; - priv->mcfg[MRAM_XIDF].num = mram_config_vals[2]; - priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off + - priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; - priv->mcfg[MRAM_RXF0].num = mram_config_vals[3] & + cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0]; + cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1]; + cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off + + cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; + cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2]; + cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off + + cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] & (RXFC_FS_MASK >> RXFC_FS_SHIFT); - priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off + - priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; - priv->mcfg[MRAM_RXF1].num = mram_config_vals[4] & + cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off + + cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] & (RXFC_FS_MASK >> RXFC_FS_SHIFT); - priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off + - priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; - priv->mcfg[MRAM_RXB].num = mram_config_vals[5]; - priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off + - priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; - priv->mcfg[MRAM_TXE].num = mram_config_vals[6]; - priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off + - priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; - priv->mcfg[MRAM_TXB].num = mram_config_vals[7] & + cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off + + cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXB].num = mram_config_vals[5]; + cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off + + cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; + cdev->mcfg[MRAM_TXE].num = mram_config_vals[6]; + cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off + + cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; + cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] & (TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT); - dev_dbg(priv->device, - "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", - priv->mram_base, - priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num, - priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num, - priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num, - priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num, - priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num, - priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num, - priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num); - - m_can_init_ram(priv); + dev_dbg(cdev->dev, + "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", + cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num, + cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num, + cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num, + cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num, + cdev->mcfg[MRAM_RXB].off, 
cdev->mcfg[MRAM_RXB].num,
+ cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
+ cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
}

-static int m_can_plat_probe(struct platform_device *pdev)
+void m_can_init_ram(struct m_can_classdev *cdev)
{
- struct net_device *dev;
- struct m_can_priv *priv;
- struct resource *res;
- void __iomem *addr;
- void __iomem *mram_addr;
- struct clk *hclk, *cclk;
- int irq, ret;
- struct device_node *np;
- u32 mram_config_vals[MRAM_CFG_LEN];
- u32 tx_fifo_size;
-
- np = pdev->dev.of_node;
+ int end, i, start;

- hclk = devm_clk_get(&pdev->dev, "hclk");
- cclk = devm_clk_get(&pdev->dev, "cclk");
+ /* initialize the entire Message RAM in use to avoid possible
+ * ECC/parity checksum errors when reading an uninitialized buffer
+ */
+ start = cdev->mcfg[MRAM_SIDF].off;
+ end = cdev->mcfg[MRAM_TXB].off +
+ cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;

- if (IS_ERR(hclk) || IS_ERR(cclk)) {
- dev_err(&pdev->dev, "no clock found\n");
- ret = -ENODEV;
- goto failed_ret;
- }
+ for (i = start; i < end; i += 4)
+ m_can_fifo_write_no_off(cdev, i, 0x0);
+}
+EXPORT_SYMBOL_GPL(m_can_init_ram);

- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
- addr = devm_ioremap_resource(&pdev->dev, res);
- irq = platform_get_irq_byname(pdev, "int0");
+int m_can_class_get_clocks(struct m_can_classdev *m_can_dev)
+{
+ int ret = 0;

- if (IS_ERR(addr) || irq < 0) {
- ret = -EINVAL;
- goto failed_ret;
- }
+ m_can_dev->hclk = devm_clk_get(m_can_dev->dev, "hclk");
+ m_can_dev->cclk = devm_clk_get(m_can_dev->dev, "cclk");

- /* message ram could be shared */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
- if (!res) {
+ if (IS_ERR(m_can_dev->cclk)) {
+ dev_err(m_can_dev->dev, "no clock found\n");
ret = -ENODEV;
}

- mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!mram_addr) {
- ret = -ENOMEM;
- goto failed_ret;
- }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(m_can_class_get_clocks);

- /* get message ram configuration */
- ret = of_property_read_u32_array(np, "bosch,mram-cfg",
- mram_config_vals,
- sizeof(mram_config_vals) / 4);
+struct m_can_classdev *m_can_class_allocate_dev(struct device *dev)
+{
+ struct m_can_classdev *class_dev = NULL;
+ u32 mram_config_vals[MRAM_CFG_LEN];
+ struct net_device *net_dev;
+ u32 tx_fifo_size;
+ int ret;
+
+ ret = fwnode_property_read_u32_array(dev_fwnode(dev),
+ "bosch,mram-cfg",
+ mram_config_vals,
+ sizeof(mram_config_vals) / 4);
if (ret) {
- dev_err(&pdev->dev, "Could not get Message RAM configuration.");
- goto failed_ret;
+ dev_err(dev, "Could not get Message RAM configuration.");
+ goto out;
}

/* Get TX FIFO size
@@ -1652,101 +1742,110 @@
tx_fifo_size = mram_config_vals[7];

/* allocate the m_can device */
- dev = alloc_candev(sizeof(*priv), tx_fifo_size);
- if (!dev) {
- ret = -ENOMEM;
- goto failed_ret;
+ net_dev = alloc_candev(sizeof(*class_dev), tx_fifo_size);
+ if (!net_dev) {
+ dev_err(dev, "Failed to allocate CAN device");
+ goto out;
}

- priv = netdev_priv(dev);
- dev->irq = irq;
- priv->device = &pdev->dev;
- priv->hclk = hclk;
- priv->cclk = cclk;
- priv->can.clock.freq = clk_get_rate(cclk);
- priv->mram_base = mram_addr;
+ class_dev = netdev_priv(net_dev);
+ if (!class_dev) {
+ dev_err(dev, "Failed to init netdev private data");
+ goto out;
+ }

- platform_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
+ class_dev->net = net_dev;
+ class_dev->dev = dev;
+ SET_NETDEV_DEV(net_dev, dev);

- /* Enable
clocks. Necessary to read Core Release in order to determine - * M_CAN version - */ - pm_runtime_enable(&pdev->dev); - ret = m_can_clk_start(priv); - if (ret) - goto pm_runtime_fail; + m_can_of_parse_mram(class_dev, mram_config_vals); +out: + return class_dev; +} +EXPORT_SYMBOL_GPL(m_can_class_allocate_dev); + +int m_can_class_register(struct m_can_classdev *m_can_dev) +{ + int ret; - ret = m_can_dev_setup(pdev, dev, addr); + if (m_can_dev->pm_clock_support) { + pm_runtime_enable(m_can_dev->dev); + ret = m_can_clk_start(m_can_dev); + if (ret) + goto pm_runtime_fail; + } + + ret = m_can_dev_setup(m_can_dev); if (ret) goto clk_disable; - ret = register_m_can_dev(dev); + ret = register_m_can_dev(m_can_dev->net); if (ret) { - dev_err(&pdev->dev, "registering %s failed (err=%d)\n", - KBUILD_MODNAME, ret); + dev_err(m_can_dev->dev, "registering %s failed (err=%d)\n", + m_can_dev->net->name, ret); goto clk_disable; } - m_can_of_parse_mram(priv, mram_config_vals); - - devm_can_led_init(dev); + devm_can_led_init(m_can_dev->net); - of_can_transceiver(dev); + of_can_transceiver(m_can_dev->net); - dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n", - KBUILD_MODNAME, dev->irq, priv->version); + dev_info(m_can_dev->dev, "%s device registered (irq=%d, version=%d)\n", + KBUILD_MODNAME, m_can_dev->net->irq, m_can_dev->version); /* Probe finished * Stop clocks. They will be reactivated once the M_CAN device is opened */ clk_disable: - m_can_clk_stop(priv); + m_can_clk_stop(m_can_dev); pm_runtime_fail: if (ret) { - pm_runtime_disable(&pdev->dev); - free_candev(dev); + if (m_can_dev->pm_clock_support) + pm_runtime_disable(m_can_dev->dev); + free_candev(m_can_dev->net); } -failed_ret: + return ret; } +EXPORT_SYMBOL_GPL(m_can_class_register); -static __maybe_unused int m_can_suspend(struct device *dev) +int m_can_class_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_priv *priv = netdev_priv(ndev); + struct m_can_classdev *cdev = netdev_priv(ndev); if (netif_running(ndev)) { netif_stop_queue(ndev); netif_device_detach(ndev); m_can_stop(ndev); - m_can_clk_stop(priv); + m_can_clk_stop(cdev); } pinctrl_pm_select_sleep_state(dev); - priv->can.state = CAN_STATE_SLEEPING; + cdev->can.state = CAN_STATE_SLEEPING; return 0; } +EXPORT_SYMBOL_GPL(m_can_class_suspend); -static __maybe_unused int m_can_resume(struct device *dev) +int m_can_class_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_priv *priv = netdev_priv(ndev); + struct m_can_classdev *cdev = netdev_priv(ndev); pinctrl_pm_select_default_state(dev); - priv->can.state = CAN_STATE_ERROR_ACTIVE; + cdev->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(ndev)) { int ret; - ret = m_can_clk_start(priv); + ret = m_can_clk_start(cdev); if (ret) return ret; - m_can_init_ram(priv); + m_can_init_ram(cdev); m_can_start(ndev); netif_device_attach(ndev); netif_start_queue(ndev); @@ -1754,79 +1853,19 @@ static __maybe_unused int m_can_resume(struct device *dev) return 0; } +EXPORT_SYMBOL_GPL(m_can_class_resume); -static void unregister_m_can_dev(struct net_device *dev) +void m_can_class_unregister(struct m_can_classdev *m_can_dev) { - unregister_candev(dev); -} + unregister_candev(m_can_dev->net); -static int m_can_plat_remove(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); + m_can_clk_stop(m_can_dev); - unregister_m_can_dev(dev); - - pm_runtime_disable(&pdev->dev); - - platform_set_drvdata(pdev, NULL); - - free_candev(dev); - - 
return 0; -} - -static int __maybe_unused m_can_runtime_suspend(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_priv *priv = netdev_priv(ndev); - - clk_disable_unprepare(priv->cclk); - clk_disable_unprepare(priv->hclk); - - return 0; -} - -static int __maybe_unused m_can_runtime_resume(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_priv *priv = netdev_priv(ndev); - int err; - - err = clk_prepare_enable(priv->hclk); - if (err) - return err; - - err = clk_prepare_enable(priv->cclk); - if (err) - clk_disable_unprepare(priv->hclk); - - return err; + free_candev(m_can_dev->net); } - -static const struct dev_pm_ops m_can_pmops = { - SET_RUNTIME_PM_OPS(m_can_runtime_suspend, - m_can_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume) -}; - -static const struct of_device_id m_can_of_table[] = { - { .compatible = "bosch,m_can", .data = NULL }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(of, m_can_of_table); - -static struct platform_driver m_can_plat_driver = { - .driver = { - .name = KBUILD_MODNAME, - .of_match_table = m_can_of_table, - .pm = &m_can_pmops, - }, - .probe = m_can_plat_probe, - .remove = m_can_plat_remove, -}; - -module_platform_driver(m_can_plat_driver); +EXPORT_SYMBOL_GPL(m_can_class_unregister); MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>"); +MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller"); diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h new file mode 100644 index 000000000000..49f42b50627a --- /dev/null +++ b/drivers/net/can/m_can/m_can.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* CAN bus driver for Bosch M_CAN controller + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#ifndef _CAN_M_CAN_H_ +#define _CAN_M_CAN_H_ + +#include <linux/can/core.h> +#include <linux/can/led.h> +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/freezer.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pm_runtime.h> +#include <linux/iopoll.h> +#include <linux/can/dev.h> +#include <linux/pinctrl/consumer.h> + +/* m_can lec values */ +enum m_can_lec_type { + LEC_NO_ERROR = 0, + LEC_STUFF_ERROR, + LEC_FORM_ERROR, + LEC_ACK_ERROR, + LEC_BIT1_ERROR, + LEC_BIT0_ERROR, + LEC_CRC_ERROR, + LEC_UNUSED, +}; + +enum m_can_mram_cfg { + MRAM_SIDF = 0, + MRAM_XIDF, + MRAM_RXF0, + MRAM_RXF1, + MRAM_RXB, + MRAM_TXE, + MRAM_TXB, + MRAM_CFG_NUM, +}; + +/* address offset and element number for each FIFO/Buffer in the Message RAM */ +struct mram_cfg { + u16 off; + u8 num; +}; + +struct m_can_classdev; +struct m_can_ops { + /* Device specific call backs */ + int (*clear_interrupts)(struct m_can_classdev *cdev); + u32 (*read_reg)(struct m_can_classdev *cdev, int reg); + int (*write_reg)(struct m_can_classdev *cdev, int reg, int val); + u32 (*read_fifo)(struct m_can_classdev *cdev, int addr_offset); + int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset, + int val); + int (*init)(struct m_can_classdev *cdev); +}; + +struct m_can_classdev { + struct can_priv can; + struct napi_struct napi; + struct net_device *net; + struct 
device *dev;
+ struct clk *hclk;
+ struct clk *cclk;
+
+ struct workqueue_struct *tx_wq;
+ struct work_struct tx_work;
+ struct sk_buff *tx_skb;
+
+ struct can_bittiming_const *bit_timing;
+ struct can_bittiming_const *data_timing;
+
+ struct m_can_ops *ops;
+
+ void *device_data;
+
+ int version;
+ int freq;
+ u32 irqstatus;
+
+ int pm_clock_support;
+ int is_peripheral;
+
+ struct mram_cfg mcfg[MRAM_CFG_NUM];
+};
+
+struct m_can_classdev *m_can_class_allocate_dev(struct device *dev);
+int m_can_class_register(struct m_can_classdev *cdev);
+void m_can_class_unregister(struct m_can_classdev *cdev);
+int m_can_class_get_clocks(struct m_can_classdev *cdev);
+void m_can_init_ram(struct m_can_classdev *priv);
+void m_can_config_endisable(struct m_can_classdev *priv, bool enable);
+
+int m_can_class_suspend(struct device *dev);
+int m_can_class_resume(struct device *dev);
+#endif /* _CAN_M_CAN_H_ */
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
new file mode 100644
index 000000000000..c2989e0431f2
--- /dev/null
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+// IOMapped CAN bus driver for Bosch M_CAN controller
+// Copyright (C) 2014 Freescale Semiconductor, Inc.
+// Dong Aisheng <b29396@freescale.com>
+//
+// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
+
+#include <linux/platform_device.h>
+
+#include "m_can.h"
+
+struct m_can_plat_priv {
+ void __iomem *base;
+ void __iomem *mram_base;
+};
+
+static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
+{
+ struct m_can_plat_priv *priv =
+ (struct m_can_plat_priv *)cdev->device_data;
+
+ return readl(priv->base + reg);
+}
+
+static u32 iomap_read_fifo(struct m_can_classdev *cdev, int offset)
+{
+ struct m_can_plat_priv *priv =
+ (struct m_can_plat_priv *)cdev->device_data;
+
+ return readl(priv->mram_base + offset);
+}
+
+static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val)
+{
+ struct m_can_plat_priv *priv =
+ (struct m_can_plat_priv *)cdev->device_data;
+
+ writel(val, priv->base + reg);
+
+ return 0;
+}
+
+static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, int val)
+{
+ struct m_can_plat_priv *priv =
+ (struct m_can_plat_priv *)cdev->device_data;
+
+ writel(val, priv->mram_base + offset);
+
+ return 0;
+}
+
+static struct m_can_ops m_can_plat_ops = {
+ .read_reg = iomap_read_reg,
+ .write_reg = iomap_write_reg,
+ .write_fifo = iomap_write_fifo,
+ .read_fifo = iomap_read_fifo,
+};
+
+static int m_can_plat_probe(struct platform_device *pdev)
+{
+ struct m_can_classdev *mcan_class;
+ struct m_can_plat_priv *priv;
+ struct resource *res;
+ void __iomem *addr;
+ void __iomem *mram_addr;
+ int irq, ret = 0;
+
+ mcan_class = m_can_class_allocate_dev(&pdev->dev);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mcan_class->device_data = priv;
+
+ m_can_class_get_clocks(mcan_class);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ irq = platform_get_irq_byname(pdev, "int0");
+ if (IS_ERR(addr) || irq < 0) {
+ ret = -EINVAL;
+ goto failed_ret;
+ }
+
+ /* message ram could be shared */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
+ if (!res) {
+ ret = -ENODEV;
+ goto failed_ret;
+ }
+
+ mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!mram_addr) {
+ ret = -ENOMEM;
+ goto failed_ret;
+ }
+
+ priv->base = addr;
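+ /* Message RAM may be shared with other M_CAN instances, hence the
+ * plain devm_ioremap() above instead of devm_ioremap_resource().
+ */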
+ priv->mram_base = mram_addr;
+
+ mcan_class->net->irq = irq;
+ mcan_class->pm_clock_support = 1;
+ mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk);
+ mcan_class->dev = &pdev->dev;
+
+ mcan_class->ops = &m_can_plat_ops;
+
+ mcan_class->is_peripheral = false;
+
+ platform_set_drvdata(pdev, mcan_class->net);
+
+ m_can_init_ram(mcan_class);
+
+ ret = m_can_class_register(mcan_class);
+
+failed_ret:
+ return ret;
+}
+
+static __maybe_unused int m_can_suspend(struct device *dev)
+{
+ return m_can_class_suspend(dev);
+}
+
+static __maybe_unused int m_can_resume(struct device *dev)
+{
+ return m_can_class_resume(dev);
+}
+
+static int m_can_plat_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct m_can_classdev *mcan_class = netdev_priv(dev);
+
+ m_can_class_unregister(mcan_class);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int __maybe_unused m_can_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct m_can_classdev *mcan_class = netdev_priv(ndev);
+
+ m_can_class_suspend(dev);
+
+ clk_disable_unprepare(mcan_class->cclk);
+ clk_disable_unprepare(mcan_class->hclk);
+
+ return 0;
+}
+
+static int __maybe_unused m_can_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct m_can_classdev *mcan_class = netdev_priv(ndev);
+ int err;
+
+ err = clk_prepare_enable(mcan_class->hclk);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(mcan_class->cclk);
+ if (err)
+ clk_disable_unprepare(mcan_class->hclk);
+
+ m_can_class_resume(dev);
+
+ return err;
+}
+
+static const struct dev_pm_ops m_can_pmops = {
+ SET_RUNTIME_PM_OPS(m_can_runtime_suspend,
+ m_can_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
+};
+
+static const struct of_device_id m_can_of_table[] = {
+ { .compatible = "bosch,m_can", .data = NULL },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, m_can_of_table);
+
+static struct platform_driver m_can_plat_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = m_can_of_table,
+ .pm = &m_can_pmops,
+ },
+ .probe = m_can_plat_probe,
+ .remove = m_can_plat_remove,
+};
+
+module_platform_driver(m_can_plat_driver);
+
+MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("M_CAN driver for IO Mapped Bosch controllers");
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
new file mode 100644
index 000000000000..b115b2e5333f
--- /dev/null
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0
+// SPI to CAN driver for the Texas Instruments TCAN4x5x
+// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
+
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
+
+#include "m_can.h"
+
+#define DEVICE_NAME "tcan4x5x"
+#define TCAN4X5X_EXT_CLK_DEF 40000000
+
+#define TCAN4X5X_DEV_ID0 0x00
+#define TCAN4X5X_DEV_ID1 0x04
+#define TCAN4X5X_REV 0x08
+#define TCAN4X5X_STATUS 0x0C
+#define TCAN4X5X_ERROR_STATUS 0x10
+#define TCAN4X5X_CONTROL 0x14
+
+#define TCAN4X5X_CONFIG 0x800
+#define TCAN4X5X_TS_PRESCALE 0x804
+#define TCAN4X5X_TEST_REG 0x808
+#define TCAN4X5X_INT_FLAGS 0x820
+#define TCAN4X5X_MCAN_INT_REG 0x824
+#define TCAN4X5X_INT_EN 0x830
+
+/* Interrupt bits */
+#define
TCAN4X5X_CANHCANL_INT_EN BIT(29) +#define TCAN4X5X_CANHBAT_INT_EN BIT(28) +#define TCAN4X5X_CANLGND_INT_EN BIT(27) +#define TCAN4X5X_CANBUSOPEN_INT_EN BIT(26) +#define TCAN4X5X_CANBUSGND_INT_EN BIT(25) +#define TCAN4X5X_CANBUSBAT_INT_EN BIT(24) +#define TCAN4X5X_UVSUP_INT_EN BIT(22) +#define TCAN4X5X_UVIO_INT_EN BIT(21) +#define TCAN4X5X_TSD_INT_EN BIT(19) +#define TCAN4X5X_ECCERR_INT_EN BIT(16) +#define TCAN4X5X_CANINT_INT_EN BIT(15) +#define TCAN4X5X_LWU_INT_EN BIT(14) +#define TCAN4X5X_CANSLNT_INT_EN BIT(10) +#define TCAN4X5X_CANDOM_INT_EN BIT(8) +#define TCAN4X5X_CANBUS_ERR_INT_EN BIT(5) +#define TCAN4X5X_BUS_FAULT BIT(4) +#define TCAN4X5X_MCAN_INT BIT(1) +#define TCAN4X5X_ENABLE_TCAN_INT \ + (TCAN4X5X_MCAN_INT | TCAN4X5X_BUS_FAULT | \ + TCAN4X5X_CANBUS_ERR_INT_EN | TCAN4X5X_CANINT_INT_EN) + +/* MCAN Interrupt bits */ +#define TCAN4X5X_MCAN_IR_ARA BIT(29) +#define TCAN4X5X_MCAN_IR_PED BIT(28) +#define TCAN4X5X_MCAN_IR_PEA BIT(27) +#define TCAN4X5X_MCAN_IR_WD BIT(26) +#define TCAN4X5X_MCAN_IR_BO BIT(25) +#define TCAN4X5X_MCAN_IR_EW BIT(24) +#define TCAN4X5X_MCAN_IR_EP BIT(23) +#define TCAN4X5X_MCAN_IR_ELO BIT(22) +#define TCAN4X5X_MCAN_IR_BEU BIT(21) +#define TCAN4X5X_MCAN_IR_BEC BIT(20) +#define TCAN4X5X_MCAN_IR_DRX BIT(19) +#define TCAN4X5X_MCAN_IR_TOO BIT(18) +#define TCAN4X5X_MCAN_IR_MRAF BIT(17) +#define TCAN4X5X_MCAN_IR_TSW BIT(16) +#define TCAN4X5X_MCAN_IR_TEFL BIT(15) +#define TCAN4X5X_MCAN_IR_TEFF BIT(14) +#define TCAN4X5X_MCAN_IR_TEFW BIT(13) +#define TCAN4X5X_MCAN_IR_TEFN BIT(12) +#define TCAN4X5X_MCAN_IR_TFE BIT(11) +#define TCAN4X5X_MCAN_IR_TCF BIT(10) +#define TCAN4X5X_MCAN_IR_TC BIT(9) +#define TCAN4X5X_MCAN_IR_HPM BIT(8) +#define TCAN4X5X_MCAN_IR_RF1L BIT(7) +#define TCAN4X5X_MCAN_IR_RF1F BIT(6) +#define TCAN4X5X_MCAN_IR_RF1W BIT(5) +#define TCAN4X5X_MCAN_IR_RF1N BIT(4) +#define TCAN4X5X_MCAN_IR_RF0L BIT(3) +#define TCAN4X5X_MCAN_IR_RF0F BIT(2) +#define TCAN4X5X_MCAN_IR_RF0W BIT(1) +#define TCAN4X5X_MCAN_IR_RF0N BIT(0) +#define TCAN4X5X_ENABLE_MCAN_INT \ + (TCAN4X5X_MCAN_IR_TC | TCAN4X5X_MCAN_IR_RF0N | \ + TCAN4X5X_MCAN_IR_RF1N | TCAN4X5X_MCAN_IR_RF0F | \ + TCAN4X5X_MCAN_IR_RF1F) + +#define TCAN4X5X_MRAM_START 0x8000 +#define TCAN4X5X_MCAN_OFFSET 0x1000 +#define TCAN4X5X_MAX_REGISTER 0x8fff + +#define TCAN4X5X_CLEAR_ALL_INT 0xffffffff +#define TCAN4X5X_SET_ALL_INT 0xffffffff + +#define TCAN4X5X_WRITE_CMD (0x61 << 24) +#define TCAN4X5X_READ_CMD (0x41 << 24) + +#define TCAN4X5X_MODE_SEL_MASK (BIT(7) | BIT(6)) +#define TCAN4X5X_MODE_SLEEP 0x00 +#define TCAN4X5X_MODE_STANDBY BIT(6) +#define TCAN4X5X_MODE_NORMAL BIT(7) + +#define TCAN4X5X_SW_RESET BIT(2) + +#define TCAN4X5X_MCAN_CONFIGURED BIT(5) +#define TCAN4X5X_WATCHDOG_EN BIT(3) +#define TCAN4X5X_WD_60_MS_TIMER 0 +#define TCAN4X5X_WD_600_MS_TIMER BIT(28) +#define TCAN4X5X_WD_3_S_TIMER BIT(29) +#define TCAN4X5X_WD_6_S_TIMER (BIT(28) | BIT(29)) + +struct tcan4x5x_priv { + struct regmap *regmap; + struct spi_device *spi; + struct mutex tcan4x5x_lock; /* SPI device lock */ + + struct m_can_classdev *mcan_dev; + + struct gpio_desc *reset_gpio; + struct gpio_desc *interrupt_gpio; + struct gpio_desc *device_wake_gpio; + struct gpio_desc *device_state_gpio; + struct regulator *power; + + /* Register based ip */ + int mram_start; + int reg_offset; +}; + +static struct can_bittiming_const tcan4x5x_bittiming_const = { + .name = DEVICE_NAME, + .tseg1_min = 2, + .tseg1_max = 31, + .tseg2_min = 2, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + +static struct can_bittiming_const 
tcan4x5x_data_bittiming_const = { + .name = DEVICE_NAME, + .tseg1_min = 1, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + +static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv) +{ + int wake_state = 0; + + if (priv->device_state_gpio) + wake_state = gpiod_get_value(priv->device_state_gpio); + + if (priv->device_wake_gpio && wake_state) { + gpiod_set_value(priv->device_wake_gpio, 0); + usleep_range(5, 50); + gpiod_set_value(priv->device_wake_gpio, 1); + } +} + +static int regmap_spi_gather_write(void *context, const void *reg, + size_t reg_len, const void *val, + size_t val_len) +{ + struct device *dev = context; + struct spi_device *spi = to_spi_device(dev); + struct spi_message m; + u32 addr; + struct spi_transfer t[2] = { + { .tx_buf = &addr, .len = reg_len, .cs_change = 0,}, + { .tx_buf = val, .len = val_len, }, + }; + + addr = TCAN4X5X_WRITE_CMD | (*((u16 *)reg) << 8) | val_len >> 3; + + spi_message_init(&m); + spi_message_add_tail(&t[0], &m); + spi_message_add_tail(&t[1], &m); + + return spi_sync(spi, &m); +} + +static int tcan4x5x_regmap_write(void *context, const void *data, size_t count) +{ + u16 *reg = (u16 *)(data); + const u32 *val = data + 4; + + return regmap_spi_gather_write(context, reg, 4, val, count); +} + +static int regmap_spi_async_write(void *context, + const void *reg, size_t reg_len, + const void *val, size_t val_len, + struct regmap_async *a) +{ + return -ENOTSUPP; +} + +static struct regmap_async *regmap_spi_async_alloc(void) +{ + return NULL; +} + +static int tcan4x5x_regmap_read(void *context, + const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + struct device *dev = context; + struct spi_device *spi = to_spi_device(dev); + + u32 addr = TCAN4X5X_READ_CMD | (*((u16 *)reg) << 8) | val_size >> 2; + + return spi_write_then_read(spi, &addr, reg_size, (u32 *)val, val_size); +} + +static struct regmap_bus tcan4x5x_bus = { + .write = tcan4x5x_regmap_write, + .gather_write = regmap_spi_gather_write, + .async_write = regmap_spi_async_write, + .async_alloc = regmap_spi_async_alloc, + .read = tcan4x5x_regmap_read, + .read_flag_mask = 0x00, + .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, + .val_format_endian_default = REGMAP_ENDIAN_NATIVE, +}; + +static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg) +{ + struct tcan4x5x_priv *priv = (struct tcan4x5x_priv *)cdev->device_data; + u32 val; + + tcan4x5x_check_wake(priv); + + regmap_read(priv->regmap, priv->reg_offset + reg, &val); + + return val; +} + +static u32 tcan4x5x_read_fifo(struct m_can_classdev *cdev, int addr_offset) +{ + struct tcan4x5x_priv *priv = (struct tcan4x5x_priv *)cdev->device_data; + u32 val; + + tcan4x5x_check_wake(priv); + + regmap_read(priv->regmap, priv->mram_start + addr_offset, &val); + + return val; +} + +static int tcan4x5x_write_reg(struct m_can_classdev *cdev, int reg, int val) +{ + struct tcan4x5x_priv *priv = (struct tcan4x5x_priv *)cdev->device_data; + + tcan4x5x_check_wake(priv); + + return regmap_write(priv->regmap, priv->reg_offset + reg, val); +} + +static int tcan4x5x_write_fifo(struct m_can_classdev *cdev, + int addr_offset, int val) +{ + struct tcan4x5x_priv *priv = + (struct tcan4x5x_priv *)cdev->device_data; + + tcan4x5x_check_wake(priv); + + return regmap_write(priv->regmap, priv->mram_start + addr_offset, val); +} + +static int tcan4x5x_power_enable(struct regulator *reg, int enable) +{ + if (IS_ERR_OR_NULL(reg)) + return 0; + + if (enable) + return 
regulator_enable(reg); + else + return regulator_disable(reg); +} + +static int tcan4x5x_write_tcan_reg(struct m_can_classdev *cdev, + int reg, int val) +{ + struct tcan4x5x_priv *priv = + (struct tcan4x5x_priv *)cdev->device_data; + + tcan4x5x_check_wake(priv); + + return regmap_write(priv->regmap, reg, val); +} + +static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = + (struct tcan4x5x_priv *)cdev->device_data; + int ret; + + tcan4x5x_check_wake(tcan4x5x); + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_STATUS, + TCAN4X5X_CLEAR_ALL_INT); + if (ret) + return ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_MCAN_INT_REG, + TCAN4X5X_ENABLE_MCAN_INT); + if (ret) + return ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS, + TCAN4X5X_CLEAR_ALL_INT); + if (ret) + return ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS, + TCAN4X5X_CLEAR_ALL_INT); + if (ret) + return ret; + + return ret; +} + +static int tcan4x5x_init(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = + (struct tcan4x5x_priv *)cdev->device_data; + int ret; + + tcan4x5x_check_wake(tcan4x5x); + + ret = tcan4x5x_clear_interrupts(cdev); + if (ret) + return ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_EN, + TCAN4X5X_ENABLE_TCAN_INT); + if (ret) + return ret; + + ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL); + if (ret) + return ret; + + /* Zero out the MCAN buffers */ + m_can_init_ram(cdev); + + return ret; +} + +static int tcan4x5x_parse_config(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = + (struct tcan4x5x_priv *)cdev->device_data; + + tcan4x5x->interrupt_gpio = devm_gpiod_get(cdev->dev, "data-ready", + GPIOD_IN); + if (IS_ERR(tcan4x5x->interrupt_gpio)) { + dev_err(cdev->dev, "data-ready gpio not defined\n"); + return -EINVAL; + } + + tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake", + GPIOD_OUT_HIGH); + if (IS_ERR(tcan4x5x->device_wake_gpio)) { + dev_err(cdev->dev, "device-wake gpio not defined\n"); + return -EINVAL; + } + + tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(tcan4x5x->reset_gpio)) + tcan4x5x->reset_gpio = NULL; + + tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev, + "device-state", + GPIOD_IN); + if (IS_ERR(tcan4x5x->device_state_gpio)) + tcan4x5x->device_state_gpio = NULL; + + cdev->net->irq = gpiod_to_irq(tcan4x5x->interrupt_gpio); + + tcan4x5x->power = devm_regulator_get_optional(cdev->dev, + "vsup"); + if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + return 0; +} + +static const struct regmap_config tcan4x5x_regmap = { + .reg_bits = 32, + .val_bits = 32, + .cache_type = REGCACHE_NONE, + .max_register = TCAN4X5X_MAX_REGISTER, +}; + +static struct m_can_ops tcan4x5x_ops = { + .init = tcan4x5x_init, + .read_reg = tcan4x5x_read_reg, + .write_reg = tcan4x5x_write_reg, + .write_fifo = tcan4x5x_write_fifo, + .read_fifo = tcan4x5x_read_fifo, + .clear_interrupts = tcan4x5x_clear_interrupts, +}; + +static int tcan4x5x_can_probe(struct spi_device *spi) +{ + struct tcan4x5x_priv *priv; + struct m_can_classdev *mcan_class; + int freq, ret; + + mcan_class = m_can_class_allocate_dev(&spi->dev); + priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + mcan_class->device_data = priv; + + m_can_class_get_clocks(mcan_class); + if (IS_ERR(mcan_class->cclk)) { + dev_err(&spi->dev, "no CAN 
clock source defined\n"); + freq = TCAN4X5X_EXT_CLK_DEF; + } else { + freq = clk_get_rate(mcan_class->cclk); + } + + /* Sanity check */ + if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF) + return -ERANGE; + + priv->reg_offset = TCAN4X5X_MCAN_OFFSET; + priv->mram_start = TCAN4X5X_MRAM_START; + priv->spi = spi; + priv->mcan_dev = mcan_class; + + mcan_class->pm_clock_support = 0; + mcan_class->can.clock.freq = freq; + mcan_class->dev = &spi->dev; + mcan_class->ops = &tcan4x5x_ops; + mcan_class->is_peripheral = true; + mcan_class->bit_timing = &tcan4x5x_bittiming_const; + mcan_class->data_timing = &tcan4x5x_data_bittiming_const; + + spi_set_drvdata(spi, priv); + + ret = tcan4x5x_parse_config(mcan_class); + if (ret) + goto out_clk; + + /* Configure the SPI bus */ + spi->bits_per_word = 32; + ret = spi_setup(spi); + if (ret) + goto out_clk; + + priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus, + &spi->dev, &tcan4x5x_regmap); + + mutex_init(&priv->tcan4x5x_lock); + + tcan4x5x_power_enable(priv->power, 1); + + ret = m_can_class_register(mcan_class); + if (ret) + goto out_power; + + netdev_info(mcan_class->net, "TCAN4X5X successfully initialized.\n"); + return 0; + +out_power: + tcan4x5x_power_enable(priv->power, 0); +out_clk: + if (!IS_ERR(mcan_class->cclk)) { + clk_disable_unprepare(mcan_class->cclk); + clk_disable_unprepare(mcan_class->hclk); + } + + dev_err(&spi->dev, "Probe failed, err=%d\n", ret); + return ret; +} + +static int tcan4x5x_can_remove(struct spi_device *spi) +{ + struct tcan4x5x_priv *priv = spi_get_drvdata(spi); + + tcan4x5x_power_enable(priv->power, 0); + + m_can_class_unregister(priv->mcan_dev); + + return 0; +} + +static const struct of_device_id tcan4x5x_of_match[] = { + { .compatible = "ti,tcan4x5x", }, + { } +}; +MODULE_DEVICE_TABLE(of, tcan4x5x_of_match); + +static const struct spi_device_id tcan4x5x_id_table[] = { + { + .name = "tcan4x5x", + .driver_data = 0, + }, + { } +}; +MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table); + +static struct spi_driver tcan4x5x_can_driver = { + .driver = { + .name = DEVICE_NAME, + .of_match_table = tcan4x5x_of_match, + .pm = NULL, + }, + .id_table = tcan4x5x_id_table, + .probe = tcan4x5x_can_probe, + .remove = tcan4x5x_can_remove, +}; +module_spi_driver(tcan4x5x_can_driver); + +MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>"); +MODULE_DESCRIPTION("Texas Instruments TCAN4x5x CAN driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 13e66297b65f..cf218949a8fb 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -759,7 +759,6 @@ static int rcar_can_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(&pdev->dev, "No IRQ resource\n"); err = irq; goto fail; } diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index de34a4b82d4a..edaa1ca972c1 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1652,14 +1652,12 @@ static int rcar_canfd_probe(struct platform_device *pdev) ch_irq = platform_get_irq(pdev, 0); if (ch_irq < 0) { - dev_err(&pdev->dev, "no Channel IRQ resource\n"); err = ch_irq; goto fail_dev; } g_irq = platform_get_irq(pdev, 1); if (g_irq < 0) { - dev_err(&pdev->dev, "no Global IRQ resource\n"); err = g_irq; goto fail_dev; } diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index 6b72da2f18a6..32d242dc0d9f 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig @@ 
-1,26 +1,18 @@
# SPDX-License-Identifier: GPL-2.0-only
+
menuconfig CAN_SJA1000
tristate "Philips/NXP SJA1000 devices"
depends on HAS_IOMEM

if CAN_SJA1000

-config CAN_SJA1000_ISA
- tristate "ISA Bus based legacy SJA1000 driver"
- ---help---
- This driver adds legacy support for SJA1000 chips connected to
- the ISA bus using I/O port, memory mapped or indirect access.
-
-config CAN_SJA1000_PLATFORM
- tristate "Generic Platform Bus based SJA1000 driver"
+config CAN_EMS_PCI
+ tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
+ depends on PCI
---help---
- This driver adds support for the SJA1000 chips connected to
- the "platform bus" (Linux abstraction for directly to the
- processor attached devices). Which can be found on various
- boards from Phytec (http://www.phytec.de) like the PCM027,
- PCM038. It also provides the OpenFirmware "platform bus" found
- on embedded systems with OpenFirmware bindings, e.g. if you
- have a PowerPC based system you may want to enable this option.
+ This driver is for the one, two or four channel CPC-PCI,
+ CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
+ (http://www.ems-wuensche.de).

config CAN_EMS_PCMCIA
tristate "EMS CPC-CARD Card"
@@ -29,23 +21,22 @@ config CAN_EMS_PCMCIA
This driver is for the one or two channel CPC-CARD cards from
EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).

-config CAN_EMS_PCI
- tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
+config CAN_F81601
+ tristate "Fintek F81601 PCIE to 2 CAN Controller"
depends on PCI
- ---help---
- This driver is for the one, two or four channel CPC-PCI,
- CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
- (http://www.ems-wuensche.de).
+ help
+ This driver adds support for the Fintek F81601 PCIE to 2 CAN
+ Controller. It has an internal 24MHz clock source, but this
+ can be changed by the manufacturer. Use modinfo to get the
+ usage of the module parameters. Visit http://www.fintek.com.tw
+ to get more information.

-config CAN_PEAK_PCMCIA
- tristate "PEAK PCAN-PC Card"
- depends on PCMCIA
- depends on HAS_IOPORT_MAP
+config CAN_KVASER_PCI
+ tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
+ depends on PCI
---help---
- This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
- from PEAK-System (http://www.peak-system.com). To compile this
- driver as a module, choose M here: the module will be called
- peak_pcmcia.
+ This driver is for the PCIcanx and PCIcan cards (1, 2 or
+ 4 channel) from Kvaser (http://www.kvaser.com).

config CAN_PEAK_PCI
tristate "PEAK PCAN-PCI/PCIe/miniPCI Cards"
@@ -66,12 +57,15 @@ config CAN_PEAK_PCIEC
Technik. This will also automatically select I2C and I2C_ALGO
configuration options.

-config CAN_KVASER_PCI
- tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
- depends on PCI
+config CAN_PEAK_PCMCIA
+ tristate "PEAK PCAN-PC Card"
+ depends on PCMCIA
+ depends on HAS_IOPORT_MAP
---help---
- This driver is for the PCIcanx and PCIcan cards (1, 2 or
- 4 channel) from Kvaser (http://www.kvaser.com).
+ This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
+ from PEAK-System (http://www.peak-system.com). To compile this
+ driver as a module, choose M here: the module will be called
+ peak_pcmcia.

config CAN_PLX_PCI
tristate "PLX90xx PCI-bridge based Cards"
@@ -91,6 +85,23 @@ config CAN_PLX_PCI
- Connect Tech Inc.
CANpro/104-Plus Opto (CRG001) card (http://www.connecttech.com) - ASEM CAN raw - 2 isolated CAN channels (www.asem.it) +config CAN_SJA1000_ISA + tristate "ISA Bus based legacy SJA1000 driver" + ---help--- + This driver adds legacy support for SJA1000 chips connected to + the ISA bus using I/O port, memory mapped or indirect access. + +config CAN_SJA1000_PLATFORM + tristate "Generic Platform Bus based SJA1000 driver" + ---help--- + This driver adds support for the SJA1000 chips connected to + the "platform bus" (Linux abstraction for directly to the + processor attached devices). Which can be found on various + boards from Phytec (http://www.phytec.de) like the PCM027, + PCM038. It also provides the OpenFirmware "platform bus" found + on embedded systems with OpenFirmware bindings, e.g. if you + have a PowerPC based system you may want to enable this option. + config CAN_TSCAN1 tristate "TS-CAN1 PC104 boards" depends on ISA diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile index 9253aaf9e739..500ce1dddaec 100644 --- a/drivers/net/can/sja1000/Makefile +++ b/drivers/net/can/sja1000/Makefile @@ -3,13 +3,14 @@ # Makefile for the SJA1000 CAN controller drivers. # -obj-$(CONFIG_CAN_SJA1000) += sja1000.o -obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o -obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o -obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o +obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o +obj-$(CONFIG_CAN_F81601) += f81601.o obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o -obj-$(CONFIG_CAN_PEAK_PCMCIA) += peak_pcmcia.o obj-$(CONFIG_CAN_PEAK_PCI) += peak_pci.o +obj-$(CONFIG_CAN_PEAK_PCMCIA) += peak_pcmcia.o obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o +obj-$(CONFIG_CAN_SJA1000) += sja1000.o +obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o +obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o obj-$(CONFIG_CAN_TSCAN1) += tscan1.o diff --git a/drivers/net/can/sja1000/f81601.c b/drivers/net/can/sja1000/f81601.c new file mode 100644 index 000000000000..362a9d4f44d5 --- /dev/null +++ b/drivers/net/can/sja1000/f81601.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Fintek F81601 PCIE to 2 CAN controller driver + * + * Copyright (C) 2019 Peter Hong <peter_hong@fintek.com.tw> + * Copyright (C) 2019 Linux Foundation + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/can/dev.h> +#include <linux/io.h> +#include <linux/version.h> + +#include "sja1000.h" + +#define F81601_PCI_MAX_CHAN 2 + +#define F81601_DECODE_REG 0x209 +#define F81601_IO_MODE BIT(7) +#define F81601_MEM_MODE BIT(6) +#define F81601_CFG_MODE BIT(5) +#define F81601_CAN2_INTERNAL_CLK BIT(3) +#define F81601_CAN1_INTERNAL_CLK BIT(2) +#define F81601_CAN2_EN BIT(1) +#define F81601_CAN1_EN BIT(0) + +#define F81601_TRAP_REG 0x20a +#define F81601_CAN2_HAS_EN BIT(4) + +struct f81601_pci_card { + void __iomem *addr; + spinlock_t lock; /* use this spin lock only for write access */ + struct pci_dev *dev; + struct net_device *net_dev[F81601_PCI_MAX_CHAN]; +}; + +static const struct pci_device_id f81601_pci_tbl[] = { + { PCI_DEVICE(0x1c29, 0x1703) }, + { /* sentinel */ }, +}; + +MODULE_DEVICE_TABLE(pci, f81601_pci_tbl); + +static bool internal_clk = true; +module_param(internal_clk, bool, 0444); +MODULE_PARM_DESC(internal_clk, "Use internal clock, default true (24MHz)"); + +static unsigned int external_clk; 
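+/* External clock rate in Hz; the SJA1000 core is clocked at half this
+ * rate (see f81601_pci_probe() below). Only used when internal_clk is
+ * false.
+ */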
+module_param(external_clk, uint, 0444);
+MODULE_PARM_DESC(external_clk, "External clock when internal_clk disabled");
+
+static u8 f81601_pci_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return readb(priv->reg_base + port);
+}
+
+static void f81601_pci_write_reg(const struct sja1000_priv *priv, int port,
+ u8 val)
+{
+ struct f81601_pci_card *card = priv->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ writeb(val, priv->reg_base + port);
+ readb(priv->reg_base);
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+static void f81601_pci_remove(struct pci_dev *pdev)
+{
+ struct f81601_pci_card *card = pci_get_drvdata(pdev);
+ struct net_device *dev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(card->net_dev); i++) {
+ dev = card->net_dev[i];
+ if (!dev)
+ continue;
+
+ dev_info(&pdev->dev, "%s: Removing %s\n", __func__, dev->name);
+
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
+ }
+}
+
+/* Probe F81601 based device for the SJA1000 chips and register each
+ * available CAN channel to SJA1000 Socket-CAN subsystem.
+ */
+static int f81601_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct sja1000_priv *priv;
+ struct net_device *dev;
+ struct f81601_pci_card *card;
+ int err, i, count;
+ u8 tmp;
+
+ if (pcim_enable_device(pdev) < 0) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return -ENODEV;
+ }
+
+ dev_info(&pdev->dev, "Detected card at slot #%i\n",
+ PCI_SLOT(pdev->devfn));
+
+ card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->dev = pdev;
+ spin_lock_init(&card->lock);
+
+ pci_set_drvdata(pdev, card);
+
+ tmp = F81601_IO_MODE | F81601_MEM_MODE | F81601_CFG_MODE |
+ F81601_CAN2_EN | F81601_CAN1_EN;
+
+ if (internal_clk) {
+ tmp |= F81601_CAN2_INTERNAL_CLK | F81601_CAN1_INTERNAL_CLK;
+
+ dev_info(&pdev->dev,
+ "F81601 running with internal clock: 24MHz\n");
+ } else {
+ dev_info(&pdev->dev,
+ "F81601 running with external clock: %dMHz\n",
+ external_clk / 1000000);
+ }
+
+ pci_write_config_byte(pdev, F81601_DECODE_REG, tmp);
+
+ card->addr = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
+
+ if (!card->addr) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "%s: Failed to remap BAR\n", __func__);
+ goto failure_cleanup;
+ }
+
+ /* read the CAN2_HW_EN strap pin to detect how many CAN buses we have */
+ count = ARRAY_SIZE(card->net_dev);
+ pci_read_config_byte(pdev, F81601_TRAP_REG, &tmp);
+ if (!(tmp & F81601_CAN2_HAS_EN))
+ count = 1;
+
+ for (i = 0; i < count; i++) {
+ dev = alloc_sja1000dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto failure_cleanup;
+ }
+
+ priv = netdev_priv(dev);
+ priv->priv = card;
+ priv->irq_flags = IRQF_SHARED;
+ priv->reg_base = card->addr + 0x80 * i;
+ priv->read_reg = f81601_pci_read_reg;
+ priv->write_reg = f81601_pci_write_reg;
+
+ if (internal_clk)
+ priv->can.clock.freq = 24000000 / 2;
+ else
+ priv->can.clock.freq = external_clk / 2;
+
+ priv->ocr = OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL;
+ priv->cdr = CDR_CBP;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->dev_id = i;
+ dev->irq = pdev->irq;
+
+ /* Register SJA1000 device */
+ err = register_sja1000dev(dev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "%s: Registering device failed: %x\n", __func__,
+ err);
+ free_sja1000dev(dev);
+ goto failure_cleanup;
+ }
+
+ card->net_dev[i] = dev;
+ dev_info(&pdev->dev, "Channel #%d, %s at 0x%p, irq %d\n", i,
+ dev->name, priv->reg_base, dev->irq);
+ }
+
+ return 0;
+
+ failure_cleanup:
+ dev_err(&pdev->dev, "%s: failed: %d. 
Cleaning Up.\n", __func__, err); + f81601_pci_remove(pdev); + + return err; +} + +static struct pci_driver f81601_pci_driver = { + .name = "f81601", + .id_table = f81601_pci_tbl, + .probe = f81601_pci_probe, + .remove = f81601_pci_remove, +}; + +MODULE_DESCRIPTION("Fintek F81601 PCIE to 2 CANBUS adaptor driver"); +MODULE_AUTHOR("Peter Hong <peter_hong@fintek.com.tw>"); +MODULE_LICENSE("GPL v2"); + +module_pci_driver(f81601_pci_driver); diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 093fc9a529f0..f4cd88196404 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -787,7 +787,6 @@ static int sun4ican_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(&pdev->dev, "could not get a valid irq\n"); err = -ENODEV; goto exit; } diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index db6ea936dc3f..b62f75fa03f0 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -5,6 +5,7 @@ * specs for the same is available at <http://www.ti.com> * * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2019 Jeroen Hofstee <jhofstee@victronenergy.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -34,6 +35,7 @@ #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/led.h> +#include <linux/can/rx-offload.h> #define DRV_NAME "ti_hecc" #define HECC_MODULE_VERSION "0.7" @@ -63,29 +65,15 @@ MODULE_VERSION(HECC_MODULE_VERSION); #define HECC_TX_PRIO_MASK (MAX_TX_PRIO << HECC_MB_TX_SHIFT) #define HECC_TX_MB_MASK (HECC_MAX_TX_MBOX - 1) #define HECC_TX_MASK ((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK) -#define HECC_TX_MBOX_MASK (~(BIT(HECC_MAX_TX_MBOX) - 1)) -#define HECC_DEF_NAPI_WEIGHT HECC_MAX_RX_MBOX -/* - * Important Note: RX mailbox configuration - * RX mailboxes are further logically split into two - main and buffer - * mailboxes. The goal is to get all packets into main mailboxes as - * driven by mailbox number and receive priority (higher to lower) and - * buffer mailboxes are used to receive pkts while main mailboxes are being - * processed. This ensures in-order packet reception. - * - * Here are the recommended values for buffer mailbox. Note that RX mailboxes - * start after TX mailboxes: +/* RX mailbox configuration * - * HECC_MAX_RX_MBOX HECC_RX_BUFFER_MBOX No of buffer mailboxes - * 28 12 8 - * 16 20 4 + * The remaining mailboxes are used for reception and are delivered + * based on their timestamp, to avoid a hardware race when CANME is + * changed while CAN-bus traffic is being received. 
*/ - #define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX) -#define HECC_RX_BUFFER_MBOX 12 /* as per table above */ #define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1) -#define HECC_RX_HIGH_MBOX_MASK (~(BIT(HECC_RX_BUFFER_MBOX) - 1)) /* TI HECC module registers */ #define HECC_CANME 0x0 /* Mailbox enable */ @@ -117,6 +105,9 @@ MODULE_VERSION(HECC_MODULE_VERSION); #define HECC_CANTIOCE 0x68 /* SCC only:Enhanced TX I/O control */ #define HECC_CANRIOCE 0x6C /* SCC only:Enhanced RX I/O control */ +/* TI HECC RAM registers */ +#define HECC_CANMOTS 0x80 /* Message object time stamp */ + /* Mailbox registers */ #define HECC_CANMID 0x0 #define HECC_CANMCF 0x4 @@ -193,7 +184,7 @@ static const struct can_bittiming_const ti_hecc_bittiming_const = { struct ti_hecc_priv { struct can_priv can; /* MUST be first member/field */ - struct napi_struct napi; + struct can_rx_offload offload; struct net_device *ndev; struct clk *clk; void __iomem *base; @@ -203,7 +194,6 @@ struct ti_hecc_priv { spinlock_t mbx_lock; /* CANME register needs protection */ u32 tx_head; u32 tx_tail; - u32 rx_next; struct regulator *reg_xceiver; }; @@ -227,6 +217,11 @@ static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val) __raw_writel(val, priv->hecc_ram + mbxno * 4); } +static inline u32 hecc_read_stamp(struct ti_hecc_priv *priv, u32 mbxno) +{ + return __raw_readl(priv->hecc_ram + HECC_CANMOTS + mbxno * 4); +} + static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno, u32 reg, u32 val) { @@ -375,7 +370,6 @@ static void ti_hecc_start(struct net_device *ndev) ti_hecc_reset(ndev); priv->tx_head = priv->tx_tail = HECC_TX_MASK; - priv->rx_next = HECC_RX_FIRST_MBOX; /* Enable local and global acceptance mask registers */ hecc_write(priv, HECC_CANGAM, HECC_SET_REG); @@ -526,21 +520,17 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } -static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno) +static inline struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload) { - struct net_device_stats *stats = &priv->ndev->stats; - struct can_frame *cf; - struct sk_buff *skb; - u32 data, mbx_mask; - unsigned long flags; + return container_of(offload, struct ti_hecc_priv, offload); +} - skb = alloc_can_skb(priv->ndev, &cf); - if (!skb) { - if (printk_ratelimit()) - netdev_err(priv->ndev, - "ti_hecc_rx_pkt: alloc_can_skb() failed\n"); - return -ENOMEM; - } +static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload, + struct can_frame *cf, + u32 *timestamp, unsigned int mbxno) +{ + struct ti_hecc_priv *priv = rx_offload_to_priv(offload); + u32 data, mbx_mask; mbx_mask = BIT(mbxno); data = hecc_read_mbx(priv, mbxno, HECC_CANMID); @@ -558,100 +548,19 @@ static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno) data = hecc_read_mbx(priv, mbxno, HECC_CANMDH); *(__be32 *)(cf->data + 4) = cpu_to_be32(data); } - spin_lock_irqsave(&priv->mbx_lock, flags); - hecc_clear_bit(priv, HECC_CANME, mbx_mask); - hecc_write(priv, HECC_CANRMP, mbx_mask); - /* enable mailbox only if it is part of rx buffer mailboxes */ - if (priv->rx_next < HECC_RX_BUFFER_MBOX) - hecc_set_bit(priv, HECC_CANME, mbx_mask); - spin_unlock_irqrestore(&priv->mbx_lock, flags); - - stats->rx_bytes += cf->can_dlc; - can_led_event(priv->ndev, CAN_LED_EVENT_RX); - netif_receive_skb(skb); - stats->rx_packets++; - - return 0; -} - -/* - * ti_hecc_rx_poll - HECC receive pkts - * - * The receive mailboxes start from highest numbered mailbox till last xmit - * 
mailbox. On CAN frame reception the hardware places the data into highest - * numbered mailbox that matches the CAN ID filter. Since all receive mailboxes - * have same filtering (ALL CAN frames) packets will arrive in the highest - * available RX mailbox and we need to ensure in-order packet reception. - * - * To ensure the packets are received in the right order we logically divide - * the RX mailboxes into main and buffer mailboxes. Packets are received as per - * mailbox priotity (higher to lower) in the main bank and once it is full we - * disable further reception into main mailboxes. While the main mailboxes are - * processed in NAPI, further packets are received in buffer mailboxes. - * - * We maintain a RX next mailbox counter to process packets and once all main - * mailboxe packets are passed to the upper stack we enable all of them but - * continue to process packets received in buffer mailboxes. With each packet - * received from buffer mailbox we enable it immediately so as to handle the - * overflow from higher mailboxes. - */ -static int ti_hecc_rx_poll(struct napi_struct *napi, int quota) -{ - struct net_device *ndev = napi->dev; - struct ti_hecc_priv *priv = netdev_priv(ndev); - u32 num_pkts = 0; - u32 mbx_mask; - unsigned long pending_pkts, flags; - - if (!netif_running(ndev)) - return 0; - - while ((pending_pkts = hecc_read(priv, HECC_CANRMP)) && - num_pkts < quota) { - mbx_mask = BIT(priv->rx_next); /* next rx mailbox to process */ - if (mbx_mask & pending_pkts) { - if (ti_hecc_rx_pkt(priv, priv->rx_next) < 0) - return num_pkts; - ++num_pkts; - } else if (priv->rx_next > HECC_RX_BUFFER_MBOX) { - break; /* pkt not received yet */ - } - --priv->rx_next; - if (priv->rx_next == HECC_RX_BUFFER_MBOX) { - /* enable high bank mailboxes */ - spin_lock_irqsave(&priv->mbx_lock, flags); - mbx_mask = hecc_read(priv, HECC_CANME); - mbx_mask |= HECC_RX_HIGH_MBOX_MASK; - hecc_write(priv, HECC_CANME, mbx_mask); - spin_unlock_irqrestore(&priv->mbx_lock, flags); - } else if (priv->rx_next == HECC_MAX_TX_MBOX - 1) { - priv->rx_next = HECC_RX_FIRST_MBOX; - break; - } - } - /* Enable packet interrupt if all pkts are handled */ - if (hecc_read(priv, HECC_CANRMP) == 0) { - napi_complete(napi); - /* Re-enable RX mailbox interrupts */ - mbx_mask = hecc_read(priv, HECC_CANMIM); - mbx_mask |= HECC_TX_MBOX_MASK; - hecc_write(priv, HECC_CANMIM, mbx_mask); - } else { - /* repoll is done only if whole budget is used */ - num_pkts = quota; - } + *timestamp = hecc_read_stamp(priv, mbxno); - return num_pkts; + return 1; } static int ti_hecc_error(struct net_device *ndev, int int_status, int err_status) { struct ti_hecc_priv *priv = netdev_priv(ndev); - struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; + u32 timestamp; /* propagate the error condition to the can stack */ skb = alloc_can_err_skb(ndev, &cf); @@ -732,9 +641,8 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, } } - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); + timestamp = hecc_read(priv, HECC_CANLNT); + can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); return 0; } @@ -744,8 +652,8 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) struct net_device *ndev = (struct net_device *)dev_id; struct ti_hecc_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; - u32 mbxno, mbx_mask, int_status, err_status; - unsigned long ack, flags; + u32 mbxno, mbx_mask, int_status, err_status, stamp; + unsigned long flags, 
rx_pending; int_status = hecc_read(priv, (priv->use_hecc1int) ? HECC_CANGIF1 : HECC_CANGIF0); @@ -769,11 +677,11 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) spin_lock_irqsave(&priv->mbx_lock, flags); hecc_clear_bit(priv, HECC_CANME, mbx_mask); spin_unlock_irqrestore(&priv->mbx_lock, flags); - stats->tx_bytes += hecc_read_mbx(priv, mbxno, - HECC_CANMCF) & 0xF; + stamp = hecc_read_stamp(priv, mbxno); + stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload, + mbxno, stamp); stats->tx_packets++; can_led_event(ndev, CAN_LED_EVENT_TX); - can_get_echo_skb(ndev, mbxno); --priv->tx_tail; } @@ -784,12 +692,11 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK))) netif_wake_queue(ndev); - /* Disable RX mailbox interrupts and let NAPI reenable them */ - if (hecc_read(priv, HECC_CANRMP)) { - ack = hecc_read(priv, HECC_CANMIM); - ack &= BIT(HECC_MAX_TX_MBOX) - 1; - hecc_write(priv, HECC_CANMIM, ack); - napi_schedule(&priv->napi); + /* offload RX mailboxes and let NAPI deliver them */ + while ((rx_pending = hecc_read(priv, HECC_CANRMP))) { + can_rx_offload_irq_offload_timestamp(&priv->offload, + rx_pending); + hecc_write(priv, HECC_CANRMP, rx_pending); } } @@ -831,7 +738,7 @@ static int ti_hecc_open(struct net_device *ndev) can_led_event(ndev, CAN_LED_EVENT_OPEN); ti_hecc_start(ndev); - napi_enable(&priv->napi); + can_rx_offload_enable(&priv->offload); netif_start_queue(ndev); return 0; @@ -842,7 +749,7 @@ static int ti_hecc_close(struct net_device *ndev) struct ti_hecc_priv *priv = netdev_priv(ndev); netif_stop_queue(ndev); - napi_disable(&priv->napi); + can_rx_offload_disable(&priv->offload); ti_hecc_stop(ndev); free_irq(ndev->irq, ndev); close_candev(ndev); @@ -962,8 +869,6 @@ static int ti_hecc_probe(struct platform_device *pdev) goto probe_exit_candev; } priv->can.clock.freq = clk_get_rate(priv->clk); - netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll, - HECC_DEF_NAPI_WEIGHT); err = clk_prepare_enable(priv->clk); if (err) { @@ -971,10 +876,19 @@ static int ti_hecc_probe(struct platform_device *pdev) goto probe_exit_clk; } + priv->offload.mailbox_read = ti_hecc_mailbox_read; + priv->offload.mb_first = HECC_RX_FIRST_MBOX; + priv->offload.mb_last = HECC_MAX_TX_MBOX; + err = can_rx_offload_add_timestamp(ndev, &priv->offload); + if (err) { + dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n"); + goto probe_exit_clk; + } + err = register_candev(ndev); if (err) { dev_err(&pdev->dev, "register_candev() failed\n"); - goto probe_exit_clk; + goto probe_exit_offload; } devm_can_led_init(ndev); @@ -984,6 +898,8 @@ static int ti_hecc_probe(struct platform_device *pdev) return 0; +probe_exit_offload: + can_rx_offload_del(&priv->offload); probe_exit_clk: clk_put(priv->clk); probe_exit_candev: @@ -1000,6 +916,7 @@ static int ti_hecc_remove(struct platform_device *pdev) unregister_candev(ndev); clk_disable_unprepare(priv->clk); clk_put(priv->clk); + can_rx_offload_del(&priv->offload); free_candev(ndev); return 0; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index c89c7d4900d7..0f1d3e807d63 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -643,8 +643,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, return err; } - netdev = alloc_candev(sizeof(*priv) + - dev->max_tx_urbs * sizeof(*priv->tx_contexts), + netdev = alloc_candev(struct_size(priv, tx_contexts, dev->max_tx_urbs), 
dev->max_tx_urbs); if (!netdev) { dev_err(&dev->intf->dev, "Cannot alloc candev\n"); diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 63203ff452b5..bd95cfaff857 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -50,6 +50,10 @@ enum xcan_reg { XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */ /* only on CAN FD cores */ + XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate + * Prescalar + */ + XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */ XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */ XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */ XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */ @@ -62,6 +66,8 @@ enum xcan_reg { #define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04) #define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08) #define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C) +#define XCANFD_FRAME_DW_OFFSET(frame_base, n) (((frame_base) + 0x08) + \ + ((n) * XCAN_CANFD_FRAME_SIZE)) #define XCAN_CANFD_FRAME_SIZE 0x48 #define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \ @@ -120,6 +126,8 @@ enum xcan_reg { #define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */ #define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */ #define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */ +#define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */ +#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */ /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */ #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ @@ -133,6 +141,7 @@ enum xcan_reg { /* CAN frame length constants */ #define XCAN_FRAME_MAX_DATA_LEN 8 +#define XCANFD_DW_BYTES 4 #define XCAN_TIMEOUT (1 * HZ) /* TX-FIFO-empty interrupt available */ @@ -149,7 +158,15 @@ enum xcan_reg { #define XCAN_FLAG_RX_FIFO_MULTI 0x0010 #define XCAN_FLAG_CANFD_2 0x0020 +enum xcan_ip_type { + XAXI_CAN = 0, + XZYNQ_CANPS, + XAXI_CANFD, + XAXI_CANFD_2_0, +}; + struct xcan_devtype_data { + enum xcan_ip_type cantype; unsigned int flags; const struct can_bittiming_const *bittiming_const; const char *bus_clk_name; @@ -183,7 +200,7 @@ struct xcan_priv { struct napi_struct napi; u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg); void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg, - u32 val); + u32 val); struct device *dev; void __iomem *reg_base; unsigned long irq_flags; @@ -205,6 +222,7 @@ static const struct can_bittiming_const xcan_bittiming_const = { .brp_inc = 1, }; +/* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */ static const struct can_bittiming_const xcan_bittiming_const_canfd = { .name = DRIVER_NAME, .tseg1_min = 1, @@ -217,6 +235,20 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = { .brp_inc = 1, }; +/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */ +static struct can_bittiming_const xcan_data_bittiming_const_canfd = { + .name = DRIVER_NAME, + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 8, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + +/* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { .name = DRIVER_NAME, .tseg1_min = 1, @@ -229,6 +261,19 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { .brp_inc = 1, }; +/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */ +static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = { + .name = DRIVER_NAME, 
+ .tseg1_min = 1, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + /** * xcan_write_reg_le - Write a value to the device register little endian * @priv: Driver private data structure @@ -238,7 +283,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { * Write data to the paricular CAN register */ static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg, - u32 val) + u32 val) { iowrite32(val, priv->reg_base + reg); } @@ -265,7 +310,7 @@ static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg) * Write data to the paricular CAN register */ static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg, - u32 val) + u32 val) { iowrite32be(val, priv->reg_base + reg); } @@ -343,6 +388,7 @@ static int xcan_set_bittiming(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); struct can_bittiming *bt = &priv->can.bittiming; + struct can_bittiming *dbt = &priv->can.data_bittiming; u32 btr0, btr1; u32 is_config_mode; @@ -372,9 +418,27 @@ static int xcan_set_bittiming(struct net_device *ndev) priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0); priv->write_reg(priv, XCAN_BTR_OFFSET, btr1); + if (priv->devtype.cantype == XAXI_CANFD || + priv->devtype.cantype == XAXI_CANFD_2_0) { + /* Setting Baud Rate prescalar value in F_BRPR Register */ + btr0 = dbt->brp - 1; + + /* Setting Time Segment 1 in BTR Register */ + btr1 = dbt->prop_seg + bt->phase_seg1 - 1; + + /* Setting Time Segment 2 in BTR Register */ + btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift; + + /* Setting Synchronous jump width in BTR Register */ + btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift; + + priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0); + priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1); + } + netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n", - priv->read_reg(priv, XCAN_BRPR_OFFSET), - priv->read_reg(priv, XCAN_BTR_OFFSET)); + priv->read_reg(priv, XCAN_BRPR_OFFSET), + priv->read_reg(priv, XCAN_BTR_OFFSET)); return 0; } @@ -439,12 +503,12 @@ static int xcan_chip_start(struct net_device *ndev) while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) { if (time_after(jiffies, timeout)) { netdev_warn(ndev, - "timed out for correct mode\n"); + "timed out for correct mode\n"); return -ETIMEDOUT; } } netdev_dbg(ndev, "status:#x%08x\n", - priv->read_reg(priv, XCAN_SR_OFFSET)); + priv->read_reg(priv, XCAN_SR_OFFSET)); priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; @@ -483,6 +547,7 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode) /** * xcan_write_frame - Write a frame to HW + * @priv: Driver private data structure * @skb: sk_buff pointer that contains data to be Txed * @frame_offset: Register offset to write the frame to */ @@ -490,7 +555,8 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb, int frame_offset) { u32 id, dlc, data[2] = {0, 0}; - struct can_frame *cf = (struct can_frame *)skb->data; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u32 ramoff, dwindex = 0, i; /* Watch carefully on the bit sequence */ if (cf->can_id & CAN_EFF_FLAG) { @@ -498,7 +564,7 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb, id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) & XCAN_IDR_ID2_MASK; id |= (((cf->can_id & CAN_EFF_MASK) >> - (CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) << + (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) << XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK; /* The 
substibute remote TX request bit should be "1" @@ -519,31 +585,51 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb, id |= XCAN_IDR_SRR_MASK; } - dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT; - - if (cf->can_dlc > 0) - data[0] = be32_to_cpup((__be32 *)(cf->data + 0)); - if (cf->can_dlc > 4) - data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); + dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT; + if (can_is_canfd_skb(skb)) { + if (cf->flags & CANFD_BRS) + dlc |= XCAN_DLCR_BRS_MASK; + dlc |= XCAN_DLCR_EDL_MASK; + } priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id); /* If the CAN frame is RTR frame this write triggers transmission * (not on CAN FD) */ priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc); - if (!(cf->can_id & CAN_RTR_FLAG)) { - priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset), - data[0]); - /* If the CAN frame is Standard/Extended frame this - * write triggers transmission (not on CAN FD) - */ - priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset), - data[1]); + if (priv->devtype.cantype == XAXI_CANFD || + priv->devtype.cantype == XAXI_CANFD_2_0) { + for (i = 0; i < cf->len; i += 4) { + ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset, dwindex) + + (dwindex * XCANFD_DW_BYTES); + priv->write_reg(priv, ramoff, + be32_to_cpup((__be32 *)(cf->data + i))); + dwindex++; + } + } else { + if (cf->len > 0) + data[0] = be32_to_cpup((__be32 *)(cf->data + 0)); + if (cf->len > 4) + data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); + + if (!(cf->can_id & CAN_RTR_FLAG)) { + priv->write_reg(priv, + XCAN_FRAME_DW1_OFFSET(frame_offset), + data[0]); + /* If the CAN frame is Standard/Extended frame this + * write triggers transmission (not on CAN FD) + */ + priv->write_reg(priv, + XCAN_FRAME_DW2_OFFSET(frame_offset), + data[1]); + } } } /** * xcan_start_xmit_fifo - Starts the transmission (FIFO mode) + * @skb: sk_buff pointer that contains data to be Txed + * @ndev: Pointer to net_device structure * * Return: 0 on success, -ENOSPC if FIFO is full. */ @@ -580,6 +666,8 @@ static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev) /** * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode) + * @skb: sk_buff pointer that contains data to be Txed + * @ndev: Pointer to net_device structure * * Return: 0 on success, -ENOSPC if there is no space */ @@ -712,6 +800,113 @@ static int xcan_rx(struct net_device *ndev, int frame_base) } /** + * xcanfd_rx - Is called from CAN isr to complete the received + * frame processing + * @ndev: Pointer to net_device structure + * @frame_base: Register offset to the frame to be read + * + * This function is invoked from the CAN isr(poll) to process the Rx frames. It + * does minimal processing and invokes "netif_receive_skb" to complete further + * processing. + * Return: 1 on success and 0 on failure. 
+ */ +static int xcanfd_rx(struct net_device *ndev, int frame_base) +{ + struct xcan_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct canfd_frame *cf; + struct sk_buff *skb; + u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, fsr, readindex; + + fsr = priv->read_reg(priv, XCAN_FSR_OFFSET); + if (fsr & XCAN_FSR_FL_MASK) { + readindex = fsr & XCAN_FSR_RI_MASK; + id_xcan = priv->read_reg(priv, + XCAN_FRAME_ID_OFFSET(frame_base)); + dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)); + if (dlc & XCAN_DLCR_EDL_MASK) + skb = alloc_canfd_skb(ndev, &cf); + else + skb = alloc_can_skb(ndev, (struct can_frame **)&cf); + + if (unlikely(!skb)) { + stats->rx_dropped++; + return 0; + } + + /* Change Xilinx CANFD data length format to socketCAN data + * format + */ + if (dlc & XCAN_DLCR_EDL_MASK) + cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >> + XCAN_DLCR_DLC_SHIFT); + else + cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >> + XCAN_DLCR_DLC_SHIFT); + + /* Change Xilinx CAN ID format to socketCAN ID format */ + if (id_xcan & XCAN_IDR_IDE_MASK) { + /* The received frame is an Extended format frame */ + cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3; + cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >> + XCAN_IDR_ID2_SHIFT; + cf->can_id |= CAN_EFF_FLAG; + if (id_xcan & XCAN_IDR_RTR_MASK) + cf->can_id |= CAN_RTR_FLAG; + } else { + /* The received frame is a standard format frame */ + cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> + XCAN_IDR_ID1_SHIFT; + if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan & + XCAN_IDR_SRR_MASK)) + cf->can_id |= CAN_RTR_FLAG; + } + + /* Check the frame received is FD or not*/ + if (dlc & XCAN_DLCR_EDL_MASK) { + for (i = 0; i < cf->len; i += 4) { + if (priv->devtype.flags & XCAN_FLAG_CANFD_2) + data[0] = priv->read_reg(priv, + (XCAN_RXMSG_2_FRAME_OFFSET(readindex) + + (dwindex * XCANFD_DW_BYTES))); + else + data[0] = priv->read_reg(priv, + (XCAN_RXMSG_FRAME_OFFSET(readindex) + + (dwindex * XCANFD_DW_BYTES))); + *(__be32 *)(cf->data + i) = + cpu_to_be32(data[0]); + dwindex++; + } + } else { + for (i = 0; i < cf->len; i += 4) { + if (priv->devtype.flags & XCAN_FLAG_CANFD_2) + data[0] = priv->read_reg(priv, + XCAN_RXMSG_2_FRAME_OFFSET(readindex) + i); + else + data[0] = priv->read_reg(priv, + XCAN_RXMSG_FRAME_OFFSET(readindex) + i); + *(__be32 *)(cf->data + i) = + cpu_to_be32(data[0]); + } + } + /* Update FSR Register so that next packet will save to + * buffer + */ + fsr = priv->read_reg(priv, XCAN_FSR_OFFSET); + fsr |= XCAN_FSR_IRI_MASK; + priv->write_reg(priv, XCAN_FSR_OFFSET, fsr); + fsr = priv->read_reg(priv, XCAN_FSR_OFFSET); + stats->rx_bytes += cf->len; + stats->rx_packets++; + netif_receive_skb(skb); + + return 1; + } + /* If FSR Register is not updated with fill level */ + return 0; +} + +/** * xcan_current_error_state - Get current error state from HW * @ndev: Pointer to net_device structure * @@ -924,7 +1119,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; } } - priv->can.can_stats.bus_error++; + priv->can.can_stats.bus_error++; } if (skb) { @@ -934,7 +1129,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) } netdev_dbg(ndev, "%s: error status register:0x%x\n", - __func__, priv->read_reg(priv, XCAN_ESR_OFFSET)); + __func__, priv->read_reg(priv, XCAN_ESR_OFFSET)); } /** @@ -960,6 +1155,7 @@ static void xcan_state_interrupt(struct net_device *ndev, u32 isr) /** * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame + * @priv: 
Driver private data structure * * Return: Register offset of the next frame in RX FIFO. */ @@ -982,9 +1178,11 @@ static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv) return -ENOENT; if (priv->devtype.flags & XCAN_FLAG_CANFD_2) - offset = XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); + offset = + XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); else - offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); + offset = + XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); } else { /* check if RX FIFO is empty */ @@ -1019,7 +1217,10 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 && (work_done < quota)) { - work_done += xcan_rx(ndev, frame_offset); + if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK) + work_done += xcanfd_rx(ndev, frame_offset); + else + work_done += xcan_rx(ndev, frame_offset); if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) /* increment read index */ @@ -1094,8 +1295,10 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) * via TXFEMP handling as we read TXFEMP *after* TXOK * clear to satisfy (1). */ - while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) { - priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); + while ((isr & XCAN_IXR_TXOK_MASK) && + !WARN_ON(++retries == 100)) { + priv->write_reg(priv, XCAN_ICR_OFFSET, + XCAN_IXR_TXOK_MASK); isr = priv->read_reg(priv, XCAN_ISR_OFFSET); } @@ -1208,12 +1411,12 @@ static int xcan_open(struct net_device *ndev) ret = pm_runtime_get_sync(priv->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", - __func__, ret); + __func__, ret); return ret; } ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags, - ndev->name, ndev); + ndev->name, ndev); if (ret < 0) { netdev_err(ndev, "irq allocation for CAN failed\n"); goto err; @@ -1284,7 +1487,7 @@ static int xcan_close(struct net_device *ndev) * Return: 0 on success and failure value on error */ static int xcan_get_berr_counter(const struct net_device *ndev, - struct can_berr_counter *bec) + struct can_berr_counter *bec) { struct xcan_priv *priv = netdev_priv(ndev); int ret; @@ -1292,7 +1495,7 @@ static int xcan_get_berr_counter(const struct net_device *ndev, ret = pm_runtime_get_sync(priv->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", - __func__, ret); + __func__, ret); return ret; } @@ -1305,7 +1508,6 @@ static int xcan_get_berr_counter(const struct net_device *ndev, return 0; } - static const struct net_device_ops xcan_netdev_ops = { .ndo_open = xcan_open, .ndo_stop = xcan_close, @@ -1417,6 +1619,8 @@ static const struct dev_pm_ops xcan_dev_pm_ops = { }; static const struct xcan_devtype_data xcan_zynq_data = { + .cantype = XZYNQ_CANPS, + .flags = XCAN_FLAG_TXFEMP, .bittiming_const = &xcan_bittiming_const, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, @@ -1424,6 +1628,8 @@ static const struct xcan_devtype_data xcan_zynq_data = { }; static const struct xcan_devtype_data xcan_axi_data = { + .cantype = XAXI_CAN, + .flags = XCAN_FLAG_TXFEMP, .bittiming_const = &xcan_bittiming_const, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, @@ -1431,6 +1637,7 @@ static const struct xcan_devtype_data xcan_axi_data = { }; static const struct xcan_devtype_data xcan_canfd_data = { + .cantype = XAXI_CANFD, .flags = XCAN_FLAG_EXT_FILTERS | XCAN_FLAG_RXMNF | XCAN_FLAG_TX_MAILBOXES | @@ -1442,6 +1649,7 @@ static const struct xcan_devtype_data xcan_canfd_data = { }; static const 
struct xcan_devtype_data xcan_canfd2_data = { + .cantype = XAXI_CANFD_2_0, .flags = XCAN_FLAG_EXT_FILTERS | XCAN_FLAG_RXMNF | XCAN_FLAG_TX_MAILBOXES | @@ -1554,6 +1762,19 @@ static int xcan_probe(struct platform_device *pdev) priv->can.do_get_berr_counter = xcan_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_BERR_REPORTING; + + if (devtype->cantype == XAXI_CANFD) + priv->can.data_bittiming_const = + &xcan_data_bittiming_const_canfd; + + if (devtype->cantype == XAXI_CANFD_2_0) + priv->can.data_bittiming_const = + &xcan_data_bittiming_const_canfd2; + + if (devtype->cantype == XAXI_CANFD || + devtype->cantype == XAXI_CANFD_2_0) + priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; + priv->reg_base = addr; priv->tx_max = tx_max; priv->devtype = *devtype; @@ -1589,7 +1810,7 @@ static int xcan_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", - __func__, ret); + __func__, ret); goto err_pmdisable; } diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index d9c56a779c08..0a1be5259be0 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -536,7 +536,6 @@ static void b53_srab_mux_init(struct platform_device *pdev) struct b53_device *dev = platform_get_drvdata(pdev); struct b53_srab_priv *priv = dev->priv; struct b53_srab_port_priv *p; - struct resource *r; unsigned int port; u32 reg, off = 0; int ret; @@ -544,8 +543,7 @@ static void b53_srab_mux_init(struct platform_device *pdev) if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID) return; - r = platform_get_resource(pdev, IORESOURCE_MEM, 1); - priv->mux_config = devm_ioremap_resource(&pdev->dev, r); + priv->mux_config = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(priv->mux_config)) return; @@ -593,7 +591,6 @@ static int b53_srab_probe(struct platform_device *pdev) const struct of_device_id *of_id = NULL; struct b53_srab_priv *priv; struct b53_device *dev; - struct resource *r; if (dn) of_id = of_match_node(b53_srab_of_match, dn); @@ -610,8 +607,7 @@ static int b53_srab_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->regs = devm_ioremap_resource(&pdev->dev, r); + priv->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->regs)) return -ENOMEM; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 3811fdbda13e..49f99436018a 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1041,7 +1041,6 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) struct b53_device *dev; struct dsa_switch *ds; void __iomem **base; - struct resource *r; unsigned int i; u32 reg, rev; int ret; @@ -1107,8 +1106,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - r = platform_get_resource(pdev, IORESOURCE_MEM, i); - *base = devm_ioremap_resource(&pdev->dev, r); + *base = devm_platform_ioremap_resource(pdev, i); if (IS_ERR(*base)) { pr_err("unable to find register: %s\n", reg_names[i]); return PTR_ERR(*base); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 4e64835deac2..2175ec13bb2c 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1822,7 +1822,6 @@ remove_gphy: static int gswip_probe(struct platform_device *pdev) { struct gswip_priv *priv; - struct resource *gswip_res, *mdio_res, *mii_res; struct 
device_node *mdio_np, *gphy_fw_np; struct device *dev = &pdev->dev; int err; @@ -1833,18 +1832,15 @@ static int gswip_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - gswip_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->gswip = devm_ioremap_resource(dev, gswip_res); + priv->gswip = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->gswip)) return PTR_ERR(priv->gswip); - mdio_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - priv->mdio = devm_ioremap_resource(dev, mdio_res); + priv->mdio = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(priv->mdio)) return PTR_ERR(priv->mdio); - mii_res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - priv->mii = devm_ioremap_resource(dev, mii_res); + priv->mii = devm_platform_ioremap_resource(pdev, 2); if (IS_ERR(priv->mii)) return PTR_ERR(priv->mii); diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig index fe0a13b79c4b..e1c23d1e91e6 100644 --- a/drivers/net/dsa/microchip/Kconfig +++ b/drivers/net/dsa/microchip/Kconfig @@ -5,7 +5,6 @@ config NET_DSA_MICROCHIP_KSZ_COMMON menuconfig NET_DSA_MICROCHIP_KSZ9477 tristate "Microchip KSZ9477 series switch support" depends on NET_DSA - select NET_DSA_TAG_KSZ9477 select NET_DSA_MICROCHIP_KSZ_COMMON help This driver adds support for Microchip KSZ9477 switch chips. @@ -16,3 +15,20 @@ config NET_DSA_MICROCHIP_KSZ9477_SPI select REGMAP_SPI help Select to enable support for registering switches configured through SPI. + +menuconfig NET_DSA_MICROCHIP_KSZ8795 + tristate "Microchip KSZ8795 series switch support" + depends on NET_DSA + select NET_DSA_MICROCHIP_KSZ_COMMON + help + This driver adds support for Microchip KSZ8795 switch chips. + +config NET_DSA_MICROCHIP_KSZ8795_SPI + tristate "KSZ8795 series SPI connected switch driver" + depends on NET_DSA_MICROCHIP_KSZ8795 && SPI + select REGMAP_SPI + help + This driver accesses KSZ8795 chip through SPI. + + It is required to use the KSZ8795 switch driver as the only access + is through SPI. diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile index 68451b02f775..e3d799b95d7d 100644 --- a/drivers/net/dsa/microchip/Makefile +++ b/drivers/net/dsa/microchip/Makefile @@ -2,3 +2,5 @@ obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_common.o obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477) += ksz9477.o obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI) += ksz9477_spi.o +obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795) += ksz8795.o +obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI) += ksz8795_spi.o diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c new file mode 100644 index 000000000000..a23d3ffdf0c4 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -0,0 +1,1310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microchip KSZ8795 switch driver + * + * Copyright (C) 2017 Microchip Technology Inc. 
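/* The b53_srab, bcm_sf2 and gswip hunks above all collapse the same
 * platform_get_resource() + devm_ioremap_resource() pair into a single
 * helper call. A minimal sketch of the resulting probe idiom; the "ex_"
 * names are placeholders, not taken from any driver in this series.
 */
#include <linux/err.h>
#include <linux/platform_device.h>

static int ex_probe(struct platform_device *pdev)
{
	void __iomem *regs;

	/* index 0 selects the first IORESOURCE_MEM entry, as before */
	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	return 0;
}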
+ * Tristram Ha <Tristram.Ha@microchip.com> + */ + +#include <linux/delay.h> +#include <linux/export.h> +#include <linux/gpio.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_data/microchip-ksz.h> +#include <linux/phy.h> +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <net/dsa.h> +#include <net/switchdev.h> + +#include "ksz_common.h" +#include "ksz8795_reg.h" + +static const struct { + char string[ETH_GSTRING_LEN]; +} mib_names[TOTAL_SWITCH_COUNTER_NUM] = { + { "rx_hi" }, + { "rx_undersize" }, + { "rx_fragments" }, + { "rx_oversize" }, + { "rx_jabbers" }, + { "rx_symbol_err" }, + { "rx_crc_err" }, + { "rx_align_err" }, + { "rx_mac_ctrl" }, + { "rx_pause" }, + { "rx_bcast" }, + { "rx_mcast" }, + { "rx_ucast" }, + { "rx_64_or_less" }, + { "rx_65_127" }, + { "rx_128_255" }, + { "rx_256_511" }, + { "rx_512_1023" }, + { "rx_1024_1522" }, + { "rx_1523_2000" }, + { "rx_2001" }, + { "tx_hi" }, + { "tx_late_col" }, + { "tx_pause" }, + { "tx_bcast" }, + { "tx_mcast" }, + { "tx_ucast" }, + { "tx_deferred" }, + { "tx_total_col" }, + { "tx_exc_col" }, + { "tx_single_col" }, + { "tx_mult_col" }, + { "rx_total" }, + { "tx_total" }, + { "rx_discards" }, + { "tx_discards" }, +}; + +static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) +{ + regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0); +} + +static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, + bool set) +{ + regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset), + bits, set ? bits : 0); +} + +static int ksz8795_reset_switch(struct ksz_device *dev) +{ + /* reset switch */ + ksz_write8(dev, REG_POWER_MANAGEMENT_1, + SW_SOFTWARE_POWER_DOWN << SW_POWER_MANAGEMENT_MODE_S); + ksz_write8(dev, REG_POWER_MANAGEMENT_1, 0); + + return 0; +} + +static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue) +{ + u8 hi, lo; + + /* Number of queues can only be 1, 2, or 4. */ + switch (queue) { + case 4: + case 3: + queue = PORT_QUEUE_SPLIT_4; + break; + case 2: + queue = PORT_QUEUE_SPLIT_2; + break; + default: + queue = PORT_QUEUE_SPLIT_1; + } + ksz_pread8(dev, port, REG_PORT_CTRL_0, &lo); + ksz_pread8(dev, port, P_DROP_TAG_CTRL, &hi); + lo &= ~PORT_QUEUE_SPLIT_L; + if (queue & PORT_QUEUE_SPLIT_2) + lo |= PORT_QUEUE_SPLIT_L; + hi &= ~PORT_QUEUE_SPLIT_H; + if (queue & PORT_QUEUE_SPLIT_4) + hi |= PORT_QUEUE_SPLIT_H; + ksz_pwrite8(dev, port, REG_PORT_CTRL_0, lo); + ksz_pwrite8(dev, port, P_DROP_TAG_CTRL, hi); + + /* Default is port based for egress rate limit. */ + if (queue != PORT_QUEUE_SPLIT_1) + ksz_cfg(dev, REG_SW_CTRL_19, SW_OUT_RATE_LIMIT_QUEUE_BASED, + true); +} + +static void ksz8795_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, + u64 *cnt) +{ + u16 ctrl_addr; + u32 data; + u8 check; + int loop; + + ctrl_addr = addr + SWITCH_COUNTER_NUM * port; + ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); + + mutex_lock(&dev->alu_mutex); + ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); + + /* It is almost guaranteed to always read the valid bit because of + * slow SPI speed. 
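/* The read-out above widens a narrow hardware counter into a u64: the
 * overflow flag accounts for exactly one wrap of the MIB_COUNTER_VALUE
 * field before the masked value is added in. A sketch of that
 * arithmetic; the 30-bit field width is an assumption for illustration.
 */
#define EX_COUNTER_VALUE	0x3fffffff

static void ex_widen_counter(u64 *cnt, u32 data, bool overflow)
{
	if (overflow)
		*cnt += (u64)EX_COUNTER_VALUE + 1;	/* one full wrap */
	*cnt += data & EX_COUNTER_VALUE;
}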
+ */ + for (loop = 2; loop > 0; loop--) { + ksz_read8(dev, REG_IND_MIB_CHECK, &check); + + if (check & MIB_COUNTER_VALID) { + ksz_read32(dev, REG_IND_DATA_LO, &data); + if (check & MIB_COUNTER_OVERFLOW) + *cnt += MIB_COUNTER_VALUE + 1; + *cnt += data & MIB_COUNTER_VALUE; + break; + } + } + mutex_unlock(&dev->alu_mutex); +} + +static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt) +{ + u16 ctrl_addr; + u32 data; + u8 check; + int loop; + + addr -= SWITCH_COUNTER_NUM; + ctrl_addr = (KS_MIB_TOTAL_RX_1 - KS_MIB_TOTAL_RX_0) * port; + ctrl_addr += addr + KS_MIB_TOTAL_RX_0; + ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); + + mutex_lock(&dev->alu_mutex); + ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); + + /* It is almost guaranteed to always read the valid bit because of + * slow SPI speed. + */ + for (loop = 2; loop > 0; loop--) { + ksz_read8(dev, REG_IND_MIB_CHECK, &check); + + if (check & MIB_COUNTER_VALID) { + ksz_read32(dev, REG_IND_DATA_LO, &data); + if (addr < 2) { + u64 total; + + total = check & MIB_TOTAL_BYTES_H; + total <<= 32; + *cnt += total; + *cnt += data; + if (check & MIB_COUNTER_OVERFLOW) { + total = MIB_TOTAL_BYTES_H + 1; + total <<= 32; + *cnt += total; + } + } else { + if (check & MIB_COUNTER_OVERFLOW) + *cnt += MIB_PACKET_DROPPED + 1; + *cnt += data & MIB_PACKET_DROPPED; + } + break; + } + } + mutex_unlock(&dev->alu_mutex); +} + +static void ksz8795_freeze_mib(struct ksz_device *dev, int port, bool freeze) +{ + /* enable the port for flush/freeze function */ + if (freeze) + ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true); + ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FREEZE, freeze); + + /* disable the port after freeze is done */ + if (!freeze) + ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false); +} + +static void ksz8795_port_init_cnt(struct ksz_device *dev, int port) +{ + struct ksz_port_mib *mib = &dev->ports[port].mib; + + /* flush all enabled port MIB counters */ + ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true); + ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true); + ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false); + + mib->cnt_ptr = 0; + + /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */ + while (mib->cnt_ptr < dev->reg_mib_cnt) { + dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr, + &mib->counters[mib->cnt_ptr]); + ++mib->cnt_ptr; + } + + /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. 
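/* The freeze hook above is meant to be wrapped around a counter
 * snapshot so the hardware stops updating while software reads. A
 * simplified sketch of such a caller, using the dev_ops hooks wired up
 * at the bottom of this file; the actual read loop is driven from the
 * common code's MIB timer (see ksz_init_mib_timer() in the setup path).
 */
static void ex_snapshot_mib(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;
	int i;

	dev->dev_ops->freeze_mib(dev, port, true);	/* stop HW updates */
	for (i = 0; i < dev->reg_mib_cnt; i++)
		dev->dev_ops->r_mib_cnt(dev, port, i, &mib->counters[i]);
	dev->dev_ops->freeze_mib(dev, port, false);	/* resume */
}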
*/ + while (mib->cnt_ptr < dev->mib_cnt) { + dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr, + NULL, &mib->counters[mib->cnt_ptr]); + ++mib->cnt_ptr; + } + mib->cnt_ptr = 0; + memset(mib->counters, 0, dev->mib_cnt * sizeof(u64)); +} + +static void ksz8795_r_table(struct ksz_device *dev, int table, u16 addr, + u64 *data) +{ + u16 ctrl_addr; + + ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr; + + mutex_lock(&dev->alu_mutex); + ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); + ksz_read64(dev, REG_IND_DATA_HI, data); + mutex_unlock(&dev->alu_mutex); +} + +static void ksz8795_w_table(struct ksz_device *dev, int table, u16 addr, + u64 data) +{ + u16 ctrl_addr; + + ctrl_addr = IND_ACC_TABLE(table) | addr; + + mutex_lock(&dev->alu_mutex); + ksz_write64(dev, REG_IND_DATA_HI, data); + ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); + mutex_unlock(&dev->alu_mutex); +} + +static int ksz8795_valid_dyn_entry(struct ksz_device *dev, u8 *data) +{ + int timeout = 100; + + do { + ksz_read8(dev, REG_IND_DATA_CHECK, data); + timeout--; + } while ((*data & DYNAMIC_MAC_TABLE_NOT_READY) && timeout); + + /* Entry is not ready for accessing. */ + if (*data & DYNAMIC_MAC_TABLE_NOT_READY) { + return -EAGAIN; + /* Entry is ready for accessing. */ + } else { + ksz_read8(dev, REG_IND_DATA_8, data); + + /* There is no valid entry in the table. */ + if (*data & DYNAMIC_MAC_TABLE_MAC_EMPTY) + return -ENXIO; + } + return 0; +} + +static int ksz8795_r_dyn_mac_table(struct ksz_device *dev, u16 addr, + u8 *mac_addr, u8 *fid, u8 *src_port, + u8 *timestamp, u16 *entries) +{ + u32 data_hi, data_lo; + u16 ctrl_addr; + u8 data; + int rc; + + ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr; + + mutex_lock(&dev->alu_mutex); + ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); + + rc = ksz8795_valid_dyn_entry(dev, &data); + if (rc == -EAGAIN) { + if (addr == 0) + *entries = 0; + } else if (rc == -ENXIO) { + *entries = 0; + /* At least one valid entry in the table. */ + } else { + u64 buf = 0; + int cnt; + + ksz_read64(dev, REG_IND_DATA_HI, &buf); + data_hi = (u32)(buf >> 32); + data_lo = (u32)buf; + + /* Check out how many valid entry in the table. */ + cnt = data & DYNAMIC_MAC_TABLE_ENTRIES_H; + cnt <<= DYNAMIC_MAC_ENTRIES_H_S; + cnt |= (data_hi & DYNAMIC_MAC_TABLE_ENTRIES) >> + DYNAMIC_MAC_ENTRIES_S; + *entries = cnt + 1; + + *fid = (data_hi & DYNAMIC_MAC_TABLE_FID) >> + DYNAMIC_MAC_FID_S; + *src_port = (data_hi & DYNAMIC_MAC_TABLE_SRC_PORT) >> + DYNAMIC_MAC_SRC_PORT_S; + *timestamp = (data_hi & DYNAMIC_MAC_TABLE_TIMESTAMP) >> + DYNAMIC_MAC_TIMESTAMP_S; + + mac_addr[5] = (u8)data_lo; + mac_addr[4] = (u8)(data_lo >> 8); + mac_addr[3] = (u8)(data_lo >> 16); + mac_addr[2] = (u8)(data_lo >> 24); + + mac_addr[1] = (u8)data_hi; + mac_addr[0] = (u8)(data_hi >> 8); + rc = 0; + } + mutex_unlock(&dev->alu_mutex); + + return rc; +} + +static int ksz8795_r_sta_mac_table(struct ksz_device *dev, u16 addr, + struct alu_struct *alu) +{ + u32 data_hi, data_lo; + u64 data; + + ksz8795_r_table(dev, TABLE_STATIC_MAC, addr, &data); + data_hi = data >> 32; + data_lo = (u32)data; + if (data_hi & (STATIC_MAC_TABLE_VALID | STATIC_MAC_TABLE_OVERRIDE)) { + alu->mac[5] = (u8)data_lo; + alu->mac[4] = (u8)(data_lo >> 8); + alu->mac[3] = (u8)(data_lo >> 16); + alu->mac[2] = (u8)(data_lo >> 24); + alu->mac[1] = (u8)data_hi; + alu->mac[0] = (u8)(data_hi >> 8); + alu->port_forward = (data_hi & STATIC_MAC_TABLE_FWD_PORTS) >> + STATIC_MAC_FWD_PORTS_S; + alu->is_override = + (data_hi & STATIC_MAC_TABLE_OVERRIDE) ? 
1 : 0; + data_hi >>= 1; + alu->is_use_fid = (data_hi & STATIC_MAC_TABLE_USE_FID) ? 1 : 0; + alu->fid = (data_hi & STATIC_MAC_TABLE_FID) >> + STATIC_MAC_FID_S; + return 0; + } + return -ENXIO; +} + +static void ksz8795_w_sta_mac_table(struct ksz_device *dev, u16 addr, + struct alu_struct *alu) +{ + u32 data_hi, data_lo; + u64 data; + + data_lo = ((u32)alu->mac[2] << 24) | + ((u32)alu->mac[3] << 16) | + ((u32)alu->mac[4] << 8) | alu->mac[5]; + data_hi = ((u32)alu->mac[0] << 8) | alu->mac[1]; + data_hi |= (u32)alu->port_forward << STATIC_MAC_FWD_PORTS_S; + + if (alu->is_override) + data_hi |= STATIC_MAC_TABLE_OVERRIDE; + if (alu->is_use_fid) { + data_hi |= STATIC_MAC_TABLE_USE_FID; + data_hi |= (u32)alu->fid << STATIC_MAC_FID_S; + } + if (alu->is_static) + data_hi |= STATIC_MAC_TABLE_VALID; + else + data_hi &= ~STATIC_MAC_TABLE_OVERRIDE; + + data = (u64)data_hi << 32 | data_lo; + ksz8795_w_table(dev, TABLE_STATIC_MAC, addr, data); +} + +static void ksz8795_from_vlan(u16 vlan, u8 *fid, u8 *member, u8 *valid) +{ + *fid = vlan & VLAN_TABLE_FID; + *member = (vlan & VLAN_TABLE_MEMBERSHIP) >> VLAN_TABLE_MEMBERSHIP_S; + *valid = !!(vlan & VLAN_TABLE_VALID); +} + +static void ksz8795_to_vlan(u8 fid, u8 member, u8 valid, u16 *vlan) +{ + *vlan = fid; + *vlan |= (u16)member << VLAN_TABLE_MEMBERSHIP_S; + if (valid) + *vlan |= VLAN_TABLE_VALID; +} + +static void ksz8795_r_vlan_entries(struct ksz_device *dev, u16 addr) +{ + u64 data; + int i; + + ksz8795_r_table(dev, TABLE_VLAN, addr, &data); + addr *= 4; + for (i = 0; i < 4; i++) { + dev->vlan_cache[addr + i].table[0] = (u16)data; + data >>= VLAN_TABLE_S; + } +} + +static void ksz8795_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan) +{ + int index; + u16 *data; + u16 addr; + u64 buf; + + data = (u16 *)&buf; + addr = vid / 4; + index = vid & 3; + ksz8795_r_table(dev, TABLE_VLAN, addr, &buf); + *vlan = data[index]; +} + +static void ksz8795_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan) +{ + int index; + u16 *data; + u16 addr; + u64 buf; + + data = (u16 *)&buf; + addr = vid / 4; + index = vid & 3; + ksz8795_r_table(dev, TABLE_VLAN, addr, &buf); + data[index] = vlan; + dev->vlan_cache[vid].table[0] = vlan; + ksz8795_w_table(dev, TABLE_VLAN, addr, buf); +} + +static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) +{ + u8 restart, speed, ctrl, link; + int processed = true; + u16 data = 0; + u8 p = phy; + + switch (reg) { + case PHY_REG_CTRL: + ksz_pread8(dev, p, P_NEG_RESTART_CTRL, &restart); + ksz_pread8(dev, p, P_SPEED_STATUS, &speed); + ksz_pread8(dev, p, P_FORCE_CTRL, &ctrl); + if (restart & PORT_PHY_LOOPBACK) + data |= PHY_LOOPBACK; + if (ctrl & PORT_FORCE_100_MBIT) + data |= PHY_SPEED_100MBIT; + if (!(ctrl & PORT_AUTO_NEG_DISABLE)) + data |= PHY_AUTO_NEG_ENABLE; + if (restart & PORT_POWER_DOWN) + data |= PHY_POWER_DOWN; + if (restart & PORT_AUTO_NEG_RESTART) + data |= PHY_AUTO_NEG_RESTART; + if (ctrl & PORT_FORCE_FULL_DUPLEX) + data |= PHY_FULL_DUPLEX; + if (speed & PORT_HP_MDIX) + data |= PHY_HP_MDIX; + if (restart & PORT_FORCE_MDIX) + data |= PHY_FORCE_MDIX; + if (restart & PORT_AUTO_MDIX_DISABLE) + data |= PHY_AUTO_MDIX_DISABLE; + if (restart & PORT_TX_DISABLE) + data |= PHY_TRANSMIT_DISABLE; + if (restart & PORT_LED_OFF) + data |= PHY_LED_DISABLE; + break; + case PHY_REG_STATUS: + ksz_pread8(dev, p, P_LINK_STATUS, &link); + data = PHY_100BTX_FD_CAPABLE | + PHY_100BTX_CAPABLE | + PHY_10BT_FD_CAPABLE | + PHY_10BT_CAPABLE | + PHY_AUTO_NEG_CAPABLE; + if (link & PORT_AUTO_NEG_COMPLETE) + data |= 
PHY_AUTO_NEG_ACKNOWLEDGE; + if (link & PORT_STAT_LINK_GOOD) + data |= PHY_LINK_STATUS; + break; + case PHY_REG_ID_1: + data = KSZ8795_ID_HI; + break; + case PHY_REG_ID_2: + data = KSZ8795_ID_LO; + break; + case PHY_REG_AUTO_NEGOTIATION: + ksz_pread8(dev, p, P_LOCAL_CTRL, &ctrl); + data = PHY_AUTO_NEG_802_3; + if (ctrl & PORT_AUTO_NEG_SYM_PAUSE) + data |= PHY_AUTO_NEG_SYM_PAUSE; + if (ctrl & PORT_AUTO_NEG_100BTX_FD) + data |= PHY_AUTO_NEG_100BTX_FD; + if (ctrl & PORT_AUTO_NEG_100BTX) + data |= PHY_AUTO_NEG_100BTX; + if (ctrl & PORT_AUTO_NEG_10BT_FD) + data |= PHY_AUTO_NEG_10BT_FD; + if (ctrl & PORT_AUTO_NEG_10BT) + data |= PHY_AUTO_NEG_10BT; + break; + case PHY_REG_REMOTE_CAPABILITY: + ksz_pread8(dev, p, P_REMOTE_STATUS, &link); + data = PHY_AUTO_NEG_802_3; + if (link & PORT_REMOTE_SYM_PAUSE) + data |= PHY_AUTO_NEG_SYM_PAUSE; + if (link & PORT_REMOTE_100BTX_FD) + data |= PHY_AUTO_NEG_100BTX_FD; + if (link & PORT_REMOTE_100BTX) + data |= PHY_AUTO_NEG_100BTX; + if (link & PORT_REMOTE_10BT_FD) + data |= PHY_AUTO_NEG_10BT_FD; + if (link & PORT_REMOTE_10BT) + data |= PHY_AUTO_NEG_10BT; + if (data & ~PHY_AUTO_NEG_802_3) + data |= PHY_REMOTE_ACKNOWLEDGE_NOT; + break; + default: + processed = false; + break; + } + if (processed) + *val = data; +} + +static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) +{ + u8 p = phy; + u8 restart, speed, ctrl, data; + + switch (reg) { + case PHY_REG_CTRL: + + /* Do not support PHY reset function. */ + if (val & PHY_RESET) + break; + ksz_pread8(dev, p, P_SPEED_STATUS, &speed); + data = speed; + if (val & PHY_HP_MDIX) + data |= PORT_HP_MDIX; + else + data &= ~PORT_HP_MDIX; + if (data != speed) + ksz_pwrite8(dev, p, P_SPEED_STATUS, data); + ksz_pread8(dev, p, P_FORCE_CTRL, &ctrl); + data = ctrl; + if (!(val & PHY_AUTO_NEG_ENABLE)) + data |= PORT_AUTO_NEG_DISABLE; + else + data &= ~PORT_AUTO_NEG_DISABLE; + + /* Fiber port does not support auto-negotiation. 
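/* r_phy/w_phy here synthesize a clause-22 register file (BMCR, BMSR,
 * PHY ID, advertisement, link partner ability) out of per-port switch
 * registers; DSA reaches them through the common phy ops wired up below
 * (.phy_read = ksz_phy_read16). Roughly, and much simplified:
 */
static int ex_phy_read(struct dsa_switch *ds, int addr, int reg)
{
	struct ksz_device *dev = ds->priv;
	u16 val = 0xffff;

	dev->dev_ops->r_phy(dev, addr, reg, &val);
	return val;
}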
*/ + if (dev->ports[p].fiber) + data |= PORT_AUTO_NEG_DISABLE; + if (val & PHY_SPEED_100MBIT) + data |= PORT_FORCE_100_MBIT; + else + data &= ~PORT_FORCE_100_MBIT; + if (val & PHY_FULL_DUPLEX) + data |= PORT_FORCE_FULL_DUPLEX; + else + data &= ~PORT_FORCE_FULL_DUPLEX; + if (data != ctrl) + ksz_pwrite8(dev, p, P_FORCE_CTRL, data); + ksz_pread8(dev, p, P_NEG_RESTART_CTRL, &restart); + data = restart; + if (val & PHY_LED_DISABLE) + data |= PORT_LED_OFF; + else + data &= ~PORT_LED_OFF; + if (val & PHY_TRANSMIT_DISABLE) + data |= PORT_TX_DISABLE; + else + data &= ~PORT_TX_DISABLE; + if (val & PHY_AUTO_NEG_RESTART) + data |= PORT_AUTO_NEG_RESTART; + else + data &= ~(PORT_AUTO_NEG_RESTART); + if (val & PHY_POWER_DOWN) + data |= PORT_POWER_DOWN; + else + data &= ~PORT_POWER_DOWN; + if (val & PHY_AUTO_MDIX_DISABLE) + data |= PORT_AUTO_MDIX_DISABLE; + else + data &= ~PORT_AUTO_MDIX_DISABLE; + if (val & PHY_FORCE_MDIX) + data |= PORT_FORCE_MDIX; + else + data &= ~PORT_FORCE_MDIX; + if (val & PHY_LOOPBACK) + data |= PORT_PHY_LOOPBACK; + else + data &= ~PORT_PHY_LOOPBACK; + if (data != restart) + ksz_pwrite8(dev, p, P_NEG_RESTART_CTRL, data); + break; + case PHY_REG_AUTO_NEGOTIATION: + ksz_pread8(dev, p, P_LOCAL_CTRL, &ctrl); + data = ctrl; + data &= ~(PORT_AUTO_NEG_SYM_PAUSE | + PORT_AUTO_NEG_100BTX_FD | + PORT_AUTO_NEG_100BTX | + PORT_AUTO_NEG_10BT_FD | + PORT_AUTO_NEG_10BT); + if (val & PHY_AUTO_NEG_SYM_PAUSE) + data |= PORT_AUTO_NEG_SYM_PAUSE; + if (val & PHY_AUTO_NEG_100BTX_FD) + data |= PORT_AUTO_NEG_100BTX_FD; + if (val & PHY_AUTO_NEG_100BTX) + data |= PORT_AUTO_NEG_100BTX; + if (val & PHY_AUTO_NEG_10BT_FD) + data |= PORT_AUTO_NEG_10BT_FD; + if (val & PHY_AUTO_NEG_10BT) + data |= PORT_AUTO_NEG_10BT; + if (data != ctrl) + ksz_pwrite8(dev, p, P_LOCAL_CTRL, data); + break; + default: + break; + } +} + +static enum dsa_tag_protocol ksz8795_get_tag_protocol(struct dsa_switch *ds, + int port) +{ + return DSA_TAG_PROTO_KSZ8795; +} + +static void ksz8795_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *buf) +{ + int i; + + for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { + memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string, + ETH_GSTRING_LEN); + } +} + +static void ksz8795_cfg_port_member(struct ksz_device *dev, int port, + u8 member) +{ + u8 data; + + ksz_pread8(dev, port, P_MIRROR_CTRL, &data); + data &= ~PORT_VLAN_MEMBERSHIP; + data |= (member & dev->port_mask); + ksz_pwrite8(dev, port, P_MIRROR_CTRL, data); + dev->ports[port].member = member; +} + +static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port, + u8 state) +{ + struct ksz_device *dev = ds->priv; + int forward = dev->member; + struct ksz_port *p; + int member = -1; + u8 data; + + p = &dev->ports[port]; + + ksz_pread8(dev, port, P_STP_CTRL, &data); + data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); + + switch (state) { + case BR_STATE_DISABLED: + data |= PORT_LEARN_DISABLE; + if (port < SWITCH_PORT_NUM) + member = 0; + break; + case BR_STATE_LISTENING: + data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); + if (port < SWITCH_PORT_NUM && + p->stp_state == BR_STATE_DISABLED) + member = dev->host_mask | p->vid_member; + break; + case BR_STATE_LEARNING: + data |= PORT_RX_ENABLE; + break; + case BR_STATE_FORWARDING: + data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); + + /* This function is also used internally. */ + if (port == dev->cpu_port) + break; + + /* Port is a member of a bridge. 
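/* Condensed view of the state mapping implemented in this function:
 *
 *   BR_STATE_DISABLED    rx off, tx off, learning off
 *   BR_STATE_LISTENING   rx on,  tx off, learning off
 *   BR_STATE_LEARNING    rx on,  tx off, learning on
 *   BR_STATE_FORWARDING  rx on,  tx on,  learning on
 *   BR_STATE_BLOCKING    rx off, tx off, learning off
 *
 * The same register also carries port membership bits, hence the
 * combined handling of "member" alongside the STP state.
 */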
*/ + if (dev->br_member & BIT(port)) { + dev->member |= BIT(port); + member = dev->member; + } else { + member = dev->host_mask | p->vid_member; + } + break; + case BR_STATE_BLOCKING: + data |= PORT_LEARN_DISABLE; + if (port < SWITCH_PORT_NUM && + p->stp_state == BR_STATE_DISABLED) + member = dev->host_mask | p->vid_member; + break; + default: + dev_err(ds->dev, "invalid STP state: %d\n", state); + return; + } + + ksz_pwrite8(dev, port, P_STP_CTRL, data); + p->stp_state = state; + if (data & PORT_RX_ENABLE) + dev->rx_ports |= BIT(port); + else + dev->rx_ports &= ~BIT(port); + if (data & PORT_TX_ENABLE) + dev->tx_ports |= BIT(port); + else + dev->tx_ports &= ~BIT(port); + + /* Port membership may share register with STP state. */ + if (member >= 0 && member != p->member) + ksz8795_cfg_port_member(dev, port, (u8)member); + + /* Check if forwarding needs to be updated. */ + if (state != BR_STATE_FORWARDING) { + if (dev->br_member & BIT(port)) + dev->member &= ~BIT(port); + } + + /* When topology has changed the function ksz_update_port_member + * should be called to modify port forwarding behavior. + */ + if (forward != dev->member) + ksz_update_port_member(dev, port); +} + +static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port) +{ + u8 learn[TOTAL_PORT_NUM]; + int first, index, cnt; + struct ksz_port *p; + + if ((uint)port < TOTAL_PORT_NUM) { + first = port; + cnt = port + 1; + } else { + /* Flush all ports. */ + first = 0; + cnt = dev->mib_port_cnt; + } + for (index = first; index < cnt; index++) { + p = &dev->ports[index]; + if (!p->on) + continue; + ksz_pread8(dev, index, P_STP_CTRL, &learn[index]); + if (!(learn[index] & PORT_LEARN_DISABLE)) + ksz_pwrite8(dev, index, P_STP_CTRL, + learn[index] | PORT_LEARN_DISABLE); + } + ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true); + for (index = first; index < cnt; index++) { + p = &dev->ports[index]; + if (!p->on) + continue; + if (!(learn[index] & PORT_LEARN_DISABLE)) + ksz_pwrite8(dev, index, P_STP_CTRL, learn[index]); + } +} + +static int ksz8795_port_vlan_filtering(struct dsa_switch *ds, int port, + bool flag) +{ + struct ksz_device *dev = ds->priv; + + ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag); + + return 0; +} + +static void ksz8795_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + struct ksz_device *dev = ds->priv; + u16 data, vid, new_pvid = 0; + u8 fid, member, valid; + + ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + ksz8795_r_vlan_table(dev, vid, &data); + ksz8795_from_vlan(data, &fid, &member, &valid); + + /* First time to setup the VLAN entry. */ + if (!valid) { + /* Need to find a way to map VID to FID. 
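/* Round-trip of the packed-entry helpers defined earlier in this file;
 * the field values are invented purely for illustration.
 */
static void ex_vlan_pack_demo(void)
{
	u16 packed;
	u8 fid, member, valid;

	/* fid 1, ports 0 and 4 as members, entry valid */
	ksz8795_to_vlan(1, BIT(4) | BIT(0), 1, &packed);
	ksz8795_from_vlan(packed, &fid, &member, &valid);
	/* recovers fid == 1, member == 0x11, valid == 1 */
}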
*/ + fid = 1; + valid = 1; + } + member |= BIT(port); + + ksz8795_to_vlan(fid, member, valid, &data); + ksz8795_w_vlan_table(dev, vid, data); + + /* change PVID */ + if (vlan->flags & BRIDGE_VLAN_INFO_PVID) + new_pvid = vid; + } + + if (new_pvid) { + ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid); + vid &= 0xfff; + vid |= new_pvid; + ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid); + } +} + +static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + struct ksz_device *dev = ds->priv; + u16 data, vid, pvid, new_pvid = 0; + u8 fid, member, valid; + + ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid); + pvid = pvid & 0xFFF; + + ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + ksz8795_r_vlan_table(dev, vid, &data); + ksz8795_from_vlan(data, &fid, &member, &valid); + + member &= ~BIT(port); + + /* Invalidate the entry if no more member. */ + if (!member) { + fid = 0; + valid = 0; + } + + if (pvid == vid) + new_pvid = 1; + + ksz8795_to_vlan(fid, member, valid, &data); + ksz8795_w_vlan_table(dev, vid, data); + } + + if (new_pvid != pvid) + ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid); + + return 0; +} + +static int ksz8795_port_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress) +{ + struct ksz_device *dev = ds->priv; + + if (ingress) { + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true); + dev->mirror_rx |= BIT(port); + } else { + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true); + dev->mirror_tx |= BIT(port); + } + + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false); + + /* configure mirror port */ + if (dev->mirror_rx || dev->mirror_tx) + ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, + PORT_MIRROR_SNIFFER, true); + + return 0; +} + +static void ksz8795_port_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + struct ksz_device *dev = ds->priv; + u8 data; + + if (mirror->ingress) { + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false); + dev->mirror_rx &= ~BIT(port); + } else { + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false); + dev->mirror_tx &= ~BIT(port); + } + + ksz_pread8(dev, port, P_MIRROR_CTRL, &data); + + if (!dev->mirror_rx && !dev->mirror_tx) + ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, + PORT_MIRROR_SNIFFER, false); +} + +static void ksz8795_port_setup(struct ksz_device *dev, int port, bool cpu_port) +{ + struct ksz_port *p = &dev->ports[port]; + u8 data8, member; + + /* enable broadcast storm limit */ + ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true); + + ksz8795_set_prio_queue(dev, port, 4); + + /* disable DiffServ priority */ + ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_ENABLE, false); + + /* replace priority */ + ksz_port_cfg(dev, port, P_802_1P_CTRL, PORT_802_1P_REMAPPING, false); + + /* enable 802.1p priority */ + ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true); + + if (cpu_port) { + /* Configure MII interface for proper network communication. 
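/* The delay-bit handling in the default (RGMII) branch below follows
 * the usual phy-mode convention:
 *
 *   rgmii        no internal delay in either direction
 *   rgmii-id     RX and TX delays enabled (both ID bits set)
 *   rgmii-rxid   RX delay only (PORT_RGMII_ID_IN_ENABLE)
 *   rgmii-txid   TX delay only (PORT_RGMII_ID_OUT_ENABLE)
 */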
*/ + ksz_read8(dev, REG_PORT_5_CTRL_6, &data8); + data8 &= ~PORT_INTERFACE_TYPE; + data8 &= ~PORT_GMII_1GPS_MODE; + switch (dev->interface) { + case PHY_INTERFACE_MODE_MII: + p->phydev.speed = SPEED_100; + break; + case PHY_INTERFACE_MODE_RMII: + data8 |= PORT_INTERFACE_RMII; + p->phydev.speed = SPEED_100; + break; + case PHY_INTERFACE_MODE_GMII: + data8 |= PORT_GMII_1GPS_MODE; + data8 |= PORT_INTERFACE_GMII; + p->phydev.speed = SPEED_1000; + break; + default: + data8 &= ~PORT_RGMII_ID_IN_ENABLE; + data8 &= ~PORT_RGMII_ID_OUT_ENABLE; + if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID || + dev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + data8 |= PORT_RGMII_ID_IN_ENABLE; + if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID || + dev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + data8 |= PORT_RGMII_ID_OUT_ENABLE; + data8 |= PORT_GMII_1GPS_MODE; + data8 |= PORT_INTERFACE_RGMII; + p->phydev.speed = SPEED_1000; + break; + } + ksz_write8(dev, REG_PORT_5_CTRL_6, data8); + p->phydev.duplex = 1; + + member = dev->port_mask; + dev->on_ports = dev->host_mask; + dev->live_ports = dev->host_mask; + } else { + member = dev->host_mask | p->vid_member; + dev->on_ports |= BIT(port); + + /* Link was detected before port is enabled. */ + if (p->phydev.link) + dev->live_ports |= BIT(port); + } + ksz8795_cfg_port_member(dev, port, member); +} + +static void ksz8795_config_cpu_port(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + struct ksz_port *p; + u8 remote; + int i; + + ds->num_ports = dev->port_cnt + 1; + + /* Switch marks the maximum frame with extra byte as oversize. */ + ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true); + ksz_cfg(dev, S_TAIL_TAG_CTRL, SW_TAIL_TAG_ENABLE, true); + + p = &dev->ports[dev->cpu_port]; + p->vid_member = dev->port_mask; + p->on = 1; + + ksz8795_port_setup(dev, dev->cpu_port, true); + dev->member = dev->host_mask; + + for (i = 0; i < SWITCH_PORT_NUM; i++) { + p = &dev->ports[i]; + + /* Initialize to non-zero so that ksz_cfg_port_member() will + * be called. + */ + p->vid_member = BIT(i); + p->member = dev->port_mask; + ksz8795_port_stp_state_set(ds, i, BR_STATE_DISABLED); + + /* Last port may be disabled. */ + if (i == dev->port_cnt) + break; + p->on = 1; + p->phy = 1; + } + for (i = 0; i < dev->phy_port_cnt; i++) { + p = &dev->ports[i]; + if (!p->on) + continue; + ksz_pread8(dev, i, P_REMOTE_STATUS, &remote); + if (remote & PORT_FIBER_MODE) + p->fiber = 1; + if (p->fiber) + ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL, + true); + else + ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL, + false); + } +} + +static int ksz8795_setup(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + struct alu_struct alu; + int i, ret = 0; + + dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table), + dev->num_vlans, GFP_KERNEL); + if (!dev->vlan_cache) + return -ENOMEM; + + ret = ksz8795_reset_switch(dev); + if (ret) { + dev_err(ds->dev, "failed to reset switch\n"); + return ret; + } + + ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_FLOW_CTRL, true); + + /* Enable automatic fast aging when link changed detected. */ + ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true); + + /* Enable aggressive back off algorithm in half duplex mode. */ + regmap_update_bits(dev->regmap[0], REG_SW_CTRL_1, + SW_AGGR_BACKOFF, SW_AGGR_BACKOFF); + + /* + * Make sure unicast VLAN boundary is set as default and + * enable no excessive collision drop. 
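/* regmap_update_bits(), used throughout this setup path, is a locked
 * read-modify-write: new = (old & ~mask) | (val & mask), with the write
 * skipped when nothing changes. Open-coded equivalent, minus the
 * locking and cache handling, shown only for illustration:
 */
static int ex_update_bits(struct regmap *map, unsigned int reg,
			  unsigned int mask, unsigned int val)
{
	unsigned int old;
	int ret;

	ret = regmap_read(map, reg, &old);
	if (ret)
		return ret;

	return regmap_write(map, reg, (old & ~mask) | (val & mask));
}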
+ */ + regmap_update_bits(dev->regmap[0], REG_SW_CTRL_2, + UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP, + UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP); + + ksz8795_config_cpu_port(ds); + + ksz_cfg(dev, REG_SW_CTRL_2, MULTICAST_STORM_DISABLE, true); + + ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false); + + ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false); + + /* set broadcast storm protection 10% rate */ + regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL, + BROADCAST_STORM_RATE, + (BROADCAST_STORM_VALUE * + BROADCAST_STORM_PROT_RATE) / 100); + + for (i = 0; i < VLAN_TABLE_ENTRIES; i++) + ksz8795_r_vlan_entries(dev, i); + + /* Setup STP address for STP operation. */ + memset(&alu, 0, sizeof(alu)); + ether_addr_copy(alu.mac, eth_stp_addr); + alu.is_static = true; + alu.is_override = true; + alu.port_forward = dev->host_mask; + + ksz8795_w_sta_mac_table(dev, 0, &alu); + + ksz_init_mib_timer(dev); + + return 0; +} + +static const struct dsa_switch_ops ksz8795_switch_ops = { + .get_tag_protocol = ksz8795_get_tag_protocol, + .setup = ksz8795_setup, + .phy_read = ksz_phy_read16, + .phy_write = ksz_phy_write16, + .adjust_link = ksz_adjust_link, + .port_enable = ksz_enable_port, + .port_disable = ksz_disable_port, + .get_strings = ksz8795_get_strings, + .get_ethtool_stats = ksz_get_ethtool_stats, + .get_sset_count = ksz_sset_count, + .port_bridge_join = ksz_port_bridge_join, + .port_bridge_leave = ksz_port_bridge_leave, + .port_stp_state_set = ksz8795_port_stp_state_set, + .port_fast_age = ksz_port_fast_age, + .port_vlan_filtering = ksz8795_port_vlan_filtering, + .port_vlan_prepare = ksz_port_vlan_prepare, + .port_vlan_add = ksz8795_port_vlan_add, + .port_vlan_del = ksz8795_port_vlan_del, + .port_fdb_dump = ksz_port_fdb_dump, + .port_mdb_prepare = ksz_port_mdb_prepare, + .port_mdb_add = ksz_port_mdb_add, + .port_mdb_del = ksz_port_mdb_del, + .port_mirror_add = ksz8795_port_mirror_add, + .port_mirror_del = ksz8795_port_mirror_del, +}; + +static u32 ksz8795_get_port_addr(int port, int offset) +{ + return PORT_CTRL_ADDR(port, offset); +} + +static int ksz8795_switch_detect(struct ksz_device *dev) +{ + u8 id1, id2; + u16 id16; + int ret; + + /* read chip id */ + ret = ksz_read16(dev, REG_CHIP_ID0, &id16); + if (ret) + return ret; + + id1 = id16 >> 8; + id2 = id16 & SW_CHIP_ID_M; + if (id1 != FAMILY_ID || + (id2 != CHIP_ID_94 && id2 != CHIP_ID_95)) + return -ENODEV; + + dev->mib_port_cnt = TOTAL_PORT_NUM; + dev->phy_port_cnt = SWITCH_PORT_NUM; + dev->port_cnt = SWITCH_PORT_NUM; + + if (id2 == CHIP_ID_95) { + u8 val; + + id2 = 0x95; + ksz_read8(dev, REG_PORT_1_STATUS_0, &val); + if (val & PORT_FIBER_MODE) + id2 = 0x65; + } else if (id2 == CHIP_ID_94) { + dev->port_cnt--; + dev->last_port = dev->port_cnt; + id2 = 0x94; + } + id16 &= ~0xff; + id16 |= id2; + dev->chip_id = id16; + + dev->cpu_port = dev->mib_port_cnt - 1; + dev->host_mask = BIT(dev->cpu_port); + + return 0; +} + +struct ksz_chip_data { + u16 chip_id; + const char *dev_name; + int num_vlans; + int num_alus; + int num_statics; + int cpu_ports; + int port_cnt; +}; + +static const struct ksz_chip_data ksz8795_switch_chips[] = { + { + .chip_id = 0x8795, + .dev_name = "KSZ8795", + .num_vlans = 4096, + .num_alus = 0, + .num_statics = 8, + .cpu_ports = 0x10, /* can be configured as cpu port */ + .port_cnt = 4, /* total physical port count */ + }, + { + .chip_id = 0x8794, + .dev_name = "KSZ8794", + .num_vlans = 4096, + .num_alus = 0, + .num_statics = 8, + .cpu_ports = 0x10, /* can be configured as cpu port */ + .port_cnt = 3, /* 
total physical port count */ + }, + { + .chip_id = 0x8765, + .dev_name = "KSZ8765", + .num_vlans = 4096, + .num_alus = 0, + .num_statics = 8, + .cpu_ports = 0x10, /* can be configured as cpu port */ + .port_cnt = 4, /* total physical port count */ + }, +}; + +static int ksz8795_switch_init(struct ksz_device *dev) +{ + int i; + + mutex_init(&dev->stats_mutex); + mutex_init(&dev->alu_mutex); + mutex_init(&dev->vlan_mutex); + + dev->ds->ops = &ksz8795_switch_ops; + + for (i = 0; i < ARRAY_SIZE(ksz8795_switch_chips); i++) { + const struct ksz_chip_data *chip = &ksz8795_switch_chips[i]; + + if (dev->chip_id == chip->chip_id) { + dev->name = chip->dev_name; + dev->num_vlans = chip->num_vlans; + dev->num_alus = chip->num_alus; + dev->num_statics = chip->num_statics; + dev->port_cnt = chip->port_cnt; + dev->cpu_ports = chip->cpu_ports; + + break; + } + } + + /* no switch found */ + if (!dev->cpu_ports) + return -ENODEV; + + dev->port_mask = BIT(dev->port_cnt) - 1; + dev->port_mask |= dev->host_mask; + + dev->reg_mib_cnt = SWITCH_COUNTER_NUM; + dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM; + + i = dev->mib_port_cnt; + dev->ports = devm_kzalloc(dev->dev, sizeof(struct ksz_port) * i, + GFP_KERNEL); + if (!dev->ports) + return -ENOMEM; + for (i = 0; i < dev->mib_port_cnt; i++) { + mutex_init(&dev->ports[i].mib.cnt_mutex); + dev->ports[i].mib.counters = + devm_kzalloc(dev->dev, + sizeof(u64) * + (TOTAL_SWITCH_COUNTER_NUM + 1), + GFP_KERNEL); + if (!dev->ports[i].mib.counters) + return -ENOMEM; + } + + return 0; +} + +static void ksz8795_switch_exit(struct ksz_device *dev) +{ + ksz8795_reset_switch(dev); +} + +static const struct ksz_dev_ops ksz8795_dev_ops = { + .get_port_addr = ksz8795_get_port_addr, + .cfg_port_member = ksz8795_cfg_port_member, + .flush_dyn_mac_table = ksz8795_flush_dyn_mac_table, + .port_setup = ksz8795_port_setup, + .r_phy = ksz8795_r_phy, + .w_phy = ksz8795_w_phy, + .r_dyn_mac_table = ksz8795_r_dyn_mac_table, + .r_sta_mac_table = ksz8795_r_sta_mac_table, + .w_sta_mac_table = ksz8795_w_sta_mac_table, + .r_mib_cnt = ksz8795_r_mib_cnt, + .r_mib_pkt = ksz8795_r_mib_pkt, + .freeze_mib = ksz8795_freeze_mib, + .port_init_cnt = ksz8795_port_init_cnt, + .shutdown = ksz8795_reset_switch, + .detect = ksz8795_switch_detect, + .init = ksz8795_switch_init, + .exit = ksz8795_switch_exit, +}; + +int ksz8795_switch_register(struct ksz_device *dev) +{ + return ksz_switch_register(dev, &ksz8795_dev_ops); +} +EXPORT_SYMBOL(ksz8795_switch_register); + +MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>"); +MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h new file mode 100644 index 000000000000..3a50462df8fa --- /dev/null +++ b/drivers/net/dsa/microchip/ksz8795_reg.h @@ -0,0 +1,1004 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Microchip KSZ8795 register definitions + * + * Copyright (c) 2017 Microchip Technology Inc. 
+ * Tristram Ha <Tristram.Ha@microchip.com> + */ + +#ifndef __KSZ8795_REG_H +#define __KSZ8795_REG_H + +#define KS_PORT_M 0x1F + +#define KS_PRIO_M 0x3 +#define KS_PRIO_S 2 + +#define REG_CHIP_ID0 0x00 + +#define FAMILY_ID 0x87 + +#define REG_CHIP_ID1 0x01 + +#define SW_CHIP_ID_M 0xF0 +#define SW_CHIP_ID_S 4 +#define SW_REVISION_M 0x0E +#define SW_REVISION_S 1 +#define SW_START 0x01 + +#define CHIP_ID_94 0x60 +#define CHIP_ID_95 0x90 + +#define REG_SW_CTRL_0 0x02 + +#define SW_NEW_BACKOFF BIT(7) +#define SW_GLOBAL_RESET BIT(6) +#define SW_FLUSH_DYN_MAC_TABLE BIT(5) +#define SW_FLUSH_STA_MAC_TABLE BIT(4) +#define SW_LINK_AUTO_AGING BIT(0) + +#define REG_SW_CTRL_1 0x03 + +#define SW_HUGE_PACKET BIT(6) +#define SW_TX_FLOW_CTRL_DISABLE BIT(5) +#define SW_RX_FLOW_CTRL_DISABLE BIT(4) +#define SW_CHECK_LENGTH BIT(3) +#define SW_AGING_ENABLE BIT(2) +#define SW_FAST_AGING BIT(1) +#define SW_AGGR_BACKOFF BIT(0) + +#define REG_SW_CTRL_2 0x04 + +#define UNICAST_VLAN_BOUNDARY BIT(7) +#define MULTICAST_STORM_DISABLE BIT(6) +#define SW_BACK_PRESSURE BIT(5) +#define FAIR_FLOW_CTRL BIT(4) +#define NO_EXC_COLLISION_DROP BIT(3) +#define SW_LEGAL_PACKET_DISABLE BIT(1) + +#define REG_SW_CTRL_3 0x05 + #define WEIGHTED_FAIR_QUEUE_ENABLE BIT(3) + +#define SW_VLAN_ENABLE BIT(7) +#define SW_IGMP_SNOOP BIT(6) +#define SW_MIRROR_RX_TX BIT(0) + +#define REG_SW_CTRL_4 0x06 + +#define SW_HALF_DUPLEX_FLOW_CTRL BIT(7) +#define SW_HALF_DUPLEX BIT(6) +#define SW_FLOW_CTRL BIT(5) +#define SW_10_MBIT BIT(4) +#define SW_REPLACE_VID BIT(3) +#define BROADCAST_STORM_RATE_HI 0x07 + +#define REG_SW_CTRL_5 0x07 + +#define BROADCAST_STORM_RATE_LO 0xFF +#define BROADCAST_STORM_RATE 0x07FF + +#define REG_SW_CTRL_6 0x08 + +#define SW_MIB_COUNTER_FLUSH BIT(7) +#define SW_MIB_COUNTER_FREEZE BIT(6) +#define SW_MIB_COUNTER_CTRL_ENABLE KS_PORT_M + +#define REG_SW_CTRL_9 0x0B + +#define SPI_CLK_125_MHZ 0x80 +#define SPI_CLK_62_5_MHZ 0x40 +#define SPI_CLK_31_25_MHZ 0x00 + +#define SW_LED_MODE_M 0x3 +#define SW_LED_MODE_S 4 +#define SW_LED_LINK_ACT_SPEED 0 +#define SW_LED_LINK_ACT 1 +#define SW_LED_LINK_ACT_DUPLEX 2 +#define SW_LED_LINK_DUPLEX 3 + +#define REG_SW_CTRL_10 0x0C + +#define SW_TAIL_TAG_ENABLE BIT(1) +#define SW_PASS_PAUSE BIT(0) + +#define REG_SW_CTRL_11 0x0D + +#define REG_POWER_MANAGEMENT_1 0x0E + +#define SW_PLL_POWER_DOWN BIT(5) +#define SW_POWER_MANAGEMENT_MODE_M 0x3 +#define SW_POWER_MANAGEMENT_MODE_S 3 +#define SW_POWER_NORMAL 0 +#define SW_ENERGY_DETECTION 1 +#define SW_SOFTWARE_POWER_DOWN 2 + +#define REG_POWER_MANAGEMENT_2 0x0F + +#define REG_PORT_1_CTRL_0 0x10 +#define REG_PORT_2_CTRL_0 0x20 +#define REG_PORT_3_CTRL_0 0x30 +#define REG_PORT_4_CTRL_0 0x40 +#define REG_PORT_5_CTRL_0 0x50 + +#define PORT_BROADCAST_STORM BIT(7) +#define PORT_DIFFSERV_ENABLE BIT(6) +#define PORT_802_1P_ENABLE BIT(5) +#define PORT_BASED_PRIO_S 3 +#define PORT_BASED_PRIO_M KS_PRIO_M +#define PORT_BASED_PRIO_0 0 +#define PORT_BASED_PRIO_1 1 +#define PORT_BASED_PRIO_2 2 +#define PORT_BASED_PRIO_3 3 +#define PORT_INSERT_TAG BIT(2) +#define PORT_REMOVE_TAG BIT(1) +#define PORT_QUEUE_SPLIT_L BIT(0) + +#define REG_PORT_1_CTRL_1 0x11 +#define REG_PORT_2_CTRL_1 0x21 +#define REG_PORT_3_CTRL_1 0x31 +#define REG_PORT_4_CTRL_1 0x41 +#define REG_PORT_5_CTRL_1 0x51 + +#define PORT_MIRROR_SNIFFER BIT(7) +#define PORT_MIRROR_RX BIT(6) +#define PORT_MIRROR_TX BIT(5) +#define PORT_VLAN_MEMBERSHIP KS_PORT_M + +#define REG_PORT_1_CTRL_2 0x12 +#define REG_PORT_2_CTRL_2 0x22 +#define REG_PORT_3_CTRL_2 0x32 +#define REG_PORT_4_CTRL_2 0x42 +#define REG_PORT_5_CTRL_2 
0x52 + +#define PORT_802_1P_REMAPPING BIT(7) +#define PORT_INGRESS_FILTER BIT(6) +#define PORT_DISCARD_NON_VID BIT(5) +#define PORT_FORCE_FLOW_CTRL BIT(4) +#define PORT_BACK_PRESSURE BIT(3) +#define PORT_TX_ENABLE BIT(2) +#define PORT_RX_ENABLE BIT(1) +#define PORT_LEARN_DISABLE BIT(0) + +#define REG_PORT_1_CTRL_3 0x13 +#define REG_PORT_2_CTRL_3 0x23 +#define REG_PORT_3_CTRL_3 0x33 +#define REG_PORT_4_CTRL_3 0x43 +#define REG_PORT_5_CTRL_3 0x53 +#define REG_PORT_1_CTRL_4 0x14 +#define REG_PORT_2_CTRL_4 0x24 +#define REG_PORT_3_CTRL_4 0x34 +#define REG_PORT_4_CTRL_4 0x44 +#define REG_PORT_5_CTRL_4 0x54 + +#define PORT_DEFAULT_VID 0x0001 + +#define REG_PORT_1_CTRL_5 0x15 +#define REG_PORT_2_CTRL_5 0x25 +#define REG_PORT_3_CTRL_5 0x35 +#define REG_PORT_4_CTRL_5 0x45 +#define REG_PORT_5_CTRL_5 0x55 + +#define PORT_ACL_ENABLE BIT(2) +#define PORT_AUTHEN_MODE 0x3 +#define PORT_AUTHEN_PASS 0 +#define PORT_AUTHEN_BLOCK 1 +#define PORT_AUTHEN_TRAP 2 + +#define REG_PORT_5_CTRL_6 0x56 + +#define PORT_MII_INTERNAL_CLOCK BIT(7) +#define PORT_GMII_1GPS_MODE BIT(6) +#define PORT_RGMII_ID_IN_ENABLE BIT(4) +#define PORT_RGMII_ID_OUT_ENABLE BIT(3) +#define PORT_GMII_MAC_MODE BIT(2) +#define PORT_INTERFACE_TYPE 0x3 +#define PORT_INTERFACE_MII 0 +#define PORT_INTERFACE_RMII 1 +#define PORT_INTERFACE_GMII 2 +#define PORT_INTERFACE_RGMII 3 + +#define REG_PORT_1_CTRL_7 0x17 +#define REG_PORT_2_CTRL_7 0x27 +#define REG_PORT_3_CTRL_7 0x37 +#define REG_PORT_4_CTRL_7 0x47 + +#define PORT_AUTO_NEG_ASYM_PAUSE BIT(5) +#define PORT_AUTO_NEG_SYM_PAUSE BIT(4) +#define PORT_AUTO_NEG_100BTX_FD BIT(3) +#define PORT_AUTO_NEG_100BTX BIT(2) +#define PORT_AUTO_NEG_10BT_FD BIT(1) +#define PORT_AUTO_NEG_10BT BIT(0) + +#define REG_PORT_1_STATUS_0 0x18 +#define REG_PORT_2_STATUS_0 0x28 +#define REG_PORT_3_STATUS_0 0x38 +#define REG_PORT_4_STATUS_0 0x48 + +/* For KSZ8765. 
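+ * The fiber option is reported in port status register 0:
+ * ksz8795_switch_detect() reads it to tell a KSZ8765 apart from a
+ * KSZ8795, and ksz8795_config_cpu_port() reads it via P_REMOTE_STATUS
+ * to force flow control on fiber ports, presumably because pause
+ * cannot be auto-negotiated on the fiber interface.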
*/ +#define PORT_FIBER_MODE BIT(7) + +#define PORT_REMOTE_ASYM_PAUSE BIT(5) +#define PORT_REMOTE_SYM_PAUSE BIT(4) +#define PORT_REMOTE_100BTX_FD BIT(3) +#define PORT_REMOTE_100BTX BIT(2) +#define PORT_REMOTE_10BT_FD BIT(1) +#define PORT_REMOTE_10BT BIT(0) + +#define REG_PORT_1_STATUS_1 0x19 +#define REG_PORT_2_STATUS_1 0x29 +#define REG_PORT_3_STATUS_1 0x39 +#define REG_PORT_4_STATUS_1 0x49 + +#define PORT_HP_MDIX BIT(7) +#define PORT_REVERSED_POLARITY BIT(5) +#define PORT_TX_FLOW_CTRL BIT(4) +#define PORT_RX_FLOW_CTRL BIT(3) +#define PORT_STAT_SPEED_100MBIT BIT(2) +#define PORT_STAT_FULL_DUPLEX BIT(1) + +#define PORT_REMOTE_FAULT BIT(0) + +#define REG_PORT_1_LINK_MD_CTRL 0x1A +#define REG_PORT_2_LINK_MD_CTRL 0x2A +#define REG_PORT_3_LINK_MD_CTRL 0x3A +#define REG_PORT_4_LINK_MD_CTRL 0x4A + +#define PORT_CABLE_10M_SHORT BIT(7) +#define PORT_CABLE_DIAG_RESULT_M 0x3 +#define PORT_CABLE_DIAG_RESULT_S 5 +#define PORT_CABLE_STAT_NORMAL 0 +#define PORT_CABLE_STAT_OPEN 1 +#define PORT_CABLE_STAT_SHORT 2 +#define PORT_CABLE_STAT_FAILED 3 +#define PORT_START_CABLE_DIAG BIT(4) +#define PORT_FORCE_LINK BIT(3) +#define PORT_POWER_SAVING BIT(2) +#define PORT_PHY_REMOTE_LOOPBACK BIT(1) +#define PORT_CABLE_FAULT_COUNTER_H 0x01 + +#define REG_PORT_1_LINK_MD_RESULT 0x1B +#define REG_PORT_2_LINK_MD_RESULT 0x2B +#define REG_PORT_3_LINK_MD_RESULT 0x3B +#define REG_PORT_4_LINK_MD_RESULT 0x4B + +#define PORT_CABLE_FAULT_COUNTER_L 0xFF +#define PORT_CABLE_FAULT_COUNTER 0x1FF + +#define REG_PORT_1_CTRL_9 0x1C +#define REG_PORT_2_CTRL_9 0x2C +#define REG_PORT_3_CTRL_9 0x3C +#define REG_PORT_4_CTRL_9 0x4C + +#define PORT_AUTO_NEG_DISABLE BIT(7) +#define PORT_FORCE_100_MBIT BIT(6) +#define PORT_FORCE_FULL_DUPLEX BIT(5) + +#define REG_PORT_1_CTRL_10 0x1D +#define REG_PORT_2_CTRL_10 0x2D +#define REG_PORT_3_CTRL_10 0x3D +#define REG_PORT_4_CTRL_10 0x4D + +#define PORT_LED_OFF BIT(7) +#define PORT_TX_DISABLE BIT(6) +#define PORT_AUTO_NEG_RESTART BIT(5) +#define PORT_POWER_DOWN BIT(3) +#define PORT_AUTO_MDIX_DISABLE BIT(2) +#define PORT_FORCE_MDIX BIT(1) +#define PORT_MAC_LOOPBACK BIT(0) + +#define REG_PORT_1_STATUS_2 0x1E +#define REG_PORT_2_STATUS_2 0x2E +#define REG_PORT_3_STATUS_2 0x3E +#define REG_PORT_4_STATUS_2 0x4E + +#define PORT_MDIX_STATUS BIT(7) +#define PORT_AUTO_NEG_COMPLETE BIT(6) +#define PORT_STAT_LINK_GOOD BIT(5) + +#define REG_PORT_1_STATUS_3 0x1F +#define REG_PORT_2_STATUS_3 0x2F +#define REG_PORT_3_STATUS_3 0x3F +#define REG_PORT_4_STATUS_3 0x4F + +#define PORT_PHY_LOOPBACK BIT(7) +#define PORT_PHY_ISOLATE BIT(5) +#define PORT_PHY_SOFT_RESET BIT(4) +#define PORT_PHY_FORCE_LINK BIT(3) +#define PORT_PHY_MODE_M 0x7 +#define PHY_MODE_IN_AUTO_NEG 1 +#define PHY_MODE_10BT_HALF 2 +#define PHY_MODE_100BT_HALF 3 +#define PHY_MODE_10BT_FULL 5 +#define PHY_MODE_100BT_FULL 6 +#define PHY_MODE_ISOLDATE 7 + +#define REG_PORT_CTRL_0 0x00 +#define REG_PORT_CTRL_1 0x01 +#define REG_PORT_CTRL_2 0x02 +#define REG_PORT_CTRL_VID 0x03 + +#define REG_PORT_CTRL_5 0x05 + +#define REG_PORT_CTRL_7 0x07 +#define REG_PORT_STATUS_0 0x08 +#define REG_PORT_STATUS_1 0x09 +#define REG_PORT_LINK_MD_CTRL 0x0A +#define REG_PORT_LINK_MD_RESULT 0x0B +#define REG_PORT_CTRL_9 0x0C +#define REG_PORT_CTRL_10 0x0D +#define REG_PORT_STATUS_2 0x0E +#define REG_PORT_STATUS_3 0x0F + +#define REG_PORT_CTRL_12 0xA0 +#define REG_PORT_CTRL_13 0xA1 +#define REG_PORT_RATE_CTRL_3 0xA2 +#define REG_PORT_RATE_CTRL_2 0xA3 +#define REG_PORT_RATE_CTRL_1 0xA4 +#define REG_PORT_RATE_CTRL_0 0xA5 +#define REG_PORT_RATE_LIMIT 0xA6 +#define REG_PORT_IN_RATE_0 0xA7 
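+/*
+ * The generic REG_PORT_CTRL_* and rate-control offsets in this block
+ * name the same registers as the absolute REG_PORT_n_* addresses,
+ * expressed relative to a port's register window (ports sit 0x10
+ * apart) so that one code path can serve every port; the
+ * PORT_CTRL_ADDR() macro further below rebuilds the absolute address.
+ * Worked example (illustrative, not driver code):
+ *
+ *	PORT_CTRL_ADDR(1, REG_PORT_STATUS_0)
+ *		= 0x08 + 0x10 + 1 * 0x10 = 0x28 = REG_PORT_2_STATUS_0
+ */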
+#define REG_PORT_IN_RATE_1 0xA8
+#define REG_PORT_IN_RATE_2 0xA9
+#define REG_PORT_IN_RATE_3 0xAA
+#define REG_PORT_OUT_RATE_0 0xAB
+#define REG_PORT_OUT_RATE_1 0xAC
+#define REG_PORT_OUT_RATE_2 0xAD
+#define REG_PORT_OUT_RATE_3 0xAE
+
+#define PORT_CTRL_ADDR(port, addr) \
+ ((addr) + REG_PORT_1_CTRL_0 + (port) * \
+ (REG_PORT_2_CTRL_0 - REG_PORT_1_CTRL_0))
+
+#define REG_SW_MAC_ADDR_0 0x68
+#define REG_SW_MAC_ADDR_1 0x69
+#define REG_SW_MAC_ADDR_2 0x6A
+#define REG_SW_MAC_ADDR_3 0x6B
+#define REG_SW_MAC_ADDR_4 0x6C
+#define REG_SW_MAC_ADDR_5 0x6D
+
+#define REG_IND_CTRL_0 0x6E
+
+#define TABLE_EXT_SELECT_S 5
+#define TABLE_EEE_V 1
+#define TABLE_ACL_V 2
+#define TABLE_PME_V 4
+#define TABLE_LINK_MD_V 5
+#define TABLE_EEE (TABLE_EEE_V << TABLE_EXT_SELECT_S)
+#define TABLE_ACL (TABLE_ACL_V << TABLE_EXT_SELECT_S)
+#define TABLE_PME (TABLE_PME_V << TABLE_EXT_SELECT_S)
+#define TABLE_LINK_MD (TABLE_LINK_MD_V << TABLE_EXT_SELECT_S)
+#define TABLE_READ BIT(4)
+#define TABLE_SELECT_S 2
+#define TABLE_STATIC_MAC_V 0
+#define TABLE_VLAN_V 1
+#define TABLE_DYNAMIC_MAC_V 2
+#define TABLE_MIB_V 3
+#define TABLE_STATIC_MAC (TABLE_STATIC_MAC_V << TABLE_SELECT_S)
+#define TABLE_VLAN (TABLE_VLAN_V << TABLE_SELECT_S)
+#define TABLE_DYNAMIC_MAC (TABLE_DYNAMIC_MAC_V << TABLE_SELECT_S)
+#define TABLE_MIB (TABLE_MIB_V << TABLE_SELECT_S)
+
+#define REG_IND_CTRL_1 0x6F
+
+#define TABLE_ENTRY_MASK 0x03FF
+#define TABLE_EXT_ENTRY_MASK 0x0FFF
+
+#define REG_IND_DATA_8 0x70
+#define REG_IND_DATA_7 0x71
+#define REG_IND_DATA_6 0x72
+#define REG_IND_DATA_5 0x73
+#define REG_IND_DATA_4 0x74
+#define REG_IND_DATA_3 0x75
+#define REG_IND_DATA_2 0x76
+#define REG_IND_DATA_1 0x77
+#define REG_IND_DATA_0 0x78
+
+#define REG_IND_DATA_PME_EEE_ACL 0xA0
+
+#define REG_IND_DATA_CHECK REG_IND_DATA_6
+#define REG_IND_MIB_CHECK REG_IND_DATA_4
+#define REG_IND_DATA_HI REG_IND_DATA_7
+#define REG_IND_DATA_LO REG_IND_DATA_3
+
+#define REG_INT_STATUS 0x7C
+#define REG_INT_ENABLE 0x7D
+
+#define INT_PME BIT(4)
+
+#define REG_ACL_INT_STATUS 0x7E
+#define REG_ACL_INT_ENABLE 0x7F
+
+#define INT_PORT_5 BIT(4)
+#define INT_PORT_4 BIT(3)
+#define INT_PORT_3 BIT(2)
+#define INT_PORT_2 BIT(1)
+#define INT_PORT_1 BIT(0)
+
+#define INT_PORT_ALL \
+ (INT_PORT_5 | INT_PORT_4 | INT_PORT_3 | INT_PORT_2 | INT_PORT_1)
+
+#define REG_SW_CTRL_12 0x80
+#define REG_SW_CTRL_13 0x81
+
+#define SWITCH_802_1P_MASK 3
+#define SWITCH_802_1P_BASE 3
+#define SWITCH_802_1P_SHIFT 2
+
+#define SW_802_1P_MAP_M KS_PRIO_M
+#define SW_802_1P_MAP_S KS_PRIO_S
+
+#define REG_SWITCH_CTRL_14 0x82
+
+#define SW_PRIO_MAPPING_M KS_PRIO_M
+#define SW_PRIO_MAPPING_S 6
+#define SW_PRIO_MAP_3_HI 0
+#define SW_PRIO_MAP_2_HI 2
+#define SW_PRIO_MAP_0_LO 3
+
+#define REG_SW_CTRL_15 0x83
+#define REG_SW_CTRL_16 0x84
+#define REG_SW_CTRL_17 0x85
+#define REG_SW_CTRL_18 0x86
+
+#define SW_SELF_ADDR_FILTER_ENABLE BIT(6)
+
+#define REG_SW_UNK_UCAST_CTRL 0x83
+#define REG_SW_UNK_MCAST_CTRL 0x84
+#define REG_SW_UNK_VID_CTRL 0x85
+#define REG_SW_UNK_IP_MCAST_CTRL 0x86
+
+#define SW_UNK_FWD_ENABLE BIT(5)
+#define SW_UNK_FWD_MAP KS_PORT_M
+
+#define REG_SW_CTRL_19 0x87
+
+#define SW_IN_RATE_LIMIT_PERIOD_M 0x3
+#define SW_IN_RATE_LIMIT_PERIOD_S 4
+#define SW_IN_RATE_LIMIT_16_MS 0
+#define SW_IN_RATE_LIMIT_64_MS 1
+#define SW_IN_RATE_LIMIT_256_MS 2
+#define SW_OUT_RATE_LIMIT_QUEUE_BASED BIT(3)
+#define SW_INS_TAG_ENABLE BIT(2)
+
+#define REG_TOS_PRIO_CTRL_0 0x90
+#define REG_TOS_PRIO_CTRL_1 0x91
+#define REG_TOS_PRIO_CTRL_2 0x92
+#define REG_TOS_PRIO_CTRL_3 0x93
+#define REG_TOS_PRIO_CTRL_4 0x94
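+/*
+ * The static MAC, VLAN, dynamic MAC and MIB tables are not memory
+ * mapped: an access writes the table select, direction and entry
+ * index through REG_IND_CTRL_0/1 (one 16-bit write covers both
+ * bytes), then moves the entry through the REG_IND_DATA_* window.
+ * Sketch of the pattern ksz8795.c serializes under alu_mutex
+ * (illustrative, not a verbatim excerpt; IND_ACC_TABLE() is defined
+ * further down):
+ *
+ *	u16 ctrl_addr = IND_ACC_TABLE(TABLE_STATIC_MAC | TABLE_READ) | addr;
+ *
+ *	ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+ *	ksz_read32(dev, REG_IND_DATA_HI, &hi);
+ *	ksz_read32(dev, REG_IND_DATA_LO, &lo);
+ */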
+#define REG_TOS_PRIO_CTRL_5 0x95 +#define REG_TOS_PRIO_CTRL_6 0x96 +#define REG_TOS_PRIO_CTRL_7 0x97 +#define REG_TOS_PRIO_CTRL_8 0x98 +#define REG_TOS_PRIO_CTRL_9 0x99 +#define REG_TOS_PRIO_CTRL_10 0x9A +#define REG_TOS_PRIO_CTRL_11 0x9B +#define REG_TOS_PRIO_CTRL_12 0x9C +#define REG_TOS_PRIO_CTRL_13 0x9D +#define REG_TOS_PRIO_CTRL_14 0x9E +#define REG_TOS_PRIO_CTRL_15 0x9F + +#define TOS_PRIO_M KS_PRIO_M +#define TOS_PRIO_S KS_PRIO_S + +#define REG_SW_CTRL_20 0xA3 + +#define SW_GMII_DRIVE_STRENGTH_S 4 +#define SW_DRIVE_STRENGTH_M 0x7 +#define SW_DRIVE_STRENGTH_2MA 0 +#define SW_DRIVE_STRENGTH_4MA 1 +#define SW_DRIVE_STRENGTH_8MA 2 +#define SW_DRIVE_STRENGTH_12MA 3 +#define SW_DRIVE_STRENGTH_16MA 4 +#define SW_DRIVE_STRENGTH_20MA 5 +#define SW_DRIVE_STRENGTH_24MA 6 +#define SW_DRIVE_STRENGTH_28MA 7 +#define SW_MII_DRIVE_STRENGTH_S 0 + +#define REG_SW_CTRL_21 0xA4 + +#define SW_IPV6_MLD_OPTION BIT(3) +#define SW_IPV6_MLD_SNOOP BIT(2) + +#define REG_PORT_1_CTRL_12 0xB0 +#define REG_PORT_2_CTRL_12 0xC0 +#define REG_PORT_3_CTRL_12 0xD0 +#define REG_PORT_4_CTRL_12 0xE0 +#define REG_PORT_5_CTRL_12 0xF0 + +#define PORT_PASS_ALL BIT(6) +#define PORT_INS_TAG_FOR_PORT_5_S 3 +#define PORT_INS_TAG_FOR_PORT_5 BIT(3) +#define PORT_INS_TAG_FOR_PORT_4 BIT(2) +#define PORT_INS_TAG_FOR_PORT_3 BIT(1) +#define PORT_INS_TAG_FOR_PORT_2 BIT(0) + +#define REG_PORT_1_CTRL_13 0xB1 +#define REG_PORT_2_CTRL_13 0xC1 +#define REG_PORT_3_CTRL_13 0xD1 +#define REG_PORT_4_CTRL_13 0xE1 +#define REG_PORT_5_CTRL_13 0xF1 + +#define PORT_QUEUE_SPLIT_H BIT(1) +#define PORT_QUEUE_SPLIT_1 0 +#define PORT_QUEUE_SPLIT_2 1 +#define PORT_QUEUE_SPLIT_4 2 +#define PORT_DROP_TAG BIT(0) + +#define REG_PORT_1_CTRL_14 0xB2 +#define REG_PORT_2_CTRL_14 0xC2 +#define REG_PORT_3_CTRL_14 0xD2 +#define REG_PORT_4_CTRL_14 0xE2 +#define REG_PORT_5_CTRL_14 0xF2 +#define REG_PORT_1_CTRL_15 0xB3 +#define REG_PORT_2_CTRL_15 0xC3 +#define REG_PORT_3_CTRL_15 0xD3 +#define REG_PORT_4_CTRL_15 0xE3 +#define REG_PORT_5_CTRL_15 0xF3 +#define REG_PORT_1_CTRL_16 0xB4 +#define REG_PORT_2_CTRL_16 0xC4 +#define REG_PORT_3_CTRL_16 0xD4 +#define REG_PORT_4_CTRL_16 0xE4 +#define REG_PORT_5_CTRL_16 0xF4 +#define REG_PORT_1_CTRL_17 0xB5 +#define REG_PORT_2_CTRL_17 0xC5 +#define REG_PORT_3_CTRL_17 0xD5 +#define REG_PORT_4_CTRL_17 0xE5 +#define REG_PORT_5_CTRL_17 0xF5 + +#define REG_PORT_1_RATE_CTRL_3 0xB2 +#define REG_PORT_1_RATE_CTRL_2 0xB3 +#define REG_PORT_1_RATE_CTRL_1 0xB4 +#define REG_PORT_1_RATE_CTRL_0 0xB5 +#define REG_PORT_2_RATE_CTRL_3 0xC2 +#define REG_PORT_2_RATE_CTRL_2 0xC3 +#define REG_PORT_2_RATE_CTRL_1 0xC4 +#define REG_PORT_2_RATE_CTRL_0 0xC5 +#define REG_PORT_3_RATE_CTRL_3 0xD2 +#define REG_PORT_3_RATE_CTRL_2 0xD3 +#define REG_PORT_3_RATE_CTRL_1 0xD4 +#define REG_PORT_3_RATE_CTRL_0 0xD5 +#define REG_PORT_4_RATE_CTRL_3 0xE2 +#define REG_PORT_4_RATE_CTRL_2 0xE3 +#define REG_PORT_4_RATE_CTRL_1 0xE4 +#define REG_PORT_4_RATE_CTRL_0 0xE5 +#define REG_PORT_5_RATE_CTRL_3 0xF2 +#define REG_PORT_5_RATE_CTRL_2 0xF3 +#define REG_PORT_5_RATE_CTRL_1 0xF4 +#define REG_PORT_5_RATE_CTRL_0 0xF5 + +#define RATE_CTRL_ENABLE BIT(7) +#define RATE_RATIO_M (BIT(7) - 1) + +#define PORT_OUT_RATE_ENABLE BIT(7) + +#define REG_PORT_1_RATE_LIMIT 0xB6 +#define REG_PORT_2_RATE_LIMIT 0xC6 +#define REG_PORT_3_RATE_LIMIT 0xD6 +#define REG_PORT_4_RATE_LIMIT 0xE6 +#define REG_PORT_5_RATE_LIMIT 0xF6 + +#define PORT_IN_PORT_BASED_S 6 +#define PORT_RATE_PACKET_BASED_S 5 +#define PORT_IN_FLOW_CTRL_S 4 +#define PORT_IN_LIMIT_MODE_M 0x3 +#define PORT_IN_LIMIT_MODE_S 2 +#define 
PORT_COUNT_IFG_S 1 +#define PORT_COUNT_PREAMBLE_S 0 +#define PORT_IN_PORT_BASED BIT(PORT_IN_PORT_BASED_S) +#define PORT_RATE_PACKET_BASED BIT(PORT_RATE_PACKET_BASED_S) +#define PORT_IN_FLOW_CTRL BIT(PORT_IN_FLOW_CTRL_S) +#define PORT_IN_ALL 0 +#define PORT_IN_UNICAST 1 +#define PORT_IN_MULTICAST 2 +#define PORT_IN_BROADCAST 3 +#define PORT_COUNT_IFG BIT(PORT_COUNT_IFG_S) +#define PORT_COUNT_PREAMBLE BIT(PORT_COUNT_PREAMBLE_S) + +#define REG_PORT_1_IN_RATE_0 0xB7 +#define REG_PORT_2_IN_RATE_0 0xC7 +#define REG_PORT_3_IN_RATE_0 0xD7 +#define REG_PORT_4_IN_RATE_0 0xE7 +#define REG_PORT_5_IN_RATE_0 0xF7 +#define REG_PORT_1_IN_RATE_1 0xB8 +#define REG_PORT_2_IN_RATE_1 0xC8 +#define REG_PORT_3_IN_RATE_1 0xD8 +#define REG_PORT_4_IN_RATE_1 0xE8 +#define REG_PORT_5_IN_RATE_1 0xF8 +#define REG_PORT_1_IN_RATE_2 0xB9 +#define REG_PORT_2_IN_RATE_2 0xC9 +#define REG_PORT_3_IN_RATE_2 0xD9 +#define REG_PORT_4_IN_RATE_2 0xE9 +#define REG_PORT_5_IN_RATE_2 0xF9 +#define REG_PORT_1_IN_RATE_3 0xBA +#define REG_PORT_2_IN_RATE_3 0xCA +#define REG_PORT_3_IN_RATE_3 0xDA +#define REG_PORT_4_IN_RATE_3 0xEA +#define REG_PORT_5_IN_RATE_3 0xFA + +#define PORT_IN_RATE_ENABLE BIT(7) +#define PORT_RATE_LIMIT_M (BIT(7) - 1) + +#define REG_PORT_1_OUT_RATE_0 0xBB +#define REG_PORT_2_OUT_RATE_0 0xCB +#define REG_PORT_3_OUT_RATE_0 0xDB +#define REG_PORT_4_OUT_RATE_0 0xEB +#define REG_PORT_5_OUT_RATE_0 0xFB +#define REG_PORT_1_OUT_RATE_1 0xBC +#define REG_PORT_2_OUT_RATE_1 0xCC +#define REG_PORT_3_OUT_RATE_1 0xDC +#define REG_PORT_4_OUT_RATE_1 0xEC +#define REG_PORT_5_OUT_RATE_1 0xFC +#define REG_PORT_1_OUT_RATE_2 0xBD +#define REG_PORT_2_OUT_RATE_2 0xCD +#define REG_PORT_3_OUT_RATE_2 0xDD +#define REG_PORT_4_OUT_RATE_2 0xED +#define REG_PORT_5_OUT_RATE_2 0xFD +#define REG_PORT_1_OUT_RATE_3 0xBE +#define REG_PORT_2_OUT_RATE_3 0xCE +#define REG_PORT_3_OUT_RATE_3 0xDE +#define REG_PORT_4_OUT_RATE_3 0xEE +#define REG_PORT_5_OUT_RATE_3 0xFE + +/* PME */ + +#define SW_PME_OUTPUT_ENABLE BIT(1) +#define SW_PME_ACTIVE_HIGH BIT(0) + +#define PORT_MAGIC_PACKET_DETECT BIT(2) +#define PORT_LINK_UP_DETECT BIT(1) +#define PORT_ENERGY_DETECT BIT(0) + +/* ACL */ + +#define ACL_FIRST_RULE_M 0xF + +#define ACL_MODE_M 0x3 +#define ACL_MODE_S 4 +#define ACL_MODE_DISABLE 0 +#define ACL_MODE_LAYER_2 1 +#define ACL_MODE_LAYER_3 2 +#define ACL_MODE_LAYER_4 3 +#define ACL_ENABLE_M 0x3 +#define ACL_ENABLE_S 2 +#define ACL_ENABLE_2_COUNT 0 +#define ACL_ENABLE_2_TYPE 1 +#define ACL_ENABLE_2_MAC 2 +#define ACL_ENABLE_2_BOTH 3 +#define ACL_ENABLE_3_IP 1 +#define ACL_ENABLE_3_SRC_DST_COMP 2 +#define ACL_ENABLE_4_PROTOCOL 0 +#define ACL_ENABLE_4_TCP_PORT_COMP 1 +#define ACL_ENABLE_4_UDP_PORT_COMP 2 +#define ACL_ENABLE_4_TCP_SEQN_COMP 3 +#define ACL_SRC BIT(1) +#define ACL_EQUAL BIT(0) + +#define ACL_MAX_PORT 0xFFFF + +#define ACL_MIN_PORT 0xFFFF +#define ACL_IP_ADDR 0xFFFFFFFF +#define ACL_TCP_SEQNUM 0xFFFFFFFF + +#define ACL_RESERVED 0xF8 +#define ACL_PORT_MODE_M 0x3 +#define ACL_PORT_MODE_S 1 +#define ACL_PORT_MODE_DISABLE 0 +#define ACL_PORT_MODE_EITHER 1 +#define ACL_PORT_MODE_IN_RANGE 2 +#define ACL_PORT_MODE_OUT_OF_RANGE 3 + +#define ACL_TCP_FLAG_ENABLE BIT(0) + +#define ACL_TCP_FLAG_M 0xFF + +#define ACL_TCP_FLAG 0xFF +#define ACL_ETH_TYPE 0xFFFF +#define ACL_IP_M 0xFFFFFFFF + +#define ACL_PRIO_MODE_M 0x3 +#define ACL_PRIO_MODE_S 6 +#define ACL_PRIO_MODE_DISABLE 0 +#define ACL_PRIO_MODE_HIGHER 1 +#define ACL_PRIO_MODE_LOWER 2 +#define ACL_PRIO_MODE_REPLACE 3 +#define ACL_PRIO_M 0x7 +#define ACL_PRIO_S 3 +#define ACL_VLAN_PRIO_REPLACE BIT(2) +#define 
ACL_VLAN_PRIO_M 0x7 +#define ACL_VLAN_PRIO_HI_M 0x3 + +#define ACL_VLAN_PRIO_LO_M 0x8 +#define ACL_VLAN_PRIO_S 7 +#define ACL_MAP_MODE_M 0x3 +#define ACL_MAP_MODE_S 5 +#define ACL_MAP_MODE_DISABLE 0 +#define ACL_MAP_MODE_OR 1 +#define ACL_MAP_MODE_AND 2 +#define ACL_MAP_MODE_REPLACE 3 +#define ACL_MAP_PORT_M 0x1F + +#define ACL_CNT_M (BIT(11) - 1) +#define ACL_CNT_S 5 +#define ACL_MSEC_UNIT BIT(4) +#define ACL_INTR_MODE BIT(3) + +#define REG_PORT_ACL_BYTE_EN_MSB 0x10 + +#define ACL_BYTE_EN_MSB_M 0x3F + +#define REG_PORT_ACL_BYTE_EN_LSB 0x11 + +#define ACL_ACTION_START 0xA +#define ACL_ACTION_LEN 2 +#define ACL_INTR_CNT_START 0xB +#define ACL_RULESET_START 0xC +#define ACL_RULESET_LEN 2 +#define ACL_TABLE_LEN 14 + +#define ACL_ACTION_ENABLE 0x000C +#define ACL_MATCH_ENABLE 0x1FF0 +#define ACL_RULESET_ENABLE 0x2003 +#define ACL_BYTE_ENABLE ((ACL_BYTE_EN_MSB_M << 8) | 0xFF) +#define ACL_MODE_ENABLE (0x10 << 8) + +#define REG_PORT_ACL_CTRL_0 0x12 + +#define PORT_ACL_WRITE_DONE BIT(6) +#define PORT_ACL_READ_DONE BIT(5) +#define PORT_ACL_WRITE BIT(4) +#define PORT_ACL_INDEX_M 0xF + +#define REG_PORT_ACL_CTRL_1 0x13 + +#define PORT_ACL_FORCE_DLR_MISS BIT(0) + +#ifndef PHY_REG_CTRL +#define PHY_REG_CTRL 0 + +#define PHY_RESET BIT(15) +#define PHY_LOOPBACK BIT(14) +#define PHY_SPEED_100MBIT BIT(13) +#define PHY_AUTO_NEG_ENABLE BIT(12) +#define PHY_POWER_DOWN BIT(11) +#define PHY_MII_DISABLE BIT(10) +#define PHY_AUTO_NEG_RESTART BIT(9) +#define PHY_FULL_DUPLEX BIT(8) +#define PHY_COLLISION_TEST_NOT BIT(7) +#define PHY_HP_MDIX BIT(5) +#define PHY_FORCE_MDIX BIT(4) +#define PHY_AUTO_MDIX_DISABLE BIT(3) +#define PHY_REMOTE_FAULT_DISABLE BIT(2) +#define PHY_TRANSMIT_DISABLE BIT(1) +#define PHY_LED_DISABLE BIT(0) + +#define PHY_REG_STATUS 1 + +#define PHY_100BT4_CAPABLE BIT(15) +#define PHY_100BTX_FD_CAPABLE BIT(14) +#define PHY_100BTX_CAPABLE BIT(13) +#define PHY_10BT_FD_CAPABLE BIT(12) +#define PHY_10BT_CAPABLE BIT(11) +#define PHY_MII_SUPPRESS_CAPABLE_NOT BIT(6) +#define PHY_AUTO_NEG_ACKNOWLEDGE BIT(5) +#define PHY_REMOTE_FAULT BIT(4) +#define PHY_AUTO_NEG_CAPABLE BIT(3) +#define PHY_LINK_STATUS BIT(2) +#define PHY_JABBER_DETECT_NOT BIT(1) +#define PHY_EXTENDED_CAPABILITY BIT(0) + +#define PHY_REG_ID_1 2 +#define PHY_REG_ID_2 3 + +#define PHY_REG_AUTO_NEGOTIATION 4 + +#define PHY_AUTO_NEG_NEXT_PAGE_NOT BIT(15) +#define PHY_AUTO_NEG_REMOTE_FAULT_NOT BIT(13) +#define PHY_AUTO_NEG_SYM_PAUSE BIT(10) +#define PHY_AUTO_NEG_100BT4 BIT(9) +#define PHY_AUTO_NEG_100BTX_FD BIT(8) +#define PHY_AUTO_NEG_100BTX BIT(7) +#define PHY_AUTO_NEG_10BT_FD BIT(6) +#define PHY_AUTO_NEG_10BT BIT(5) +#define PHY_AUTO_NEG_SELECTOR 0x001F +#define PHY_AUTO_NEG_802_3 0x0001 + +#define PHY_REG_REMOTE_CAPABILITY 5 + +#define PHY_REMOTE_NEXT_PAGE_NOT BIT(15) +#define PHY_REMOTE_ACKNOWLEDGE_NOT BIT(14) +#define PHY_REMOTE_REMOTE_FAULT_NOT BIT(13) +#define PHY_REMOTE_SYM_PAUSE BIT(10) +#define PHY_REMOTE_100BTX_FD BIT(8) +#define PHY_REMOTE_100BTX BIT(7) +#define PHY_REMOTE_10BT_FD BIT(6) +#define PHY_REMOTE_10BT BIT(5) +#endif + +#define KSZ8795_ID_HI 0x0022 +#define KSZ8795_ID_LO 0x1550 + +#define KSZ8795_SW_ID 0x8795 + +#define PHY_REG_LINK_MD 0x1D + +#define PHY_START_CABLE_DIAG BIT(15) +#define PHY_CABLE_DIAG_RESULT 0x6000 +#define PHY_CABLE_STAT_NORMAL 0x0000 +#define PHY_CABLE_STAT_OPEN 0x2000 +#define PHY_CABLE_STAT_SHORT 0x4000 +#define PHY_CABLE_STAT_FAILED 0x6000 +#define PHY_CABLE_10M_SHORT BIT(12) +#define PHY_CABLE_FAULT_COUNTER 0x01FF + +#define PHY_REG_PHY_CTRL 0x1F + +#define PHY_MODE_M 0x7 +#define PHY_MODE_S 8 
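+/*
+ * PHY_REG_LINK_MD and PHY_REG_PHY_CTRL are vendor registers in an
+ * emulated clause-22 space: the switch is managed over SPI, so the
+ * driver's r_phy/w_phy ops synthesize these (and the standard
+ * registers 0-5 above) from the per-port switch registers - e.g. the
+ * LinkMD bits map onto REG_PORT_n_LINK_MD_CTRL/RESULT - and DSA then
+ * presents each port as an ordinary PHY through .phy_read/.phy_write.
+ */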
+#define PHY_STAT_REVERSED_POLARITY BIT(5) +#define PHY_STAT_MDIX BIT(4) +#define PHY_FORCE_LINK BIT(3) +#define PHY_POWER_SAVING_ENABLE BIT(2) +#define PHY_REMOTE_LOOPBACK BIT(1) + +/* Chip resource */ + +#define PRIO_QUEUES 4 + +#define KS_PRIO_IN_REG 4 + +#define TOTAL_PORT_NUM 5 + +/* Host port can only be last of them. */ +#define SWITCH_PORT_NUM (TOTAL_PORT_NUM - 1) + +#define KSZ8795_COUNTER_NUM 0x20 +#define TOTAL_KSZ8795_COUNTER_NUM (KSZ8795_COUNTER_NUM + 4) + +#define SWITCH_COUNTER_NUM KSZ8795_COUNTER_NUM +#define TOTAL_SWITCH_COUNTER_NUM TOTAL_KSZ8795_COUNTER_NUM + +/* Common names used by other drivers */ + +#define P_BCAST_STORM_CTRL REG_PORT_CTRL_0 +#define P_PRIO_CTRL REG_PORT_CTRL_0 +#define P_TAG_CTRL REG_PORT_CTRL_0 +#define P_MIRROR_CTRL REG_PORT_CTRL_1 +#define P_802_1P_CTRL REG_PORT_CTRL_2 +#define P_STP_CTRL REG_PORT_CTRL_2 +#define P_LOCAL_CTRL REG_PORT_CTRL_7 +#define P_REMOTE_STATUS REG_PORT_STATUS_0 +#define P_FORCE_CTRL REG_PORT_CTRL_9 +#define P_NEG_RESTART_CTRL REG_PORT_CTRL_10 +#define P_SPEED_STATUS REG_PORT_STATUS_1 +#define P_LINK_STATUS REG_PORT_STATUS_2 +#define P_PASS_ALL_CTRL REG_PORT_CTRL_12 +#define P_INS_SRC_PVID_CTRL REG_PORT_CTRL_12 +#define P_DROP_TAG_CTRL REG_PORT_CTRL_13 +#define P_RATE_LIMIT_CTRL REG_PORT_RATE_LIMIT + +#define S_UNKNOWN_DA_CTRL REG_SWITCH_CTRL_12 +#define S_FORWARD_INVALID_VID_CTRL REG_FORWARD_INVALID_VID + +#define S_FLUSH_TABLE_CTRL REG_SW_CTRL_0 +#define S_LINK_AGING_CTRL REG_SW_CTRL_0 +#define S_HUGE_PACKET_CTRL REG_SW_CTRL_1 +#define S_MIRROR_CTRL REG_SW_CTRL_3 +#define S_REPLACE_VID_CTRL REG_SW_CTRL_4 +#define S_PASS_PAUSE_CTRL REG_SW_CTRL_10 +#define S_TAIL_TAG_CTRL REG_SW_CTRL_10 +#define S_802_1P_PRIO_CTRL REG_SW_CTRL_12 +#define S_TOS_PRIO_CTRL REG_TOS_PRIO_CTRL_0 +#define S_IPV6_MLD_CTRL REG_SW_CTRL_21 + +#define IND_ACC_TABLE(table) ((table) << 8) + +/* Driver set switch broadcast storm protection at 10% rate. 
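+ * 148,800 frames/s is the 100BASE-TX line rate for minimum-size
+ * frames (64 bytes plus preamble and IFG is 672 bit times), and the
+ * rate counter runs over a 67 ms interval, so 100% of line rate is
+ * 148,800 * 67 / 1000 = 9969 frames per interval; the 10% default
+ * programs 9969 * 10 / 100 = 996 frames.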
*/ +#define BROADCAST_STORM_PROT_RATE 10 + +/* 148,800 frames * 67 ms / 100 */ +#define BROADCAST_STORM_VALUE 9969 + +/** + * STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF + * STATIC_MAC_TABLE_FWD_PORTS 00-001F0000-00000000 + * STATIC_MAC_TABLE_VALID 00-00200000-00000000 + * STATIC_MAC_TABLE_OVERRIDE 00-00400000-00000000 + * STATIC_MAC_TABLE_USE_FID 00-00800000-00000000 + * STATIC_MAC_TABLE_FID 00-7F000000-00000000 + */ + +#define STATIC_MAC_TABLE_ADDR 0x0000FFFF +#define STATIC_MAC_TABLE_FWD_PORTS 0x001F0000 +#define STATIC_MAC_TABLE_VALID 0x00200000 +#define STATIC_MAC_TABLE_OVERRIDE 0x00400000 +#define STATIC_MAC_TABLE_USE_FID 0x00800000 +#define STATIC_MAC_TABLE_FID 0x7F000000 + +#define STATIC_MAC_FWD_PORTS_S 16 +#define STATIC_MAC_FID_S 24 + +/** + * VLAN_TABLE_FID 00-007F007F-007F007F + * VLAN_TABLE_MEMBERSHIP 00-0F800F80-0F800F80 + * VLAN_TABLE_VALID 00-10001000-10001000 + */ + +#define VLAN_TABLE_FID 0x007F +#define VLAN_TABLE_MEMBERSHIP 0x0F80 +#define VLAN_TABLE_VALID 0x1000 + +#define VLAN_TABLE_MEMBERSHIP_S 7 +#define VLAN_TABLE_S 16 + +/** + * DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF + * DYNAMIC_MAC_TABLE_FID 00-007F0000-00000000 + * DYNAMIC_MAC_TABLE_NOT_READY 00-00800000-00000000 + * DYNAMIC_MAC_TABLE_SRC_PORT 00-07000000-00000000 + * DYNAMIC_MAC_TABLE_TIMESTAMP 00-18000000-00000000 + * DYNAMIC_MAC_TABLE_ENTRIES 7F-E0000000-00000000 + * DYNAMIC_MAC_TABLE_MAC_EMPTY 80-00000000-00000000 + */ + +#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF +#define DYNAMIC_MAC_TABLE_FID 0x007F0000 +#define DYNAMIC_MAC_TABLE_SRC_PORT 0x07000000 +#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x18000000 +#define DYNAMIC_MAC_TABLE_ENTRIES 0xE0000000 + +#define DYNAMIC_MAC_TABLE_NOT_READY 0x80 + +#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x7F +#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x80 + +#define DYNAMIC_MAC_FID_S 16 +#define DYNAMIC_MAC_SRC_PORT_S 24 +#define DYNAMIC_MAC_TIMESTAMP_S 27 +#define DYNAMIC_MAC_ENTRIES_S 29 +#define DYNAMIC_MAC_ENTRIES_H_S 3 + +/** + * MIB_COUNTER_VALUE 00-00000000-3FFFFFFF + * MIB_TOTAL_BYTES 00-0000000F-FFFFFFFF + * MIB_PACKET_DROPPED 00-00000000-0000FFFF + * MIB_COUNTER_VALID 00-00000020-00000000 + * MIB_COUNTER_OVERFLOW 00-00000040-00000000 + */ + +#define MIB_COUNTER_OVERFLOW BIT(6) +#define MIB_COUNTER_VALID BIT(5) + +#define MIB_COUNTER_VALUE 0x3FFFFFFF + +#define KS_MIB_TOTAL_RX_0 0x100 +#define KS_MIB_TOTAL_TX_0 0x101 +#define KS_MIB_PACKET_DROPPED_RX_0 0x102 +#define KS_MIB_PACKET_DROPPED_TX_0 0x103 +#define KS_MIB_TOTAL_RX_1 0x104 +#define KS_MIB_TOTAL_TX_1 0x105 +#define KS_MIB_PACKET_DROPPED_TX_1 0x106 +#define KS_MIB_PACKET_DROPPED_RX_1 0x107 +#define KS_MIB_TOTAL_RX_2 0x108 +#define KS_MIB_TOTAL_TX_2 0x109 +#define KS_MIB_PACKET_DROPPED_TX_2 0x10A +#define KS_MIB_PACKET_DROPPED_RX_2 0x10B +#define KS_MIB_TOTAL_RX_3 0x10C +#define KS_MIB_TOTAL_TX_3 0x10D +#define KS_MIB_PACKET_DROPPED_TX_3 0x10E +#define KS_MIB_PACKET_DROPPED_RX_3 0x10F +#define KS_MIB_TOTAL_RX_4 0x110 +#define KS_MIB_TOTAL_TX_4 0x111 +#define KS_MIB_PACKET_DROPPED_TX_4 0x112 +#define KS_MIB_PACKET_DROPPED_RX_4 0x113 + +#define MIB_PACKET_DROPPED 0x0000FFFF + +#define MIB_TOTAL_BYTES_H 0x0000000F + +#define TAIL_TAG_OVERRIDE BIT(6) +#define TAIL_TAG_LOOKUP BIT(7) + +#define VLAN_TABLE_ENTRIES (4096 / 4) +#define FID_ENTRIES 128 + +#endif diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c new file mode 100644 index 000000000000..d0f8153e86b7 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz8795_spi.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* 
+ * Microchip KSZ8795 series register access through SPI + * + * Copyright (C) 2017 Microchip Technology Inc. + * Tristram Ha <Tristram.Ha@microchip.com> + */ + +#include <asm/unaligned.h> + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/spi/spi.h> + +#include "ksz_common.h" + +#define SPI_ADDR_SHIFT 12 +#define SPI_ADDR_ALIGN 3 +#define SPI_TURNAROUND_SHIFT 1 + +KSZ_REGMAP_TABLE(ksz8795, 16, SPI_ADDR_SHIFT, + SPI_TURNAROUND_SHIFT, SPI_ADDR_ALIGN); + +static int ksz8795_spi_probe(struct spi_device *spi) +{ + struct ksz_device *dev; + int i, ret; + + dev = ksz_switch_alloc(&spi->dev, spi); + if (!dev) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) { + dev->regmap[i] = devm_regmap_init_spi(spi, + &ksz8795_regmap_config + [i]); + if (IS_ERR(dev->regmap[i])) { + ret = PTR_ERR(dev->regmap[i]); + dev_err(&spi->dev, + "Failed to initialize regmap%i: %d\n", + ksz8795_regmap_config[i].val_bits, ret); + return ret; + } + } + + if (spi->dev.platform_data) + dev->pdata = spi->dev.platform_data; + + ret = ksz8795_switch_register(dev); + + /* Main DSA driver may not be started yet. */ + if (ret) + return ret; + + spi_set_drvdata(spi, dev); + + return 0; +} + +static int ksz8795_spi_remove(struct spi_device *spi) +{ + struct ksz_device *dev = spi_get_drvdata(spi); + + if (dev) + ksz_switch_remove(dev); + + return 0; +} + +static void ksz8795_spi_shutdown(struct spi_device *spi) +{ + struct ksz_device *dev = spi_get_drvdata(spi); + + if (dev && dev->dev_ops->shutdown) + dev->dev_ops->shutdown(dev); +} + +static const struct of_device_id ksz8795_dt_ids[] = { + { .compatible = "microchip,ksz8765" }, + { .compatible = "microchip,ksz8794" }, + { .compatible = "microchip,ksz8795" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ksz8795_dt_ids); + +static struct spi_driver ksz8795_spi_driver = { + .driver = { + .name = "ksz8795-switch", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(ksz8795_dt_ids), + }, + .probe = ksz8795_spi_probe, + .remove = ksz8795_spi_remove, + .shutdown = ksz8795_spi_shutdown, +}; + +module_spi_driver(ksz8795_spi_driver); + +MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>"); +MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch SPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index a8c97f7a79b7..187be42de5f1 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -14,7 +14,6 @@ #include <net/dsa.h> #include <net/switchdev.h> -#include "ksz_priv.h" #include "ksz9477_reg.h" #include "ksz_common.h" diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index 5a9e27b337a8..a226b389e12d 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -13,7 +13,6 @@ #include <linux/regmap.h> #include <linux/spi/spi.h> -#include "ksz_priv.h" #include "ksz_common.h" #define SPI_ADDR_SHIFT 24 diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index a3d2d67894bd..b45c7b972cec 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -18,17 +18,7 @@ #include <net/dsa.h> #include <net/switchdev.h> -#include "ksz_priv.h" - -void ksz_port_cleanup(struct ksz_device *dev, int port) -{ - /* Common code for port cleanup. 
*/ - mutex_lock(&dev->dev_mutex); - dev->on_ports &= ~(1 << port); - dev->live_ports &= ~(1 << port); - mutex_unlock(&dev->dev_mutex); -} -EXPORT_SYMBOL_GPL(ksz_port_cleanup); +#include "ksz_common.h" void ksz_update_port_member(struct ksz_device *dev, int port) { @@ -373,7 +363,8 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) /* setup slave port */ dev->dev_ops->port_setup(dev, port, false); - dev->dev_ops->phy_setup(dev, port, phy); + if (dev->dev_ops->phy_setup) + dev->dev_ops->phy_setup(dev, port, phy); /* port_stp_state_set() will be called after to enable the port so * there is no need to do anything. diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index ee7096d8af07..c44a8d23d973 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -7,9 +7,152 @@ #ifndef __KSZ_COMMON_H #define __KSZ_COMMON_H +#include <linux/etherdevice.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/phy.h> #include <linux/regmap.h> +#include <net/dsa.h> + +struct vlan_table { + u32 table[3]; +}; + +struct ksz_port_mib { + struct mutex cnt_mutex; /* structure access */ + u8 cnt_ptr; + u64 *counters; +}; + +struct ksz_port { + u16 member; + u16 vid_member; + int stp_state; + struct phy_device phydev; + + u32 on:1; /* port is not disabled by hardware */ + u32 phy:1; /* port has a PHY */ + u32 fiber:1; /* port is fiber */ + u32 sgmii:1; /* port is SGMII */ + u32 force:1; + u32 read:1; /* read MIB counters in background */ + u32 freeze:1; /* MIB counter freeze is enabled */ + + struct ksz_port_mib mib; +}; + +struct ksz_device { + struct dsa_switch *ds; + struct ksz_platform_data *pdata; + const char *name; + + struct mutex dev_mutex; /* device access */ + struct mutex stats_mutex; /* status access */ + struct mutex alu_mutex; /* ALU access */ + struct mutex vlan_mutex; /* vlan access */ + const struct ksz_dev_ops *dev_ops; + + struct device *dev; + struct regmap *regmap[3]; + + void *priv; + + struct gpio_desc *reset_gpio; /* Optional reset GPIO */ + + /* chip specific data */ + u32 chip_id; + int num_vlans; + int num_alus; + int num_statics; + int cpu_port; /* port connected to CPU */ + int cpu_ports; /* port bitmap can be cpu port */ + int phy_port_cnt; + int port_cnt; + int reg_mib_cnt; + int mib_cnt; + int mib_port_cnt; + int last_port; /* ports after that not used */ + phy_interface_t interface; + u32 regs_size; + bool phy_errata_9477; + bool synclko_125; + + struct vlan_table *vlan_cache; + + struct ksz_port *ports; + struct timer_list mib_read_timer; + struct work_struct mib_read; + unsigned long mib_read_interval; + u16 br_member; + u16 member; + u16 live_ports; + u16 on_ports; /* ports enabled by DSA */ + u16 rx_ports; + u16 tx_ports; + u16 mirror_rx; + u16 mirror_tx; + u32 features; /* chip specific features */ + u32 overrides; /* chip functions set by user */ + u16 host_mask; + u16 port_mask; +}; + +struct alu_struct { + /* entry 1 */ + u8 is_static:1; + u8 is_src_filter:1; + u8 is_dst_filter:1; + u8 prio_age:3; + u32 _reserv_0_1:23; + u8 mstp:3; + /* entry 2 */ + u8 is_override:1; + u8 is_use_fid:1; + u32 _reserv_1_1:23; + u8 port_forward:7; + /* entry 3 & 4*/ + u32 _reserv_2_1:9; + u8 fid:7; + u8 mac[ETH_ALEN]; +}; + +struct ksz_dev_ops { + u32 (*get_port_addr)(int port, int offset); + void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member); + void (*flush_dyn_mac_table)(struct ksz_device *dev, int port); + void (*phy_setup)(struct ksz_device 
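+			  /* optional: ksz_enable_port() now checks for
+			   * this hook before calling it, and the KSZ8795
+			   * variant of ksz_dev_ops leaves it unset
+			   */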
*dev, int port,
+			  struct phy_device *phy);
+	void (*port_cleanup)(struct ksz_device *dev, int port);
+	void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
+	void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+	void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+	int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+			       u8 *fid, u8 *src_port, u8 *timestamp,
+			       u16 *entries);
+	int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr,
+			       struct alu_struct *alu);
+	void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr,
+				struct alu_struct *alu);
+	void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
+			  u64 *cnt);
+	void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
+			  u64 *dropped, u64 *cnt);
+	void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
+	void (*port_init_cnt)(struct ksz_device *dev, int port);
+	int (*shutdown)(struct ksz_device *dev);
+	int (*detect)(struct ksz_device *dev);
+	int (*init)(struct ksz_device *dev);
+	void (*exit)(struct ksz_device *dev);
+};
+
+struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
+int ksz_switch_register(struct ksz_device *dev,
+			const struct ksz_dev_ops *ops);
+void ksz_switch_remove(struct ksz_device *dev);
+
+int ksz8795_switch_register(struct ksz_device *dev);
+int ksz9477_switch_register(struct ksz_device *dev);

-void ksz_port_cleanup(struct ksz_device *dev, int port);
void ksz_update_port_member(struct ksz_device *dev, int port);
void ksz_init_mib_timer(struct ksz_device *dev);
@@ -68,6 +211,20 @@ static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
	return ret;
}

+static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
+{
+	u32 value[2];
+	int ret;
+
+	ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
+	if (!ret) {
+		/* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */
+		*val = (u64)value[0] << 32 | value[1];
+	}
+
+	return ret;
+}
+
static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
{
	return regmap_write(dev->regmap[0], reg, value);
@@ -83,6 +240,17 @@ static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
	return regmap_write(dev->regmap[2], reg, value);
}

+static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
+{
+	u32 val[2];
+
+	/* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */
+	val[0] = value >> 32;
+	val[1] = value & 0xffffffff;
+
+	return regmap_bulk_write(dev->regmap[2], reg, val, 2);
+}
+
static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
			      u8 *data)
{
diff --git a/drivers/net/dsa/microchip/ksz_priv.h b/drivers/net/dsa/microchip/ksz_priv.h
deleted file mode 100644
index beacf0e40f42..000000000000
--- a/drivers/net/dsa/microchip/ksz_priv.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Microchip KSZ series switch common definitions
- *
- * Copyright (C) 2017-2019 Microchip Technology Inc.
- */ - -#ifndef __KSZ_PRIV_H -#define __KSZ_PRIV_H - -#include <linux/kernel.h> -#include <linux/mutex.h> -#include <linux/phy.h> -#include <linux/etherdevice.h> -#include <net/dsa.h> - -struct vlan_table { - u32 table[3]; -}; - -struct ksz_port_mib { - struct mutex cnt_mutex; /* structure access */ - u8 cnt_ptr; - u64 *counters; -}; - -struct ksz_port { - u16 member; - u16 vid_member; - int stp_state; - struct phy_device phydev; - - u32 on:1; /* port is not disabled by hardware */ - u32 phy:1; /* port has a PHY */ - u32 fiber:1; /* port is fiber */ - u32 sgmii:1; /* port is SGMII */ - u32 force:1; - u32 read:1; /* read MIB counters in background */ - u32 freeze:1; /* MIB counter freeze is enabled */ - - struct ksz_port_mib mib; -}; - -struct ksz_device { - struct dsa_switch *ds; - struct ksz_platform_data *pdata; - const char *name; - - struct mutex dev_mutex; /* device access */ - struct mutex stats_mutex; /* status access */ - struct mutex alu_mutex; /* ALU access */ - struct mutex vlan_mutex; /* vlan access */ - const struct ksz_dev_ops *dev_ops; - - struct device *dev; - struct regmap *regmap[3]; - - void *priv; - - struct gpio_desc *reset_gpio; /* Optional reset GPIO */ - - /* chip specific data */ - u32 chip_id; - int num_vlans; - int num_alus; - int num_statics; - int cpu_port; /* port connected to CPU */ - int cpu_ports; /* port bitmap can be cpu port */ - int phy_port_cnt; - int port_cnt; - int reg_mib_cnt; - int mib_cnt; - int mib_port_cnt; - int last_port; /* ports after that not used */ - phy_interface_t interface; - u32 regs_size; - bool phy_errata_9477; - bool synclko_125; - - struct vlan_table *vlan_cache; - - struct ksz_port *ports; - struct timer_list mib_read_timer; - struct work_struct mib_read; - unsigned long mib_read_interval; - u16 br_member; - u16 member; - u16 live_ports; - u16 on_ports; /* ports enabled by DSA */ - u16 rx_ports; - u16 tx_ports; - u16 mirror_rx; - u16 mirror_tx; - u32 features; /* chip specific features */ - u32 overrides; /* chip functions set by user */ - u16 host_mask; - u16 port_mask; -}; - -struct alu_struct { - /* entry 1 */ - u8 is_static:1; - u8 is_src_filter:1; - u8 is_dst_filter:1; - u8 prio_age:3; - u32 _reserv_0_1:23; - u8 mstp:3; - /* entry 2 */ - u8 is_override:1; - u8 is_use_fid:1; - u32 _reserv_1_1:23; - u8 port_forward:7; - /* entry 3 & 4*/ - u32 _reserv_2_1:9; - u8 fid:7; - u8 mac[ETH_ALEN]; -}; - -struct ksz_dev_ops { - u32 (*get_port_addr)(int port, int offset); - void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member); - void (*flush_dyn_mac_table)(struct ksz_device *dev, int port); - void (*phy_setup)(struct ksz_device *dev, int port, - struct phy_device *phy); - void (*port_cleanup)(struct ksz_device *dev, int port); - void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port); - void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val); - void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val); - int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr, - u8 *fid, u8 *src_port, u8 *timestamp, - u16 *entries); - int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr, - struct alu_struct *alu); - void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr, - struct alu_struct *alu); - void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr, - u64 *cnt); - void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr, - u64 *dropped, u64 *cnt); - void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze); - void (*port_init_cnt)(struct ksz_device *dev, int port); - 
int (*shutdown)(struct ksz_device *dev); - int (*detect)(struct ksz_device *dev); - int (*init)(struct ksz_device *dev); - void (*exit)(struct ksz_device *dev); -}; - -struct ksz_device *ksz_switch_alloc(struct device *base, void *priv); -int ksz_switch_register(struct ksz_device *dev, - const struct ksz_dev_ops *ops); -void ksz_switch_remove(struct ksz_device *dev); - -int ksz9477_switch_register(struct ksz_device *dev); - -#endif diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d0a97eb73a37..d3804ffd3d2a 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1336,9 +1336,7 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) { DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); - struct mv88e6xxx_vtu_entry vlan = { - .vid = chip->info->max_vid, - }; + struct mv88e6xxx_vtu_entry vlan; int i, err; bitmap_zero(fid_bitmap, MV88E6XXX_N_FID); @@ -1353,6 +1351,9 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) } /* Set every FID bit used by the VLAN entries */ + vlan.vid = chip->info->max_vid; + vlan.valid = false; + do { err = mv88e6xxx_vtu_getnext(chip, &vlan); if (err) @@ -1375,51 +1376,11 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) return mv88e6xxx_g1_atu_flush(chip, *fid, true); } -static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, - struct mv88e6xxx_vtu_entry *entry, bool new) -{ - int err; - - if (!vid) - return -EOPNOTSUPP; - - entry->vid = vid - 1; - entry->valid = false; - - err = mv88e6xxx_vtu_getnext(chip, entry); - if (err) - return err; - - if (entry->vid == vid && entry->valid) - return 0; - - if (new) { - int i; - - /* Initialize a fresh VLAN entry */ - memset(entry, 0, sizeof(*entry)); - entry->valid = true; - entry->vid = vid; - - /* Exclude all ports */ - for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) - entry->member[i] = - MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER; - - return mv88e6xxx_atu_new(chip, &entry->fid); - } - - /* switchdev expects -EOPNOTSUPP to honor software VLANs */ - return -EOPNOTSUPP; -} - static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, u16 vid_begin, u16 vid_end) { struct mv88e6xxx_chip *chip = ds->priv; - struct mv88e6xxx_vtu_entry vlan = { - .vid = vid_begin - 1, - }; + struct mv88e6xxx_vtu_entry vlan; int i, err; /* DSA and CPU ports have to be members of multiple vlans */ @@ -1429,12 +1390,13 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, if (!vid_begin) return -EOPNOTSUPP; - mv88e6xxx_reg_lock(chip); + vlan.vid = vid_begin - 1; + vlan.valid = false; do { err = mv88e6xxx_vtu_getnext(chip, &vlan); if (err) - goto unlock; + return err; if (!vlan.valid) break; @@ -1463,15 +1425,11 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n", port, vlan.vid, i, netdev_name(dsa_to_port(ds, i)->bridge_dev)); - err = -EOPNOTSUPP; - goto unlock; + return -EOPNOTSUPP; } } while (vlan.vid < vid_end); -unlock: - mv88e6xxx_reg_unlock(chip); - - return err; + return 0; } static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, @@ -1505,38 +1463,51 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, /* If the requested port doesn't belong to the same bridge as the VLAN * members, do not support it (yet) and fallback to software VLAN. 
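	 * (Returning -EOPNOTSUPP from the prepare phase is the switchdev
	 * convention for "handle this VLAN in software"; it is also what
	 * the inlined VTU lookups below return when an entry is missing.)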
*/ + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin, vlan->vid_end); - if (err) - return err; + mv88e6xxx_reg_unlock(chip); /* We don't need any dynamic resource from the kernel (yet), * so skip the prepare phase. */ - return 0; + return err; } static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, const unsigned char *addr, u16 vid, u8 state) { - struct mv88e6xxx_vtu_entry vlan; struct mv88e6xxx_atu_entry entry; + struct mv88e6xxx_vtu_entry vlan; + u16 fid; int err; /* Null VLAN ID corresponds to the port private database */ - if (vid == 0) - err = mv88e6xxx_port_get_fid(chip, port, &vlan.fid); - else - err = mv88e6xxx_vtu_get(chip, vid, &vlan, false); - if (err) - return err; + if (vid == 0) { + err = mv88e6xxx_port_get_fid(chip, port, &fid); + if (err) + return err; + } else { + vlan.vid = vid - 1; + vlan.valid = false; + + err = mv88e6xxx_vtu_getnext(chip, &vlan); + if (err) + return err; + + /* switchdev expects -EOPNOTSUPP to honor software VLANs */ + if (vlan.vid != vid || !vlan.valid) + return -EOPNOTSUPP; + + fid = vlan.fid; + } entry.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED; ether_addr_copy(entry.mac, addr); eth_addr_dec(entry.mac); - err = mv88e6xxx_g1_atu_getnext(chip, vlan.fid, &entry); + err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry); if (err) return err; @@ -1557,7 +1528,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, entry.state = state; } - return mv88e6xxx_g1_atu_loadpurge(chip, vlan.fid, &entry); + return mv88e6xxx_g1_atu_loadpurge(chip, fid, &entry); } static int mv88e6xxx_port_add_broadcast(struct mv88e6xxx_chip *chip, int port, @@ -1583,23 +1554,58 @@ static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid) return 0; } -static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port, +static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port, u16 vid, u8 member) { + const u8 non_member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER; struct mv88e6xxx_vtu_entry vlan; - int err; + int i, err; - err = mv88e6xxx_vtu_get(chip, vid, &vlan, true); - if (err) - return err; + if (!vid) + return -EOPNOTSUPP; - vlan.member[port] = member; + vlan.vid = vid - 1; + vlan.valid = false; - err = mv88e6xxx_vtu_loadpurge(chip, &vlan); + err = mv88e6xxx_vtu_getnext(chip, &vlan); if (err) return err; - return mv88e6xxx_broadcast_setup(chip, vid); + if (vlan.vid != vid || !vlan.valid) { + memset(&vlan, 0, sizeof(vlan)); + + err = mv88e6xxx_atu_new(chip, &vlan.fid); + if (err) + return err; + + for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) + if (i == port) + vlan.member[i] = member; + else + vlan.member[i] = non_member; + + vlan.vid = vid; + vlan.valid = true; + + err = mv88e6xxx_vtu_loadpurge(chip, &vlan); + if (err) + return err; + + err = mv88e6xxx_broadcast_setup(chip, vlan.vid); + if (err) + return err; + } else if (vlan.member[port] != member) { + vlan.member[port] = member; + + err = mv88e6xxx_vtu_loadpurge(chip, &vlan); + if (err) + return err; + } else { + dev_info(chip->dev, "p%d: already a member of VLAN %d\n", + port, vid); + } + + return 0; } static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, @@ -1624,7 +1630,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, mv88e6xxx_reg_lock(chip); for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) - if (_mv88e6xxx_port_vlan_add(chip, port, vid, member)) + if (mv88e6xxx_port_vlan_join(chip, port, vid, member)) dev_err(ds->dev, "p%d: failed to add 
VLAN %d%c\n", port, vid, untagged ? 'u' : 't'); @@ -1635,18 +1641,27 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, mv88e6xxx_reg_unlock(chip); } -static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip, - int port, u16 vid) +static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip, + int port, u16 vid) { struct mv88e6xxx_vtu_entry vlan; int i, err; - err = mv88e6xxx_vtu_get(chip, vid, &vlan, false); + if (!vid) + return -EOPNOTSUPP; + + vlan.vid = vid - 1; + vlan.valid = false; + + err = mv88e6xxx_vtu_getnext(chip, &vlan); if (err) return err; - /* Tell switchdev if this VLAN is handled in software */ - if (vlan.member[port] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER) + /* If the VLAN doesn't exist in hardware or the port isn't a member, + * tell switchdev that this VLAN is likely handled in software. + */ + if (vlan.vid != vid || !vlan.valid || + vlan.member[port] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER) return -EOPNOTSUPP; vlan.member[port] = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER; @@ -1685,7 +1700,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, goto unlock; for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { - err = _mv88e6xxx_port_vlan_del(chip, port, vid); + err = mv88e6xxx_port_vlan_leave(chip, port, vid); if (err) goto unlock; @@ -1768,9 +1783,7 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, dsa_fdb_dump_cb_t *cb, void *data) { - struct mv88e6xxx_vtu_entry vlan = { - .vid = chip->info->max_vid, - }; + struct mv88e6xxx_vtu_entry vlan; u16 fid; int err; @@ -1784,6 +1797,9 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, return err; /* Dump VLANs' Filtering Information Databases */ + vlan.vid = chip->info->max_vid; + vlan.valid = false; + do { err = mv88e6xxx_vtu_getnext(chip, &vlan); if (err) @@ -2227,9 +2243,11 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) return err; } - err = mv88e6xxx_setup_message_port(chip, port); - if (err) - return err; + if (chip->info->ops->port_setup_message_port) { + err = chip->info->ops->port_setup_message_port(chip, port); + if (err) + return err; + } /* Port based VLAN map: give each port the same default address * database, and allow bidirectional communication between the @@ -2444,6 +2462,14 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) /* Setup Switch Port Registers */ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { + /* Prevent the use of an invalid port. 
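+		 * Some models have gaps in their port map: the port
+		 * number exists in the register space but the port is
+		 * not implemented on the device. Such ports must be
+		 * left unused in the devicetree, otherwise setup now
+		 * fails with -EINVAL instead of programming a
+		 * nonexistent port.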
*/ + if (mv88e6xxx_is_invalid_port(chip, i) && + !dsa_is_unused_port(ds, i)) { + dev_err(chip->dev, "port %d is invalid\n", i); + err = -EINVAL; + goto unlock; + } + if (dsa_is_unused_port(ds, i)) { err = mv88e6xxx_port_set_state(chip, i, BR_STATE_DISABLED); @@ -2773,6 +2799,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2807,6 +2834,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .port_set_upstream_port = mv88e6095_port_set_upstream_port, .port_link_state = mv88e6185_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2843,6 +2871,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2877,6 +2906,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2914,6 +2944,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { .port_set_pause = mv88e6185_port_set_pause, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2958,6 +2989,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -2998,6 +3030,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3031,6 +3064,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = 
mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3072,6 +3106,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3113,6 +3148,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3155,6 +3191,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3196,6 +3233,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3234,6 +3272,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .port_set_pause = mv88e6185_port_set_pause, .port_link_state = mv88e6185_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3276,6 +3315,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, .port_set_cmode = mv88e6390_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3321,6 +3361,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, .port_set_cmode = mv88e6390x_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3366,6 +3407,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, .port_set_cmode = 
mv88e6390_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3413,6 +3455,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3471,6 +3514,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = { .reset = mv88e6250_g1_reset, .vtu_getnext = mv88e6250_g1_vtu_getnext, .vtu_loadpurge = mv88e6250_g1_vtu_loadpurge, + .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6250_ptp_ops, .phylink_validate = mv88e6065_phylink_validate, }; @@ -3498,6 +3543,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, .port_set_cmode = mv88e6390_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3545,6 +3591,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3588,6 +3635,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3631,6 +3679,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3674,6 +3723,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3713,6 +3763,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = 
mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3756,6 +3807,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3808,6 +3860,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, .port_set_cmode = mv88e6390_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3857,6 +3910,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, .port_set_cmode = mv88e6390x_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -4235,6 +4289,33 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .ops = &mv88e6191_ops, }, + [MV88E6220] = { + .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6220, + .family = MV88E6XXX_FAMILY_6250, + .name = "Marvell 88E6220", + .num_databases = 64, + + /* Ports 2-4 are not routed to pins + * => usable ports 0, 1, 5, 6 + */ + .num_ports = 7, + .num_internal_phys = 2, + .invalid_port_mask = BIT(2) | BIT(3) | BIT(4), + .max_vid = 4095, + .port_base_addr = 0x08, + .phy_base_addr = 0x00, + .global1_addr = 0x0f, + .global2_addr = 0x07, + .age_time_coeff = 15000, + .g1_irqs = 9, + .g2_irqs = 10, + .atu_move_port_mask = 0xf, + .dual_chip = true, + .tag_protocol = DSA_TAG_PROTO_DSA, + .ptp_support = true, + .ops = &mv88e6250_ops, + }, + [MV88E6240] = { .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6240, .family = MV88E6XXX_FAMILY_6352, @@ -4277,6 +4358,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .dual_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, + .ptp_support = true, .ops = &mv88e6250_ops, }, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 4646e46d47f2..8c6d3c906197 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -57,6 +57,7 @@ enum mv88e6xxx_model { MV88E6190, MV88E6190X, MV88E6191, + MV88E6220, MV88E6240, MV88E6250, MV88E6290, @@ -77,7 +78,7 @@ enum mv88e6xxx_family { MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */ MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */ MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */ - MV88E6XXX_FAMILY_6250, /* 6250 */ + MV88E6XXX_FAMILY_6250, /* 6220 6250 */ MV88E6XXX_FAMILY_6320, /* 6320 6321 */ MV88E6XXX_FAMILY_6341, /* 6141 6341 */ MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */ @@ -105,6 +106,11 @@ struct mv88e6xxx_info { unsigned int g2_irqs; bool pvt; + /* Mark certain ports as invalid. This is required, for example, for the + * MV88E6220 (which is in general a MV88E6250 with 7 ports) where + * ports 2-4 are not routed to pins.
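+ * The MV88E6220 entry above sets invalid_port_mask = BIT(2) | BIT(3) | BIT(4) accordingly; mv88e6xxx_is_invalid_port() tests a port's bit in this mask.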
+ */ + unsigned int invalid_port_mask; /* Multi-chip Addressing Mode. * Some chips respond to only 2 registers of its own SMI device address * when it is non-zero, and use indirect access to internal registers. @@ -389,6 +395,7 @@ struct mv88e6xxx_ops { u8 out); int (*port_disable_learn_limit)(struct mv88e6xxx_chip *chip, int port); int (*port_disable_pri_override)(struct mv88e6xxx_chip *chip, int port); + int (*port_setup_message_port)(struct mv88e6xxx_chip *chip, int port); /* CMODE control what PHY mode the MAC will use, eg. SGMII, RGMII, etc. * Some chips allow this to be configured on specific ports. @@ -532,6 +539,10 @@ struct mv88e6xxx_ptp_ops { int arr1_sts_reg; int dep_sts_reg; u32 rx_filters; + u32 cc_shift; + u32 cc_mult; + u32 cc_mult_num; + u32 cc_mult_dem; }; #define STATS_TYPE_PORT BIT(0) @@ -570,6 +581,11 @@ static inline unsigned int mv88e6xxx_num_gpio(struct mv88e6xxx_chip *chip) return chip->info->num_gpio; } +static inline bool mv88e6xxx_is_invalid_port(struct mv88e6xxx_chip *chip, int port) +{ + return (chip->info->invalid_port_mask & BIT(port)) != 0; +} + int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val); int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val); int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 8d5a6cd6fb19..ceec771f8bfc 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -117,6 +117,7 @@ #define MV88E6XXX_PORT_SWITCH_ID_PROD_6190 0x1900 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6191 0x1910 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6185 0x1a70 +#define MV88E6XXX_PORT_SWITCH_ID_PROD_6220 0x2200 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6240 0x2400 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6250 0x2500 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6290 0x2900 diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c index 768d256f7c9f..073cbd0bb91b 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.c +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -15,11 +15,31 @@ #include "hwtstamp.h" #include "ptp.h" -/* Raw timestamps are in units of 8-ns clock periods. */ -#define CC_SHIFT 28 -#define CC_MULT (8 << CC_SHIFT) -#define CC_MULT_NUM (1 << 9) -#define CC_MULT_DEM 15625ULL +#define MV88E6XXX_MAX_ADJ_PPB 1000000 + +/* Family MV88E6250: + * Raw timestamps are in units of 10-ns clock periods. + * + * clkadj = scaled_ppm * 10*2^28 / (10^6 * 2^16) + * simplifies to + * clkadj = scaled_ppm * 2^7 / 5^5 + */ +#define MV88E6250_CC_SHIFT 28 +#define MV88E6250_CC_MULT (10 << MV88E6250_CC_SHIFT) +#define MV88E6250_CC_MULT_NUM (1 << 7) +#define MV88E6250_CC_MULT_DEM 3125ULL + +/* Other families: + * Raw timestamps are in units of 8-ns clock periods. 
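+ * (an 8-ns period corresponds to a 125 MHz timestamp clock, versus the 100 MHz / 10-ns clock of the MV88E6250 family above)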
+ * + * clkadj = scaled_ppm * 8*2^28 / (10^6 * 2^16) + * simplifies to + * clkadj = scaled_ppm * 2^9 / 5^6 + */ +#define MV88E6XXX_CC_SHIFT 28 +#define MV88E6XXX_CC_MULT (8 << MV88E6XXX_CC_SHIFT) +#define MV88E6XXX_CC_MULT_NUM (1 << 9) +#define MV88E6XXX_CC_MULT_DEM 15625ULL #define TAI_EVENT_WORK_INTERVAL msecs_to_jiffies(100) @@ -179,6 +199,7 @@ out: static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct mv88e6xxx_chip *chip = ptp_to_chip(ptp); + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; int neg_adj = 0; u32 diff, mult; u64 adj; @@ -187,10 +208,11 @@ static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) neg_adj = 1; scaled_ppm = -scaled_ppm; } - mult = CC_MULT; - adj = CC_MULT_NUM; + + mult = ptp_ops->cc_mult; + adj = ptp_ops->cc_mult_num; adj *= scaled_ppm; - diff = div_u64(adj, CC_MULT_DEM); + diff = div_u64(adj, ptp_ops->cc_mult_dem); mv88e6xxx_reg_lock(chip); @@ -310,7 +332,27 @@ static int mv88e6352_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, return 0; } -const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { +const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = { + .clock_read = mv88e6165_ptp_clock_read, + .global_enable = mv88e6165_global_enable, + .global_disable = mv88e6165_global_disable, + .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS, + .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS, + .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS, + .rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), + .cc_shift = MV88E6XXX_CC_SHIFT, + .cc_mult = MV88E6XXX_CC_MULT, + .cc_mult_num = MV88E6XXX_CC_MULT_NUM, + .cc_mult_dem = MV88E6XXX_CC_MULT_DEM, +}; + +const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = { .clock_read = mv88e6352_ptp_clock_read, .ptp_enable = mv88e6352_ptp_enable, .ptp_verify = mv88e6352_ptp_verify, @@ -331,22 +373,37 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), + .cc_shift = MV88E6250_CC_SHIFT, + .cc_mult = MV88E6250_CC_MULT, + .cc_mult_num = MV88E6250_CC_MULT_NUM, + .cc_mult_dem = MV88E6250_CC_MULT_DEM, }; -const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = { - .clock_read = mv88e6165_ptp_clock_read, - .global_enable = mv88e6165_global_enable, - .global_disable = mv88e6165_global_disable, - .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS, - .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS, - .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS, +const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { + .clock_read = mv88e6352_ptp_clock_read, + .ptp_enable = mv88e6352_ptp_enable, + .ptp_verify = mv88e6352_ptp_verify, + .event_work = mv88e6352_tai_event_work, + .port_enable = mv88e6352_hwtstamp_port_enable, + .port_disable = mv88e6352_hwtstamp_port_disable, + .n_ext_ts = 1, + .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS, + .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS, + .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS, .rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | (1 << 
HWTSTAMP_FILTER_PTP_V2_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), + .cc_shift = MV88E6XXX_CC_SHIFT, + .cc_mult = MV88E6XXX_CC_MULT, + .cc_mult_num = MV88E6XXX_CC_MULT_NUM, + .cc_mult_dem = MV88E6XXX_CC_MULT_DEM, }; static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) @@ -384,8 +441,8 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) memset(&chip->tstamp_cc, 0, sizeof(chip->tstamp_cc)); chip->tstamp_cc.read = mv88e6xxx_ptp_clock_read; chip->tstamp_cc.mask = CYCLECOUNTER_MASK(32); - chip->tstamp_cc.mult = CC_MULT; - chip->tstamp_cc.shift = CC_SHIFT; + chip->tstamp_cc.mult = ptp_ops->cc_mult; + chip->tstamp_cc.shift = ptp_ops->cc_shift; timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc, ktime_to_ns(ktime_get_real())); @@ -397,7 +454,6 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) chip->ptp_clock_info.owner = THIS_MODULE; snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name), "%s", dev_name(chip->dev)); - chip->ptp_clock_info.max_adj = 1000000; chip->ptp_clock_info.n_ext_ts = ptp_ops->n_ext_ts; chip->ptp_clock_info.n_per_out = 0; @@ -413,6 +469,7 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) } chip->ptp_clock_info.pin_config = chip->pin_config; + chip->ptp_clock_info.max_adj = MV88E6XXX_MAX_ADJ_PPB; chip->ptp_clock_info.adjfine = mv88e6xxx_ptp_adjfine; chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime; chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime; diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h index 0a1f8de8f062..269d5d16a466 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.h +++ b/drivers/net/dsa/mv88e6xxx/ptp.h @@ -148,8 +148,9 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip); #define ptp_to_chip(ptp) container_of(ptp, struct mv88e6xxx_chip, \ ptp_clock_info) -extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops; extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops; +extern const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops; +extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops; #else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ @@ -167,8 +168,9 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip) { } -static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {}; static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {}; +static const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {}; +static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {}; #endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */ diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 147051404194..8785c2ff3825 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -847,8 +847,7 @@ static void poll_vortex(struct net_device *dev) static int vortex_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *ndev = pci_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); if (!ndev || !netif_running(ndev)) return 0; @@ -861,8 +860,7 @@ static int vortex_suspend(struct device *dev) static int vortex_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *ndev = pci_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); int err; if (!ndev || !netif_running(ndev)) @@ -2175,7 +2173,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) dma_addr = skb_frag_dma_map(vp->gendev, frag, 0, - frag->size, + skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(vp->gendev, dma_addr)) { for(i = i-1; i >= 0; i--) diff --git 
a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 010a2f48aea5..2a9f8643629c 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -110,7 +110,7 @@ static void greth_print_tx_packet(struct sk_buff *skb) print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1, skb_frag_address(&skb_shinfo(skb)->frags[i]), - skb_shinfo(skb)->frags[i].size, true); + skb_frag_size(&skb_shinfo(skb)->frags[i]), true); } } diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index edbb4b3604c7..174344c450af 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -2426,7 +2426,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) u32 thiscopy, remainder; struct sk_buff *skb = tcb->skb; u32 nr_frags = skb_shinfo(skb)->nr_frags + 1; - struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0]; + skb_frag_t *frags = &skb_shinfo(skb)->frags[0]; struct phy_device *phydev = adapter->netdev->phydev; dma_addr_t dma_addr; struct tx_ring *tx_ring = &adapter->tx_ring; @@ -2488,11 +2488,11 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) frag++; } } else { - desc[frag].len_vlan = frags[i - 1].size; + desc[frag].len_vlan = skb_frag_size(&frags[i - 1]); dma_addr = skb_frag_dma_map(&adapter->pdev->dev, &frags[i - 1], 0, - frags[i - 1].size, + desc[frag].len_vlan, DMA_TO_DEVICE); desc[frag].addr_lo = lower_32_bits(dma_addr); desc[frag].addr_hi = upper_32_bits(dma_addr); diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 650d1bae5f56..1793950f0582 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1100,7 +1100,6 @@ static int au1000_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(&pdev->dev, "failed to retrieve IRQ\n"); err = -ENODEV; goto out; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 533094233659..230726d7b74f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -526,7 +526,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) struct xgbe_ring *ring = channel->tx_ring; struct xgbe_ring_data *rdata; struct xgbe_packet_data *packet; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t skb_dma; unsigned int start_index, cur_index; unsigned int offset, tso, vlan, datalen, len; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 3dd0cecddba8..98f8f2033154 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1833,7 +1833,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, struct xgbe_ring *ring, struct sk_buff *skb, struct xgbe_packet_data *packet) { - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned int context_desc; unsigned int len; unsigned int i; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c index d0f3dfb88202..dce9e59e8881 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c @@ -467,10 +467,8 @@ static int xgbe_platform_probe(struct platform_device *pdev) /* Get the device interrupt */ ret = platform_get_irq(pdev, 0); - if (ret < 0) { - dev_err(dev, "platform_get_irq 0 failed\n"); + if 
(ret < 0) goto err_io; - } pdata->dev_irq = ret; /* Get the per channel DMA interrupts */ @@ -479,12 +477,8 @@ static int xgbe_platform_probe(struct platform_device *pdev) for (i = 0; (i < max) && (dma_irqnum < dma_irqend); i++) { ret = platform_get_irq(pdata->platdev, dma_irqnum++); - if (ret < 0) { - netdev_err(pdata->netdev, - "platform_get_irq %u failed\n", - dma_irqnum - 1); + if (ret < 0) goto err_io; - } pdata->channel_irq[i] = ret; } @@ -496,10 +490,8 @@ static int xgbe_platform_probe(struct platform_device *pdev) /* Get the auto-negotiation interrupt */ ret = platform_get_irq(phy_pdev, phy_irqnum++); - if (ret < 0) { - dev_err(dev, "platform_get_irq phy 0 failed\n"); + if (ret < 0) goto err_io; - } pdata->an_irq = ret; /* Configure the netdev resource */ diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c index 79048cc46703..02b4f3af02b5 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -54,10 +54,8 @@ static int xge_get_resources(struct xge_pdata *pdata) } ret = platform_get_irq(pdev, 0); - if (ret < 0) { - dev_err(dev, "Unable to get irq\n"); + if (ret < 0) return ret; - } pdata->resources.irq = ret; return 0; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 61a465097cb8..5f657879134e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -712,11 +712,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata) udelay(5); } else { #ifdef CONFIG_ACPI - if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) { - acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), - "_RST", NULL, NULL); - } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), - "_INI")) { + acpi_status status; + + status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), + "_RST", NULL, NULL); + if (ACPI_FAILURE(status)) { acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), "_INI", NULL, NULL); } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 10b1c053e70a..d8612131c55e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -340,7 +340,8 @@ static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo) nr_frags = skb_shinfo(skb)->nr_frags; for (i = 0; i < 2 && i < nr_frags; i++) - len += skb_shinfo(skb)->frags[i].size; + len += skb_frag_size( + &skb_shinfo(skb)->frags[i]); /* HW requires header must reside in 3 buffer */ if (unlikely(hdr_len > len)) { @@ -1616,7 +1617,6 @@ static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata) static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) { struct platform_device *pdev = pdata->pdev; - struct device *dev = &pdev->dev; int i, ret, max_irqs; if (phy_interface_mode_is_rgmii(pdata->phy_mode)) @@ -1636,9 +1636,7 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) pdata->cq_cnt = max_irqs / 2; break; } - dev_err(dev, "Unable to get ENET IRQ\n"); - ret = ret ? : -ENXIO; - return ret; + return ret ? 
: -ENXIO; } pdata->irqs[i] = ret; } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c index 6453fc2ebb1f..f482ced2cadd 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -460,12 +460,14 @@ static int xgene_enet_reset(struct xgene_enet_pdata *p) } } else { #ifdef CONFIG_ACPI - if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST")) - acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev), - "_RST", NULL, NULL); - else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI")) + acpi_status status; + + status = acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev), + "_RST", NULL, NULL); + if (ACPI_FAILURE(status)) { acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev), "_INI", NULL, NULL); + } #endif } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index 133eb91c542e..304b5d43f236 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -393,11 +393,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata) udelay(5); } else { #ifdef CONFIG_ACPI - if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) { - acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), - "_RST", NULL, NULL); - } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), - "_INI")) { + acpi_status status; + + status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), + "_RST", NULL, NULL); + if (ACPI_FAILURE(status)) { acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), "_INI", NULL, NULL); } diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 6703960c7cf5..7548247455d7 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1148,7 +1148,7 @@ static int ag71xx_rings_init(struct ag71xx *ag) return -ENOMEM; } - rx->buf = &tx->buf[BIT(tx->order)]; + rx->buf = &tx->buf[tx_size]; rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE; rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE; @@ -1686,7 +1686,7 @@ static int ag71xx_probe(struct platform_device *pdev) } ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start, - res->end - res->start + 1); + resource_size(res)); if (!ag->mac_base) { err = -ENOMEM; goto err_free; diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index e3538ba7d0e7..d4bbcdfd691a 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1465,9 +1465,7 @@ static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb) tpd->len = cpu_to_le16(maplen); for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { - struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[f]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; if (++txq->write_idx == txq->count) txq->write_idx = 0; @@ -1879,8 +1877,7 @@ static void alx_remove(struct pci_dev *pdev) #ifdef CONFIG_PM_SLEEP static int alx_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_priv *alx = dev_get_drvdata(dev); if (!netif_running(alx->dev)) return 0; @@ -1891,8 +1888,7 @@ static int alx_suspend(struct device *dev) static int alx_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_priv *alx = dev_get_drvdata(dev); struct alx_hw *hw 
= &alx->hw; int err; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index be7f9cebb675..2b239ecea05f 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -2150,9 +2150,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter, } for (f = 0; f < nr_frags; f++) { - struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[f]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; use_tpd = atl1c_get_tpd(adapter, type); memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); @@ -2422,8 +2420,7 @@ static int atl1c_close(struct net_device *netdev) static int atl1c_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; u32 wufc = adapter->wol; @@ -2437,7 +2434,7 @@ static int atl1c_suspend(struct device *dev) if (wufc) if (atl1c_phy_to_ps_link(hw) != 0) - dev_dbg(&pdev->dev, "phy power saving failed"); + dev_dbg(dev, "phy power saving failed"); atl1c_power_saving(hw, wufc); @@ -2447,8 +2444,7 @@ static int atl1c_suspend(struct device *dev) #ifdef CONFIG_PM_SLEEP static int atl1c_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct atl1c_adapter *adapter = netdev_priv(netdev); AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 7f14e010bfeb..4f7b65825c15 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1770,11 +1770,10 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter, } for (f = 0; f < nr_frags; f++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; u16 i; u16 seg_num; - frag = &skb_shinfo(skb)->frags[f]; buf_len = skb_frag_size(frag); seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index b5c6dc914720..b498fd6a47d0 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2256,10 +2256,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, } for (f = 0; f < nr_frags; f++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; u16 i, nseg; - frag = &skb_shinfo(skb)->frags[f]; buf_len = skb_frag_size(frag); nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) / @@ -2754,8 +2753,7 @@ static int atl1_close(struct net_device *netdev) #ifdef CONFIG_PM_SLEEP static int atl1_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; u32 ctrl = 0; @@ -2780,7 +2778,7 @@ static int atl1_suspend(struct device *dev) val = atl1_get_speed_and_duplex(hw, &speed, &duplex); if (val) { if (netif_msg_ifdown(adapter)) - dev_printk(KERN_DEBUG, &pdev->dev, + dev_printk(KERN_DEBUG, dev, "error getting speed/duplex\n"); goto disable_wol; } @@ -2837,8 +2835,7 @@ static int atl1_suspend(struct device *dev) static int 
atl1_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct atl1_adapter *adapter = netdev_priv(netdev); iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index 3b3370a94a9c..37752d9514e7 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -1351,10 +1351,8 @@ static int nb8800_probe(struct platform_device *pdev) ops = match->data; irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - dev_err(&pdev->dev, "No IRQ\n"); + if (irq <= 0) return -EINVAL; - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 291e4afd4a1a..620cd3fc1fbc 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1693,7 +1693,7 @@ static int bcm_enet_probe(struct platform_device *pdev) struct bcm_enet_priv *priv; struct net_device *dev; struct bcm63xx_enet_platform_data *pd; - struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; + struct resource *res_irq, *res_irq_rx, *res_irq_tx; struct mii_bus *bus; int i, ret; @@ -1719,8 +1719,7 @@ static int bcm_enet_probe(struct platform_device *pdev) if (ret) goto out; - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, res_mem); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); goto out; @@ -2762,15 +2761,13 @@ struct platform_driver bcm63xx_enetsw_driver = { /* reserve & remap memory space shared between all macs */ static int bcm_enet_shared_probe(struct platform_device *pdev) { - struct resource *res; void __iomem *p[3]; unsigned int i; memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base)); for (i = 0; i < 3; i++) { - res = platform_get_resource(pdev, IORESOURCE_MEM, i); - p[i] = devm_ioremap_resource(&pdev->dev, res); + p[i] = devm_platform_ioremap_resource(pdev, i); if (IS_ERR(p[i])) return PTR_ERR(p[i]); } diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 6dc0dd91ad11..c46c1b1416f7 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -199,10 +199,8 @@ static int bgmac_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "MAC address not present in device tree\n"); bgmac->irq = platform_get_irq(pdev, 0); - if (bgmac->irq < 0) { - dev_err(&pdev->dev, "Unable to obtain IRQ\n"); + if (bgmac->irq < 0) return bgmac->irq; - } regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "amac_base"); if (!regs) { diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 4632dd5dbad1..148734b166f0 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -172,7 +172,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, flags = 0; for (i = 0; i < nr_frags; i++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; int len = skb_frag_size(frag); index = (index + 1) % BGMAC_TX_RING_SLOTS; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index dfdd14eadd57..fbc196b480b6 100644 
--- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8673,8 +8673,7 @@ bnx2_remove_one(struct pci_dev *pdev) static int bnx2_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct bnx2 *bp = netdev_priv(dev); if (netif_running(dev)) { @@ -8693,8 +8692,7 @@ bnx2_suspend(struct device *device) static int bnx2_resume(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct bnx2 *bp = netdev_priv(dev); if (!netif_running(dev)) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 7070349915bc..94be97b7952c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -116,6 +116,9 @@ enum board_idx { BCM57508, BCM57504, BCM57502, + BCM57508_NPAR, + BCM57504_NPAR, + BCM57502_NPAR, BCM58802, BCM58804, BCM58808, @@ -161,6 +164,9 @@ static const struct { [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, + [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, + [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, + [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, @@ -209,6 +215,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, + { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR }, { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, #ifdef CONFIG_BNXT_SRIOV @@ -828,16 +840,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, return 0; } -static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons, - u32 agg_bufs) +static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + u16 cp_cons, u16 curr) +{ + struct rx_agg_cmp *agg; + + cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); + agg = (struct rx_agg_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + return agg; +} + +static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 agg_id, u16 curr) +{ + struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; + + return &tpa_info->agg_arr[curr]; +} + +static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, + u16 start, u32 agg_bufs, bool tpa) { struct bnxt_napi *bnapi = cpr->bnapi; struct bnxt *bp = 
bnapi->bp; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; u16 sw_prod = rxr->rx_sw_agg_prod; + bool p5_tpa = false; u32 i; + if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) + p5_tpa = true; + for (i = 0; i < agg_bufs; i++) { u16 cons; struct rx_agg_cmp *agg; @@ -845,8 +882,10 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons, struct rx_bd *prod_bd; struct page *page; - agg = (struct rx_agg_cmp *) - &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + if (p5_tpa) + agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); + else + agg = bnxt_get_agg(bp, cpr, idx, start + i); cons = agg->rx_agg_cmp_opaque; __clear_bit(cons, rxr->rx_agg_bmap); @@ -874,7 +913,6 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons, prod = NEXT_RX_AGG(prod); sw_prod = NEXT_RX_AGG(sw_prod); - cp_cons = NEXT_CMP(cp_cons); } rxr->rx_agg_prod = prod; rxr->rx_sw_agg_prod = sw_prod; @@ -888,7 +926,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, { unsigned int payload = offset_and_len >> 16; unsigned int len = offset_and_len & 0xffff; - struct skb_frag_struct *frag; + skb_frag_t *frag; struct page *page = data; u16 prod = rxr->rx_prod; struct sk_buff *skb; @@ -919,7 +957,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, frag = &skb_shinfo(skb)->frags[0]; skb_frag_size_sub(frag, payload); - frag->page_offset += payload; + skb_frag_off_add(frag, payload); skb->data_len -= payload; skb->tail += payload; @@ -957,15 +995,19 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, - struct sk_buff *skb, u16 cp_cons, - u32 agg_bufs) + struct sk_buff *skb, u16 idx, + u32 agg_bufs, bool tpa) { struct bnxt_napi *bnapi = cpr->bnapi; struct pci_dev *pdev = bp->pdev; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; + bool p5_tpa = false; u32 i; + if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) + p5_tpa = true; + for (i = 0; i < agg_bufs; i++) { u16 cons, frag_len; struct rx_agg_cmp *agg; @@ -973,8 +1015,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct page *page; dma_addr_t mapping; - agg = (struct rx_agg_cmp *) - &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + if (p5_tpa) + agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); + else + agg = bnxt_get_agg(bp, cpr, idx, i); cons = agg->rx_agg_cmp_opaque; frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; @@ -1008,7 +1052,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, * allocated already. 
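* bnxt_reuse_rx_agg_bufs() below recycles the remaining agg_bufs - i completions, i.e. those not yet attached to the skb, starting at entry i.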
*/ rxr->rx_agg_prod = prod; - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i); + bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); return NULL; } @@ -1021,7 +1065,6 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, skb->truesize += PAGE_SIZE; prod = NEXT_RX_AGG(prod); - cp_cons = NEXT_CMP(cp_cons); } rxr->rx_agg_prod = prod; return skb; @@ -1081,9 +1124,10 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { struct rx_tpa_end_cmp *tpa_end = cmp; - agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & - RX_TPA_END_CMP_AGG_BUFS) >> - RX_TPA_END_CMP_AGG_BUFS_SHIFT; + if (bp->flags & BNXT_FLAG_CHIP_P5) + return 0; + + agg_bufs = TPA_END_AGG_BUFS(tpa_end); } if (agg_bufs) { @@ -1120,26 +1164,60 @@ static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) rxr->rx_next_cons = 0xffff; } +static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + u16 idx = agg_id & MAX_TPA_P5_MASK; + + if (test_bit(idx, map->agg_idx_bmap)) + idx = find_first_zero_bit(map->agg_idx_bmap, + BNXT_AGG_IDX_BMAP_SIZE); + __set_bit(idx, map->agg_idx_bmap); + map->agg_id_tbl[agg_id] = idx; + return idx; +} + +static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + + __clear_bit(idx, map->agg_idx_bmap); +} + +static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + + return map->agg_id_tbl[agg_id]; +} + static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, struct rx_tpa_start_cmp *tpa_start, struct rx_tpa_start_cmp_ext *tpa_start1) { - u8 agg_id = TPA_START_AGG_ID(tpa_start); - u16 cons, prod; - struct bnxt_tpa_info *tpa_info; struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; + struct bnxt_tpa_info *tpa_info; + u16 cons, prod, agg_id; struct rx_bd *prod_bd; dma_addr_t mapping; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + agg_id = TPA_START_AGG_ID_P5(tpa_start); + agg_id = bnxt_alloc_agg_idx(rxr, agg_id); + } else { + agg_id = TPA_START_AGG_ID(tpa_start); + } cons = tpa_start->rx_tpa_start_cmp_opaque; prod = rxr->rx_prod; cons_rx_buf = &rxr->rx_buf_ring[cons]; prod_rx_buf = &rxr->rx_buf_ring[prod]; tpa_info = &rxr->rx_tpa[agg_id]; - if (unlikely(cons != rxr->rx_next_cons)) { - netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n", - cons, rxr->rx_next_cons); + if (unlikely(cons != rxr->rx_next_cons || + TPA_START_ERROR(tpa_start))) { + netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", + cons, rxr->rx_next_cons, + TPA_START_ERROR_CODE(tpa_start1)); bnxt_sched_reset(bp, rxr); return; } @@ -1184,6 +1262,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); + tpa_info->agg_count = 0; rxr->rx_prod = NEXT_RX(prod); cons = NEXT_RX(cons); @@ -1195,13 +1274,37 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, cons_rx_buf->data = NULL; } -static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons, - u32 agg_bufs) +static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) { if (agg_bufs) - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); + bnxt_reuse_rx_agg_bufs(cpr, 
idx, 0, agg_bufs, true); } +#ifdef CONFIG_INET +static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) +{ + struct udphdr *uh = NULL; + + if (ip_proto == htons(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)skb->data; + + if (iph->protocol == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } else { + struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; + + if (iph->nexthdr == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } + if (uh) { + if (uh->check) + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; + else + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; + } +} +#endif + static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, int payload_off, int tcp_ts, struct sk_buff *skb) @@ -1259,28 +1362,39 @@ static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, } if (inner_mac_off) { /* tunnel */ - struct udphdr *uh = NULL; __be16 proto = *((__be16 *)(skb->data + outer_ip_off - ETH_HLEN - 2)); - if (proto == htons(ETH_P_IP)) { - struct iphdr *iph = (struct iphdr *)skb->data; + bnxt_gro_tunnel(skb, proto); + } +#endif + return skb; +} - if (iph->protocol == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } else { - struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; +static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, + int payload_off, int tcp_ts, + struct sk_buff *skb) +{ +#ifdef CONFIG_INET + u16 outer_ip_off, inner_ip_off, inner_mac_off; + u32 hdr_info = tpa_info->hdr_info; + int iphdr_len, nw_off; - if (iph->nexthdr == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } - if (uh) { - if (uh->check) - skb_shinfo(skb)->gso_type |= - SKB_GSO_UDP_TUNNEL_CSUM; - else - skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; - } + inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); + inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); + outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); + + nw_off = inner_ip_off - ETH_HLEN; + skb_set_network_header(skb, nw_off); + iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? 
+ sizeof(struct ipv6hdr) : sizeof(struct iphdr); + skb_set_transport_header(skb, nw_off + iphdr_len); + + if (inner_mac_off) { /* tunnel */ + __be16 proto = *((__be16 *)(skb->data + outer_ip_off - + ETH_HLEN - 2)); + + bnxt_gro_tunnel(skb, proto); } #endif return skb; @@ -1327,28 +1441,8 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, return NULL; } - if (nw_off) { /* tunnel */ - struct udphdr *uh = NULL; - - if (skb->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = (struct iphdr *)skb->data; - - if (iph->protocol == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } else { - struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; - - if (iph->nexthdr == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } - if (uh) { - if (uh->check) - skb_shinfo(skb)->gso_type |= - SKB_GSO_UDP_TUNNEL_CSUM; - else - skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; - } - } + if (nw_off) /* tunnel */ + bnxt_gro_tunnel(skb, skb->protocol); #endif return skb; } @@ -1371,9 +1465,10 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, skb_shinfo(skb)->gso_size = le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); skb_shinfo(skb)->gso_type = tpa_info->gso_type; - payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & - RX_TPA_END_CMP_PAYLOAD_OFFSET) >> - RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; + if (bp->flags & BNXT_FLAG_CHIP_P5) + payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); + else + payload_off = TPA_END_PAYLOAD_OFF(tpa_end); skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); if (likely(skb)) tcp_gro_complete(skb); @@ -1401,14 +1496,14 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, { struct bnxt_napi *bnapi = cpr->bnapi; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; - u8 agg_id = TPA_END_AGG_ID(tpa_end); u8 *data_ptr, agg_bufs; - u16 cp_cons = RING_CMP(*raw_cons); unsigned int len; struct bnxt_tpa_info *tpa_info; dma_addr_t mapping; struct sk_buff *skb; + u16 idx = 0, agg_id; void *data; + bool gro; if (unlikely(bnapi->in_reset)) { int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); @@ -1418,26 +1513,43 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, return NULL; } - tpa_info = &rxr->rx_tpa[agg_id]; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + agg_id = TPA_END_AGG_ID_P5(tpa_end); + agg_id = bnxt_lookup_agg_idx(rxr, agg_id); + agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); + tpa_info = &rxr->rx_tpa[agg_id]; + if (unlikely(agg_bufs != tpa_info->agg_count)) { + netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", + agg_bufs, tpa_info->agg_count); + agg_bufs = tpa_info->agg_count; + } + tpa_info->agg_count = 0; + *event |= BNXT_AGG_EVENT; + bnxt_free_agg_idx(rxr, agg_id); + idx = agg_id; + gro = !!(bp->flags & BNXT_FLAG_GRO); + } else { + agg_id = TPA_END_AGG_ID(tpa_end); + agg_bufs = TPA_END_AGG_BUFS(tpa_end); + tpa_info = &rxr->rx_tpa[agg_id]; + idx = RING_CMP(*raw_cons); + if (agg_bufs) { + if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) + return ERR_PTR(-EBUSY); + + *event |= BNXT_AGG_EVENT; + idx = NEXT_CMP(idx); + } + gro = !!TPA_END_GRO(tpa_end); + } data = tpa_info->data; data_ptr = tpa_info->data_ptr; prefetch(data_ptr); len = tpa_info->len; mapping = tpa_info->mapping; - agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & - RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT; - - if (agg_bufs) { - if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) - return ERR_PTR(-EBUSY); - - *event |= BNXT_AGG_EVENT; - cp_cons = NEXT_CMP(cp_cons); - } - if (unlikely(agg_bufs > MAX_SKB_FRAGS 
|| TPA_END_ERRORS(tpa_end1))) { - bnxt_abort_tpa(cpr, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); if (agg_bufs > MAX_SKB_FRAGS) netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", agg_bufs, (int)MAX_SKB_FRAGS); @@ -1447,7 +1559,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (len <= bp->rx_copy_thresh) { skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); if (!skb) { - bnxt_abort_tpa(cpr, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); return NULL; } } else { @@ -1456,7 +1568,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); if (!new_data) { - bnxt_abort_tpa(cpr, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); return NULL; } @@ -1471,7 +1583,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (!skb) { kfree(data); - bnxt_abort_tpa(cpr, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); return NULL; } skb_reserve(skb, bp->rx_offset); @@ -1479,7 +1591,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } if (agg_bufs) { - skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs); + skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); if (!skb) { /* Page reuse already handled by bnxt_rx_pages(). */ return NULL; @@ -1508,12 +1620,24 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; } - if (TPA_END_GRO(tpa_end)) + if (gro) skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); return skb; } +static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + struct rx_agg_cmp *rx_agg) +{ + u16 agg_id = TPA_AGG_AGG_ID(rx_agg); + struct bnxt_tpa_info *tpa_info; + + agg_id = bnxt_lookup_agg_idx(rxr, agg_id); + tpa_info = &rxr->rx_tpa[agg_id]; + BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); + tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; +} + static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, struct sk_buff *skb) { @@ -1555,6 +1679,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rxcmp = (struct rx_cmp *) &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + cmp_type = RX_CMP_TYPE(rxcmp); + + if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { + bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); + goto next_rx_no_prod_no_len; + } + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); cp_cons = RING_CMP(tmp_raw_cons); rxcmp1 = (struct rx_cmp_ext *) @@ -1563,8 +1694,6 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) return -EBUSY; - cmp_type = RX_CMP_TYPE(rxcmp); - prod = rxr->rx_prod; if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { @@ -1623,7 +1752,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, bnxt_reuse_rx_data(rxr, cons, data); if (agg_bufs) - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, + false); rc = -EIO; if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { @@ -1646,7 +1776,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, bnxt_reuse_rx_data(rxr, cons, data); if (!skb) { if (agg_bufs) - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, + agg_bufs, false); rc = -ENOMEM; goto next_rx; } @@ -1666,7 +1797,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } if (agg_bufs) { - skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs); + skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, 
agg_bufs, false); if (!skb) { rc = -ENOMEM; goto next_rx; @@ -2325,10 +2456,11 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_tpa_idx_map *map; int j; if (rxr->rx_tpa) { - for (j = 0; j < MAX_TPA; j++) { + for (j = 0; j < bp->max_tpa; j++) { struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[j]; u8 *data = tpa_info->data; @@ -2395,6 +2527,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) __free_page(rxr->rx_page); rxr->rx_page = NULL; } + map = rxr->rx_tpa_idx_map; + if (map) + memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); } } @@ -2483,6 +2618,61 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) return 0; } +static void bnxt_free_tpa_info(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + + kfree(rxr->rx_tpa_idx_map); + rxr->rx_tpa_idx_map = NULL; + if (rxr->rx_tpa) { + kfree(rxr->rx_tpa[0].agg_arr); + rxr->rx_tpa[0].agg_arr = NULL; + } + kfree(rxr->rx_tpa); + rxr->rx_tpa = NULL; + } +} + +static int bnxt_alloc_tpa_info(struct bnxt *bp) +{ + int i, j, total_aggs = 0; + + bp->max_tpa = MAX_TPA; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + if (!bp->max_tpa_v2) + return 0; + bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); + total_aggs = bp->max_tpa * MAX_SKB_FRAGS; + } + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct rx_agg_cmp *agg; + + rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), + GFP_KERNEL); + if (!rxr->rx_tpa) + return -ENOMEM; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + continue; + agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL); + rxr->rx_tpa[0].agg_arr = agg; + if (!agg) + return -ENOMEM; + for (j = 1; j < bp->max_tpa; j++) + rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS; + rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), + GFP_KERNEL); + if (!rxr->rx_tpa_idx_map) + return -ENOMEM; + } + return 0; +} + static void bnxt_free_rx_rings(struct bnxt *bp) { int i; @@ -2490,6 +2680,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp) if (!bp->rx_ring) return; + bnxt_free_tpa_info(bp); for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; @@ -2503,9 +2694,6 @@ static void bnxt_free_rx_rings(struct bnxt *bp) page_pool_destroy(rxr->page_pool); rxr->page_pool = NULL; - kfree(rxr->rx_tpa); - rxr->rx_tpa = NULL; - kfree(rxr->rx_agg_bmap); rxr->rx_agg_bmap = NULL; @@ -2539,7 +2727,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, static int bnxt_alloc_rx_rings(struct bnxt *bp) { - int i, rc, agg_rings = 0, tpa_rings = 0; + int i, rc = 0, agg_rings = 0; if (!bp->rx_ring) return -ENOMEM; @@ -2547,9 +2735,6 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) if (bp->flags & BNXT_FLAG_AGG_RINGS) agg_rings = 1; - if (bp->flags & BNXT_FLAG_TPA) - tpa_rings = 1; - for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; @@ -2591,17 +2776,11 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); if (!rxr->rx_agg_bmap) return -ENOMEM; - - if (tpa_rings) { - rxr->rx_tpa = kcalloc(MAX_TPA, - sizeof(struct bnxt_tpa_info), - GFP_KERNEL); - if (!rxr->rx_tpa) - return -ENOMEM; - } } } - return 0; + if (bp->flags & BNXT_FLAG_TPA) + rc = bnxt_alloc_tpa_info(bp); + return rc; } 
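/* A minimal sketch of the slicing pattern bnxt_alloc_tpa_info() sets up
 * above, using a hypothetical demo_alloc_agg_windows() helper: one backing
 * allocation of max_tpa * MAX_SKB_FRAGS aggregation completions is carved
 * into per-TPA-context windows, which is why bnxt_free_tpa_info() only
 * passes rx_tpa[0].agg_arr to kfree().
 */
static int demo_alloc_agg_windows(struct bnxt_tpa_info *tpa, u16 max_tpa)
{
	int total = max_tpa * MAX_SKB_FRAGS;
	struct rx_agg_cmp *agg;
	u16 j;

	/* Single allocation backing every context. */
	agg = kcalloc(total, sizeof(*agg), GFP_KERNEL);
	if (!agg)
		return -ENOMEM;

	/* Context j owns the MAX_SKB_FRAGS-entry window starting at
	 * j * MAX_SKB_FRAGS; bnxt_tpa_agg() appends into it using the
	 * per-context agg_count as the write index.
	 */
	for (j = 0; j < max_tpa; j++)
		tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;

	return 0;
}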
static void bnxt_free_tx_rings(struct bnxt *bp) @@ -2953,7 +3132,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) u8 *data; dma_addr_t mapping; - for (i = 0; i < MAX_TPA; i++) { + for (i = 0; i < bp->max_tpa; i++) { data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL); if (!data) @@ -3468,7 +3647,7 @@ static void bnxt_free_ring_stats(struct bnxt *bp) if (!bp->bnapi) return; - size = sizeof(struct ctx_hw_stats); + size = bp->hw_ring_stats_size; for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; @@ -3487,7 +3666,7 @@ static int bnxt_alloc_stats(struct bnxt *bp) u32 size, i; struct pci_dev *pdev = bp->pdev; - size = sizeof(struct ctx_hw_stats); + size = bp->hw_ring_stats_size; for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; @@ -4414,6 +4593,7 @@ static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; struct hwrm_vnic_tpa_cfg_input req = {0}; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) @@ -4453,9 +4633,14 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) nsegs = (MAX_SKB_FRAGS - n) / n; } - segs = ilog2(nsegs); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + segs = MAX_TPA_SEGS_P5; + max_aggs = bp->max_tpa; + } else { + segs = ilog2(nsegs); + } req.max_agg_segs = cpu_to_le16(segs); - req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX); + req.max_aggs = cpu_to_le16(max_aggs); req.min_agg_len = cpu_to_le32(512); } @@ -4815,6 +5000,12 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) if (flags & VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; + bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); + if (bp->max_tpa_v2) + bp->hw_ring_stats_size = + sizeof(struct ctx_hw_stats_ext); + else + bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); } mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -6012,6 +6203,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); + req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); mutex_lock(&bp->hwrm_cmd_lock); @@ -9292,7 +9484,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (changes & BNXT_FLAG_TPA) { update_tpa = true; if ((bp->flags & BNXT_FLAG_TPA) == 0 || - (flags & BNXT_FLAG_TPA) == 0) + (flags & BNXT_FLAG_TPA) == 0 || + (bp->flags & BNXT_FLAG_CHIP_P5)) re_init = true; } @@ -9302,9 +9495,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (flags != bp->flags) { u32 old_flags = bp->flags; - bp->flags = flags; - if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + bp->flags = flags; if (update_tpa) bnxt_set_ring_params(bp); return rc; @@ -9312,12 +9504,14 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (re_init) { bnxt_close_nic(bp, false, false); + bp->flags = flags; if (update_tpa) bnxt_set_ring_params(bp); return bnxt_open_nic(bp, false, false); } if (update_tpa) { + bp->flags = flags; rc = bnxt_set_tpa(bp, (flags & BNXT_FLAG_TPA) ? 
true : false); @@ -9714,6 +9908,68 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; } +static int bnxt_fw_init_one_p1(struct bnxt *bp) +{ + int rc; + + bp->fw_cap = 0; + rc = bnxt_hwrm_ver_get(bp); + if (rc) + return rc; + + if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { + rc = bnxt_alloc_kong_hwrm_resources(bp); + if (rc) + bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; + } + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || + bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { + rc = bnxt_alloc_hwrm_short_cmd_req(bp); + if (rc) + return rc; + } + rc = bnxt_hwrm_func_reset(bp); + if (rc) + return -ENODEV; + + bnxt_hwrm_fw_set_time(bp); + return 0; +} + +static int bnxt_fw_init_one_p2(struct bnxt *bp) +{ + int rc; + + /* Get the MAX capabilities for this function */ + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) { + netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", + rc); + return -ENODEV; + } + + rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); + if (rc) + netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", + rc); + + rc = bnxt_hwrm_func_drv_rgtr(bp); + if (rc) + return -ENODEV; + + rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); + if (rc) + return -ENODEV; + + bnxt_hwrm_func_qcfg(bp); + bnxt_hwrm_vnic_qcaps(bp); + bnxt_hwrm_port_led_qcaps(bp); + bnxt_ethtool_init(bp); + bnxt_dcb_init(bp); + return 0; +} + static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) { int rc; @@ -10669,32 +10925,18 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto init_err_pci_clean; mutex_init(&bp->hwrm_cmd_lock); - rc = bnxt_hwrm_ver_get(bp); + + rc = bnxt_fw_init_one_p1(bp); if (rc) goto init_err_pci_clean; - if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { - rc = bnxt_alloc_kong_hwrm_resources(bp); - if (rc) - bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; - } - - if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || - bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { - rc = bnxt_alloc_hwrm_short_cmd_req(bp); - if (rc) - goto init_err_pci_clean; - } - if (BNXT_CHIP_P5(bp)) bp->flags |= BNXT_FLAG_CHIP_P5; - rc = bnxt_hwrm_func_reset(bp); + rc = bnxt_fw_init_one_p2(bp); if (rc) goto init_err_pci_clean; - bnxt_hwrm_fw_set_time(bp); - dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | @@ -10732,41 +10974,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bp->gro_func = bnxt_gro_func_5730x; if (BNXT_CHIP_P4(bp)) bp->gro_func = bnxt_gro_func_5731x; + else if (BNXT_CHIP_P5(bp)) + bp->gro_func = bnxt_gro_func_5750x; } if (!BNXT_CHIP_P4_PLUS(bp)) bp->flags |= BNXT_FLAG_DOUBLE_DB; - rc = bnxt_hwrm_func_drv_rgtr(bp); - if (rc) - goto init_err_pci_clean; - - rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); - if (rc) - goto init_err_pci_clean; - bp->ulp_probe = bnxt_ulp_probe; - rc = bnxt_hwrm_queue_qportcfg(bp); - if (rc) { - netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", - rc); - rc = -1; - goto init_err_pci_clean; - } - /* Get the MAX capabilities for this function */ - rc = bnxt_hwrm_func_qcaps(bp); - if (rc) { - netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", - rc); - rc = -1; - goto init_err_pci_clean; - } - - rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); - if (rc) - netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", - rc); - rc = bnxt_init_mac_addr(bp); if (rc) { dev_err(&pdev->dev, "Unable to initialize mac address.\n"); @@ -10780,11 +10995,6 @@ 
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; } - bnxt_hwrm_func_qcfg(bp); - bnxt_hwrm_vnic_qcaps(bp); - bnxt_hwrm_port_led_qcaps(bp); - bnxt_ethtool_init(bp); - bnxt_dcb_init(bp); /* MTU range: 60 - FW defined max */ dev->min_mtu = ETH_ZLEN; @@ -10920,8 +11130,7 @@ shutdown_exit: #ifdef CONFIG_PM_SLEEP static int bnxt_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct bnxt *bp = netdev_priv(dev); int rc = 0; @@ -10937,8 +11146,7 @@ static int bnxt_suspend(struct device *device) static int bnxt_resume(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct bnxt *bp = netdev_priv(dev); int rc = 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 16694b704d15..e3262089b751 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -113,6 +113,7 @@ struct tx_cmp { #define CMP_TYPE_RX_AGG_CMP 18 #define CMP_TYPE_RX_L2_TPA_START_CMP 19 #define CMP_TYPE_RX_L2_TPA_END_CMP 21 + #define CMP_TYPE_RX_TPA_AGG_CMP 22 #define CMP_TYPE_STATUS_CMP 32 #define CMP_TYPE_REMOTE_DRIVER_REQ 34 #define CMP_TYPE_REMOTE_DRIVER_RESP 36 @@ -263,14 +264,21 @@ struct rx_agg_cmp { u32 rx_agg_cmp_opaque; __le32 rx_agg_cmp_v; #define RX_AGG_CMP_V (1 << 0) + #define RX_AGG_CMP_AGG_ID (0xffff << 16) + #define RX_AGG_CMP_AGG_ID_SHIFT 16 __le32 rx_agg_cmp_unused; }; +#define TPA_AGG_AGG_ID(rx_agg) \ + ((le32_to_cpu((rx_agg)->rx_agg_cmp_v) & \ + RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT) + struct rx_tpa_start_cmp { __le32 rx_tpa_start_cmp_len_flags_type; #define RX_TPA_START_CMP_TYPE (0x3f << 0) #define RX_TPA_START_CMP_FLAGS (0x3ff << 6) #define RX_TPA_START_CMP_FLAGS_SHIFT 6 + #define RX_TPA_START_CMP_FLAGS_ERROR (0x1 << 6) #define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7) #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7) @@ -278,6 +286,7 @@ struct rx_tpa_start_cmp { #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7) #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7) #define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10) + #define RX_TPA_START_CMP_FLAGS_TIMESTAMP (0x1 << 11) #define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12) #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12 #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12) @@ -291,6 +300,8 @@ struct rx_tpa_start_cmp { #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9 #define RX_TPA_START_CMP_AGG_ID (0x7f << 25) #define RX_TPA_START_CMP_AGG_ID_SHIFT 25 + #define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16) + #define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16 __le32 rx_tpa_start_cmp_rss_hash; }; @@ -308,6 +319,14 @@ struct rx_tpa_start_cmp { ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT) +#define TPA_START_AGG_ID_P5(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_CMP_AGG_ID_P5) >> RX_TPA_START_CMP_AGG_ID_SHIFT_P5) + +#define TPA_START_ERROR(rx_tpa_start) \ + ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \ + cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR)) + struct rx_tpa_start_cmp_ext { __le32 rx_tpa_start_cmp_flags2; #define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0) @@ 
-315,10 +334,20 @@ struct rx_tpa_start_cmp_ext { #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) #define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8) + #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID (0x1 << 9) + #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT (0x3 << 10) + #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10 + #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL (0xffff << 16) + #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16 __le32 rx_tpa_start_cmp_metadata; __le32 rx_tpa_start_cmp_cfa_code_v2; #define RX_TPA_START_CMP_V2 (0x1 << 0) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK (0x7 << 1) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT 1 + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1) #define RX_TPA_START_CMP_CFA_CODE (0xffff << 16) #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16 __le32 rx_tpa_start_cmp_hdr_info; @@ -332,6 +361,11 @@ struct rx_tpa_start_cmp_ext { (!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \ cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE))) +#define TPA_START_ERROR_CODE(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \ + RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \ + RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT) + struct rx_tpa_end_cmp { __le32 rx_tpa_end_cmp_len_flags_type; #define RX_TPA_END_CMP_TYPE (0x3f << 0) @@ -361,6 +395,8 @@ struct rx_tpa_end_cmp { #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16 #define RX_TPA_END_CMP_AGG_ID (0x7f << 25) #define RX_TPA_END_CMP_AGG_ID_SHIFT 25 + #define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16) + #define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16 __le32 rx_tpa_end_cmp_tsdelta; #define RX_TPA_END_GRO_TS (0x1 << 31) @@ -370,6 +406,18 @@ struct rx_tpa_end_cmp { ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT) +#define TPA_END_AGG_ID_P5(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_AGG_ID_P5) >> RX_TPA_END_CMP_AGG_ID_SHIFT_P5) + +#define TPA_END_PAYLOAD_OFF(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_PAYLOAD_OFFSET) >> RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT) + +#define TPA_END_AGG_BUFS(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT) + #define TPA_END_TPA_SEGS(rx_tpa_end) \ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT) @@ -389,6 +437,10 @@ struct rx_tpa_end_cmp { struct rx_tpa_end_cmp_ext { __le32 rx_tpa_end_cmp_dup_acks; #define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0) + #define RX_TPA_END_CMP_PAYLOAD_OFFSET_P5 (0xff << 16) + #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5 16 + #define RX_TPA_END_CMP_AGG_BUFS_P5 (0xff << 24) + #define RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5 24 __le32 rx_tpa_end_cmp_seg_len; #define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0) @@ -396,7 +448,13 @@ struct rx_tpa_end_cmp_ext { __le32 rx_tpa_end_cmp_errors_v2; #define RX_TPA_END_CMP_V2 (0x1 << 0) #define RX_TPA_END_CMP_ERRORS (0x3 << 1) + #define RX_TPA_END_CMP_ERRORS_P5 (0x7 << 1) #define RX_TPA_END_CMPL_ERRORS_SHIFT 1 + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1) + 
#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR (0x4 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1) u32 rx_tpa_end_cmp_start_opaque; }; @@ -405,6 +463,15 @@ struct rx_tpa_end_cmp_ext { ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \ cpu_to_le32(RX_TPA_END_CMP_ERRORS)) +#define TPA_END_PAYLOAD_OFF_P5(rx_tpa_end_ext) \ + ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \ + RX_TPA_END_CMP_PAYLOAD_OFFSET_P5) >> \ + RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5) + +#define TPA_END_AGG_BUFS_P5(rx_tpa_end_ext) \ + ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \ + RX_TPA_END_CMP_AGG_BUFS_P5) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5) + struct nqe_cn { __le16 type; #define NQ_CN_TYPE_MASK 0x3fUL @@ -487,6 +554,9 @@ struct nqe_cn { #define BNXT_DEFAULT_TX_RING_SIZE 511 #define MAX_TPA 64 +#define MAX_TPA_P5 256 +#define MAX_TPA_P5_MASK (MAX_TPA_P5 - 1) +#define MAX_TPA_SEGS_P5 0x3f #if (BNXT_PAGE_SHIFT == 16) #define MAX_RX_PAGES 1 @@ -768,6 +838,15 @@ struct bnxt_tpa_info { ((hdr_info) & 0x1ff) u16 cfa_code; /* cfa_code in TPA start compl */ + u8 agg_count; + struct rx_agg_cmp *agg_arr; +}; + +#define BNXT_AGG_IDX_BMAP_SIZE (MAX_TPA_P5 / BITS_PER_LONG) + +struct bnxt_tpa_idx_map { + u16 agg_id_tbl[1024]; + unsigned long agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE]; }; struct bnxt_rx_ring_info { @@ -797,6 +876,7 @@ struct bnxt_rx_ring_info { dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; struct bnxt_tpa_info *rx_tpa; + struct bnxt_tpa_idx_map *rx_tpa_idx_map; struct bnxt_ring_struct rx_ring_struct; struct bnxt_ring_struct rx_agg_ring_struct; @@ -1282,7 +1362,9 @@ struct bnxt { #define CHIP_NUM_5745X 0xd730 -#define CHIP_NUM_57500 0x1750 +#define CHIP_NUM_57508 0x1750 +#define CHIP_NUM_57504 0x1751 +#define CHIP_NUM_57502 0x1752 #define CHIP_NUM_58802 0xd802 #define CHIP_NUM_58804 0xd804 @@ -1379,12 +1461,14 @@ struct bnxt { #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0) #define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE) #define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \ - !(bp->flags & BNXT_FLAG_CHIP_P5) && \ - !is_kdump_kernel()) + (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \ + (bp)->max_tpa_v2) && !is_kdump_kernel()) /* Chip class phase 5 */ #define BNXT_CHIP_P5(bp) \ - ((bp)->chip_num == CHIP_NUM_57500) + ((bp)->chip_num == CHIP_NUM_57508 || \ + (bp)->chip_num == CHIP_NUM_57504 || \ + (bp)->chip_num == CHIP_NUM_57502) /* Chip class phase 4.x */ #define BNXT_CHIP_P4(bp) \ @@ -1414,6 +1498,8 @@ struct bnxt { u16, void *, u8 *, dma_addr_t, unsigned int); + u16 max_tpa_v2; + u16 max_tpa; u32 rx_buf_size; u32 rx_buf_use_size; /* useable size */ u16 rx_offset; @@ -1525,6 +1611,7 @@ struct bnxt { int hw_port_stats_size; u16 fw_rx_stats_ext_size; u16 fw_tx_stats_ext_size; + u16 hw_ring_stats_size; u8 pri2cos[8]; u8 pri2cos_valid; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index c7ee63d69679..3a3d8a9be5ed 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -137,7 +137,44 @@ reset_coalesce: return rc; } -#define BNXT_NUM_STATS 22 +static const char * const bnxt_ring_stats_str[] = { + "rx_ucast_packets", + "rx_mcast_packets", + "rx_bcast_packets", + "rx_discards", + "rx_drops", + "rx_ucast_bytes", + "rx_mcast_bytes", + "rx_bcast_bytes", + "tx_ucast_packets", + "tx_mcast_packets", + "tx_bcast_packets", + "tx_discards", + "tx_drops", + "tx_ucast_bytes", + "tx_mcast_bytes", + 
"tx_bcast_bytes", +}; + +static const char * const bnxt_ring_tpa_stats_str[] = { + "tpa_packets", + "tpa_bytes", + "tpa_events", + "tpa_aborts", +}; + +static const char * const bnxt_ring_tpa2_stats_str[] = { + "rx_tpa_eligible_pkt", + "rx_tpa_eligible_bytes", + "rx_tpa_pkt", + "rx_tpa_bytes", + "rx_tpa_errors", +}; + +static const char * const bnxt_ring_sw_stats_str[] = { + "rx_l4_csum_errors", + "missed_irqs", +}; #define BNXT_RX_STATS_ENTRY(counter) \ { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) } @@ -207,6 +244,20 @@ reset_coalesce: BNXT_TX_STATS_EXT_COS_ENTRY(6), \ BNXT_TX_STATS_EXT_COS_ENTRY(7) \ +#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \ + BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \ + BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n) + +#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7) + #define BNXT_RX_STATS_PRI_ENTRY(counter, n) \ { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \ __stringify(counter##_pri##n) } @@ -352,6 +403,7 @@ static const struct { BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold), BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err), BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits), + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES, }; static const struct { @@ -417,9 +469,29 @@ static const struct { ARRAY_SIZE(bnxt_tx_pkts_pri_arr)) #define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr) +static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp) +{ + if (BNXT_SUPPORTS_TPA(bp)) { + if (bp->max_tpa_v2) + return ARRAY_SIZE(bnxt_ring_tpa2_stats_str); + return ARRAY_SIZE(bnxt_ring_tpa_stats_str); + } + return 0; +} + +static int bnxt_get_num_ring_stats(struct bnxt *bp) +{ + int num_stats; + + num_stats = ARRAY_SIZE(bnxt_ring_stats_str) + + ARRAY_SIZE(bnxt_ring_sw_stats_str) + + bnxt_get_num_tpa_ring_stats(bp); + return num_stats * bp->cp_nr_rings; +} + static int bnxt_get_num_stats(struct bnxt *bp) { - int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + int num_stats = bnxt_get_num_ring_stats(bp); num_stats += BNXT_NUM_SW_FUNC_STATS; @@ -460,10 +532,11 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, { u32 i, j = 0; struct bnxt *bp = netdev_priv(dev); - u32 stat_fields = sizeof(struct ctx_hw_stats) / 8; + u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) + + bnxt_get_num_tpa_ring_stats(bp); if (!bp->bnapi) { - j += BNXT_NUM_STATS * bp->cp_nr_rings + BNXT_NUM_SW_FUNC_STATS; + j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS; goto skip_ring_stats; } @@ -551,56 +624,39 @@ skip_ring_stats: static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct bnxt *bp = netdev_priv(dev); - u32 i; + static const char * const *str; + u32 i, j, num_str; switch (stringset) { - /* The number of strings must match BNXT_NUM_STATS defined above. 
*/ case ETH_SS_STATS: for (i = 0; i < bp->cp_nr_rings; i++) { - sprintf(buf, "[%d]: rx_ucast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_mcast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_bcast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_discards", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_drops", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_ucast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_mcast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_bcast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_ucast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_mcast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_bcast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_discards", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_drops", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_ucast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_mcast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_bcast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tpa_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tpa_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tpa_events", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tpa_aborts", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_l4_csum_errors", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: missed_irqs", i); - buf += ETH_GSTRING_LEN; + num_str = ARRAY_SIZE(bnxt_ring_stats_str); + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_ring_stats_str[j]); + buf += ETH_GSTRING_LEN; + } + if (!BNXT_SUPPORTS_TPA(bp)) + goto skip_tpa_stats; + + if (bp->max_tpa_v2) { + num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str); + str = bnxt_ring_tpa2_stats_str; + } else { + num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str); + str = bnxt_ring_tpa_stats_str; + } + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, str[j]); + buf += ETH_GSTRING_LEN; + } +skip_tpa_stats: + num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str); + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_ring_sw_stats_str[j]); + buf += ETH_GSTRING_LEN; + } } for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) { strcpy(buf, bnxt_sw_func_stats[i].string); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 12bbb2a207d0..2cdef753a1bc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -1,7 +1,8 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2019 Broadcom Limited + * Copyright (c) 2014-2018 Broadcom Limited + * Copyright (c) 2018-2019 Broadcom Inc. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -39,15 +40,15 @@ struct hwrm_resp_hdr { #define TLV_TYPE_ROCE_SP_COMMAND 0x3UL #define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL #define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL -#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL -#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL +#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL #define TLV_TYPE_ENGINE_CKV_IV 0x8003UL #define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL #define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL #define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL -#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL +#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL #define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL -#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE +#define TLV_TYPE_ENGINE_CKV_SRT_ECC_PUBLIC_KEY 0x8009UL +#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_SRT_ECC_PUBLIC_KEY /* tlv (size:64b/8B) */ @@ -267,7 +268,6 @@ struct cmd_nums { #define HWRM_CFA_EEM_OP 0x123UL #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL #define HWRM_CFA_TFLIB 0x125UL - #define HWRM_ENGINE_CKV_HELLO 0x12dUL #define HWRM_ENGINE_CKV_STATUS 0x12eUL #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL @@ -313,6 +313,7 @@ struct cmd_nums { #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL #define HWRM_FUNC_VF_BW_CFG 0x195UL #define HWRM_FUNC_VF_BW_QCFG 0x196UL + #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL #define HWRM_SELFTEST_QLIST 0x200UL #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL @@ -410,8 +411,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_RSVD 69 -#define HWRM_VERSION_STR "1.10.0.69" +#define HWRM_VERSION_RSVD 89 +#define HWRM_VERSION_STR "1.10.0.89" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -624,6 +625,8 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL + #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL + #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR @@ -1122,6 +1125,7 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL + #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -1241,6 +1245,7 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL + #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL __le32 enables; #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL @@ -2916,7 +2921,7 @@ struct tx_port_stats_ext { __le64 pfc_pri7_tx_transitions; }; -/* rx_port_stats_ext (size:2624b/328B) */ +/* rx_port_stats_ext (size:3648b/456B) */ struct rx_port_stats_ext { __le64 link_down_events; __le64 continuous_pause_events; @@ -2959,6 +2964,22 @@ struct rx_port_stats_ext { __le64 
rx_buffer_passed_threshold; __le64 rx_pcs_symbol_err; __le64 rx_corrected_bits; + __le64 rx_discard_bytes_cos0; + __le64 rx_discard_bytes_cos1; + __le64 rx_discard_bytes_cos2; + __le64 rx_discard_bytes_cos3; + __le64 rx_discard_bytes_cos4; + __le64 rx_discard_bytes_cos5; + __le64 rx_discard_bytes_cos6; + __le64 rx_discard_bytes_cos7; + __le64 rx_discard_packets_cos0; + __le64 rx_discard_packets_cos1; + __le64 rx_discard_packets_cos2; + __le64 rx_discard_packets_cos3; + __le64 rx_discard_packets_cos4; + __le64 rx_discard_packets_cos5; + __le64 rx_discard_packets_cos6; + __le64 rx_discard_packets_cos7; }; /* hwrm_port_qstats_ext_input (size:320b/40B) */ @@ -6115,6 +6136,21 @@ struct hwrm_cfa_flow_alloc_output { u8 valid; }; +/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */ +struct hwrm_cfa_flow_alloc_cmd_err { + u8 code; + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB + u8 unused_0[7]; +}; + /* hwrm_cfa_flow_free_input (size:256b/32B) */ struct hwrm_cfa_flow_free_input { __le16 req_type; @@ -6305,7 +6341,7 @@ struct hwrm_cfa_eem_qcaps_input { __le32 unused_0; }; -/* hwrm_cfa_eem_qcaps_output (size:256b/32B) */ +/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */ struct hwrm_cfa_eem_qcaps_output { __le16 error_code; __le16 req_type; @@ -6322,15 +6358,17 @@ struct hwrm_cfa_eem_qcaps_output { #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL + #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL __le32 max_entries_supported; __le16 key_entry_size; __le16 record_entry_size; __le16 efc_entry_size; - u8 unused_1; + __le16 fid_entry_size; + u8 unused_1[7]; u8 valid; }; -/* hwrm_cfa_eem_cfg_input (size:320b/40B) */ +/* hwrm_cfa_eem_cfg_input (size:384b/48B) */ struct hwrm_cfa_eem_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -6350,6 +6388,9 @@ struct hwrm_cfa_eem_cfg_input { __le16 key1_ctx_id; __le16 record_ctx_id; __le16 efc_ctx_id; + __le16 fid_ctx_id; + __le16 unused_2; + __le32 unused_3; }; /* hwrm_cfa_eem_cfg_output (size:128b/16B) */ @@ -6375,7 +6416,7 @@ struct hwrm_cfa_eem_qcfg_input { __le32 unused_0; }; -/* hwrm_cfa_eem_qcfg_output (size:192b/24B) */ +/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */ struct hwrm_cfa_eem_qcfg_output { __le16 error_code; __le16 req_type; @@ -6386,7 +6427,12 @@ struct hwrm_cfa_eem_qcfg_output { #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL __le32 num_entries; - u8 unused_0[7]; + __le16 key0_ctx_id; + __le16 key1_ctx_id; + __le16 record_ctx_id; + __le16 efc_ctx_id; + __le16 fid_ctx_id; + u8 unused_2[5]; u8 valid; }; @@ -6567,6 +6613,31 @@ struct ctx_hw_stats { __le64 tpa_aborts; }; +/* ctx_hw_stats_ext (size:1344b/168B) */ +struct ctx_hw_stats_ext { + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 
tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_discard_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_tpa_eligible_pkt; + __le64 rx_tpa_eligible_bytes; + __le64 rx_tpa_pkt; + __le64 rx_tpa_bytes; + __le64 rx_tpa_errors; +}; + /* hwrm_stat_ctx_alloc_input (size:256b/32B) */ struct hwrm_stat_ctx_alloc_input { __le16 req_type; @@ -6578,7 +6649,8 @@ struct hwrm_stat_ctx_alloc_input { __le32 update_period_ms; u8 stat_ctx_flags; #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL - u8 unused_0[3]; + u8 unused_0; + __le16 stats_dma_length; }; /* hwrm_stat_ctx_alloc_output (size:128b/16B) */ @@ -7204,7 +7276,9 @@ struct coredump_segment_record { u8 version_hi; u8 version_low; u8 seg_flags; - u8 unused_0[7]; + u8 compress_flags; + #define SFLAG_COMPRESSED_ZLIB 0x1UL + u8 unused_0[6]; }; /* hwrm_dbg_coredump_list_input (size:256b/32B) */ @@ -7729,6 +7803,9 @@ struct hwrm_nvm_set_variable_input { #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1) #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1) #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH + #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL + #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4 + #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL u8 unused_0; }; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 57dc3cbff36e..155599dcee76 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -4096,12 +4096,16 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; u32 port_id; + int i; cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock), GFP_KERNEL); if (!cp->csk_tbl) return -ENOMEM; + for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) + atomic_set(&cp->csk_tbl[i].ref_count, 0); + port_id = prandom_u32(); port_id %= CNIC_LOCAL_PORT_RANGE; if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, @@ -5480,6 +5484,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, cdev->unregister_device = cnic_unregister_device; cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl; + atomic_set(&cdev->ref_count, 0); cp = cdev->cnic_priv; cp->dev = cdev; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 4c404d2213f9..77f3511b97de 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -18041,8 +18041,7 @@ static void tg3_remove_one(struct pci_dev *pdev) #ifdef CONFIG_PM_SLEEP static int tg3_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct tg3 *tp = netdev_priv(dev); int err = 0; @@ -18098,8 +18097,7 @@ unlock: static int tg3_resume(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct tg3 *tp = netdev_priv(dev); int err = 0; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 7767ae6fa1fd..e338272931d1 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -3032,7 +3032,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) 
head_unmap->nvecs++; for (i = 0, vect_id = 0; i < vectors - 1; i++) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; u32 size = skb_frag_size(frag); if (unlikely(size == 0)) { diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 99f49d059414..f96a42af1014 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1104,7 +1104,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < nfrags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - len = frag->size; + len = skb_frag_size(frag); paddr = skb_frag_dma_map(priv->device, frag, 0, len, DMA_TO_DEVICE); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index eab805579f96..7f3b2e3b0868 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1492,11 +1492,11 @@ static void free_netsgbuf(void *buf) i = 1; while (frags--) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; pci_unmap_page((lio->oct_dev)->pci_dev, g->sg[(i >> 2)].ptr[(i & 3)], - frag->size, DMA_TO_DEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); i++; } @@ -1535,11 +1535,11 @@ static void free_netsgbuf_with_resp(void *buf) i = 1; while (frags--) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; pci_unmap_page((lio->oct_dev)->pci_dev, g->sg[(i >> 2)].ptr[(i & 3)], - frag->size, DMA_TO_DEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); i++; } @@ -2424,7 +2424,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) } else { int i, frags; - struct skb_frag_struct *frag; + skb_frag_t *frag; struct octnic_gather *g; spin_lock(&lio->glist_lock[q_idx]); @@ -2462,11 +2462,9 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) frag = &skb_shinfo(skb)->frags[i - 1]; g->sg[(i >> 2)].ptr[(i & 3)] = - dma_map_page(&oct->pci_dev->dev, - frag->page.p, - frag->page_offset, - frag->size, - DMA_TO_DEVICE); + skb_frag_dma_map(&oct->pci_dev->dev, + frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); if (dma_mapping_error(&oct->pci_dev->dev, g->sg[i >> 2].ptr[i & 3])) { @@ -2478,7 +2476,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) frag = &skb_shinfo(skb)->frags[j - 1]; dma_unmap_page(&oct->pci_dev->dev, g->sg[j >> 2].ptr[j & 3], - frag->size, + skb_frag_size(frag), DMA_TO_DEVICE); } dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", @@ -2486,7 +2484,8 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_BUSY; } - add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); + add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), + (i & 3)); i++; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index db0b90555acb..370d76822ee0 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -837,11 +837,11 @@ static void free_netsgbuf(void *buf) i = 1; while (frags--) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; pci_unmap_page((lio->oct_dev)->pci_dev, g->sg[(i >> 2)].ptr[(i & 3)], - frag->size, DMA_TO_DEVICE); + 
skb_frag_size(frag), DMA_TO_DEVICE); i++; } @@ -881,11 +881,11 @@ static void free_netsgbuf_with_resp(void *buf) i = 1; while (frags--) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; pci_unmap_page((lio->oct_dev)->pci_dev, g->sg[(i >> 2)].ptr[(i & 3)], - frag->size, DMA_TO_DEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); i++; } @@ -1497,7 +1497,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) ndata.reqtype = REQTYPE_NORESP_NET; } else { - struct skb_frag_struct *frag; + skb_frag_t *frag; struct octnic_gather *g; int i, frags; @@ -1535,11 +1535,9 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) frag = &skb_shinfo(skb)->frags[i - 1]; g->sg[(i >> 2)].ptr[(i & 3)] = - dma_map_page(&oct->pci_dev->dev, - frag->page.p, - frag->page_offset, - frag->size, - DMA_TO_DEVICE); + skb_frag_dma_map(&oct->pci_dev->dev, + frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); if (dma_mapping_error(&oct->pci_dev->dev, g->sg[i >> 2].ptr[i & 3])) { dma_unmap_single(&oct->pci_dev->dev, @@ -1550,7 +1548,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) frag = &skb_shinfo(skb)->frags[j - 1]; dma_unmap_page(&oct->pci_dev->dev, g->sg[j >> 2].ptr[j & 3], - frag->size, + skb_frag_size(frag), DMA_TO_DEVICE); } dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", @@ -1558,7 +1556,8 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_BUSY; } - add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); + add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), + (i & 3)); i++; } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 192bc92da881..4ab57d33a87e 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -1588,15 +1588,13 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq, goto doorbell; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; qentry = nicvf_get_nxt_sqentry(sq, qentry); size = skb_frag_size(frag); dma_addr = dma_map_page_attrs(&nic->pdev->dev, skb_frag_page(frag), - frag->page_offset, size, + skb_frag_off(frag), size, DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(&nic->pdev->dev, dma_addr)) { diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 89db739b7819..6dabbf1502c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -2132,7 +2132,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, struct port_info *pi = netdev_priv(qs->netdev); struct sk_buff *skb = NULL; struct cpl_rx_pkt *cpl; - struct skb_frag_struct *rx_frag; + skb_frag_t *rx_frag; int nr_frags; int offset = 0; @@ -2182,7 +2182,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, rx_frag += nr_frags; __skb_frag_set_page(rx_frag, sd->pg_chunk.page); - rx_frag->page_offset = sd->pg_chunk.offset + offset; + skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset); skb_frag_size_set(rx_frag, len); skb->len += len; diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 9003eb6716cd..e736ce2c58ca 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ 
b/drivers/net/ethernet/cortina/gemini.c @@ -1182,9 +1182,8 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, buflen = skb_headlen(skb); } else { skb_frag = skb_si->frags + frag; - buffer = page_address(skb_frag_page(skb_frag)) + - skb_frag->page_offset; - buflen = skb_frag->size; + buffer = skb_frag_address(skb_frag); + buflen = skb_frag_size(skb_frag); } if (frag == last_frag) { @@ -2423,10 +2422,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) /* Interrupt */ irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - dev_err(dev, "no IRQ\n"); + if (irq <= 0) return irq ? irq : -ENODEV; - } port->irq = irq; /* Clock the port */ diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 386bdc1378d1..cce90b5925d9 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1500,8 +1500,6 @@ dm9000_probe(struct platform_device *pdev) ndev->irq = platform_get_irq(pdev, 0); if (ndev->irq < 0) { - dev_err(db->dev, "interrupt resource unavailable: %d\n", - ndev->irq); ret = ndev->irq; goto out; } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 4d8e40ac66d2..314e9868b861 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1014,7 +1014,7 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; len = skb_frag_size(frag); busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE); @@ -2346,8 +2346,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, memcpy(skb->data, start, hdr_len); skb_shinfo(skb)->nr_frags = 1; skb_frag_set_page(skb, 0, page_info->page); - skb_shinfo(skb)->frags[0].page_offset = - page_info->page_offset + hdr_len; + skb_frag_off_set(&skb_shinfo(skb)->frags[0], + page_info->page_offset + hdr_len); skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len); skb->data_len = curr_frag_len - hdr_len; @@ -2372,8 +2372,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, /* Fresh page */ j++; skb_frag_set_page(skb, j, page_info->page); - skb_shinfo(skb)->frags[j].page_offset = - page_info->page_offset; + skb_frag_off_set(&skb_shinfo(skb)->frags[j], + page_info->page_offset); skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0); skb_shinfo(skb)->nr_frags++; } else { @@ -2454,8 +2454,8 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, /* First frag or Fresh page */ j++; skb_frag_set_page(skb, j, page_info->page); - skb_shinfo(skb)->frags[j].page_offset = - page_info->page_offset; + skb_frag_off_set(&skb_shinfo(skb)->frags[j], + page_info->page_offset); skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0); } else { put_page(page_info->page); diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig index a9b105803fb7..73e4f2648e49 100644 --- a/drivers/net/ethernet/faraday/Kconfig +++ b/drivers/net/ethernet/faraday/Kconfig @@ -32,6 +32,7 @@ config FTGMAC100 depends on ARM || NDS32 || COMPILE_TEST depends on !64BIT || BROKEN select PHYLIB + select MDIO_ASPEED if MACH_ASPEED_G6 ---help--- This driver supports the FTGMAC100 Gigabit Ethernet controller from Faraday. 
It is used on Faraday A369, Andes AG102 and some diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 030fed65393e..9b7af94a40bb 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> +#include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/property.h> @@ -774,7 +775,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb, for (i = 0; i < nfrags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - len = frag->size; + len = skb_frag_size(frag); /* Map it */ map = skb_frag_dma_map(priv->dev, frag, 0, len, @@ -1619,8 +1620,13 @@ static int ftgmac100_setup_mdio(struct net_device *netdev) if (!priv->mii_bus) return -EIO; - if (priv->is_aspeed) { - /* This driver supports the old MDIO interface */ + if (of_device_is_compatible(np, "aspeed,ast2400-mac") || + of_device_is_compatible(np, "aspeed,ast2500-mac")) { + /* The AST2600 has a separate MDIO controller */ + + /* For the AST2400 and AST2500 this driver only supports the + * old MDIO interface + */ reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR); reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE; iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR); @@ -1797,7 +1803,8 @@ static int ftgmac100_probe(struct platform_device *pdev) np = pdev->dev.of_node; if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") || - of_device_is_compatible(np, "aspeed,ast2500-mac"))) { + of_device_is_compatible(np, "aspeed,ast2500-mac") || + of_device_is_compatible(np, "aspeed,ast2600-mac"))) { priv->rxdes0_edorr_mask = BIT(30); priv->txdes0_edotr_mask = BIT(30); priv->is_aspeed = true; @@ -1817,7 +1824,29 @@ static int ftgmac100_probe(struct platform_device *pdev) priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler); if (!priv->ndev) goto err_ncsi_dev; - } else { + } else if (np && of_get_property(np, "phy-handle", NULL)) { + struct phy_device *phy; + + phy = of_phy_get_and_connect(priv->netdev, np, + &ftgmac100_adjust_link); + if (!phy) { + dev_err(&pdev->dev, "Failed to connect to phy\n"); + goto err_setup_mdio; + } + + /* Indicate that we support PAUSE frames (see comment in + * Documentation/networking/phy.txt) + */ + phy_support_asym_pause(phy); + + /* Display what we found */ + phy_attached_info(phy); + } else if (np && !of_get_child_by_name(np, "mdio")) { + /* Support legacy ASPEED devicetree descriptions that describe a + * MAC with an embedded MDIO controller but have no "mdio" + * child node. Automatically scan the MDIO bus for available + * PHYs.
+ */ priv->use_ncsi = false; err = ftgmac100_setup_mdio(netdev); if (err) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index f38c3fa7d705..9c4d1afa34e5 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1958,7 +1958,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, /* populate the rest of SGT entries */ for (i = 0; i < nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; - frag_len = frag->size; + frag_len = skb_frag_size(frag); WARN_ON(!skb_frag_page(frag)); addr = skb_frag_dma_map(dev, frag, 0, frag_len, dma_dir); diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig index 04a59db03f2b..c219587bd334 100644 --- a/drivers/net/ethernet/freescale/enetc/Kconfig +++ b/drivers/net/ethernet/freescale/enetc/Kconfig @@ -20,6 +20,15 @@ config FSL_ENETC_VF If compiled as module (M), the module name is fsl-enetc-vf. +config FSL_ENETC_MDIO + tristate "ENETC MDIO driver" + depends on PCI && (ARCH_LAYERSCAPE || COMPILE_TEST) + help + This driver supports NXP ENETC Central MDIO controller as a PCIe + physical function (PF) device. + + If compiled as module (M), the module name is fsl-enetc-mdio. + config FSL_ENETC_PTP_CLOCK tristate "ENETC PTP clock driver" depends on PTP_1588_CLOCK_QORIQ && (FSL_ENETC || FSL_ENETC_VF) diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile index 7139e414dccf..d200c27c3bf6 100644 --- a/drivers/net/ethernet/freescale/enetc/Makefile +++ b/drivers/net/ethernet/freescale/enetc/Makefile @@ -1,19 +1,16 @@ # SPDX-License-Identifier: GPL-2.0 + +common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o + obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o -fsl-enetc-$(CONFIG_FSL_ENETC) += enetc.o enetc_cbdr.o enetc_ethtool.o \ - enetc_mdio.o +fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs) fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o -fsl-enetc-objs := enetc_pf.o $(fsl-enetc-y) obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o +fsl-enetc-vf-y := enetc_vf.o $(common-objs) -ifeq ($(CONFIG_FSL_ENETC)$(CONFIG_FSL_ENETC_VF), yy) -fsl-enetc-vf-objs := enetc_vf.o -else -fsl-enetc-vf-$(CONFIG_FSL_ENETC_VF) += enetc.o enetc_cbdr.o \ - enetc_ethtool.o -fsl-enetc-vf-objs := enetc_vf.o $(fsl-enetc-vf-y) -endif +obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o +fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o -fsl-enetc-ptp-$(CONFIG_FSL_ENETC_PTP_CLOCK) += enetc_ptp.o +fsl-enetc-ptp-y := enetc_ptp.o diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 223709443ea4..b6ff89307409 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -110,7 +110,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb, int active_offloads) { struct enetc_tx_swbd *tx_swbd; - struct skb_frag_struct *frag; + skb_frag_t *frag; int len = skb_headlen(skb); union enetc_tx_bd temp_bd; union enetc_tx_bd *txbd; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c index 77b9cd10ba2b..149883c8f0b8 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c @@ -6,18 +6,20 @@ #include <linux/iopoll.h> #include <linux/of.h> -#include "enetc_pf.h" +#include "enetc_mdio.h" -struct enetc_mdio_regs { - u32 
mdio_cfg; /* MDIO configuration and status */ - u32 mdio_ctl; /* MDIO control */ - u32 mdio_data; /* MDIO data */ - u32 mdio_addr; /* MDIO address */ -}; +#define ENETC_MDIO_REG_OFFSET 0x1c00 +#define ENETC_MDIO_CFG 0x0 /* MDIO configuration and status */ +#define ENETC_MDIO_CTL 0x4 /* MDIO control */ +#define ENETC_MDIO_DATA 0x8 /* MDIO data */ +#define ENETC_MDIO_ADDR 0xc /* MDIO address */ -#define bus_to_enetc_regs(bus) (struct enetc_mdio_regs __iomem *)((bus)->priv) +#define enetc_mdio_rd(hw, off) \ + enetc_port_rd(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET) +#define enetc_mdio_wr(hw, off, val) \ + enetc_port_wr(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET, val) +#define enetc_mdio_rd_reg(off) enetc_mdio_rd(hw, off) -#define ENETC_MDIO_REG_OFFSET 0x1c00 #define ENETC_MDC_DIV 258 #define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8) @@ -33,18 +35,18 @@ struct enetc_mdio_regs { #define MDIO_DATA(x) ((x) & 0xffff) #define TIMEOUT 1000 -static int enetc_mdio_wait_complete(struct enetc_mdio_regs __iomem *regs) +static int enetc_mdio_wait_complete(struct enetc_hw *hw) { u32 val; - return readx_poll_timeout(enetc_rd_reg, &regs->mdio_cfg, val, + return readx_poll_timeout(enetc_mdio_rd_reg, MDIO_CFG, val, !(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT); } -static int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, - u16 value) +int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) { - struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus); + struct enetc_mdio_priv *mdio_priv = bus->priv; + struct enetc_hw *hw = mdio_priv->hw; u32 mdio_ctl, mdio_cfg; u16 dev_addr; int ret; @@ -59,38 +61,39 @@ static int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, mdio_cfg &= ~MDIO_CFG_ENC45; } - enetc_wr_reg(&regs->mdio_cfg, mdio_cfg); + enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg); - ret = enetc_mdio_wait_complete(regs); + ret = enetc_mdio_wait_complete(hw); if (ret) return ret; /* set port and dev addr */ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); - enetc_wr_reg(&regs->mdio_ctl, mdio_ctl); + enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl); /* set the register address */ if (regnum & MII_ADDR_C45) { - enetc_wr_reg(&regs->mdio_addr, regnum & 0xffff); + enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff); - ret = enetc_mdio_wait_complete(regs); + ret = enetc_mdio_wait_complete(hw); if (ret) return ret; } /* write the value */ - enetc_wr_reg(&regs->mdio_data, MDIO_DATA(value)); + enetc_mdio_wr(hw, MDIO_DATA, MDIO_DATA(value)); - ret = enetc_mdio_wait_complete(regs); + ret = enetc_mdio_wait_complete(hw); if (ret) return ret; return 0; } -static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) +int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) { - struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus); + struct enetc_mdio_priv *mdio_priv = bus->priv; + struct enetc_hw *hw = mdio_priv->hw; u32 mdio_ctl, mdio_cfg; u16 dev_addr, value; int ret; @@ -104,41 +107,41 @@ static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) mdio_cfg &= ~MDIO_CFG_ENC45; } - enetc_wr_reg(&regs->mdio_cfg, mdio_cfg); + enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg); - ret = enetc_mdio_wait_complete(regs); + ret = enetc_mdio_wait_complete(hw); if (ret) return ret; /* set port and device addr */ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); - enetc_wr_reg(&regs->mdio_ctl, mdio_ctl); + enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl); /* set the register address */ if (regnum & MII_ADDR_C45) { - enetc_wr_reg(&regs->mdio_addr, regnum & 0xffff); + enetc_mdio_wr(hw,
MDIO_ADDR, regnum & 0xffff); - ret = enetc_mdio_wait_complete(regs); + ret = enetc_mdio_wait_complete(hw); if (ret) return ret; } /* initiate the read */ - enetc_wr_reg(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ); + enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl | MDIO_CTL_READ); - ret = enetc_mdio_wait_complete(regs); + ret = enetc_mdio_wait_complete(hw); if (ret) return ret; /* return all Fs if nothing was there */ - if (enetc_rd_reg(&regs->mdio_cfg) & MDIO_CFG_RD_ER) { + if (enetc_mdio_rd(hw, MDIO_CFG) & MDIO_CFG_RD_ER) { dev_dbg(&bus->dev, "Error while reading PHY%d reg at %d.%hhu\n", phy_id, dev_addr, regnum); return 0xffff; } - value = enetc_rd_reg(&regs->mdio_data) & 0xffff; + value = enetc_mdio_rd(hw, MDIO_DATA) & 0xffff; return value; } @@ -146,12 +149,12 @@ static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) int enetc_mdio_probe(struct enetc_pf *pf) { struct device *dev = &pf->si->pdev->dev; - struct enetc_mdio_regs __iomem *regs; + struct enetc_mdio_priv *mdio_priv; struct device_node *np; struct mii_bus *bus; - int ret; + int err; - bus = mdiobus_alloc_size(sizeof(regs)); + bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv)); if (!bus) return -ENOMEM; @@ -159,41 +162,31 @@ int enetc_mdio_probe(struct enetc_pf *pf) bus->read = enetc_mdio_read; bus->write = enetc_mdio_write; bus->parent = dev; + mdio_priv = bus->priv; + mdio_priv->hw = &pf->si->hw; snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); - /* store the enetc mdio base address for this bus */ - regs = pf->si->hw.port + ENETC_MDIO_REG_OFFSET; - bus->priv = regs; - np = of_get_child_by_name(dev->of_node, "mdio"); if (!np) { dev_err(dev, "MDIO node missing\n"); - ret = -EINVAL; - goto err_registration; + return -EINVAL; } - ret = of_mdiobus_register(bus, np); - if (ret) { + err = of_mdiobus_register(bus, np); + if (err) { of_node_put(np); dev_err(dev, "cannot register MDIO bus\n"); - goto err_registration; + return err; } of_node_put(np); pf->mdio = bus; return 0; - -err_registration: - mdiobus_free(bus); - - return ret; } void enetc_mdio_remove(struct enetc_pf *pf) { - if (pf->mdio) { + if (pf->mdio) mdiobus_unregister(pf->mdio); - mdiobus_free(pf->mdio); - } } diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.h b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h new file mode 100644 index 000000000000..60c9a3889824 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2019 NXP */ + +#include <linux/phy.h> +#include "enetc_pf.h" + +struct enetc_mdio_priv { + struct enetc_hw *hw; +}; + +int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value); +int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c new file mode 100644 index 000000000000..fbd41ce01f06 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ +#include <linux/of_mdio.h> +#include "enetc_mdio.h" + +#define ENETC_MDIO_DEV_ID 0xee01 +#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO" +#define ENETC_MDIO_BUS_NAME ENETC_MDIO_DEV_NAME " Bus" +#define ENETC_MDIO_DRV_NAME ENETC_MDIO_DEV_NAME " driver" + +static int enetc_pci_mdio_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct enetc_mdio_priv *mdio_priv; + struct device *dev = &pdev->dev; +
struct enetc_hw *hw; + struct mii_bus *bus; + int err; + + hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL); + if (!hw) + return -ENOMEM; + + bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv)); + if (!bus) + return -ENOMEM; + + bus->name = ENETC_MDIO_BUS_NAME; + bus->read = enetc_mdio_read; + bus->write = enetc_mdio_write; + bus->parent = dev; + mdio_priv = bus->priv; + mdio_priv->hw = hw; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + + pcie_flr(pdev); + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(dev, "device enable failed\n"); + return err; + } + + err = pci_request_region(pdev, 0, KBUILD_MODNAME); + if (err) { + dev_err(dev, "pci_request_region failed\n"); + goto err_pci_mem_reg; + } + + hw->port = pci_iomap(pdev, 0, 0); + if (!hw->port) { + err = -ENXIO; + dev_err(dev, "iomap failed\n"); + goto err_ioremap; + } + + err = of_mdiobus_register(bus, dev->of_node); + if (err) + goto err_mdiobus_reg; + + pci_set_drvdata(pdev, bus); + + return 0; + +err_mdiobus_reg: + iounmap(mdio_priv->hw->port); +err_ioremap: + pci_release_mem_regions(pdev); +err_pci_mem_reg: + pci_disable_device(pdev); + + return err; +} + +static void enetc_pci_mdio_remove(struct pci_dev *pdev) +{ + struct mii_bus *bus = pci_get_drvdata(pdev); + struct enetc_mdio_priv *mdio_priv; + + mdiobus_unregister(bus); + mdio_priv = bus->priv; + iounmap(mdio_priv->hw->port); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static const struct pci_device_id enetc_pci_mdio_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) }, + { 0, } /* End of table. */ +}; +MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table); + +static struct pci_driver enetc_pci_mdio_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_pci_mdio_id_table, + .probe = enetc_pci_mdio_probe, + .remove = enetc_pci_mdio_remove, +}; +module_pci_driver(enetc_pci_mdio_driver); + +MODULE_DESCRIPTION(ENETC_MDIO_DRV_NAME); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 258b3cb38a6f..7d6513ff8507 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -750,6 +750,7 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv) { struct enetc_pf *pf = enetc_si_priv(priv->si); struct device_node *np = priv->dev->of_node; + struct device_node *mdio_np; int err; if (!np) { @@ -773,7 +774,9 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv) priv->phy_node = of_node_get(np); } - if (!of_phy_is_fixed_link(np)) { + mdio_np = of_get_child_by_name(np, "mdio"); + if (mdio_np) { + of_node_put(mdio_np); err = enetc_mdio_probe(pf); if (err) { of_node_put(priv->phy_node); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index e5610a4da539..c01d3ec3e9af 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -365,7 +365,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); - frag_len = skb_shinfo(skb)->frags[frag].size; + frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]); /* Handle the last BD specially */ if (frag == nr_frags - 1) { @@ -387,7 +387,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, ebdp->cbd_esc = cpu_to_fec32(estatus); } - bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; + 
bufaddr = skb_frag_address(this_frag); index = fec_enet_get_bd_index(bdp, &txq->bd); if (((unsigned long) bufaddr) & fep->tx_align || diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 5fad73b2e123..3981c06f082f 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -501,7 +501,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) nr_frags = skb_shinfo(skb)->nr_frags; frag = skb_shinfo(skb)->frags; for (i = 0; i < nr_frags; i++, frag++) { - if (!IS_ALIGNED(frag->page_offset, 4)) { + if (!IS_ALIGNED(skb_frag_off(frag), 4)) { is_aligned = 0; break; } diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 689f18e3100f..90ab7ade44c4 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -877,7 +877,6 @@ static int hisi_femac_drv_probe(struct platform_device *pdev) ndev->irq = platform_get_irq(pdev, 0); if (ndev->irq <= 0) { - dev_err(dev, "No irq resource\n"); ret = -ENODEV; goto out_disconnect_phy; } diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 349970557c52..95a6b0926170 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -719,7 +719,7 @@ static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - int len = frag->size; + int len = skb_frag_size(frag); addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE); ret = dma_mapping_error(priv->dev, addr); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 2235dd55fab2..1545536ef769 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -245,7 +245,7 @@ static int hns_nic_maybe_stop_tso( int frag_num; struct sk_buff *skb = *out_skb; struct sk_buff *new_skb = NULL; - struct skb_frag_struct *frag; + skb_frag_t *frag; size = skb_headlen(skb); buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE; @@ -309,7 +309,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, struct hnae_ring *ring = ring_data->ring; struct device *dev = ring_to_dev(ring); struct netdev_queue *dev_queue; - struct skb_frag_struct *frag; + skb_frag_t *frag; int buf_num; int seg_num; dma_addr_t dma; diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 75329ab775a6..f8a87f8ca983 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -47,6 +47,8 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */ + HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */ + HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive a NCSI error */ }; /* below are per-VF mac-vlan subcodes */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 48c7b70fc2c4..a4624db3b5d5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -179,6 +179,15 @@ struct hnae3_vector_info { #define HNAE3_RING_GL_RX 0 #define HNAE3_RING_GL_TX 1 +#define 
HNAE3_FW_VERSION_BYTE3_SHIFT 24 +#define HNAE3_FW_VERSION_BYTE3_MASK GENMASK(31, 24) +#define HNAE3_FW_VERSION_BYTE2_SHIFT 16 +#define HNAE3_FW_VERSION_BYTE2_MASK GENMASK(23, 16) +#define HNAE3_FW_VERSION_BYTE1_SHIFT 8 +#define HNAE3_FW_VERSION_BYTE1_MASK GENMASK(15, 8) +#define HNAE3_FW_VERSION_BYTE0_SHIFT 0 +#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0) + struct hnae3_ring_chain_node { struct hnae3_ring_chain_node *next; u32 tqp_index; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 310afa708831..ed05fb9f04ed 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -459,6 +459,9 @@ static int hns3_nic_net_open(struct net_device *netdev) h->ae_algo->ops->set_timer_task(priv->ae_handle, true); hns3_config_xps(priv); + + netif_dbg(h, drv, netdev, "net open\n"); + return 0; } @@ -519,6 +522,8 @@ static int hns3_nic_net_stop(struct net_device *netdev) if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) return 0; + netif_dbg(h, drv, netdev, "net stop\n"); + if (h->ae_algo->ops->set_timer_task) h->ae_algo->ops->set_timer_task(priv->ae_handle, false); @@ -1033,7 +1038,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct hns3_desc *desc = &ring->desc[ring->next_to_use]; struct device *dev = ring_to_dev(ring); - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned int frag_buf_num; int k, sizeoflast; dma_addr_t dma; @@ -1086,7 +1091,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); } else { - frag = (struct skb_frag_struct *)priv; + frag = (skb_frag_t *)priv; dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); } @@ -1159,7 +1164,7 @@ static int hns3_nic_bd_num(struct sk_buff *skb) bd_num = hns3_tx_bd_count(size); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; int frag_bd_num; size = skb_frag_size(frag); @@ -1290,7 +1295,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) &tx_ring_data(priv, skb->queue_mapping); struct hns3_enet_ring *ring = ring_data->ring; struct netdev_queue *dev_queue; - struct skb_frag_struct *frag; + skb_frag_t *frag; int next_to_use_head; int buf_num; int seg_num; @@ -1550,6 +1555,8 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) h = hns3_get_handle(netdev); kinfo = &h->kinfo; + netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); + return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 
kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; } @@ -1593,6 +1600,10 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, struct hnae3_handle *h = hns3_get_handle(netdev); int ret = -EIO; + netif_dbg(h, drv, netdev, + "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=%u\n", + vf, vlan, qos, vlan_proto); + if (h->ae_algo->ops->set_vf_vlan_filter) ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, qos, vlan_proto); @@ -1611,6 +1622,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) if (!h->ae_algo->ops->set_mtu) return -EOPNOTSUPP; + netif_dbg(h, drv, netdev, + "change mtu from %u to %d\n", netdev->mtu, new_mtu); + ret = h->ae_algo->ops->set_mtu(h, new_mtu); if (ret) netdev_err(netdev, "failed to change MTU in hardware %d\n", @@ -1963,7 +1977,7 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) ops = ae_dev->ops; /* request the reset */ - if (ops->reset_event) { + if (ops->reset_event && ops->get_reset_level) { if (ae_dev->hw_err_reset_req) { reset_type = ops->get_reset_level(ae_dev, &ae_dev->hw_err_reset_req); @@ -2067,7 +2081,7 @@ static void hns3_set_default_feature(struct net_device *netdev) static int hns3_alloc_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) { - unsigned int order = hnae3_page_order(ring); + unsigned int order = hns3_page_order(ring); struct page *p; p = dev_alloc_pages(order); @@ -2078,7 +2092,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring, cb->page_offset = 0; cb->reuse_flag = 0; cb->buf = page_address(p); - cb->length = hnae3_page_size(ring); + cb->length = hns3_page_size(ring); cb->type = DESC_TYPE_PAGE; return 0; @@ -2381,7 +2395,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, { struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; int size = le16_to_cpu(desc->rx.size); - u32 truesize = hnae3_buf_size(ring); + u32 truesize = hns3_buf_size(ring); skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, size - pull_len, truesize); @@ -2396,7 +2410,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, /* Move offset up to the next cache line */ desc_cb->page_offset += truesize; - if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) { + if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) { desc_cb->reuse_flag = 1; /* Bump ref count on page before it is given */ get_page(desc_cb->priv); @@ -2678,7 +2692,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, } if (ring->tail_skb) { - head_skb->truesize += hnae3_buf_size(ring); + head_skb->truesize += hns3_buf_size(ring); head_skb->data_len += le16_to_cpu(desc->rx.size); head_skb->len += le16_to_cpu(desc->rx.size); skb = ring->tail_skb; @@ -2895,24 +2909,22 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) { #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 - int recv_pkts, recv_bds, clean_count, err; int unused_count = hns3_desc_unused(ring); struct sk_buff *skb = ring->skb; - int num; + int recv_pkts = 0; + int recv_bds = 0; + int err, num; num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); rmb(); /* Make sure num taken effect before the other data is touched */ - recv_pkts = 0, recv_bds = 0, clean_count = 0; num -= unused_count; unused_count -= ring->pending_buf; while (recv_pkts < budget && recv_bds < num) { /* Reuse or realloc buffers */ - if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { - hns3_nic_alloc_rx_buffers(ring, - 
clean_count + unused_count); - clean_count = 0; + if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { + hns3_nic_alloc_rx_buffers(ring, unused_count); unused_count = hns3_desc_unused(ring) - ring->pending_buf; } @@ -2926,7 +2938,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, goto out; } else if (unlikely(err)) { /* Do jump the err */ recv_bds += ring->pending_buf; - clean_count += ring->pending_buf; + unused_count += ring->pending_buf; ring->skb = NULL; ring->pending_buf = 0; continue; @@ -2934,7 +2946,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, rx_fn(ring, skb); recv_bds += ring->pending_buf; - clean_count += ring->pending_buf; + unused_count += ring->pending_buf; ring->skb = NULL; ring->pending_buf = 0; @@ -2943,8 +2955,8 @@ out: /* Make all data has been write before submit */ - if (clean_count + unused_count > 0) - hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count); + if (unused_count > 0) + hns3_nic_alloc_rx_buffers(ring, unused_count); return recv_pkts; } @@ -3574,7 +3586,7 @@ out: return ret; } -static void hns3_fini_ring(struct hns3_enet_ring *ring) +void hns3_fini_ring(struct hns3_enet_ring *ring) { hns3_free_desc(ring); devm_kfree(ring_to_dev(ring), ring->desc_cb); @@ -4378,6 +4390,9 @@ int hns3_set_channels(struct net_device *netdev, u16 org_tqp_num; int ret; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + if (ch->rx_count || ch->tx_count) return -EINVAL; @@ -4392,6 +4407,10 @@ int hns3_set_channels(struct net_device *netdev, if (kinfo->rss_size == new_tqp_num) return 0; + netif_dbg(h, drv, netdev, + "set channels: tqp_num=%u, rxfh=%d\n", + new_tqp_num, rxfh_configured); + ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 848b866761df..0a970f5fed10 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -75,7 +75,7 @@ enum hns3_nic_state { #define HNS3_TX_TIMEOUT (5 * HZ) #define HNS3_RING_NAME_LEN 16 #define HNS3_BUFFER_SIZE_2048 2048 -#define HNS3_RING_MAX_PENDING 32768 +#define HNS3_RING_MAX_PENDING 32760 #define HNS3_RING_MIN_PENDING 24 #define HNS3_RING_BD_MULTIPLE 8 /* max frame size of mac */ @@ -608,9 +608,18 @@ static inline bool hns3_nic_resetting(struct net_device *netdev) #define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) -#define hnae3_buf_size(_ring) ((_ring)->buf_size) -#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring))) -#define hnae3_page_size(_ring) (PAGE_SIZE << (u32)hnae3_page_order(_ring)) +#define hns3_buf_size(_ring) ((_ring)->buf_size) + +static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring->buf_size > (PAGE_SIZE / 2)) + return 1; +#endif + return 0; +} + +#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring)) /* iterator for handling rings in ring group */ #define hns3_for_each_ring(pos, head) \ @@ -633,6 +642,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring); int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); int hns3_nic_reset_all_ring(struct hnae3_handle *h); +void hns3_fini_ring(struct hns3_enet_ring *ring); netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); bool hns3_is_phys_func(struct pci_dev *pdev); int hns3_clean_rx_ring(
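The new hns3_page_order()/hns3_page_size() helpers above make the buffer-page policy explicit: on systems with pages smaller than 8 KiB, a ring buffer larger than half a page gets an order-1 (two-page) allocation, otherwise a single page suffices. A small illustration under those assumptions (the example_* helper names and the worked values are illustrative, not driver code):

#include <linux/mm.h>
#include <linux/skbuff.h>

/* Same decision rule, shown standalone:
 *   buf_size 2048 on 4 KiB pages -> order 0 (two buffers per page)
 *   buf_size 4096 on 4 KiB pages -> order 1 (one 8 KiB compound page)
 *   any buf_size on >= 8 KiB pages -> order 0
 */
static unsigned int example_rx_page_order(unsigned int buf_size)
{
#if (PAGE_SIZE < 8192)
	if (buf_size > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

static struct page *example_alloc_rx_page(unsigned int buf_size)
{
	/* the ring then sees PAGE_SIZE << order bytes of usable space */
	return dev_alloc_pages(example_rx_page_order(buf_size));
}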
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 5bff98a9b0dc..02f46c73ac3b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -311,6 +311,8 @@ static void hns3_self_test(struct net_device *ndev, if (eth_test->flags != ETH_TEST_FL_OFFLINE) return; + netif_dbg(h, drv, ndev, "self test start\n"); + st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP; st_param[HNAE3_LOOP_APP][1] = h->flags & HNAE3_SUPPORT_APP_LOOPBACK; @@ -374,6 +376,8 @@ static void hns3_self_test(struct net_device *ndev, if (if_running) ndev->netdev_ops->ndo_open(ndev); + + netif_dbg(h, drv, ndev, "self test end\n"); } static int hns3_get_sset_count(struct net_device *netdev, int stringset) @@ -527,6 +531,7 @@ static void hns3_get_drvinfo(struct net_device *netdev, { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; + u32 fw_version; if (!h->ae_algo->ops->get_fw_version) { netdev_err(netdev, "could not get fw version!\n"); @@ -545,8 +550,18 @@ static void hns3_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->bus_info)); drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", - priv->ae_handle->ae_algo->ops->get_fw_version(h)); + fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%lu.%lu.%lu.%lu", + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); } static u32 hns3_get_link(struct net_device *netdev) @@ -593,6 +608,10 @@ static int hns3_set_pauseparam(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); + netif_dbg(h, drv, netdev, + "set pauseparam: autoneg=%u, rx:%u, tx:%u\n", + param->autoneg, param->rx_pause, param->tx_pause); + if (h->ae_algo->ops->set_pauseparam) return h->ae_algo->ops->set_pauseparam(h, param->autoneg, param->rx_pause, @@ -732,6 +751,11 @@ static int hns3_set_link_ksettings(struct net_device *netdev, if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF) return -EINVAL; + netif_dbg(handle, drv, netdev, + "set link(%s): autoneg=%u, speed=%u, duplex=%u\n", + netdev->phydev ?
"phy" : "mac", + cmd->base.autoneg, cmd->base.speed, cmd->base.duplex); + /* Only support ksettings_set for netdev with phy attached for now */ if (netdev->phydev) return phy_ethtool_ksettings_set(netdev->phydev, cmd); @@ -843,8 +867,8 @@ static int hns3_get_rxnfc(struct net_device *netdev, } } -static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, - u32 tx_desc_num, u32 rx_desc_num) +static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, + u32 tx_desc_num, u32 rx_desc_num) { struct hnae3_handle *h = priv->ae_handle; int i; @@ -857,21 +881,29 @@ static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num = rx_desc_num; } - - return hns3_init_all_ring(priv); } -static int hns3_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *param) +static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; - bool if_running = netif_running(ndev); - u32 old_tx_desc_num, new_tx_desc_num; - u32 old_rx_desc_num, new_rx_desc_num; - int queue_num = h->kinfo.num_tqps; - int ret; + struct hnae3_handle *handle = priv->ae_handle; + struct hns3_enet_ring *tmp_rings; + int i; + + tmp_rings = kcalloc(handle->kinfo.num_tqps * 2, + sizeof(struct hns3_enet_ring), GFP_KERNEL); + if (!tmp_rings) + return NULL; + + for (i = 0; i < handle->kinfo.num_tqps * 2; i++) + memcpy(&tmp_rings[i], priv->ring_data[i].ring, + sizeof(struct hns3_enet_ring)); + + return tmp_rings; +} +static int hns3_check_ringparam(struct net_device *ndev, + struct ethtool_ringparam *param) +{ if (hns3_nic_resetting(ndev)) return -EBUSY; @@ -887,6 +919,25 @@ static int hns3_set_ringparam(struct net_device *ndev, return -EINVAL; } + return 0; +} + +static int hns3_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *param) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_ring *tmp_rings; + bool if_running = netif_running(ndev); + u32 old_tx_desc_num, new_tx_desc_num; + u32 old_rx_desc_num, new_rx_desc_num; + u16 queue_num = h->kinfo.num_tqps; + int ret, i; + + ret = hns3_check_ringparam(ndev, param); + if (ret) + return ret; + /* Hardware requires that its descriptors must be multiple of eight */ new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE); new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE); @@ -896,6 +947,13 @@ static int hns3_set_ringparam(struct net_device *ndev, old_rx_desc_num == new_rx_desc_num) return 0; + tmp_rings = hns3_backup_ringparam(priv); + if (!tmp_rings) { + netdev_err(ndev, + "backup ring param failed by allocating memory fail\n"); + return -ENOMEM; + } + netdev_info(ndev, "Changing Tx/Rx ring depth from %d/%d to %d/%d\n", old_tx_desc_num, old_rx_desc_num, @@ -904,22 +962,24 @@ static int hns3_set_ringparam(struct net_device *ndev, if (if_running) ndev->netdev_ops->ndo_stop(ndev); - ret = hns3_uninit_all_ring(priv); - if (ret) - return ret; - - ret = hns3_change_all_ring_bd_num(priv, new_tx_desc_num, - new_rx_desc_num); + hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num); + ret = hns3_init_all_ring(priv); if (ret) { - ret = hns3_change_all_ring_bd_num(priv, old_tx_desc_num, - old_rx_desc_num); - if (ret) { - netdev_err(ndev, - "Revert to old bd num fail, ret=%d.\n", ret); - return ret; - } + netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n", + ret); + + 
hns3_change_all_ring_bd_num(priv, old_tx_desc_num, + old_rx_desc_num); + for (i = 0; i < h->kinfo.num_tqps * 2; i++) + memcpy(priv->ring_data[i].ring, &tmp_rings[i], + sizeof(struct hns3_enet_ring)); + } else { + for (i = 0; i < h->kinfo.num_tqps * 2; i++) + hns3_fini_ring(&tmp_rings[i]); } + kfree(tmp_rings); + if (if_running) ret = ndev->netdev_ops->ndo_open(ndev); @@ -973,6 +1033,9 @@ static int hns3_nway_reset(struct net_device *netdev) return -EINVAL; } + netif_dbg(handle, drv, netdev, + "nway reset (using %s)\n", phy ? "phy" : "mac"); + if (phy) return genphy_restart_aneg(phy); @@ -1297,6 +1360,9 @@ static int hns3_set_fecparam(struct net_device *netdev, if (!ops->set_fec) return -EOPNOTSUPP; fec_mode = eth_to_loc_fec(fec->fec); + + netif_dbg(handle, drv, netdev, "set fecparam: mode=%u\n", fec_mode); + return ops->set_fec(handle, fec_mode); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 22f6acd45d9a..ecf58cfd253d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -103,14 +103,17 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) dma_addr_t dma = ring->desc_dma_addr; struct hclge_dev *hdev = ring->dev; struct hclge_hw *hw = &hdev->hw; + u32 reg_val; if (ring->ring_type == HCLGE_TYPE_CSQ) { hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, lower_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, upper_32_bits(dma)); - hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, - ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S); + reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG); + reg_val &= HCLGE_NIC_SW_RST_RDY; + reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S; + hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val); hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); } else { @@ -383,6 +386,23 @@ err_csq: return ret; } +static int hclge_firmware_compat_config(struct hclge_dev *hdev) +{ + struct hclge_firmware_compat_cmd *req; + struct hclge_desc desc; + u32 compat = 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false); + + req = (struct hclge_firmware_compat_cmd *)desc.data; + + hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1); + hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1); + req->compat = cpu_to_le32(compat); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + int hclge_cmd_init(struct hclge_dev *hdev) { u32 version; @@ -419,7 +439,24 @@ int hclge_cmd_init(struct hclge_dev *hdev) } hdev->fw_version = version; - dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); + dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n", + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); + + /* ask the firmware to enable some features, driver can work without + * it. 
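The firmware word is now reported as a dotted quad instead of raw hex, both here and in the ethtool drvinfo path above; the HNAE3_FW_VERSION_BYTE*_MASK/SHIFT pairs defined earlier simply slice the 32-bit value into bytes. A minimal sketch of the decoding (the example value and helper name are illustrative, not from the patch):

#include <linux/device.h>
#include <linux/types.h>

/* e.g. version = 0x01080b0a prints as "1.8.11.10" */
static void example_print_fw_version(struct device *dev, u32 version)
{
	dev_info(dev, "The firmware version is %u.%u.%u.%u\n",
		 (version >> 24) & 0xff,	/* BYTE3 field */
		 (version >> 16) & 0xff,	/* BYTE2 field */
		 (version >> 8) & 0xff,		/* BYTE1 field */
		 version & 0xff);		/* BYTE0 field */
}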
+ /* ask the firmware to enable some features; the driver can work + * without them. + */ + ret = hclge_firmware_compat_config(hdev); + if (ret) + dev_warn(&hdev->pdev->dev, + "Firmware compatible features not enabled(%d).\n", + ret); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 96840d8f3e24..dade20a37d40 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -86,6 +86,7 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_PF_RSRC = 0x0023, HCLGE_OPC_QUERY_VF_RSRC = 0x0024, HCLGE_OPC_GET_CFG_PARAM = 0x0025, + HCLGE_OPC_PF_RST_DONE = 0x0026, HCLGE_OPC_STATS_64_BIT = 0x0030, HCLGE_OPC_STATS_32_BIT = 0x0031, @@ -257,6 +258,7 @@ enum hclge_opcode_type { /* M7 stats command */ HCLGE_OPC_M7_STATS_BD = 0x7012, HCLGE_OPC_M7_STATS_INFO = 0x7013, + HCLGE_OPC_M7_COMPAT_CFG = 0x701A, /* SFP command */ HCLGE_OPC_GET_SFP_INFO = 0x7104, @@ -827,7 +829,7 @@ struct hclge_mac_ethertype_idx_rd_cmd { u8 flags; u8 resp_code; __le16 vlan_tag; - u8 mac_add[6]; + u8 mac_addr[6]; __le16 index; __le16 ethter_type; __le16 egress_port; @@ -877,6 +879,13 @@ struct hclge_reset_cmd { u8 rsv[22]; }; +#define HCLGE_PF_RESET_DONE_BIT BIT(0) + +struct hclge_pf_rst_done_cmd { + u8 pf_rst_done; + u8 rsv[23]; +}; + #define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0) #define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2) #define HCLGE_CMD_SERDES_DONE_B BIT(0) @@ -906,8 +915,11 @@ struct hclge_serdes_lb_cmd { #define HCLGE_NIC_CRQ_DEPTH_REG 0x27020 #define HCLGE_NIC_CRQ_TAIL_REG 0x27024 #define HCLGE_NIC_CRQ_HEAD_REG 0x27028 -#define HCLGE_NIC_CMQ_EN_B 16 -#define HCLGE_NIC_CMQ_ENABLE BIT(HCLGE_NIC_CMQ_EN_B) + +/* this bit indicates that the driver is ready for hardware reset */ +#define HCLGE_NIC_SW_RST_RDY_B 16 +#define HCLGE_NIC_SW_RST_RDY BIT(HCLGE_NIC_SW_RST_RDY_B) + #define HCLGE_NIC_CMQ_DESC_NUM 1024 #define HCLGE_NIC_CMQ_DESC_NUM_S 3 @@ -1009,6 +1021,13 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd { u8 rsv[4]; }; +#define HCLGE_LINK_EVENT_REPORT_EN_B 0 +#define HCLGE_NCSI_ERROR_REPORT_EN_B 1 +struct hclge_firmware_compat_cmd { + __le32 compat; + u8 rsv[20]; +}; + int hclge_cmd_init(struct hclge_dev *hdev); static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index bac4ce13f6ae..814e0f076e32 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -201,6 +201,7 @@ static int hclge_client_setup_tc(struct hclge_dev *hdev) static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) { struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; struct hclge_dev *hdev = vport->back; bool map_changed = false; u8 num_tc = 0; @@ -215,6 +216,8 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) return ret; if (map_changed) { + netif_dbg(h, drv, netdev, "set ets\n"); + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); if (ret) return ret; @@ -300,6 +303,7 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) { struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; struct hclge_dev *hdev = vport->back; u8 i, j, pfc_map, *prio_tc; @@ -325,6 +329,10 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc
*pfc) hdev->tm_info.hw_pfc_map = pfc_map; hdev->tm_info.pfc_en = pfc->pfc_en; + netif_dbg(h, drv, netdev, + "set pfc: pfc_en=%u, pfc_map=%u, num_tc=%u\n", + pfc->pfc_en, pfc_map, hdev->tm_info.num_tc); + hclge_tm_pfc_info_update(hdev); return hclge_pause_setup_hw(hdev, false); @@ -345,8 +353,11 @@ static u8 hclge_getdcbx(struct hnae3_handle *h) static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode) { struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; struct hclge_dev *hdev = vport->back; + netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode); + /* No support for LLD_MANAGED modes or CEE */ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || (mode & DCB_CAP_DCBX_VER_CEE) || diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index ab625c757a95..f16bfc67412a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -325,6 +325,12 @@ static void hclge_dbg_dump_tc(struct hclge_dev *hdev) struct hclge_desc desc; int i, ret; + if (!hnae3_dev_dcb_supported(hdev)) { + dev_info(&hdev->pdev->dev, + "Only DCB-supported dev supports tc\n"); + return; + } + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -409,6 +415,12 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]); + if (!hnae3_dev_dcb_supported(hdev)) { + dev_info(&hdev->pdev->dev, + "Only DCB-supported dev supports tm mapping\n"); + return; + } + cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; hclge_cmd_setup_basic_desc(&desc, cmd, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -590,6 +602,12 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n", queue_id, qset_id, pri_id, tc_id); + if (!hnae3_dev_dcb_supported(hdev)) { + dev_info(&hdev->pdev->dev, + "Only DCB-supported dev supports tm mapping\n"); + return; + } + cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; for (group_id = 0; group_id < 32; group_id++) { @@ -715,6 +733,34 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n", rx_buf_cmd->shared_buf); + cmd = HCLGE_OPC_RX_COM_WL_ALLOC; + hclge_cmd_setup_basic_desc(desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + goto err_qos_cmd_send; + + rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data; + dev_info(&hdev->pdev->dev, "\n"); + dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n", + rx_com_wl->com_wl.high, rx_com_wl->com_wl.low); + + cmd = HCLGE_OPC_RX_GBL_PKT_CNT; + hclge_cmd_setup_basic_desc(desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + goto err_qos_cmd_send; + + rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data; + dev_info(&hdev->pdev->dev, + "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", + rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low); + dev_info(&hdev->pdev->dev, "\n"); + + if (!hnae3_dev_dcb_supported(hdev)) { + dev_info(&hdev->pdev->dev, + "Only DCB-supported dev supports rx priv wl\n"); + return; + } cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC; hclge_cmd_setup_basic_desc(&desc[0], cmd, true); desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); @@ -723,7 +769,6 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) if (ret) goto err_qos_cmd_send; - 
dev_info(&hdev->pdev->dev, "\n"); rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data; for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) dev_info(&hdev->pdev->dev, @@ -758,29 +803,6 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4, rx_com_thrd->com_thrd[i].high, rx_com_thrd->com_thrd[i].low); - - cmd = HCLGE_OPC_RX_COM_WL_ALLOC; - hclge_cmd_setup_basic_desc(desc, cmd, true); - ret = hclge_cmd_send(&hdev->hw, desc, 1); - if (ret) - goto err_qos_cmd_send; - - rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data; - dev_info(&hdev->pdev->dev, "\n"); - dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n", - rx_com_wl->com_wl.high, rx_com_wl->com_wl.low); - - cmd = HCLGE_OPC_RX_GBL_PKT_CNT; - hclge_cmd_setup_basic_desc(desc, cmd, true); - ret = hclge_cmd_send(&hdev->hw, desc, 1); - if (ret) - goto err_qos_cmd_send; - - rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data; - dev_info(&hdev->pdev->dev, - "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", - rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low); - return; err_qos_cmd_send: @@ -825,9 +847,9 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev) memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); snprintf(printf_buf, HCLGE_DBG_BUF_LEN, "%02u |%02x:%02x:%02x:%02x:%02x:%02x|", - req0->index, req0->mac_add[0], req0->mac_add[1], - req0->mac_add[2], req0->mac_add[3], req0->mac_add[4], - req0->mac_add[5]); + req0->index, req0->mac_addr[0], req0->mac_addr[1], + req0->mac_addr[2], req0->mac_addr[3], + req0->mac_addr[4], req0->mac_addr[5]); snprintf(printf_buf + strlen(printf_buf), HCLGE_DBG_BUF_LEN - strlen(printf_buf), diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 0a7243825e7b..05a4cdbf903a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -652,16 +652,11 @@ static void hclge_log_error(struct device *dev, char *reg, * @desc: descriptor for describing the command * @cmd: command opcode * @flag: flag for extended command structure - * @w_num: offset for setting the read interrupt type. - * @int_type: select which type of the interrupt for which the error - * info will be read(RAS-CE/RAS-NFE/RAS-FE etc). 
* * This function query the error info from hw register/s using command */ static int hclge_cmd_query_error(struct hclge_dev *hdev, - struct hclge_desc *desc, u32 cmd, - u16 flag, u8 w_num, - enum hclge_err_int_type int_type) + struct hclge_desc *desc, u32 cmd, u16 flag) { struct device *dev = &hdev->pdev->dev; int desc_num = 1; @@ -673,8 +668,6 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev, hclge_cmd_setup_basic_desc(&desc[1], cmd, true); desc_num = 2; } - if (w_num) - desc[0].data[w_num] = cpu_to_le32(int_type); ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num); if (ret) @@ -872,8 +865,7 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en) } /* configure TM QCN hw errors */ - ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, - 0, 0, 0); + ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, 0); if (ret) { dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret); return ret; @@ -1410,7 +1402,7 @@ static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev) ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD, - HCLGE_CMD_FLAG_NEXT, 0, 0); + HCLGE_CMD_FLAG_NEXT); if (ret) { dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret); return ret; @@ -1434,7 +1426,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) /* read overflow error status */ ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD, - 0, 0, 0); + 0); if (ret) { dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret); return ret; @@ -1483,8 +1475,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) /* read RAS error interrupt status */ ret = hclge_cmd_query_error(hdev, &desc[0], - HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, - 0, 0, 0); + HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, 0); if (ret) { dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret); /* reset everything for now */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 3fde5471e1c0..b7399f5ba2c3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1270,6 +1270,12 @@ static int hclge_configure(struct hclge_dev *hdev) hclge_init_kdump_kernel_config(hdev); + /* Set the init affinity based on pci func number */ + i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev))); + i = i ? 
PCI_FUNC(hdev->pdev->devfn) % i : 0; + cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)), + &hdev->affinity_mask); + return ret; } @@ -2499,22 +2505,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev) { if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) && !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) - schedule_work(&hdev->mbx_service_task); + queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq, + &hdev->mbx_service_task); } static void hclge_reset_task_schedule(struct hclge_dev *hdev) { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) - schedule_work(&hdev->rst_service_task); + queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq, + &hdev->rst_service_task); } -static void hclge_task_schedule(struct hclge_dev *hdev) +void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) { if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && - !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) - (void)schedule_work(&hdev->service_task); + !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) { + hdev->hw_stats.stats_timer++; + hdev->fd_arfs_expire_timer++; + mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), + system_wq, &hdev->service_task, + delay_time); + } } static int hclge_get_mac_link_status(struct hclge_dev *hdev) @@ -2729,25 +2742,6 @@ static int hclge_get_status(struct hnae3_handle *handle) return hdev->hw.mac.link; } -static void hclge_service_timer(struct timer_list *t) -{ - struct hclge_dev *hdev = from_timer(hdev, t, service_timer); - - mod_timer(&hdev->service_timer, jiffies + HZ); - hdev->hw_stats.stats_timer++; - hdev->fd_arfs_expire_timer++; - hclge_task_schedule(hdev); -} - -static void hclge_service_complete(struct hclge_dev *hdev) -{ - WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); - - /* Flush memory before next watchdog */ - smp_mb__before_atomic(); - clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); -} - static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) { u32 rst_src_reg, cmdq_src_reg, msix_src_reg; @@ -2882,10 +2876,15 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) break; } - /* clear the source of interrupt if it is not cause by reset */ + hclge_clear_event_cause(hdev, event_cause, clearval); + + /* Enable the interrupt if it is not caused by reset. When clearval + * equals 0, it means the interrupt status may have been cleared by + * hardware before the driver reads the status register; in this case + * the vector0 interrupt should also be enabled. + */ if (!clearval || event_cause == HCLGE_VECTOR0_EVENT_MBX) { - hclge_clear_event_cause(hdev, event_cause, clearval); hclge_enable_vector(&hdev->misc_vector, true); } @@ -2918,6 +2917,36 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev) hdev->num_msi_used += 1; } +static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct hclge_dev *hdev = container_of(notify, struct hclge_dev, + affinity_notify); + + cpumask_copy(&hdev->affinity_mask, mask); +} + +static void hclge_irq_affinity_release(struct kref *ref) +{ +} + +static void hclge_misc_affinity_setup(struct hclge_dev *hdev) +{ + irq_set_affinity_hint(hdev->misc_vector.vector_irq, + &hdev->affinity_mask); + + hdev->affinity_notify.notify = hclge_irq_affinity_notify; + hdev->affinity_notify.release = hclge_irq_affinity_release; + irq_set_affinity_notifier(hdev->misc_vector.vector_irq, + &hdev->affinity_notify); +} + +static void hclge_misc_affinity_teardown(struct hclge_dev *hdev) +{ + irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL); + irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL); +} + static int hclge_misc_irq_init(struct hclge_dev *hdev) { int ret; @@ -3229,7 +3258,13 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) if (!clearval) return; - hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval); + /* For revision 0x20, the reset interrupt source + * can only be cleared after the hardware reset is done + */ + if (hdev->pdev->revision == 0x20) + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, + clearval); + hclge_enable_vector(&hdev->misc_vector, true); } @@ -3250,6 +3285,19 @@ static int hclge_reset_prepare_down(struct hclge_dev *hdev) return ret; } +static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) +{ + u32 reg_val; + + reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG); + if (enable) + reg_val |= HCLGE_NIC_SW_RST_RDY; + else + reg_val &= ~HCLGE_NIC_SW_RST_RDY; + + hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val); +} + static int hclge_reset_prepare_wait(struct hclge_dev *hdev) { #define HCLGE_RESET_SYNC_TIME 100 @@ -3298,14 +3346,13 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev) /* inform hardware that preparatory work is done */ msleep(HCLGE_RESET_SYNC_TIME); - hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, - HCLGE_NIC_CMQ_ENABLE); + hclge_reset_handshake(hdev, true); dev_info(&hdev->pdev->dev, "prepare wait ok\n"); return ret; } -static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout) +static bool hclge_reset_err_handle(struct hclge_dev *hdev) { #define MAX_RESET_FAIL_CNT 5 @@ -3322,27 +3369,34 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout) return false; } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) { hdev->reset_fail_cnt++; - if (is_timeout) { - set_bit(hdev->reset_type, &hdev->reset_pending); - dev_info(&hdev->pdev->dev, - "re-schedule to wait for hw reset done\n"); - return true; - } - - dev_info(&hdev->pdev->dev, "Upgrade reset level\n"); - hclge_clear_reset_cause(hdev); - set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request); - mod_timer(&hdev->reset_timer, - jiffies + HCLGE_RESET_INTERVAL); - - return false; + set_bit(hdev->reset_type, &hdev->reset_pending); + dev_info(&hdev->pdev->dev, + "re-schedule reset task(%d)\n", + hdev->reset_fail_cnt); + return true; } hclge_clear_reset_cause(hdev); + + /* recover the handshake status when reset fails */ + hclge_reset_handshake(hdev, true); +
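hclge_reset_handshake() above centralizes the new handshake: bit 16 of the CSQ depth register now tells the firmware whether the driver is ready for a hardware reset, so the register must always be updated read-modify-write. A condensed sketch of the companion update in hclge_cmd_config_regs(), shown standalone under that assumption (the wrapper function name is illustrative):

/* assumes the defines added in hclge_cmd.h above:
 * HCLGE_NIC_SW_RST_RDY == BIT(16), depth field in the low bits
 */
static void example_update_csq_depth(struct hclge_hw *hw, u32 desc_num)
{
	u32 reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);

	/* keep only the handshake bit, then refresh the depth field;
	 * writing desc_num >> 3 alone would silently clear bit 16
	 */
	reg_val &= HCLGE_NIC_SW_RST_RDY;
	reg_val |= desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
	hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
}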
dev_err(&hdev->pdev->dev, "Reset failed!\n"); return false; } +static int hclge_set_rst_done(struct hclge_dev *hdev) +{ + struct hclge_pf_rst_done_cmd *req; + struct hclge_desc desc; + + req = (struct hclge_pf_rst_done_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); + req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + static int hclge_reset_prepare_up(struct hclge_dev *hdev) { int ret = 0; @@ -3353,10 +3407,18 @@ static int hclge_reset_prepare_up(struct hclge_dev *hdev) case HNAE3_FLR_RESET: ret = hclge_set_all_vf_rst(hdev, false); break; + case HNAE3_GLOBAL_RESET: + /* fall through */ + case HNAE3_IMP_RESET: + ret = hclge_set_rst_done(hdev); + break; default: break; } + /* clear up the handshake status after re-initialize done */ + hclge_reset_handshake(hdev, false); + return ret; } @@ -3382,7 +3444,6 @@ static int hclge_reset_stack(struct hclge_dev *hdev) static void hclge_reset(struct hclge_dev *hdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - bool is_timeout = false; int ret; /* Initialize ae_dev reset status as well, in case enet layer wants to @@ -3410,10 +3471,8 @@ static void hclge_reset(struct hclge_dev *hdev) if (ret) goto err_reset; - if (hclge_reset_wait(hdev)) { - is_timeout = true; + if (hclge_reset_wait(hdev)) goto err_reset; - } hdev->rst_stats.hw_reset_done_cnt++; @@ -3458,14 +3517,22 @@ static void hclge_reset(struct hclge_dev *hdev) hdev->reset_fail_cnt = 0; hdev->rst_stats.reset_done_cnt++; ae_dev->reset_type = HNAE3_NONE_RESET; - del_timer(&hdev->reset_timer); + + /* if default_reset_request has a higher level reset request, + * it should be handled as soon as possible, since some errors + * need this kind of reset to be fixed. + */ + hdev->reset_level = hclge_get_reset_level(ae_dev, + &hdev->default_reset_request); + if (hdev->reset_level != HNAE3_NONE_RESET) + set_bit(hdev->reset_level, &hdev->reset_request); return; err_reset_lock: rtnl_unlock(); err_reset: - if (hclge_reset_err_handle(hdev, is_timeout)) + if (hclge_reset_err_handle(hdev)) hclge_reset_task_schedule(hdev); } @@ -3493,9 +3560,10 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) handle = &hdev->vport[0].nic; if (time_before(jiffies, (hdev->last_reset_time + HCLGE_RESET_INTERVAL))) { + mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); return; - else if (hdev->default_reset_request) + } else if (hdev->default_reset_request) hdev->reset_level = hclge_get_reset_level(ae_dev, &hdev->default_reset_request); @@ -3525,6 +3593,12 @@ static void hclge_reset_timer(struct timer_list *t) { struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); + /* if default_reset_request has no value, it means that this reset + * request has already been handled, so just return here + */ + if (!hdev->default_reset_request) + return; + dev_info(&hdev->pdev->dev, "triggering reset in reset timer\n"); hclge_reset_event(hdev->pdev, NULL); @@ -3606,7 +3680,9 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev) static void hclge_service_task(struct work_struct *work) { struct hclge_dev *hdev = - container_of(work, struct hclge_dev, service_task); + container_of(work, struct hclge_dev, service_task.work); + + clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { hclge_update_stats_for_all(hdev); @@ -3621,7 +3697,8 @@ static void hclge_service_task(struct work_struct *work) hclge_rfs_filter_expire(hdev); hdev->fd_arfs_expire_timer = 0; } - hclge_service_complete(hdev); + + hclge_task_schedule(hdev, round_jiffies_relative(HZ)); }
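hclge_service_task() now ends by re-arming itself, which together with the reworked hclge_task_schedule() replaces the old service timer plus work pair; the work is queued on a CPU taken from the device's affinity mask. A minimal sketch of this self-rescheduling delayed-work pattern, using a hypothetical foo driver (the names and the one-second period are assumptions):

#include <linux/cpumask.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct foo_dev {
	struct delayed_work service_task;
	cpumask_t affinity_mask;
};

static void foo_service_task(struct work_struct *work)
{
	struct foo_dev *fdev = container_of(work, struct foo_dev,
					    service_task.work);

	/* ... periodic housekeeping goes here ... */

	/* re-arm on a CPU from the affinity mask, ~1s from now */
	mod_delayed_work_on(cpumask_first(&fdev->affinity_mask), system_wq,
			    &fdev->service_task,
			    round_jiffies_relative(HZ));
}

static void foo_start_service(struct foo_dev *fdev)
{
	INIT_DELAYED_WORK(&fdev->service_task, foo_service_task);
	mod_delayed_work_on(cpumask_first(&fdev->affinity_mask), system_wq,
			    &fdev->service_task, 0);
}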
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) @@ -5808,7 +5885,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, return -ENOSPC; } - rule = kzalloc(sizeof(*rule), GFP_KERNEL); + rule = kzalloc(sizeof(*rule), GFP_ATOMIC); if (!rule) { spin_unlock_bh(&hdev->fd_rule_lock); @@ -6160,10 +6237,13 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) struct hclge_dev *hdev = vport->back; if (enable) { - mod_timer(&hdev->service_timer, jiffies + HZ); + hclge_task_schedule(hdev, round_jiffies_relative(HZ)); } else { - del_timer_sync(&hdev->service_timer); - cancel_work_sync(&hdev->service_task); + /* Set the DOWN flag here to prevent the service task from + * being scheduled again + */ + set_bit(HCLGE_STATE_DOWN, &hdev->state); + cancel_delayed_work_sync(&hdev->service_task); clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); } } @@ -6202,6 +6282,7 @@ static void hclge_ae_stop(struct hnae3_handle *handle) if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && hdev->reset_type != HNAE3_FUNC_RESET) { hclge_mac_stop_phy(hdev); + hclge_update_link_status(hdev); return; } @@ -6249,7 +6330,6 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, enum hclge_mac_vlan_tbl_opcode op) { struct hclge_dev *hdev = vport->back; - int return_status = -EIO; if (cmdq_resp) { dev_err(&hdev->pdev->dev, @@ -6260,52 +6340,53 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, if (op == HCLGE_MAC_VLAN_ADD) { if ((!resp_code) || (resp_code == 1)) { - return_status = 0; + return 0; } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) { - return_status = -ENOSPC; dev_err(&hdev->pdev->dev, "add mac addr failed for uc_overflow.\n"); + return -ENOSPC; } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) { - return_status = -ENOSPC; dev_err(&hdev->pdev->dev, "add mac addr failed for mc_overflow.\n"); - } else { - dev_err(&hdev->pdev->dev, - "add mac addr failed for undefined, code=%d.\n", - resp_code); + return -ENOSPC; } + + dev_err(&hdev->pdev->dev, + "add mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; } else if (op == HCLGE_MAC_VLAN_REMOVE) { if (!resp_code) { - return_status = 0; + return 0; } else if (resp_code == 1) { - return_status = -ENOENT; dev_dbg(&hdev->pdev->dev, "remove mac addr failed for miss.\n"); - } else { - dev_err(&hdev->pdev->dev, - "remove mac addr failed for undefined, code=%d.\n", - resp_code); + return -ENOENT; } + + dev_err(&hdev->pdev->dev, + "remove mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; } else if (op == HCLGE_MAC_VLAN_LKUP) { if (!resp_code) { - return_status = 0; + return 0; } else if (resp_code == 1) { - return_status = -ENOENT; dev_dbg(&hdev->pdev->dev, "lookup mac addr failed for miss.\n"); - } else { - dev_err(&hdev->pdev->dev, - "lookup mac addr failed for undefined, code=%d.\n", - resp_code); + return -ENOENT; } - } else { - return_status = -EINVAL; + dev_err(&hdev->pdev->dev, - "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", - op); + "lookup mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; } - return return_status; + dev_err(&hdev->pdev->dev, + "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op); + + return -EINVAL; } static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) @@ -8602,12 +8683,10 @@ static void
hclge_state_uninit(struct hclge_dev *hdev) set_bit(HCLGE_STATE_DOWN, &hdev->state); set_bit(HCLGE_STATE_REMOVING, &hdev->state); - if (hdev->service_timer.function) - del_timer_sync(&hdev->service_timer); if (hdev->reset_timer.function) del_timer_sync(&hdev->reset_timer); - if (hdev->service_task.func) - cancel_work_sync(&hdev->service_task); + if (hdev->service_task.work.func) + cancel_delayed_work_sync(&hdev->service_task); if (hdev->rst_service_task.func) cancel_work_sync(&hdev->rst_service_task); if (hdev->mbx_service_task.func) @@ -8812,12 +8891,16 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_dcb_ops_set(hdev); - timer_setup(&hdev->service_timer, hclge_service_timer, 0); timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); - INIT_WORK(&hdev->service_task, hclge_service_task); + INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); + /* Setup affinity after service timer setup because add_timer_on + * is called in affinity notify. + */ + hclge_misc_affinity_setup(hdev); + hclge_clear_all_event_cause(hdev); hclge_clear_resetting_state(hdev); @@ -8842,7 +8925,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_state_init(hdev); hdev->last_reset_time = jiffies; - pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); + dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", + HCLGE_DRIVER_NAME); + return 0; err_mdiobus_unreg: @@ -8979,6 +9064,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) struct hclge_dev *hdev = ae_dev->priv; struct hclge_mac *mac = &hdev->hw.mac; + hclge_misc_affinity_teardown(hdev); hclge_state_uninit(hdev); if (mac->phydev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 6a12285f4c76..c9b9867fc226 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -302,6 +302,13 @@ enum hclge_fc_mode { HCLGE_FC_DEFAULT }; +enum hclge_link_fail_code { + HCLGE_LF_NORMAL, + HCLGE_LF_REF_CLOCK_LOST, + HCLGE_LF_XSFP_TX_DISABLE, + HCLGE_LF_XSFP_ABSENT, +}; + #define HCLGE_PG_NUM 4 #define HCLGE_SCH_MODE_SP 0 #define HCLGE_SCH_MODE_DWRR 1 @@ -806,9 +813,8 @@ struct hclge_dev { u16 adminq_work_limit; /* Num of admin receive queue desc to process */ unsigned long service_timer_period; unsigned long service_timer_previous; - struct timer_list service_timer; struct timer_list reset_timer; - struct work_struct service_task; + struct delayed_work service_task; struct work_struct rst_service_task; struct work_struct mbx_service_task; @@ -864,6 +870,10 @@ struct hclge_dev { DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, HCLGE_MAC_TNL_LOG_SIZE); + + /* affinity mask and notify for misc interrupt */ + cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; }; /* VPort level vlan tag configuration for TX direction */ @@ -1018,4 +1028,5 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, u16 state, u16 vlan_tag, u16 qos, u16 vlan_proto); +void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 690b9990215c..5a7221ee6bb9 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -545,6 +545,36 @@ static int hclge_get_rss_key(struct hclge_vport *vport, HCLGE_RSS_MBX_RESP_LEN); } +static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code) +{ + switch (link_fail_code) { + case HCLGE_LF_REF_CLOCK_LOST: + dev_warn(&hdev->pdev->dev, "Reference clock lost!\n"); + break; + case HCLGE_LF_XSFP_TX_DISABLE: + dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n"); + break; + case HCLGE_LF_XSFP_ABSENT: + dev_warn(&hdev->pdev->dev, "SFP is absent!\n"); + break; + default: + break; + } +} + +static void hclge_handle_link_change_event(struct hclge_dev *hdev, + struct hclge_mbx_vf_to_pf_cmd *req) +{ +#define LINK_STATUS_OFFSET 1 +#define LINK_FAIL_CODE_OFFSET 2 + + clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); + hclge_task_schedule(hdev, 0); + + if (!req->msg[LINK_STATUS_OFFSET]) + hclge_link_fail_parse(hdev, req->msg[LINK_FAIL_CODE_OFFSET]); +} + static bool hclge_cmd_crq_empty(struct hclge_hw *hw) { u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG); @@ -552,6 +582,15 @@ static bool hclge_cmd_crq_empty(struct hclge_hw *hw) return tail == hw->cmq.crq.next_to_use; } +static void hclge_handle_ncsi_error(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + + ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET); + dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n"); + ae_dev->ops->reset_event(hdev->pdev, NULL); +} + void hclge_mbx_handler(struct hclge_dev *hdev) { struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq; @@ -707,6 +746,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev) "PF fail(%d) to media type for VF\n", ret); break; + case HCLGE_MBX_PUSH_LINK_STATUS: + hclge_handle_link_change_event(hdev, req); + break; + case HCLGE_MBX_NCSI_ERROR: + hclge_handle_ncsi_error(hdev); + break; default: dev_err(&hdev->pdev->dev, "un-supported mailbox message, code = %d\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 3f41fa2bc414..f30d1126d378 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -650,12 +650,8 @@ static void hclge_pfc_info_init(struct hclge_dev *hdev) } } -static int hclge_tm_schd_info_init(struct hclge_dev *hdev) +static void hclge_tm_schd_info_init(struct hclge_dev *hdev) { - if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) && - (hdev->tm_info.num_pg != 1)) - return -EINVAL; - hclge_tm_pg_info_init(hdev); hclge_tm_tc_info_init(hdev); @@ -663,8 +659,6 @@ static int hclge_tm_schd_info_init(struct hclge_dev *hdev) hclge_tm_vport_info_update(hdev); hclge_pfc_info_init(hdev); - - return 0; } static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev) @@ -1428,15 +1422,15 @@ int hclge_tm_init_hw(struct hclge_dev *hdev, bool init) int hclge_tm_schd_init(struct hclge_dev *hdev) { - int ret; - /* fc_mode is HCLGE_FC_FULL on reset */ hdev->tm_info.fc_mode = HCLGE_FC_FULL; hdev->fc_mode_last_time = hdev->tm_info.fc_mode; - ret = hclge_tm_schd_info_init(hdev); - if (ret) - return ret; + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE && + hdev->tm_info.num_pg != 1) + return -EINVAL; + + hclge_tm_schd_info_init(hdev); return hclge_tm_init_hw(hdev, true); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 652b796044e3..55d3c784f2d4 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -97,7 +97,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); - reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); + reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG); + reg_val &= HCLGEVF_NIC_SW_RST_RDY; + reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); @@ -405,7 +407,15 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) } hdev->fw_version = version; - dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); + dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n", + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 127a434a56f3..f830eef02e5c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -244,8 +244,11 @@ struct hclgevf_cfg_tx_queue_pointer_cmd { #define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020 #define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024 #define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028 -#define HCLGEVF_NIC_CMQ_EN_B 16 -#define HCLGEVF_NIC_CMQ_ENABLE BIT(HCLGEVF_NIC_CMQ_EN_B) + +/* this bit indicates that the driver is ready for hardware reset */ +#define HCLGEVF_NIC_SW_RST_RDY_B 16 +#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B) + #define HCLGEVF_NIC_CMQ_DESC_NUM 1024 #define HCLGEVF_NIC_CMQ_DESC_NUM_S 3 #define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index a13a0e101c3b..ce82b2b0f8f5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1396,19 +1396,22 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev) u32 val; int ret; - /* wait to check the hardware reset completion status */ - val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); - dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val); - if (hdev->reset_type == HNAE3_FLR_RESET) return hclgevf_flr_poll_timeout(hdev, HCLGEVF_RESET_WAIT_US, HCLGEVF_RESET_WAIT_CNT); - - ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val, - !(val & HCLGEVF_RST_ING_BITS), - HCLGEVF_RESET_WAIT_US, - HCLGEVF_RESET_WAIT_TIMEOUT_US); + else if (hdev->reset_type == HNAE3_VF_RESET) + ret = readl_poll_timeout(hdev->hw.io_base + + HCLGEVF_VF_RST_ING, val, + !(val & HCLGEVF_VF_RST_ING_BIT), + HCLGEVF_RESET_WAIT_US, + HCLGEVF_RESET_WAIT_TIMEOUT_US); + else + ret = readl_poll_timeout(hdev->hw.io_base + + HCLGEVF_RST_ING, val, + !(val & HCLGEVF_RST_ING_BITS), + HCLGEVF_RESET_WAIT_US, + HCLGEVF_RESET_WAIT_TIMEOUT_US); /* hardware completion status should be available by this time */ if (ret) { @@ -1426,6 +1429,20 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev) return 0; } 
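The hclgevf hunks here share one detail: the CSQ depth register now carries two fields at once, the descriptor-count value programmed at queue setup and the software-reset-ready handshake bit (bit 16) toggled by the hclgevf_reset_handshake() helper added just below. Every writer therefore has to read-modify-write the register rather than store a fresh value, which is exactly what the hclgevf_cmd_config_regs() change above does. A minimal self-contained C sketch of that pattern, with a plain variable standing in for the MMIO register and a hypothetical depth-field mask (the real accessors are hclgevf_read_dev()/hclgevf_write_dev()):

#include <stdint.h>
#include <stdio.h>

#define SW_RST_RDY	(1u << 16)	/* handshake bit, bit 16 as in the patch */
#define DEPTH_MASK	0xffffu		/* hypothetical descriptor-count field */

static uint32_t csq_depth_reg;		/* stand-in for the hardware register */

/* Program the depth field while preserving the handshake bit,
 * mirroring the read-modify-write added to hclgevf_cmd_config_regs().
 */
static void set_queue_depth(uint32_t depth)
{
	uint32_t val = csq_depth_reg;

	val &= SW_RST_RDY;		/* keep only the handshake bit */
	val |= depth & DEPTH_MASK;	/* merge in the new depth field */
	csq_depth_reg = val;
}

/* Toggle the handshake bit while preserving the depth field,
 * mirroring hclgevf_reset_handshake() below.
 */
static void set_reset_ready(int ready)
{
	if (ready)
		csq_depth_reg |= SW_RST_RDY;
	else
		csq_depth_reg &= ~SW_RST_RDY;
}

int main(void)
{
	set_reset_ready(1);
	set_queue_depth(1024 >> 3);	/* desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S */
	printf("reg = 0x%05x\n", (unsigned int)csq_depth_reg);	/* 0x10080 */
	return 0;
}

Either write order leaves the other field intact, which is the property the patch relies on so that queue reinitialization racing with a reset cannot drop the handshake bit.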
+static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) +{ + u32 reg_val; + + reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG); + if (enable) + reg_val |= HCLGEVF_NIC_SW_RST_RDY; + else + reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; + + hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, + reg_val); +} + static int hclgevf_reset_stack(struct hclgevf_dev *hdev) { int ret; @@ -1448,7 +1465,14 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev) if (ret) return ret; - return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); + if (ret) + return ret; + + /* clear handshake status with IMP */ + hclgevf_reset_handshake(hdev, false); + + return 0; } static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) @@ -1474,8 +1498,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); /* inform hardware that preparatory work is done */ msleep(HCLGEVF_RESET_SYNC_TIME); - hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, - HCLGEVF_NIC_CMQ_ENABLE); + hclgevf_reset_handshake(hdev, true); dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n", hdev->reset_type, ret); @@ -1484,6 +1507,8 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) { + /* recover handshake status with IMP when reset fails */ + hclgevf_reset_handshake(hdev, true); hdev->rst_stats.rst_fail_cnt++; dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n", hdev->rst_stats.rst_fail_cnt); @@ -1494,9 +1519,6 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) if (hclgevf_is_reset_pending(hdev)) { set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); - } else { - hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, - HCLGEVF_NIC_CMQ_ENABLE); } } @@ -1867,7 +1889,7 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, u32 *clearval) { - u32 cmdq_src_reg, rst_ing_reg; + u32 val, cmdq_src_reg, rst_ing_reg; /* fetch the events from their corresponding regs */ cmdq_src_reg = hclgevf_read_dev(&hdev->hw, @@ -1883,6 +1905,12 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B); *clearval = cmdq_src_reg; hdev->rst_stats.vf_rst_cnt++; + /* set the VF hardware reset status, which the PF will clear + * once it has finished reinitializing.
+ */ + val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); + hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, + val | HCLGEVF_VF_RST_ING_BIT); return HCLGEVF_VECTOR0_EVENT_RST; } @@ -2695,7 +2723,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) } hdev->last_reset_time = jiffies; - pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); + dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", + HCLGEVF_DRIVER_NAME); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 5a9e30998a8f..f0736b060884 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -103,6 +103,9 @@ (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \ HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT) +#define HCLGEVF_VF_RST_ING 0x07008 +#define HCLGEVF_VF_RST_ING_BIT BIT(16) + #define HCLGEVF_RSS_IND_TBL_SIZE 512 #define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff #define HCLGEVF_RSS_KEY_SIZE 40 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 9c78251f9c39..0e13d1c7e474 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -136,7 +136,7 @@ static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t dma_addr; int i, j; diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 395dde444483..9e43c9ace9c2 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -1549,7 +1549,7 @@ emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) ctrl); /* skb fragments */ for (i = 0; i < nr_frags; ++i) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; len = skb_frag_size(frag); if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF)) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 3da680073265..81a05ea38237 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1485,7 +1485,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) memcpy(dst + cur, page_address(skb_frag_page(frag)) + - frag->page_offset, skb_frag_size(frag)); + skb_frag_off(frag), skb_frag_size(frag)); cur += skb_frag_size(frag); } } else { diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index f703fa58458e..6b6ba1c38235 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -2889,9 +2889,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, } for (f = 0; f < nr_frags; f++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; - frag = &skb_shinfo(skb)->frags[f]; len = skb_frag_size(frag); offset = 0; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 395b05701480..a1fab77b2096 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) else phy_reg |= 
0xFA; e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg); + + if (speed == SPEED_1000) { + hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL, + &phy_reg); + + phy_reg |= HV_PM_CTRL_K1_CLK_REQ; + + hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL, + phy_reg); + } } hw->phy.ops.release(hw); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index eb09c755fa17..1502895eb45d 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -210,7 +210,7 @@ /* PHY Power Management Control */ #define HV_PM_CTRL PHY_REG(770, 17) -#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 +#define HV_PM_CTRL_K1_CLK_REQ 0x200 #define HV_PM_CTRL_K1_ENABLE 0x4000 #define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index e4baa13b3cda..8a3f035c3a5f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5579,9 +5579,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, } for (f = 0; f < nr_frags; f++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; - frag = &skb_shinfo(skb)->frags[f]; len = skb_frag_size(frag); offset = 0; @@ -6297,7 +6296,7 @@ fl_out: static int e1000e_pm_freeze(struct device *dev) { - struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = dev_get_drvdata(dev); struct e1000_adapter *adapter = netdev_priv(netdev); netif_device_detach(netdev); @@ -6630,7 +6629,7 @@ static int __e1000_resume(struct pci_dev *pdev) #ifdef CONFIG_PM_SLEEP static int e1000e_pm_thaw(struct device *dev) { - struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = dev_get_drvdata(dev); struct e1000_adapter *adapter = netdev_priv(netdev); e1000e_set_interrupt_capability(adapter); @@ -6679,8 +6678,7 @@ static int e1000e_pm_resume(struct device *dev) static int e1000e_pm_runtime_idle(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct e1000_adapter *adapter = netdev_priv(netdev); u16 eee_lp; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 7d42582ed48d..b14441944b4b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #ifndef _FM10K_H_ #define _FM10K_H_ @@ -177,14 +177,10 @@ static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring) #define MIN_Q_VECTORS 1 enum fm10k_non_q_vectors { FM10K_MBX_VECTOR, -#define NON_Q_VECTORS_VF NON_Q_VECTORS_PF - NON_Q_VECTORS_PF + NON_Q_VECTORS }; -#define NON_Q_VECTORS(hw) (((hw)->mac.type == fm10k_mac_pf) ? 
\ - NON_Q_VECTORS_PF : \ - NON_Q_VECTORS_VF) -#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS(hw)) +#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS) struct fm10k_q_vector { struct fm10k_intfc *interface; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index 20768ac7f17e..c45315472245 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k.h" @@ -36,7 +36,7 @@ static int fm10k_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) { u8 num_tc = 0; - int i, err; + int i; /* verify type and determine num_tcs needed */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { @@ -57,7 +57,7 @@ static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) /* update TC hardware mapping if necessary */ if (num_tc != netdev_get_num_tc(dev)) { - err = fm10k_setup_tc(dev, num_tc); + int err = fm10k_setup_tc(dev, num_tc); if (err) return err; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 4895dd83dd08..c681d2d28107 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include <linux/vmalloc.h> @@ -222,7 +222,6 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer, const unsigned int size) { unsigned int i; - char *p; if (!pointer) { /* memory is not zero allocated so we have to clear it */ @@ -232,7 +231,7 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer, } for (i = 0; i < size; i++) { - p = (char *)pointer + stats[i].stat_offset; + char *p = (char *)pointer + stats[i].stat_offset; switch (stats[i].sizeof_stat) { case sizeof(u64): @@ -651,7 +650,6 @@ static int fm10k_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct fm10k_intfc *interface = netdev_priv(dev); - struct fm10k_q_vector *qv; u16 tx_itr, rx_itr; int i; @@ -677,7 +675,8 @@ static int fm10k_set_coalesce(struct net_device *dev, /* update q_vectors */ for (i = 0; i < interface->num_q_vectors; i++) { - qv = interface->q_vector[i]; + struct fm10k_q_vector *qv = interface->q_vector[i]; + qv->tx.itr = tx_itr; qv->rx.itr = rx_itr; } @@ -1115,13 +1114,12 @@ static void fm10k_get_channels(struct net_device *dev, struct ethtool_channels *ch) { struct fm10k_intfc *interface = netdev_priv(dev); - struct fm10k_hw *hw = &interface->hw; /* report maximum channels */ ch->max_combined = fm10k_max_channels(dev); /* report info for other vector */ - ch->max_other = NON_Q_VECTORS(hw); + ch->max_other = NON_Q_VECTORS; ch->other_count = ch->max_other; /* record RSS queues */ @@ -1133,14 +1131,13 @@ static int fm10k_set_channels(struct net_device *dev, { struct fm10k_intfc *interface = netdev_priv(dev); unsigned int count = ch->combined_count; - struct fm10k_hw *hw = &interface->hw; /* verify they are not requesting separate vectors */ if (!count || ch->rx_count || ch->tx_count) return -EINVAL; /* verify other_count has not changed */ - if (ch->other_count != NON_Q_VECTORS(hw)) + if (ch->other_count != NON_Q_VECTORS) return 
-EINVAL; /* verify the number of channels does not exceed hardware limits */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 8de77155f2e7..afe1fafd2447 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k.h" #include "fm10k_vf.h" @@ -426,7 +426,7 @@ static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs) struct fm10k_iov_data *iov_data = interface->iov_data; struct fm10k_hw *hw = &interface->hw; size_t size; - int i, err; + int i; /* return error if iov_data is already populated */ if (iov_data) @@ -452,6 +452,7 @@ static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs) /* loop through vf_info structures initializing each entry */ for (i = 0; i < num_vfs; i++) { struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; + int err; /* Record VF VSI value */ vf_info->vsi = i + 1; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 90270b4a1682..e0a2be534b20 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include <linux/types.h> #include <linux/module.h> @@ -17,7 +17,7 @@ const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; static const char fm10k_driver_string[] = DRV_SUMMARY; static const char fm10k_copyright[] = - "Copyright(c) 2013 - 2018 Intel Corporation."; + "Copyright(c) 2013 - 2019 Intel Corporation."; MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); @@ -315,7 +315,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, /* prefetch first cache line of first page */ prefetch(page_addr); #if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); + prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES)); #endif /* allocate a skb to store the frags */ @@ -946,7 +946,7 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring, struct sk_buff *skb = first->skb; struct fm10k_tx_buffer *tx_buffer; struct fm10k_tx_desc *tx_desc; - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned char *data; dma_addr_t dma; unsigned int data_len, size; @@ -1074,7 +1074,8 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { tx_ring->tx_stats.tx_busy++; @@ -1823,7 +1824,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface) v_budget = min_t(u16, v_budget, num_online_cpus()); /* account for vectors not related to queues */ - v_budget += NON_Q_VECTORS(hw); + v_budget += NON_Q_VECTORS; /* At the same time, hardware can only support a maximum of * hw.mac->max_msix_vectors vectors. 
With features @@ -1855,7 +1856,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface) } /* record the number of queues available for q_vectors */ - interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw); + interface->num_q_vectors = v_budget - NON_Q_VECTORS; return 0; } @@ -1869,7 +1870,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface) static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface) { struct net_device *dev = interface->netdev; - int pc, offset, rss_i, i, q_idx; + int pc, offset, rss_i, i; u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1; u8 num_pcs = netdev_get_num_tc(dev); @@ -1879,7 +1880,8 @@ static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface) rss_i = interface->ring_feature[RING_F_RSS].indices; for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) { - q_idx = pc; + int q_idx = pc; + for (i = 0; i < rss_i; i++) { interface->tx_ring[offset + i]->reg_idx = q_idx; interface->tx_ring[offset + i]->qos_pc = pc; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 21021fe4f1c3..75e51f91036c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k_common.h" @@ -297,13 +297,14 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) { struct fm10k_mbx_fifo *fifo = &mbx->rx; u16 total_len = 0, msg_len; - u32 *msg; /* length should include previous amounts pushed */ len += mbx->pushed; /* offset in message is based off of current message size */ do { + u32 *msg; + msg = fifo->buffer + fm10k_fifo_tail_offset(fifo, total_len); msg_len = FM10K_TLV_DWORD_LEN(*msg); total_len += msg_len; @@ -1920,7 +1921,6 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw, /* reduce length by 1 to convert to a mask */ u16 mbmem_len = mbx->mbmem_len - 1; u16 tail_len, len = 0; - u32 *msg; /* push head behind tail */ if (mbx->tail < head) @@ -1930,6 +1930,8 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw, /* determine msg aligned offset for end of buffer */ do { + u32 *msg; + msg = fifo->buffer + fm10k_fifo_head_offset(fifo, len); tail_len = len; len += FM10K_TLV_DWORD_LEN(*msg); @@ -2132,7 +2134,8 @@ fifo_err: * DWORDs, not bytes. Any invalid values will cause the mailbox to return * error. **/ -s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, +s32 fm10k_sm_mbx_init(struct fm10k_hw __always_unused *hw, + struct fm10k_mbx_info *mbx, const struct fm10k_msg_data *msg_data) { mbx->mbx_reg = FM10K_GMBX; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 538a8467f434..d3e85480f46d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. 
*/ #include "fm10k.h" #include <linux/vmalloc.h> @@ -54,7 +54,7 @@ err: **/ static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface) { - int i, err = 0; + int i, err; for (i = 0; i < interface->num_tx_queues; i++) { err = fm10k_setup_tx_resources(interface->tx_ring[i]); @@ -121,7 +121,7 @@ err: **/ static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface) { - int i, err = 0; + int i, err; for (i = 0; i < interface->num_rx_queues; i++) { err = fm10k_setup_rx_resources(interface->rx_ring[i]); @@ -169,7 +169,6 @@ void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring, **/ static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring) { - struct fm10k_tx_buffer *tx_buffer; unsigned long size; u16 i; @@ -179,7 +178,8 @@ static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring) /* Free all the Tx ring sk_buffs */ for (i = 0; i < tx_ring->count; i++) { - tx_buffer = &tx_ring->tx_buffer[i]; + struct fm10k_tx_buffer *tx_buffer = &tx_ring->tx_buffer[i]; + fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); } @@ -871,7 +871,7 @@ static int fm10k_uc_vlan_unsync(struct net_device *netdev, u16 glort = interface->glort; u16 vid = interface->vid; bool set = !!(vid / VLAN_N_VID); - int err = -EHOSTDOWN; + int err; /* drop any leading bits on the VLAN ID */ vid &= VLAN_N_VID - 1; @@ -891,7 +891,7 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev, u16 glort = interface->glort; u16 vid = interface->vid; bool set = !!(vid / VLAN_N_VID); - int err = -EHOSTDOWN; + int err; /* drop any leading bits on the VLAN ID */ vid &= VLAN_N_VID - 1; @@ -1444,11 +1444,11 @@ static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type, static void fm10k_assign_l2_accel(struct fm10k_intfc *interface, struct fm10k_l2_accel *l2_accel) { - struct fm10k_ring *ring; int i; for (i = 0; i < interface->num_rx_queues; i++) { - ring = interface->rx_ring[i]; + struct fm10k_ring *ring = interface->rx_ring[i]; + rcu_assign_pointer(ring->l2_accel, l2_accel); } @@ -1463,7 +1463,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev, struct fm10k_l2_accel *old_l2_accel = NULL; struct fm10k_dglort_cfg dglort = { 0 }; struct fm10k_hw *hw = &interface->hw; - int size = 0, i; + int size, i; u16 vid, glort; /* The hardware supported by fm10k only filters on the destination MAC diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index e49fb51d3613..bb236fa44048 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include <linux/module.h> #include <linux/interrupt.h> @@ -344,7 +344,6 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface) struct net_device *netdev = interface->netdev; u32 __iomem *hw_addr; u32 value; - int err; /* do nothing if netdev is still present or hw_addr is set */ if (netif_device_present(netdev) || interface->hw.hw_addr) @@ -362,6 +361,8 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface) hw_addr = READ_ONCE(interface->uc_addr); value = readl(hw_addr); if (~value) { + int err; + /* Make sure the reset was initiated because we detached, * otherwise we might race with a different reset flow. 
*/ @@ -697,8 +698,6 @@ static void fm10k_watchdog_subtask(struct fm10k_intfc *interface) */ static void fm10k_check_hang_subtask(struct fm10k_intfc *interface) { - int i; - /* If we're down or resetting, just bail */ if (test_bit(__FM10K_DOWN, interface->state) || test_bit(__FM10K_RESETTING, interface->state)) @@ -710,6 +709,8 @@ static void fm10k_check_hang_subtask(struct fm10k_intfc *interface) interface->next_tx_hang_check = jiffies + (2 * HZ); if (netif_carrier_ok(interface->netdev)) { + int i; + /* Force detection of hung controller */ for (i = 0; i < interface->num_tx_queues; i++) set_check_for_tx_hang(interface->tx_ring[i]); @@ -897,7 +898,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, /* Map interrupt */ if (ring->q_vector) { - txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw); + txint = ring->q_vector->v_idx + NON_Q_VECTORS; txint |= FM10K_INT_MAP_TIMER0; } @@ -1036,7 +1037,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* Map interrupt */ if (ring->q_vector) { - rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw); + rxint = ring->q_vector->v_idx + NON_Q_VECTORS; rxint |= FM10K_INT_MAP_TIMER1; } @@ -1719,10 +1720,9 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface) void fm10k_qv_free_irq(struct fm10k_intfc *interface) { int vector = interface->num_q_vectors; - struct fm10k_hw *hw = &interface->hw; struct msix_entry *entry; - entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector]; + entry = &interface->msix_entries[NON_Q_VECTORS + vector]; while (vector) { struct fm10k_q_vector *q_vector; @@ -1759,7 +1759,7 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface) unsigned int ri = 0, ti = 0; int vector, err; - entry = &interface->msix_entries[NON_Q_VECTORS(hw)]; + entry = &interface->msix_entries[NON_Q_VECTORS]; for (vector = 0; vector < interface->num_q_vectors; vector++) { struct fm10k_q_vector *q_vector = interface->q_vector[vector]; @@ -2339,7 +2339,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface) /* Restart the MAC/VLAN request queue in-case of outstanding events */ fm10k_macvlan_schedule(interface); - return err; + return 0; } /** @@ -2352,7 +2352,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface) **/ static int __maybe_unused fm10k_resume(struct device *dev) { - struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); + struct fm10k_intfc *interface = dev_get_drvdata(dev); struct net_device *netdev = interface->netdev; struct fm10k_hw *hw = &interface->hw; int err; @@ -2379,7 +2379,7 @@ static int __maybe_unused fm10k_resume(struct device *dev) **/ static int __maybe_unused fm10k_suspend(struct device *dev) { - struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); + struct fm10k_intfc *interface = dev_get_drvdata(dev); struct net_device *netdev = interface->netdev; netif_device_detach(netdev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index cb4d02629b86..be07bfdb0bb4 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k_pf.h" #include "fm10k_vf.h" @@ -1152,7 +1152,7 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw, * assumption is that in this case it is acceptable to just directly * hand off the message from the VF to the underlying shared code. 
**/ -s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, +s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 __always_unused **results, struct fm10k_mbx_info *mbx) { struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; @@ -1352,7 +1352,6 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) { struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; - u32 *result; s32 err = 0; u32 msg[2]; u8 mode = 0; @@ -1362,7 +1361,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, return FM10K_ERR_PARAM; if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) { - result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE]; + u32 *result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE]; /* XCAST mode update requested */ err = fm10k_tlv_attr_get_u8(result, &mode); @@ -1566,7 +1565,7 @@ static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type, /* read remaining fields */ fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI); fault->address <<= 32; - fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO); + fault->address |= fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO); fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO); /* clear valid bit to allow for next error */ @@ -1642,7 +1641,7 @@ const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = { * switch API. **/ s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { u16 glort, mask; u32 dglort_map; @@ -1685,7 +1684,7 @@ const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = { * This handler configures the default VLAN for the PF **/ static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { u16 glort, pvid; u32 pvid_update; @@ -1746,7 +1745,7 @@ const struct fm10k_tlv_attr fm10k_err_msg_attr[] = { * messages that the PF has sent. **/ s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { struct fm10k_swapi_error err_msg; s32 err; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index 2a7a40bf2b1c..21eff0895a7a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k_tlv.h" @@ -472,7 +472,7 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, const struct fm10k_tlv_attr *tlv_attr) { u32 i, attr_id, offset = 0; - s32 err = 0; + s32 err; u16 len; /* verify pointers are not NULL */ @@ -587,8 +587,9 @@ s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg, * a minimum it just indicates that the message requested was * unimplemented. 
**/ -s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) +s32 fm10k_tlv_msg_error(struct fm10k_hw __always_unused *hw, + u32 __always_unused **results, + struct fm10k_mbx_info __always_unused *mbx) { return FM10K_NOT_IMPLEMENTED; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 9fb9fca375e3..15ac1c7885bc 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #ifndef _FM10K_TYPE_H_ #define _FM10K_TYPE_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index a8519c1f0406..dc8ccd378ec9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k_vf.h" @@ -198,7 +198,7 @@ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) * This function should determine the MAC address for the VF **/ s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { u8 perm_addr[ETH_ALEN]; u16 vid; @@ -267,8 +267,10 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw) * This function is used to add or remove unicast MAC addresses for * the VF. **/ -static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, - const u8 *mac, u16 vid, bool add, u8 flags) +static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, + u16 __always_unused glort, + const u8 *mac, u16 vid, bool add, + u8 __always_unused flags) { struct fm10k_mbx_info *mbx = &hw->mbx; u32 msg[7]; @@ -309,7 +311,8 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, * This function is used to add or remove multicast MAC addresses for * the VF. **/ -static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort, +static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, + u16 __always_unused glort, const u8 *mac, u16 vid, bool add) { struct fm10k_mbx_info *mbx = &hw->mbx; @@ -373,7 +376,7 @@ const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = { * are ready to bring up the interface. **/ s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ? FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO; @@ -392,8 +395,9 @@ s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results, * enabled we can add filters, if it is disabled all filters for this * logical port are flushed. **/ -static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort, - u16 count, bool enable) +static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, + u16 __always_unused glort, + u16 __always_unused count, bool enable) { struct fm10k_mbx_info *mbx = &hw->mbx; u32 msg[2]; @@ -420,7 +424,8 @@ static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort, * so that it can enable either multicast, multicast promiscuous, or * promiscuous mode of operation. 
**/ -static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode) +static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, + u16 __always_unused glort, u8 mode) { struct fm10k_mbx_info *mbx = &hw->mbx; u32 msg[3]; @@ -475,7 +480,7 @@ static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw, * that information to then populate a DGLORTMAP/DEC entry and the queues * to which it has been assigned. **/ -static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw, +static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw __always_unused *hw, struct fm10k_dglort_cfg *dglort) { /* verify the dglort pointer */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 84bd06901014..3e535d3263b3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1021,6 +1021,7 @@ i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type) return NULL; } void i40e_update_stats(struct i40e_vsi *vsi); +void i40e_update_veb_stats(struct i40e_veb *veb); void i40e_update_eth_stats(struct i40e_vsi *vsi); struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi); int i40e_fetch_switch_configuration(struct i40e_pf *pf, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 527eb52c5401..01e4615b1b4b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -711,6 +711,35 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, } /** + * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask + * @req_fec_info: mask request FEC info + * @ks: ethtool ksettings to fill in + **/ +static void i40e_get_settings_link_up_fec(u8 req_fec_info, + struct ethtool_link_ksettings *ks) +{ + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + + if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) { + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); + } else { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_NONE); + if (I40E_AQ_SET_FEC_AUTO & req_fec_info) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); + } + } +} + +/** * i40e_get_settings_link_up - Get the Link settings for when link is up * @hw: hw structure * @ks: ethtool ksettings to fill in @@ -769,13 +798,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 25000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseSR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -892,9 +915,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 40000baseKR4_Full); 
ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseKR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); ethtool_link_ksettings_add_link_mode(ks, supported, 20000baseKR2_Full); ethtool_link_ksettings_add_link_mode(ks, supported, @@ -908,10 +928,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 40000baseKR4_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseKR_Full); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); ethtool_link_ksettings_add_link_mode(ks, advertising, 20000baseKR2_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -929,13 +946,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 25000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); + break; case I40E_PHY_TYPE_25GBASE_AOC: case I40E_PHY_TYPE_25GBASE_ACC: @@ -945,13 +957,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 25000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); + ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -2250,7 +2257,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - struct i40e_veb *veb = pf->veb[pf->lan_veb]; + struct i40e_veb *veb = NULL; unsigned int i; bool veb_stats; u64 *p = data; @@ -2273,8 +2280,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, goto check_data_pointer; veb_stats = ((pf->lan_veb != I40E_NO_VEB) && + (pf->lan_veb < I40E_MAX_VEB) && (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)); + if (veb_stats) { + veb = pf->veb[pf->lan_veb]; + i40e_update_veb_stats(veb); + } + /* If veb stats aren't enabled, pass NULL instead of the veb so that * we initialize stats to zero and update the data pointer * intelligently @@ -2329,7 +2342,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) } if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) - return; + goto check_data_pointer; i40e_add_stat_strings(&data, i40e_gstrings_veb_stats); @@ -2341,6 +2354,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) for (i = 
0; i < I40E_MAX_USER_PRIORITY; i++) i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); +check_data_pointer: WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, "stat strings count mismatch!"); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 9ebbe3da61bb..6d456e579314 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -677,7 +677,7 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi) * i40e_update_veb_stats - Update Switch component statistics * @veb: the VEB being updated **/ -static void i40e_update_veb_stats(struct i40e_veb *veb) +void i40e_update_veb_stats(struct i40e_veb *veb) { struct i40e_pf *pf = veb->pf; struct i40e_hw *hw = &pf->hw; @@ -2530,6 +2530,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) vsi_name, i40e_stat_str(hw, aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); + } else { + dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n", + vsi->netdev->name, + cur_multipromisc ? "entering" : "leaving"); } } @@ -8486,6 +8490,11 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_handle_reset_warning(pf, lock_acquired); + dev_info(&pf->pdev->dev, + pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? + "FW LLDP is disabled\n" : + "FW LLDP is enabled\n"); + } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { int v; @@ -15605,8 +15614,7 @@ static void i40e_shutdown(struct pci_dev *pdev) **/ static int __maybe_unused i40e_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_pf *pf = dev_get_drvdata(dev); struct i40e_hw *hw = &pf->hw; /* If we're already suspended, then there is nothing to do */ @@ -15656,8 +15664,7 @@ static int __maybe_unused i40e_suspend(struct device *dev) **/ static int __maybe_unused i40e_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_pf *pf = dev_get_drvdata(dev); int err; /* If we're not suspended, then there is nothing to do */ @@ -15674,7 +15681,7 @@ static int __maybe_unused i40e_resume(struct device *dev) */ err = i40e_restore_interrupt_scheme(pf); if (err) { - dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n", + dev_err(dev, "Cannot restore interrupt scheme: %d\n", err); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 2a2fe3ec7926..e3f29dc8b290 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3262,7 +3262,7 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) **/ bool __i40e_chk_linearize(struct sk_buff *skb) { - const struct skb_frag_struct *frag, *stale; + const skb_frag_t *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ @@ -3306,7 +3306,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb) * descriptor associated with the fragment. 
*/ if (stale_size > I40E_MAX_DATA_PER_TXD) { - int align_pad = -(stale->page_offset) & + int align_pad = -(skb_frag_off(stale)) & (I40E_MAX_READ_REQ_SIZE - 1); sum -= align_pad; @@ -3349,7 +3349,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); - struct skb_frag_struct *frag; + skb_frag_t *frag; struct i40e_tx_buffer *tx_bi; struct i40e_tx_desc *tx_desc; u16 i = tx_ring->next_to_use; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 100e92d2982f..36d37f31a287 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -521,7 +521,7 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring) **/ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; int count = 0, size = skb_headlen(skb); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 02b09a8ad54c..4601f9e4e998 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -55,7 +55,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; - if (vf->link_forced) { + + /* Always report link is down if the VF queues aren't enabled */ + if (!vf->queues_enabled) { + pfe.event_data.link_event.link_status = false; + pfe.event_data.link_event.link_speed = 0; + } else if (vf->link_forced) { pfe.event_data.link_event.link_status = vf->link_up; pfe.event_data.link_event.link_speed = (vf->link_up ? 
VIRTCHNL_LINK_SPEED_40GB : 0); @@ -65,6 +70,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) pfe.event_data.link_event.link_speed = i40e_virtchnl_link_speed(ls->link_speed); } + i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL); } @@ -2037,30 +2043,33 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) alluni = true; aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, alluni); - if (!aq_ret) { - if (allmulti) { + if (aq_ret) + goto err_out; + + if (allmulti) { + if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC, + &vf->vf_states)) dev_info(&pf->pdev->dev, "VF %d successfully set multicast promiscuous mode\n", vf->vf_id); - set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); - } else { - dev_info(&pf->pdev->dev, - "VF %d successfully unset multicast promiscuous mode\n", - vf->vf_id); - clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); - } - if (alluni) { + } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC, + &vf->vf_states)) + dev_info(&pf->pdev->dev, + "VF %d successfully unset multicast promiscuous mode\n", + vf->vf_id); + + if (alluni) { + if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC, + &vf->vf_states)) dev_info(&pf->pdev->dev, "VF %d successfully set unicast promiscuous mode\n", vf->vf_id); - set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); - } else { - dev_info(&pf->pdev->dev, - "VF %d successfully unset unicast promiscuous mode\n", - vf->vf_id); - clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); - } - } + } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC, + &vf->vf_states)) + dev_info(&pf->pdev->dev, + "VF %d successfully unset unicast promiscuous mode\n", + vf->vf_id); + err_out: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, @@ -2153,7 +2162,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) * VF does not know about these additional VSIs and all * it cares is about its own queues. 
PF configures these queues * to its appropriate VSIs based on TC mapping - **/ + */ if (vf->adq_enabled) { if (idx >= ARRAY_SIZE(vf->ch)) { aq_ret = I40E_ERR_NO_AVAILABLE_VSI; @@ -2364,6 +2373,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) } } + vf->queues_enabled = true; + error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, @@ -2385,6 +2396,9 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; + /* Immediately mark queues as disabled */ + vf->queues_enabled = false; + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index f65cc0c16550..7164b9bb294f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -99,6 +99,7 @@ struct i40e_vf { unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ bool link_forced; bool link_up; /* only valid if VF link is forced */ + bool queues_enabled; /* true if the VF queues are enabled */ bool spoofchk; u16 num_mac; u16 num_vlan; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 0cca1b589b56..7a30d5d5ef53 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -2161,7 +2161,7 @@ static void iavf_create_tx_ctx(struct iavf_ring *tx_ring, **/ bool __iavf_chk_linearize(struct sk_buff *skb) { - const struct skb_frag_struct *frag, *stale; + const skb_frag_t *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ @@ -2205,7 +2205,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb) * descriptor associated with the fragment. 
*/ if (stale_size > IAVF_MAX_DATA_PER_TXD) { - int align_pad = -(stale->page_offset) & + int align_pad = -(skb_frag_off(stale)) & (IAVF_MAX_READ_REQ_SIZE - 1); sum -= align_pad; @@ -2269,7 +2269,7 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); - struct skb_frag_struct *frag; + skb_frag_t *frag; struct iavf_tx_buffer *tx_bi; struct iavf_tx_desc *tx_desc; u16 i = tx_ring->next_to_use; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index 71e7d090f8db..dd3348f9da9d 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -462,7 +462,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb); **/ static inline int iavf_xmit_descriptor_count(struct sk_buff *skb) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; int count = 0, size = skb_headlen(skb); diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 9ee6b55553c0..794d97460fc7 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -329,7 +329,6 @@ struct ice_q_vector { } ____cacheline_internodealigned_in_smp; enum ice_pf_flags { - ICE_FLAG_MSIX_ENA, ICE_FLAG_FLTR_SYNC, ICE_FLAG_RSS_ENA, ICE_FLAG_SRIOV_ENA, @@ -337,6 +336,7 @@ enum ice_pf_flags { ICE_FLAG_DCB_CAPABLE, ICE_FLAG_DCB_ENA, ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, + ICE_FLAG_NO_MEDIA, ICE_FLAG_ENABLE_FW_LLDP, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ ICE_PF_FLAGS_NBITS /* must be last */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 2e0731c1e1a3..5f9dc76699d2 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -740,7 +740,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) ice_get_itr_intrl_gran(hw); - status = ice_init_all_ctrlq(hw); + status = ice_create_all_ctrlq(hw); if (status) goto err_unroll_cqinit; @@ -855,7 +855,7 @@ err_unroll_sched: err_unroll_alloc: devm_kfree(ice_hw_to_dev(hw), hw->port_info); err_unroll_cqinit: - ice_shutdown_all_ctrlq(hw); + ice_destroy_all_ctrlq(hw); return status; } @@ -881,7 +881,7 @@ void ice_deinit_hw(struct ice_hw *hw) /* Attempt to disable FW logging before shutting down control queues */ ice_cfg_fw_log(hw, false); - ice_shutdown_all_ctrlq(hw); + ice_destroy_all_ctrlq(hw); /* Clear VSI contexts if not already cleared */ ice_clear_all_vsi_ctx(hw); @@ -1078,6 +1078,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195), ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196), ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198), + ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201), { 0 } }; @@ -1088,7 +1089,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { * @rxq_index: the index of the Rx queue * * Converts rxq context from sparse to dense structure and then writes - * it to HW register space + * it to HW register space and enables the hardware to prefetch descriptors + * instead of only fetching them on demand */ enum ice_status ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, @@ -1096,6 +1098,11 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, { u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; + if (!rlan_ctx) + return 
ICE_ERR_BAD_PTR; + + rlan_ctx->prefena = 1; + ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); } @@ -3240,40 +3247,44 @@ void ice_replay_post(struct ice_hw *hw) /** * ice_stat_update40 - read 40 bit stat from the chip and update stat values * @hw: ptr to the hardware info - * @hireg: high 32 bit HW register to read from - * @loreg: low 32 bit HW register to read from + * @reg: offset of 64 bit HW register to read from * @prev_stat_loaded: bool to specify if previous stats are loaded * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value */ void -ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, - bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) +ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat) { - u64 new_data; - - new_data = rd32(hw, loreg); - new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; + u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1); /* device stats are not reset at PFR, they likely will not be zeroed - * when the driver starts. So save the first values read and use them as - * offsets to be subtracted from the raw values in order to report stats - * that count from zero. + * when the driver starts. Thus, save the value from the first read + * without adding to the statistic value so that we report stats which + * count up from zero. */ - if (!prev_stat_loaded) + if (!prev_stat_loaded) { *prev_stat = new_data; + return; + } + + /* Calculate the difference between the new and old values, and then + * add it to the software stat value. + */ if (new_data >= *prev_stat) - *cur_stat = new_data - *prev_stat; + *cur_stat += new_data - *prev_stat; else /* to manage the potential roll-over */ - *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat; - *cur_stat &= 0xFFFFFFFFFFULL; + *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat; + + /* Update the previously stored value to prepare for next read */ + *prev_stat = new_data; } /** * ice_stat_update32 - read 32 bit stat from the chip and update stat values * @hw: ptr to the hardware info - * @reg: HW register to read from + * @reg: offset of HW register to read from * @prev_stat_loaded: bool to specify if previous stats are loaded * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value @@ -3287,17 +3298,26 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, new_data = rd32(hw, reg); /* device stats are not reset at PFR, they likely will not be zeroed - * when the driver starts. So save the first values read and use them as - * offsets to be subtracted from the raw values in order to report stats - * that count from zero. + * when the driver starts. Thus, save the value from the first read + * without adding to the statistic value so that we report stats which + * count up from zero. */ - if (!prev_stat_loaded) + if (!prev_stat_loaded) { *prev_stat = new_data; + return; + } + + /* Calculate the difference between the new and old values, and then + * add it to the software stat value. 
+ */ if (new_data >= *prev_stat) - *cur_stat = new_data - *prev_stat; + *cur_stat += new_data - *prev_stat; else /* to manage the potential roll-over */ - *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; + *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat; + + /* Update the previously stored value to prepare for next read */ + *prev_stat = new_data; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index d1f8353fe6bb..e376d1eadba4 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -17,8 +17,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); enum ice_status ice_check_reset(struct ice_hw *hw); enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); +enum ice_status ice_create_all_ctrlq(struct ice_hw *hw); enum ice_status ice_init_all_ctrlq(struct ice_hw *hw); void ice_shutdown_all_ctrlq(struct ice_hw *hw); +void ice_destroy_all_ctrlq(struct ice_hw *hw); enum ice_status ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending); @@ -123,8 +125,8 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); void -ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, - bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); +ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat); void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index e91ac4df0242..2353166c654e 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -310,7 +310,7 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @cq: pointer to the specific Control queue * * This is the main initialization routine for the Control Send Queue - * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_sq_entries * - cq->sq_buf_size @@ -369,7 +369,7 @@ init_ctrlq_exit: * @cq: pointer to the specific Control queue * * The main initialization routine for the Admin Receive (Event) Queue.
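The accumulate-and-rebase pattern introduced in ice_stat_update40/ice_stat_update32 above (read the raw counter, add the delta since the last read to the software total, then store the new raw value in *prev_stat) is what keeps stats counting up from zero even though device counters are never reset at PFR. A minimal standalone sketch of the same arithmetic, assuming a counter that wraps at 2^40; the helper name is hypothetical, not part of the driver:

        #include <linux/types.h>
        #include <linux/bits.h>

        /* Fold a raw reading of a counter that wraps at 2^40 into a running
         * software total; 'prev' holds the previous raw reading.
         */
        static void stat_accumulate40(u64 raw, bool loaded, u64 *prev, u64 *total)
        {
                if (!loaded) {
                        /* first read: record the baseline, add nothing */
                        *prev = raw;
                        return;
                }
                if (raw >= *prev)
                        *total += raw - *prev;
                else
                        /* counter wrapped since the last read */
                        *total += (raw + BIT_ULL(40)) - *prev;
                *prev = raw;
        }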
- * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_rq_entries * - cq->rq_buf_size @@ -569,14 +569,8 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) return 0; init_ctrlq_free_rq: - if (cq->rq.count) { - ice_shutdown_rq(hw, cq); - mutex_destroy(&cq->rq_lock); - } - if (cq->sq.count) { - ice_shutdown_sq(hw, cq); - mutex_destroy(&cq->sq_lock); - } + ice_shutdown_rq(hw, cq); + ice_shutdown_sq(hw, cq); return status; } @@ -585,12 +579,14 @@ init_ctrlq_free_rq: * @hw: pointer to the hardware structure * @q_type: specific Control queue type * - * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_sq_entries * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size + * + * NOTE: this function does not initialize the controlq locks */ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { @@ -616,8 +612,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) !cq->rq_buf_size || !cq->sq_buf_size) { return ICE_ERR_CFG; } - mutex_init(&cq->sq_lock); - mutex_init(&cq->rq_lock); /* setup SQ command write back timeout */ cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT; @@ -625,7 +619,7 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) /* allocate the ATQ */ ret_code = ice_init_sq(hw, cq); if (ret_code) - goto init_ctrlq_destroy_locks; + return ret_code; /* allocate the ARQ */ ret_code = ice_init_rq(hw, cq); @@ -637,9 +631,6 @@ init_ctrlq_free_sq: ice_shutdown_sq(hw, cq); -init_ctrlq_destroy_locks: - mutex_destroy(&cq->sq_lock); - mutex_destroy(&cq->rq_lock); return ret_code; } @@ -647,12 +638,14 @@ init_ctrlq_destroy_locks: * ice_init_all_ctrlq - main initialization routine for all control queues * @hw: pointer to the hardware structure * - * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure for all control queues: * - cq->num_sq_entries * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size + * + * NOTE: this function does not initialize the controlq locks. */ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) { @@ -672,9 +665,47 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) } /** + * ice_init_ctrlq_locks - Initialize locks for a control queue + * @cq: pointer to the control queue + * + * Initializes the send and receive queue locks for a given control queue. + */ +static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq) +{ + mutex_init(&cq->sq_lock); + mutex_init(&cq->rq_lock); +} + +/** + * ice_create_all_ctrlq - main initialization routine for all control queues + * @hw: pointer to the hardware structure + * + * Prior to calling this function, the driver *MUST* set the following fields + * in the cq->structure for all control queues: + * - cq->num_sq_entries + * - cq->num_rq_entries + * - cq->rq_buf_size + * - cq->sq_buf_size + * + * This function creates all the control queue locks and then calls + * ice_init_all_ctrlq. It should be called once during driver load. If the + * driver needs to re-initialize control queues at run time it should call + * ice_init_all_ctrlq instead.
+ */ +enum ice_status ice_create_all_ctrlq(struct ice_hw *hw) +{ + ice_init_ctrlq_locks(&hw->adminq); + ice_init_ctrlq_locks(&hw->mailboxq); + + return ice_init_all_ctrlq(hw); +} + +/** * ice_shutdown_ctrlq - shutdown routine for any control queue * @hw: pointer to the hardware structure * @q_type: specific Control queue type + * + * NOTE: this function does not destroy the control queue locks. */ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { @@ -693,19 +724,17 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) return; } - if (cq->sq.count) { - ice_shutdown_sq(hw, cq); - mutex_destroy(&cq->sq_lock); - } - if (cq->rq.count) { - ice_shutdown_rq(hw, cq); - mutex_destroy(&cq->rq_lock); - } + ice_shutdown_sq(hw, cq); + ice_shutdown_rq(hw, cq); } /** * ice_shutdown_all_ctrlq - shutdown routine for all control queues * @hw: pointer to the hardware structure + * + * NOTE: this function does not destroy the control queue locks. The driver + * may call this at runtime to shutdown and later restart control queues, such + * as in response to a reset event. */ void ice_shutdown_all_ctrlq(struct ice_hw *hw) { @@ -716,6 +745,37 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw) } /** + * ice_destroy_ctrlq_locks - Destroy locks for a control queue + * @cq: pointer to the control queue + * + * Destroys the send and receive queue locks for a given control queue. + */ +static void +ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) +{ + mutex_destroy(&cq->sq_lock); + mutex_destroy(&cq->rq_lock); +} + +/** + * ice_destroy_all_ctrlq - exit routine for all control queues + * @hw: pointer to the hardware structure + * + * This function shuts down all the control queues and then destroys the + * control queue locks. It should be called once during driver unload. The + * driver should call ice_shutdown_all_ctrlq if it needs to shut down and + * reinitialize control queues, such as in response to a reset event. 
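Splitting lock creation out of queue initialization gives the mutexes driver lifetime while the queues themselves can be torn down and rebuilt across resets. A sketch of the intended pairing, using only the functions declared in the ice_common.h hunk above (hypothetical caller, error handling compressed):

        static int ctrlq_lifecycle_example(struct ice_hw *hw)
        {
                enum ice_status status;

                /* driver load: create locks once, then bring up the queues */
                status = ice_create_all_ctrlq(hw);
                if (status)
                        return -EIO;

                /* reset path: recycle the queues, the locks stay intact */
                ice_shutdown_all_ctrlq(hw);
                status = ice_init_all_ctrlq(hw);
                if (status)
                        return -EIO;

                /* driver unload: shut down queues, then destroy the locks */
                ice_destroy_all_ctrlq(hw);
                return 0;
        }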
+ */ +void ice_destroy_all_ctrlq(struct ice_hw *hw) +{ + /* shut down all the control queues first */ + ice_shutdown_all_ctrlq(hw); + + ice_destroy_ctrlq_locks(&hw->adminq); + ice_destroy_ctrlq_locks(&hw->mailboxq); +} + +/** * ice_clean_sq - cleans Admin send queue (ATQ) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 52083a63dee6..d3ba535bd65a 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1716,6 +1716,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks, struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_port_info *pi = np->vsi->port_info; struct ethtool_link_ksettings cap_ksettings; struct ice_link_status *link_info; struct ice_vsi *vsi = np->vsi; @@ -2040,6 +2041,33 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks, break; } ks->base.duplex = DUPLEX_FULL; + + if (link_info->an_info & ICE_AQ_AN_COMPLETED) + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Autoneg); + + /* Set flow control negotiated Rx/Tx pause */ + switch (pi->fc.current_mode) { + case ICE_FC_FULL: + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause); + break; + case ICE_FC_TX_PAUSE: + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause); + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Asym_Pause); + break; + case ICE_FC_RX_PAUSE: + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Asym_Pause); + break; + case ICE_FC_PFC: + /* fall through */ + default: + ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause); + ethtool_link_ksettings_del_link_mode(ks, lp_advertising, + Asym_Pause); + break; + } } /** @@ -2078,9 +2106,12 @@ ice_get_link_ksettings(struct net_device *netdev, struct ice_aqc_get_phy_caps_data *caps; struct ice_link_status *hw_link_info; struct ice_vsi *vsi = np->vsi; + enum ice_status status; + int err = 0; ethtool_link_ksettings_zero_link_mode(ks, supported); ethtool_link_ksettings_zero_link_mode(ks, advertising); + ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); hw_link_info = &vsi->port_info->phy.link_info; /* set speed and duplex */ @@ -2125,48 +2156,36 @@ ice_get_link_ksettings(struct net_device *netdev, /* flow control is symmetric and always supported */ ethtool_link_ksettings_add_link_mode(ks, supported, Pause); - switch (vsi->port_info->fc.req_mode) { - case ICE_FC_FULL: + caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); + if (!caps) + return -ENOMEM; + + status = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_SW_CFG, caps, NULL); + if (status) { + err = -EIO; + goto done; + } + + /* Set the advertised flow control based on the PHY capability */ + if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) && + (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) { ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); - break; - case ICE_FC_TX_PAUSE: ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); - break; - case ICE_FC_RX_PAUSE: + } else if (caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + } else if (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) { ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); - break; - case ICE_FC_PFC: - default: + } else { 
ethtool_link_ksettings_del_link_mode(ks, advertising, Pause); ethtool_link_ksettings_del_link_mode(ks, advertising, Asym_Pause); - break; } - caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); - if (!caps) - goto done; - - if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_TOPO_CAP, - caps, NULL)) - netdev_info(netdev, "Get phy capability failed.\n"); - - /* Set supported FEC modes based on PHY capability */ - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - - if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN || - caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN) - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN) - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - - if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_SW_CFG, - caps, NULL)) - netdev_info(netdev, "Get phy capability failed.\n"); - /* Set advertised FEC modes based on PHY capability */ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); @@ -2178,9 +2197,25 @@ ice_get_link_ksettings(struct net_device *netdev, caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + status = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_TOPO_CAP, caps, NULL); + if (status) { + err = -EIO; + goto done; + } + + /* Set supported FEC modes based on PHY capability */ + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + + if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN || + caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + done: devm_kfree(&vsi->back->pdev->dev, caps); - return 0; + return err; } /** @@ -2763,6 +2798,11 @@ static int ice_nway_reset(struct net_device *netdev) * ice_get_pauseparam - Get Flow Control status * @netdev: network interface device structure * @pause: ethernet pause (flow control) parameters + * + * Get requested flow control status from PHY capability. + * If autoneg is true, then ethtool will send the ETHTOOL_GSET ioctl which + * is handled by ice_get_link_ksettings. ice_get_link_ksettings will report + * the negotiated Rx/Tx pause via lp_advertising. 
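With lp_advertising now zeroed and then filled from the negotiated flow-control mode, ethtool can report the autonegotiated pause result instead of echoing the requested mode. A compact sketch of the mode-to-bits table used above, with a local enum standing in for the driver's flow-control type (illustrative only, and assuming the link-mode map was zeroed beforehand):

        #include <linux/ethtool.h>

        enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

        static void fc_to_lp_advertising(struct ethtool_link_ksettings *ks,
                                         enum fc_mode mode)
        {
                switch (mode) {
                case FC_FULL:           /* symmetric pause was negotiated */
                        ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
                                                             Pause);
                        break;
                case FC_TX_PAUSE:       /* we send pause frames only */
                        ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
                                                             Pause);
                        ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
                                                             Asym_Pause);
                        break;
                case FC_RX_PAUSE:       /* we honor pause frames only */
                        ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
                                                             Asym_Pause);
                        break;
                default:                /* no pause negotiated: leave map clear */
                        break;
                }
        }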
*/ static void ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 6c5ce05742b1..87652d722a30 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -281,14 +281,10 @@ #define GL_PWR_MODE_CTL 0x000B820C #define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 #define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30) -#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) #define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) -#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) #define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) #define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) -#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8)) #define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8)) -#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8)) #define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8)) #define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8)) #define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8)) @@ -296,38 +292,22 @@ #define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8)) #define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8)) #define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8)) -#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8)) #define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8)) -#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8)) #define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8)) #define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8)) -#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8)) #define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8)) -#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8)) #define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8)) -#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8)) #define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8)) -#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8)) #define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8)) -#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8)) #define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8)) -#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8)) #define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8)) -#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8)) #define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8)) -#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8)) #define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8)) -#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8)) #define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8)) -#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8)) #define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8)) -#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8)) #define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8)) -#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8)) #define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8)) -#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8)) #define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) -#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8)) #define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) #define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64)) #define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64)) @@ -340,32 +320,23 @@ #define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) #define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64)) #define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) -#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) #define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) -#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8)) #define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) -#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8)) 
#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) -#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8)) #define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) -#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8)) #define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) -#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8)) #define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8)) -#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8)) #define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8)) -#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8)) #define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8)) #define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4)) #define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) -#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8)) #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) -#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) #define PF_VT_PFALLOC_HIF 0x0009DD80 #define VSIQF_HKEY_MAX_INDEX 12 #define VSIQF_HLUT_MAX_INDEX 15 #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) +#define PRTRPB_RDPC 0x000AC260 #endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 510a8c900e61..57ea6811fe2c 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -290,6 +290,7 @@ struct ice_rlan_ctx { u8 tphdata_ena; u8 tphhead_ena; u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 prefena; /* NOTE: normally must be set to 1 at init */ }; struct ice_ctx_ele { diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index a19f5920733b..6e34c40e7840 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1129,12 +1129,7 @@ static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) return -EEXIST; } - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - num_q_vectors = vsi->num_q_vectors; - } else { - err = -EINVAL; - goto err_out; - } + num_q_vectors = vsi->num_q_vectors; for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { err = ice_vsi_alloc_q_vector(vsi, v_idx); @@ -1180,9 +1175,6 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) return -EEXIST; } - if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - return -ENOENT; - num_q_vectors = vsi->num_q_vectors; /* reserve slots from OS requested IRQs */ vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors, @@ -1477,40 +1469,32 @@ void ice_update_eth_stats(struct ice_vsi *vsi) prev_es = &vsi->eth_stats_prev; cur_es = &vsi->eth_stats; - ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_bytes, - &cur_es->rx_bytes); + ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_bytes, &cur_es->rx_bytes); - ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_unicast, - &cur_es->rx_unicast); + ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_unicast, &cur_es->rx_unicast); - ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_multicast, - &cur_es->rx_multicast); + ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_multicast, &cur_es->rx_multicast); - ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_broadcast, - &cur_es->rx_broadcast); + ice_stat_update40(hw, GLV_BPRCL(vsi_num), 
vsi->stat_offsets_loaded, + &prev_es->rx_broadcast, &cur_es->rx_broadcast); ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, &prev_es->rx_discards, &cur_es->rx_discards); - ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_bytes, - &cur_es->tx_bytes); + ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_bytes, &cur_es->tx_bytes); - ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_unicast, - &cur_es->tx_unicast); + ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_unicast, &cur_es->tx_unicast); - ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_multicast, - &cur_es->tx_multicast); + ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_multicast, &cur_es->tx_multicast); - ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_broadcast, - &cur_es->tx_broadcast); + ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_broadcast, &cur_es->tx_broadcast); ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, &prev_es->tx_errors, &cur_es->tx_errors); @@ -2156,6 +2140,9 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, if (status == ICE_ERR_RESET_ONGOING) { dev_dbg(&pf->pdev->dev, "Reset in progress. LAN Tx queues already disabled\n"); + } else if (status == ICE_ERR_DOES_NOT_EXIST) { + dev_dbg(&pf->pdev->dev, + "LAN Tx queues do not exist, nothing to disable\n"); } else if (status) { dev_err(&pf->pdev->dev, "Failed to disable LAN Tx queues, error: %d\n", @@ -2519,7 +2506,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, /* configure VSI nodes based on number of queues and TC's */ for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = pf->num_lan_tx; + max_txqs[i] = vsi->alloc_txq; status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); @@ -2610,39 +2597,36 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; int base = vsi->base_vector; + int i; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - int i; - - if (!vsi->q_vectors || !vsi->irqs_ready) - return; + if (!vsi->q_vectors || !vsi->irqs_ready) + return; - ice_vsi_release_msix(vsi); - if (vsi->type == ICE_VSI_VF) - return; + ice_vsi_release_msix(vsi); + if (vsi->type == ICE_VSI_VF) + return; - vsi->irqs_ready = false; - ice_for_each_q_vector(vsi, i) { - u16 vector = i + base; - int irq_num; + vsi->irqs_ready = false; + ice_for_each_q_vector(vsi, i) { + u16 vector = i + base; + int irq_num; - irq_num = pf->msix_entries[vector].vector; + irq_num = pf->msix_entries[vector].vector; - /* free only the irqs that were actually requested */ - if (!vsi->q_vectors[i] || - !(vsi->q_vectors[i]->num_ring_tx || - vsi->q_vectors[i]->num_ring_rx)) - continue; + /* free only the irqs that were actually requested */ + if (!vsi->q_vectors[i] || + !(vsi->q_vectors[i]->num_ring_tx || + vsi->q_vectors[i]->num_ring_rx)) + continue; - /* clear the affinity notifier in the IRQ descriptor */ - irq_set_affinity_notifier(irq_num, NULL); + /* clear the affinity notifier in the IRQ descriptor */ + irq_set_affinity_notifier(irq_num, NULL); - /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(irq_num, NULL); - synchronize_irq(irq_num); - devm_free_irq(&pf->pdev->dev, irq_num, - vsi->q_vectors[i]); -
} + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(irq_num, NULL); + synchronize_irq(irq_num); + devm_free_irq(&pf->pdev->dev, irq_num, + vsi->q_vectors[i]); } } @@ -2821,15 +2805,13 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) } /* disable each interrupt */ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - ice_for_each_q_vector(vsi, i) - wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); + ice_for_each_q_vector(vsi, i) + wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); - ice_flush(hw); + ice_flush(hw); - ice_for_each_q_vector(vsi, i) - synchronize_irq(pf->msix_entries[i + base].vector); - } + ice_for_each_q_vector(vsi, i) + synchronize_irq(pf->msix_entries[i + base].vector); } /** @@ -2986,6 +2968,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret) goto err_rings; + ret = ice_vsi_setup_vector_base(vsi); + if (ret) + goto err_vectors; + ret = ice_vsi_set_q_vectors_reg_idx(vsi); if (ret) goto err_vectors; @@ -3007,10 +2993,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret) goto err_rings; - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto err_vectors; - ret = ice_vsi_set_q_vectors_reg_idx(vsi); if (ret) goto err_vectors; @@ -3028,7 +3010,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) /* configure VSI nodes based on number of queues and TC's */ for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = pf->num_lan_tx; + max_txqs[i] = vsi->alloc_txq; status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); @@ -3145,7 +3127,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) if (ena_tc & BIT(i)) num_tc++; /* populate max_txqs per TC */ - max_txqs[i] = pf->num_lan_tx; + max_txqs[i] = vsi->alloc_txq; } vsi->tc_cfg.ena_tc = ena_tc; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 63db08d9bafa..c26e6a102dac 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -9,7 +9,7 @@ #include "ice_lib.h" #include "ice_dcb_lib.h" -#define DRV_VERSION "0.7.4-k" +#define DRV_VERSION "0.7.5-k" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; @@ -488,6 +488,7 @@ static void ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; + u8 i; /* already prepared for reset */ if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) @@ -497,6 +498,10 @@ ice_prepare_for_reset(struct ice_pf *pf) if (ice_check_sq_alive(hw, &hw->mailboxq)) ice_vc_notify_reset(pf); + /* Disable VFs until reset is completed */ + for (i = 0; i < pf->num_alloc_vfs; i++) + clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); + /* disable the VSIs and their queues that are not already DOWN */ ice_pf_dis_all_vsi(pf, false); @@ -810,6 +815,20 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, if (!vsi || !vsi->port_info) return -EINVAL; + /* turn off PHY if media was removed */ + if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && + !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); + + result = ice_aq_set_link_restart_an(pi, false, NULL); + if (result) { + dev_dbg(&pf->pdev->dev, + "Failed to set link down, VSI %d error %d\n", + vsi->vsi_num, result); + return result; + } + } + ice_vsi_link_event(vsi, link_up); ice_print_link_msg(vsi, link_up); @@ -1315,6 +1334,124 @@ static void ice_handle_mdd_event(struct ice_pf *pf) } /** + * ice_force_phys_link_state - Force the 
physical link state + * @vsi: VSI to force the physical link state to up/down + * @link_up: true/false indicates to set the physical link to up/down + * + * Force the physical link state by getting the current PHY capabilities from + * hardware and setting the PHY config based on the determined capabilities. If + * link changes a link event will be triggered because both the Enable Automatic + * Link Update and LESM Enable bits are set when setting the PHY capabilities. + * + * Returns 0 on success, negative on failure + */ +static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) +{ + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_aqc_set_phy_cfg_data *cfg; + struct ice_port_info *pi; + struct device *dev; + int retcode; + + if (!vsi || !vsi->port_info || !vsi->back) + return -EINVAL; + if (vsi->type != ICE_VSI_PF) + return 0; + + dev = &vsi->back->pdev->dev; + + pi = vsi->port_info; + + pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + NULL); + if (retcode) { + dev_err(dev, + "Failed to get phy capabilities, VSI %d error %d\n", + vsi->vsi_num, retcode); + retcode = -EIO; + goto out; + } + + /* No change in link */ + if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && + link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) + goto out; + + cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL); + if (!cfg) { + retcode = -ENOMEM; + goto out; + } + + cfg->phy_type_low = pcaps->phy_type_low; + cfg->phy_type_high = pcaps->phy_type_high; + cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + cfg->low_power_ctrl = pcaps->low_power_ctrl; + cfg->eee_cap = pcaps->eee_cap; + cfg->eeer_value = pcaps->eeer_value; + cfg->link_fec_opt = pcaps->link_fec_options; + if (link_up) + cfg->caps |= ICE_AQ_PHY_ENA_LINK; + else + cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; + + retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL); + if (retcode) { + dev_err(dev, "Failed to set phy config, VSI %d error %d\n", + vsi->vsi_num, retcode); + retcode = -EIO; + } + + devm_kfree(dev, cfg); +out: + devm_kfree(dev, pcaps); + return retcode; +} + +/** + * ice_check_media_subtask - Check for media; bring link up if detected. 
+ * @pf: pointer to PF struct + */ +static void ice_check_media_subtask(struct ice_pf *pf) +{ + struct ice_port_info *pi; + struct ice_vsi *vsi; + int err; + + vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF); + if (!vsi) + return; + + /* No need to check for media if it's already present or the interface + * is down + */ + if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) || + test_bit(__ICE_DOWN, vsi->state)) + return; + + /* Refresh link info and check if media is present */ + pi = vsi->port_info; + err = ice_update_link_info(pi); + if (err) + return; + + if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { + err = ice_force_phys_link_state(vsi, true); + if (err) + return; + clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); + + /* A Link Status Event will be generated; the event handler + * will complete bringing the interface up + */ + } +} + +/** * ice_service_task - manage and run subtasks * @work: pointer to work_struct contained by the PF struct */ @@ -1336,6 +1473,7 @@ static void ice_service_task(struct work_struct *work) return; } + ice_check_media_subtask(pf); ice_check_for_hang_subtask(pf); ice_sync_fltr_subtask(pf); ice_handle_mdd_event(pf); @@ -1409,15 +1547,11 @@ static void ice_irq_affinity_release(struct kref __always_unused *ref) {} */ static int ice_vsi_ena_irq(struct ice_vsi *vsi) { - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - int i; + struct ice_hw *hw = &vsi->back->hw; + int i; - ice_for_each_q_vector(vsi, i) - ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); - } + ice_for_each_q_vector(vsi, i) + ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); ice_flush(hw); return 0; @@ -1665,7 +1799,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) wr32(hw, PFINT_OICR_ENA, 0); ice_flush(hw); - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { + if (pf->msix_entries) { synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); devm_free_irq(&pf->pdev->dev, pf->msix_entries[pf->oicr_idx].vector, pf); @@ -2091,7 +2225,6 @@ static void ice_deinit_pf(struct ice_pf *pf) static void ice_init_pf(struct ice_pf *pf) { bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS); - set_bit(ICE_FLAG_MSIX_ENA, pf->flags); #ifdef CONFIG_PCI_IOV if (pf->hw.func_caps.common_cap.sr_iov_1_1) { struct ice_hw *hw = &pf->hw; @@ -2191,7 +2324,6 @@ msix_err: exit_err: pf->num_lan_msix = 0; - clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); return err; } @@ -2204,7 +2336,6 @@ static void ice_dis_msix(struct ice_pf *pf) pci_disable_msix(pf->pdev); devm_kfree(&pf->pdev->dev, pf->msix_entries); pf->msix_entries = NULL; - clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); } /** @@ -2213,8 +2344,7 @@ static void ice_dis_msix(struct ice_pf *pf) */ static void ice_clear_interrupt_scheme(struct ice_pf *pf) { - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_dis_msix(pf); + ice_dis_msix(pf); if (pf->irq_tracker) { devm_kfree(&pf->pdev->dev, pf->irq_tracker); @@ -2230,10 +2360,7 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) { int vectors; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - vectors = ice_ena_msix_range(pf); - else - return -ENODEV; + vectors = ice_ena_msix_range(pf); if (vectors < 0) return vectors; @@ -2390,12 +2517,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) * the misc functionality and queue processing is combined in * the same vector and that gets setup at open. 
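The media-check subtask added above runs from ice_service_task; its control flow reduces to a small state machine around the NO_MEDIA flag. A sketch of that shape, with all names being illustrative stand-ins rather than the driver's actual types:

        #include <linux/types.h>
        #include <linux/bitops.h>

        struct example_port { bool media_available; };
        struct example_pf { unsigned long flags; struct example_port *port; };

        #define EXAMPLE_FLAG_NO_MEDIA   0

        /* assumed provided elsewhere; both return 0 on success */
        extern int example_update_link_info(struct example_port *port);
        extern int example_force_link(struct example_pf *pf, bool up);

        static void example_check_media(struct example_pf *pf)
        {
                if (!test_bit(EXAMPLE_FLAG_NO_MEDIA, &pf->flags))
                        return;         /* media already present */

                if (example_update_link_info(pf->port))
                        return;         /* retry on the next service-task tick */

                if (pf->port->media_available &&
                    !example_force_link(pf, true))
                        clear_bit(EXAMPLE_FLAG_NO_MEDIA, &pf->flags);
                /* a link status event then finishes bringing the netdev up */
        }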
*/ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - err = ice_req_irq_msix_misc(pf); - if (err) { - dev_err(dev, "setup of misc vector failed: %d\n", err); - goto err_init_interrupt_unroll; - } + err = ice_req_irq_msix_misc(pf); + if (err) { + dev_err(dev, "setup of misc vector failed: %d\n", err); + goto err_init_interrupt_unroll; } /* create switch struct for the switch element created by FW on boot */ @@ -3008,10 +3133,7 @@ static int ice_up_complete(struct ice_vsi *vsi) struct ice_pf *pf = vsi->back; int err; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_vsi_cfg_msix(vsi); - else - return -ENOTSUPP; + ice_vsi_cfg_msix(vsi); /* Enable only Rx rings, Tx rings were enabled by the FW when the * Tx queue group list was configured and the context bits were @@ -3159,6 +3281,8 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi) cur_ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; cur_ns->rx_length_errors = pf->stats.rx_len_errors; + /* record drops from the port level */ + cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; } } @@ -3176,96 +3300,86 @@ static void ice_update_pf_stats(struct ice_pf *pf) cur_ps = &pf->stats; pf_id = hw->pf_id; - ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_bytes, + ice_stat_update40(hw, GLPRT_GORCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.rx_bytes, &cur_ps->eth.rx_bytes); - ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_unicast, + ice_stat_update40(hw, GLPRT_UPRCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.rx_unicast, &cur_ps->eth.rx_unicast); - ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_multicast, + ice_stat_update40(hw, GLPRT_MPRCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.rx_multicast, &cur_ps->eth.rx_multicast); - ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast, + ice_stat_update40(hw, GLPRT_BPRCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.rx_broadcast, &cur_ps->eth.rx_broadcast); - ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_bytes, + ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, + &prev_ps->eth.rx_discards, + &cur_ps->eth.rx_discards); + + ice_stat_update40(hw, GLPRT_GOTCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.tx_bytes, &cur_ps->eth.tx_bytes); - ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_unicast, + ice_stat_update40(hw, GLPRT_UPTCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.tx_unicast, &cur_ps->eth.tx_unicast); - ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_multicast, + ice_stat_update40(hw, GLPRT_MPTCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.tx_multicast, &cur_ps->eth.tx_multicast); - ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast, + ice_stat_update40(hw, GLPRT_BPTCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.tx_broadcast, &cur_ps->eth.tx_broadcast); ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded, &prev_ps->tx_dropped_link_down, &cur_ps->tx_dropped_link_down); - ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_64, - &cur_ps->rx_size_64); + ice_stat_update40(hw, GLPRT_PRC64L(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_size_64, 
&cur_ps->rx_size_64); - ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_127, - &cur_ps->rx_size_127); + ice_stat_update40(hw, GLPRT_PRC127L(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_size_127, &cur_ps->rx_size_127); - ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_255, - &cur_ps->rx_size_255); + ice_stat_update40(hw, GLPRT_PRC255L(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_size_255, &cur_ps->rx_size_255); - ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_511, - &cur_ps->rx_size_511); + ice_stat_update40(hw, GLPRT_PRC511L(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_size_511, &cur_ps->rx_size_511); - ice_stat_update40(hw, GLPRT_PRC1023H(pf_id), - GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded, &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); - ice_stat_update40(hw, GLPRT_PRC1522H(pf_id), - GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded, &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); - ice_stat_update40(hw, GLPRT_PRC9522H(pf_id), - GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded, &prev_ps->rx_size_big, &cur_ps->rx_size_big); - ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id), - pf->stat_prev_loaded, &prev_ps->tx_size_64, - &cur_ps->tx_size_64); + ice_stat_update40(hw, GLPRT_PTC64L(pf_id), pf->stat_prev_loaded, + &prev_ps->tx_size_64, &cur_ps->tx_size_64); - ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id), - pf->stat_prev_loaded, &prev_ps->tx_size_127, - &cur_ps->tx_size_127); + ice_stat_update40(hw, GLPRT_PTC127L(pf_id), pf->stat_prev_loaded, + &prev_ps->tx_size_127, &cur_ps->tx_size_127); - ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id), - pf->stat_prev_loaded, &prev_ps->tx_size_255, - &cur_ps->tx_size_255); + ice_stat_update40(hw, GLPRT_PTC255L(pf_id), pf->stat_prev_loaded, + &prev_ps->tx_size_255, &cur_ps->tx_size_255); - ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id), - pf->stat_prev_loaded, &prev_ps->tx_size_511, - &cur_ps->tx_size_511); + ice_stat_update40(hw, GLPRT_PTC511L(pf_id), pf->stat_prev_loaded, + &prev_ps->tx_size_511, &cur_ps->tx_size_511); - ice_stat_update40(hw, GLPRT_PTC1023H(pf_id), - GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded, &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); - ice_stat_update40(hw, GLPRT_PTC1522H(pf_id), - GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded, &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); - ice_stat_update40(hw, GLPRT_PTC9522H(pf_id), - GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded, &prev_ps->tx_size_big, &cur_ps->tx_size_big); ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded, @@ -3372,85 +3486,6 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) } /** - * ice_force_phys_link_state - Force the physical link state - * @vsi: VSI to force the physical link state to up/down - * @link_up: true/false indicates to set the physical link to up/down - * - * Force the physical link state by getting the current PHY capabilities from - * hardware and setting the PHY config based on the 
determined capabilities. If - * link changes a link event will be triggered because both the Enable Automatic - * Link Update and LESM Enable bits are set when setting the PHY capabilities. - * - * Returns 0 on success, negative on failure - */ -static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) -{ - struct ice_aqc_get_phy_caps_data *pcaps; - struct ice_aqc_set_phy_cfg_data *cfg; - struct ice_port_info *pi; - struct device *dev; - int retcode; - - if (!vsi || !vsi->port_info || !vsi->back) - return -EINVAL; - if (vsi->type != ICE_VSI_PF) - return 0; - - dev = &vsi->back->pdev->dev; - - pi = vsi->port_info; - - pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL); - if (!pcaps) - return -ENOMEM; - - retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, - NULL); - if (retcode) { - dev_err(dev, - "Failed to get phy capabilities, VSI %d error %d\n", - vsi->vsi_num, retcode); - retcode = -EIO; - goto out; - } - - /* No change in link */ - if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && - link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) - goto out; - - cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL); - if (!cfg) { - retcode = -ENOMEM; - goto out; - } - - cfg->phy_type_low = pcaps->phy_type_low; - cfg->phy_type_high = pcaps->phy_type_high; - cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; - cfg->low_power_ctrl = pcaps->low_power_ctrl; - cfg->eee_cap = pcaps->eee_cap; - cfg->eeer_value = pcaps->eeer_value; - cfg->link_fec_opt = pcaps->link_fec_options; - if (link_up) - cfg->caps |= ICE_AQ_PHY_ENA_LINK; - else - cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; - - retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL); - if (retcode) { - dev_err(dev, "Failed to set phy config, VSI %d error %d\n", - vsi->vsi_num, retcode); - retcode = -EIO; - } - - devm_kfree(dev, cfg); -out: - devm_kfree(dev, pcaps); - return retcode; -} - -/** * ice_down - Shutdown the connection * @vsi: The VSI being stopped */ @@ -3559,24 +3594,6 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) } /** - * ice_vsi_req_irq - Request IRQ from the OS - * @vsi: The VSI IRQ is being requested for - * @basename: name for the vector - * - * Return 0 on success and a negative value on error - */ -static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename) -{ - struct ice_pf *pf = vsi->back; - int err = -EINVAL; - - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - err = ice_vsi_req_irq_msix(vsi, basename); - - return err; -} - -/** * ice_vsi_open - Called when a network interface is made active * @vsi: the VSI to open * @@ -3605,7 +3622,7 @@ static int ice_vsi_open(struct ice_vsi *vsi) snprintf(int_name, sizeof(int_name) - 1, "%s-%s", dev_driver_string(&pf->pdev->dev), vsi->netdev->name); - err = ice_vsi_req_irq(vsi, int_name); + err = ice_vsi_req_irq_msix(vsi, int_name); if (err) goto err_setup_rx; @@ -3842,12 +3859,10 @@ static void ice_rebuild(struct ice_pf *pf) } /* start misc vector */ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - err = ice_req_irq_msix_misc(pf); - if (err) { - dev_err(dev, "misc vector setup failed: %d\n", err); - goto err_vsi_rebuild; - } + err = ice_req_irq_msix_misc(pf); + if (err) { + dev_err(dev, "misc vector setup failed: %d\n", err); + goto err_vsi_rebuild; } /* restart the VSIs that were rebuilt and running before the reset */ @@ -4244,9 +4259,7 @@ static void ice_tx_timeout(struct net_device *netdev) head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) & QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; /* Read interrupt register */ 
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - val = rd32(hw, - GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); + val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", vsi->vsi_num, hung_queue, tx_ring->next_to_clean, @@ -4295,6 +4308,7 @@ int ice_open(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + struct ice_port_info *pi; int err; if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { @@ -4304,13 +4318,33 @@ int ice_open(struct net_device *netdev) netif_carrier_off(netdev); - err = ice_force_phys_link_state(vsi, true); + pi = vsi->port_info; + err = ice_update_link_info(pi); if (err) { - netdev_err(netdev, - "Failed to set physical link up, error %d\n", err); + netdev_err(netdev, "Failed to get link info, error %d\n", + err); return err; } + /* Set PHY if there is media, otherwise, turn off PHY */ + if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { + err = ice_force_phys_link_state(vsi, true); + if (err) { + netdev_err(netdev, + "Failed to set physical link up, error %d\n", + err); + return err; + } + } else { + err = ice_aq_set_link_restart_an(pi, false, NULL); + if (err) { + netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n", + vsi->vsi_num, err); + return err; + } + set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags); + } + err = ice_vsi_open(vsi); if (err) netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 3c83230434b6..9234fd203929 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -377,18 +377,28 @@ err: */ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) { + u16 prev_ntu = rx_ring->next_to_use; + rx_ring->next_to_use = val; /* update next to alloc since we have filled the ring */ rx_ring->next_to_alloc = val; - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). + /* QRX_TAIL will be updated with any tail value, but hardware ignores + * the lower 3 bits. This makes it so we only bump tail on meaningful + * boundaries. Also, this allows us to bump tail on intervals of 8 up to + * the budget depending on the current traffic load. */ - wmb(); - writel(val, rx_ring->tail); + val &= ~0x7; + if (prev_ntu != val) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(val, rx_ring->tail); + } } /** @@ -445,7 +455,13 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace * - * Returns false if all allocations were successful, true if any fail + * Returns false if all allocations were successful, true if any fail. Returning + * true signals to the caller that we didn't replace cleaned_count buffers and + * there is more work to do. + * + * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx + * buffers. Then bump tail at most one time. Grouping like this lets us avoid + * multiple tail writes per call. 
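Because QRX_TAIL ignores its low three bits, rounding the new tail down to an 8-descriptor boundary makes small refills free: the memory barrier and MMIO write happen only when tail actually moves. A standalone sketch of that release step, with a hypothetical ring struct mirroring the logic of ice_release_rx_desc above:

        #include <linux/io.h>
        #include <linux/types.h>

        struct example_ring {
                u16 next_to_use;
                u16 next_to_alloc;
                void __iomem *tail;
        };

        static void example_release_rx_desc(struct example_ring *r, u32 val)
        {
                u16 prev_ntu = r->next_to_use;

                r->next_to_use = val;
                r->next_to_alloc = val;         /* ring is filled up to 'val' */

                val &= ~0x7;    /* hardware ignores the low 3 tail bits */
                if (prev_ntu != val) {
                        /* order descriptor writes before the tail bump */
                        wmb();
                        writel(val, r->tail);
                }
        }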
*/ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) { @@ -462,8 +478,9 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) bi = &rx_ring->rx_buf[ntu]; do { + /* if we fail here, we have work remaining */ if (!ice_alloc_mapped_page(rx_ring, bi)) - goto no_bufs; + break; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, @@ -494,16 +511,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) if (rx_ring->next_to_use != ntu) ice_release_rx_desc(rx_ring, ntu); - return false; - -no_bufs: - if (rx_ring->next_to_use != ntu) - ice_release_rx_desc(rx_ring, ntu); - - /* make sure to come back via polling to try again after - * allocation failure - */ - return true; + return !!cleaned_count; } /** @@ -990,7 +998,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0; u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); - bool failure = false; + bool failure; /* start the loop to process Rx packets bounded by 'budget' */ while (likely(total_rx_pkts < (unsigned int)budget)) { @@ -1002,13 +1010,6 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) u16 vlan_tag = 0; u8 rx_ptype; - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= ICE_RX_BUF_WRITE) { - failure = failure || - ice_alloc_rx_bufs(rx_ring, cleaned_count); - cleaned_count = 0; - } - /* get the Rx desc from Rx ring based on 'next_to_clean' */ rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); @@ -1085,6 +1086,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) total_rx_pkts++; } + /* return up to cleaned_count buffers to hardware */ + failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); + /* update queue and vector specific stats */ u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.pkts += total_rx_pkts; @@ -1409,7 +1413,6 @@ int ice_napi_poll(struct napi_struct *napi, int budget) struct ice_q_vector *q_vector = container_of(napi, struct ice_q_vector, napi); struct ice_vsi *vsi = q_vector->vsi; - struct ice_pf *pf = vsi->back; bool clean_complete = true; int budget_per_ring = 0; struct ice_ring *ring; @@ -1450,8 +1453,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget) * poll us due to busy-polling */ if (likely(napi_complete_done(napi, work_done))) - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_update_ena_itr(vsi, q_vector); + ice_update_ena_itr(vsi, q_vector); return min_t(int, work_done, budget - 1); } @@ -1521,7 +1523,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, { u64 td_offset, td_tag, td_cmd; u16 i = tx_ring->next_to_use; - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned int data_len, size; struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; @@ -1923,7 +1925,7 @@ static unsigned int ice_txd_use_count(unsigned int size) */ static unsigned int ice_xmit_desc_count(struct sk_buff *skb) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int count = 0, size = skb_headlen(skb); @@ -1954,7 +1956,7 @@ static unsigned int ice_xmit_desc_count(struct sk_buff *skb) */ static bool __ice_chk_linearize(struct sk_buff *skb) { - const struct skb_frag_struct *frag, *stale; + const skb_frag_t *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ diff --git 
a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 5d24b539648f..ce01cbe70ea4 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -297,13 +297,6 @@ void ice_free_vfs(struct ice_pf *pf) if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { /* disable VF qp mappings */ ice_dis_vf_mappings(&pf->vf[i]); - - /* Set this state so that assigned VF vectors can be - * reclaimed by PF for reuse in ice_vsi_release(). No - * need to clear this bit since pf->vf array is being - * freed anyways after this for loop - */ - set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states); ice_free_vf_res(&pf->vf[i]); } } @@ -551,7 +544,6 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) * expect vector assignment to be changed unless there is a request for * more vectors. */ - clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states); ice_alloc_vsi_res_exit: ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); return status; @@ -567,11 +559,6 @@ static int ice_alloc_vf_res(struct ice_vf *vf) int tx_rx_queue_left; int status; - /* setup VF VSI and necessary resources */ - status = ice_alloc_vsi_res(vf); - if (status) - goto ice_alloc_vf_res_exit; - /* Update number of VF queues, in case VF had requested for queue * changes */ @@ -581,6 +568,11 @@ static int ice_alloc_vf_res(struct ice_vf *vf) vf->num_req_qs != vf->num_vf_qs) vf->num_vf_qs = vf->num_req_qs; + /* setup VF VSI and necessary resources */ + status = ice_alloc_vsi_res(vf); + if (status) + goto ice_alloc_vf_res_exit; + if (vf->trusted) set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); else @@ -1283,9 +1275,6 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) /* assign default capabilities */ set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); vfs[i].spoofchk = true; - - /* Set this state so that PF driver does VF vector assignment */ - set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states); } pf->num_alloc_vfs = num_alloc_vfs; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index c3ca522c245a..ada69120ff38 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -30,11 +30,6 @@ enum ice_vf_states { ICE_VF_STATE_DIS, ICE_VF_STATE_MC_PROMISC, ICE_VF_STATE_UC_PROMISC, - /* state to indicate if PF needs to do vector assignment for VF. - * This needs to be set during first time VF initialization or later - * when VF asks for more Vectors through virtchnl OP. 
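The reordering in ice_alloc_vf_res above is the functional core of this hunk: the VF's queue count must be settled before its VSI is created, otherwise a virtchnl request for more queues only takes effect one allocation late. Reduced to a sketch with hypothetical names mirroring the new ordering:

        #include <linux/types.h>

        struct example_vf {
                u16 num_req_qs; /* queues requested over virtchnl */
                u16 num_vf_qs;  /* queues currently allocated */
        };

        /* assumed provided elsewhere; sizes the VSI from num_vf_qs */
        extern int example_alloc_vsi_res(struct example_vf *vf);

        static int example_alloc_vf_res(struct example_vf *vf)
        {
                /* honor a pending queue-count request first... */
                if (vf->num_req_qs && vf->num_req_qs != vf->num_vf_qs)
                        vf->num_vf_qs = vf->num_req_qs;

                /* ...so the VSI is created with the updated count */
                return example_alloc_vsi_res(vf);
        }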
- */ - ICE_VF_STATE_CFG_INTR, ICE_VF_STATES_NBITS }; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index b4df3e319467..b63e77528a91 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -5918,7 +5918,7 @@ static int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb = first->skb; struct igb_tx_buffer *tx_buffer; union e1000_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; @@ -6074,7 +6074,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); if (igb_maybe_stop_tx(tx_ring, count + 3)) { /* this is a hard error */ @@ -8879,8 +8880,7 @@ static int __maybe_unused igb_resume(struct device *dev) static int __maybe_unused igb_runtime_idle(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct igb_adapter *adapter = netdev_priv(netdev); if (!igb_has_link(adapter)) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 34cd30d7162f..0f2b68f4bb0f 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2174,7 +2174,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, goto dma_error; for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag; count++; i++; diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index 59258d791106..db289bcce21d 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -40,7 +40,7 @@ static s32 igc_reset_hw_base(struct igc_hw *hw) ctrl = rd32(IGC_CTRL); hw_dbg("Issuing a global reset to MAC\n"); - wr32(IGC_CTRL, ctrl | IGC_CTRL_RST); + wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST); ret_val = igc_get_auto_rd_done(hw); if (ret_val) { @@ -209,6 +209,9 @@ static s32 igc_get_invariants_base(struct igc_hw *hw) switch (hw->device_id) { case IGC_DEV_ID_I225_LM: case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I220_V: + case IGC_DEV_ID_I225_K: mac->type = igc_i225; break; default: diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index fc0ccfe38a20..11b99acf4abe 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -54,7 +54,7 @@ #define IGC_ERR_SWFW_SYNC 13 /* Device Control */ -#define IGC_CTRL_RST 0x04000000 /* Global reset */ +#define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */ #define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ #define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 1039a224ac80..abb2d72911ff 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -18,6 +18,9 @@ #define IGC_DEV_ID_I225_LM 0x15F2 #define IGC_DEV_ID_I225_V 0x15F3 +#define IGC_DEV_ID_I225_I 0x15F8 +#define IGC_DEV_ID_I220_V 0x15F7 +#define IGC_DEV_ID_I225_K 0x3100 #define IGC_FUNC_0 0 @@ -151,16 +154,10 @@ struct 
igc_phy_info { u16 autoneg_advertised; u16 autoneg_mask; - u16 cable_length; - u16 max_cable_length; - u16 min_cable_length; - u16 pair_length[4]; u8 mdix; - bool disable_polarity_correction; bool is_mdix; - bool polarity_correction; bool reset_disable; bool speed_downgraded; bool autoneg_wait_to_complete; @@ -190,12 +187,7 @@ struct igc_fc_info { }; struct igc_dev_spec_base { - bool global_device_reset; - bool eee_disable; bool clear_semaphore_once; - bool module_plugged; - u8 media_port; - bool mas_capable; }; struct igc_hw { diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index aa9323e55406..e5114bebd30b 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -36,6 +36,9 @@ static const struct igc_info *igc_info_tbl[] = { static const struct pci_device_id igc_pci_tbl[] = { { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, /* required last entry */ {0, } }; @@ -861,7 +864,7 @@ static int igc_tx_map(struct igc_ring *tx_ring, struct igc_tx_buffer *tx_buffer; union igc_adv_tx_desc *tx_desc; u32 tx_flags = first->tx_flags; - struct skb_frag_struct *frag; + skb_frag_t *frag; u16 i = tx_ring->next_to_use; unsigned int data_len, size; dma_addr_t dma; @@ -1015,7 +1018,8 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); if (igc_maybe_stop_tx(tx_ring, count + 3)) { /* this is a hard error */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index e5ac2d3fd816..0940a0da16f2 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1331,9 +1331,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, } for (f = 0; f < nr_frags; f++) { - const struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[f]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; len = skb_frag_size(frag); offset = 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cbaf712d6529..dc7b128c780e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1785,7 +1785,7 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned char *va; unsigned int pull_len; @@ -1807,7 +1807,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, /* update all of the pointers */ skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; + skb_frag_off_add(frag, pull_len); skb->data_len -= pull_len; skb->tail += pull_len; } @@ -1840,11 +1840,11 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, skb_headlen(skb), DMA_FROM_DEVICE); } else { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; dma_sync_single_range_for_cpu(rx_ring->dev, IXGBE_CB(skb)->dma, - frag->page_offset, + 
skb_frag_off(frag), skb_frag_size(frag), DMA_FROM_DEVICE); } @@ -8186,7 +8186,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, struct sk_buff *skb = first->skb; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; @@ -8605,7 +8605,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { tx_ring->tx_stats.tx_busy++; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index d2b41f9f87f8..8c011d4ce7a9 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3949,7 +3949,7 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, struct sk_buff *skb = first->skb; struct ixgbevf_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; @@ -4134,8 +4134,11 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, * otherwise try next time */ #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + count += TXD_USE_COUNT(skb_frag_size(frag)); + } #else count += skb_shinfo(skb)->nr_frags; #endif diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 0b668357db4d..6d52cf5ce20e 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -2030,23 +2030,22 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) bool hidma = jme->dev->features & NETIF_F_HIGHDMA; int i, nr_frags = skb_shinfo(skb)->nr_frags; int mask = jme->tx_ring_mask; - const struct skb_frag_struct *frag; u32 len; int ret = 0; for (i = 0 ; i < nr_frags ; ++i) { - frag = &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + ctxdesc = txdesc + ((idx + i + 2) & (mask)); ctxbi = txbi + ((idx + i + 2) & (mask)); ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, - skb_frag_page(frag), - frag->page_offset, skb_frag_size(frag), hidma); + skb_frag_page(frag), skb_frag_off(frag), + skb_frag_size(frag), hidma); if (ret) { jme_drop_tx_map(jme, idx, i); goto out; } - } len = skb_is_nonlinear(skb) ? 
skb_headlen(skb) : skb->len; @@ -3193,8 +3192,7 @@ jme_shutdown(struct pci_dev *pdev) static int jme_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct jme_adapter *jme = netdev_priv(netdev); if (!netif_running(netdev)) @@ -3236,8 +3234,7 @@ jme_suspend(struct device *dev) static int jme_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct jme_adapter *jme = netdev_priv(netdev); if (!netif_running(netdev)) diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index cda641ef89af..900affbdcc0e 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -458,17 +458,11 @@ static int xrx200_probe(struct platform_device *pdev) } priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx"); - if (priv->chan_rx.dma.irq < 0) { - dev_err(dev, "failed to get RX IRQ, %i\n", - priv->chan_rx.dma.irq); + if (priv->chan_rx.dma.irq < 0) return -ENOENT; - } priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx"); - if (priv->chan_tx.dma.irq < 0) { - dev_err(dev, "failed to get TX IRQ, %i\n", - priv->chan_tx.dma.irq); + if (priv->chan_tx.dma.irq < 0) return -ENOENT; - } /* get the clock */ priv->clk = devm_clk_get(dev, NULL); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 88ea5ac83c93..82ea55ae5053 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -659,7 +659,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; - if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7) + if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7) return 1; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 895bfed26a8a..e49820675c8c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2350,10 +2350,10 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, for (i = 0; i < nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - void *addr = page_address(frag->page.p) + frag->page_offset; + void *addr = skb_frag_address(frag); tx_desc = mvneta_txq_next_desc_get(txq); - tx_desc->data_size = frag->size; + tx_desc->data_size = skb_frag_size(frag); tx_desc->buf_phys_addr = dma_map_single(pp->dev->dev.parent, addr, @@ -4469,7 +4469,6 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) /* Device initialization routine */ static int mvneta_probe(struct platform_device *pdev) { - struct resource *res; struct device_node *dn = pdev->dev.of_node; struct device_node *bm_node; struct mvneta_port *pp; @@ -4553,8 +4552,7 @@ static int mvneta_probe(struct platform_device *pdev) if (!IS_ERR(pp->clk_bus)) clk_prepare_enable(pp->clk_bus); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pp->base = devm_ioremap_resource(&pdev->dev, res); + pp->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pp->base)) { err = PTR_ERR(pp->base); goto err_clk; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index ccdd47f3b8fb..74fd9e171865 100644 --- 
a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -2923,14 +2923,15 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - void *addr = page_address(frag->page.p) + frag->page_offset; + void *addr = skb_frag_address(frag); tx_desc = mvpp2_txq_next_desc_get(aggr_txq); mvpp2_txdesc_txq_set(port, tx_desc, txq->id); - mvpp2_txdesc_size_set(port, tx_desc, frag->size); + mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag)); buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, - frag->size, DMA_TO_DEVICE); + skb_frag_size(frag), + DMA_TO_DEVICE); if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { mvpp2_txq_desc_put(txq); goto cleanup; @@ -5010,7 +5011,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, struct device_node *port_node = to_of_node(port_fwnode); netdev_features_t features; struct net_device *dev; - struct resource *res; struct phylink *phylink; char *mac_from = ""; unsigned int ntxqs, nrxqs, thread; @@ -5114,8 +5114,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->comphy = comphy; if (priv->hw_version == MVPP21) { - res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id); - port->base = devm_ioremap_resource(&pdev->dev, res); + port->base = devm_platform_ioremap_resource(pdev, 2 + id); if (IS_ERR(port->base)) { err = PTR_ERR(port->base); goto err_free_irq; @@ -5544,14 +5543,12 @@ static int mvpp2_probe(struct platform_device *pdev) if (priv->hw_version == MVPP21) queue_mode = MVPP2_QDIST_SINGLE_MODE; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); if (priv->hw_version == MVPP21) { - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - priv->lms_base = devm_ioremap_resource(&pdev->dev, res); + priv->lms_base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(priv->lms_base)) return PTR_ERR(priv->lms_base); } else { diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 9ac854c2b371..06dffee81e02 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -4078,8 +4078,7 @@ static void skge_remove(struct pci_dev *pdev) #ifdef CONFIG_PM_SLEEP static int skge_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct skge_hw *hw = pci_get_drvdata(pdev); + struct skge_hw *hw = dev_get_drvdata(dev); int i; if (!hw) @@ -4103,8 +4102,7 @@ static int skge_suspend(struct device *dev) static int skge_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct skge_hw *hw = pci_get_drvdata(pdev); + struct skge_hw *hw = dev_get_drvdata(dev); int i, err; if (!hw) diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index a01c75ede871..c2e00bb587cd 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -5167,8 +5167,7 @@ static void sky2_remove(struct pci_dev *pdev) static int sky2_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct sky2_hw *hw = pci_get_drvdata(pdev); + struct sky2_hw *hw = dev_get_drvdata(dev); int i; if (!hw) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c39d7f4ab1d4..ddbffeb5701b 100644 --- 
a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -696,7 +696,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, txd = itxd; nr_frags = skb_shinfo(skb)->nr_frags; for (i = 0; i < nr_frags; i++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; unsigned int offset = 0; int frag_size = skb_frag_size(frag); @@ -781,13 +781,14 @@ err_dma: static inline int mtk_cal_txd_req(struct sk_buff *skb) { int i, nfrags; - struct skb_frag_struct *frag; + skb_frag_t *frag; nfrags = 1; if (skb_is_gso(skb)) { for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; - nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN); + nfrags += DIV_ROUND_UP(skb_frag_size(frag), + MTK_TX_DMA_BUF_LEN); } } else { nfrags += skb_shinfo(skb)->nr_frags; @@ -2446,7 +2447,6 @@ free_netdev: static int mtk_probe(struct platform_device *pdev) { - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct device_node *mac_np; struct mtk_eth *eth; int err; @@ -2459,7 +2459,7 @@ static int mtk_probe(struct platform_device *pdev) eth->soc = of_device_get_match_data(&pdev->dev); eth->dev = &pdev->dev; - eth->base = devm_ioremap_resource(&pdev->dev, res); + eth->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(eth->base)) return PTR_ERR(eth->base); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index c1438ae52a11..40ec5acf79c0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2645,14 +2645,6 @@ out: en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); return; } - - /* set offloads */ - priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_PARTIAL; } static void mlx4_en_del_vxlan_offloads(struct work_struct *work) @@ -2660,14 +2652,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) int ret; struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, vxlan_del_task); - /* unset offloads */ - priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_PARTIAL); - ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 0); if (ret) @@ -3415,6 +3399,23 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, if (mdev->LSO_support) dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + if (mdev->dev->caps.tunnel_offload_mode == + MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; + dev->features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; + dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; + dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; + } + dev->vlan_features = dev->hw_features; dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; @@ -3483,16 +3484,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->rss_hash_fn = ETH_RSS_HASH_TOP; } - if (mdev->dev->caps.tunnel_offload_mode == 
MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { - dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_PARTIAL; - dev->features |= NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_PARTIAL; - dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; - } - /* MTU range: 68 - hw-specific max */ dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = priv->max_mtu; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 36a92b19e613..4d5ca302c067 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -772,9 +772,7 @@ static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv, /* Map fragments if any */ for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) { - const struct skb_frag_struct *frag; - - frag = &shinfo->frags[i_frag]; + const skb_frag_t *frag = &shinfo->frags[i_frag]; byte_count = skb_frag_size(frag); dma = skb_frag_dma_map(ddev, frag, 0, byte_count, diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 1f6e16d5ea6b..07c204bd3fc4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2292,23 +2292,31 @@ static int mlx4_init_fw(struct mlx4_dev *dev) static int mlx4_init_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_init_hca_param *init_hca = NULL; + struct mlx4_dev_cap *dev_cap = NULL; struct mlx4_adapter adapter; - struct mlx4_dev_cap dev_cap; struct mlx4_profile profile; - struct mlx4_init_hca_param init_hca; u64 icm_size; struct mlx4_config_dev_params params; int err; if (!mlx4_is_slave(dev)) { - err = mlx4_dev_cap(dev, &dev_cap); + dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); + init_hca = kzalloc(sizeof(*init_hca), GFP_KERNEL); + + if (!dev_cap || !init_hca) { + err = -ENOMEM; + goto out_free; + } + + err = mlx4_dev_cap(dev, dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); - return err; + goto out_free; } - choose_steering_mode(dev, &dev_cap); - choose_tunnel_offload_mode(dev, &dev_cap); + choose_steering_mode(dev, dev_cap); + choose_tunnel_offload_mode(dev, dev_cap); if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && mlx4_is_master(dev)) @@ -2331,48 +2339,48 @@ static int mlx4_init_hca(struct mlx4_dev *dev) MLX4_STEERING_MODE_DEVICE_MANAGED) profile.num_mcg = MLX4_FS_NUM_MCG; - icm_size = mlx4_make_profile(dev, &profile, &dev_cap, - &init_hca); + icm_size = mlx4_make_profile(dev, &profile, dev_cap, + init_hca); if ((long long) icm_size < 0) { err = icm_size; - return err; + goto out_free; } dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; if (enable_4k_uar || !dev->persist->num_vfs) { - init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + + init_hca->log_uar_sz = ilog2(dev->caps.num_uars) + PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; - init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; + init_hca->uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; } else { - init_hca.log_uar_sz = ilog2(dev->caps.num_uars); - init_hca.uar_page_sz = PAGE_SHIFT - 12; + init_hca->log_uar_sz = ilog2(dev->caps.num_uars); + init_hca->uar_page_sz = PAGE_SHIFT - 12; } - init_hca.mw_enabled = 0; + init_hca->mw_enabled = 0; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) - init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; + init_hca->mw_enabled = INIT_HCA_TPT_MW_ENABLE; - err = mlx4_init_icm(dev, &dev_cap, &init_hca, 
icm_size); + err = mlx4_init_icm(dev, dev_cap, init_hca, icm_size); if (err) - return err; + goto out_free; - err = mlx4_INIT_HCA(dev, &init_hca); + err = mlx4_INIT_HCA(dev, init_hca); if (err) { mlx4_err(dev, "INIT_HCA command failed, aborting\n"); goto err_free_icm; } - if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { - err = mlx4_query_func(dev, &dev_cap); + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + err = mlx4_query_func(dev, dev_cap); if (err < 0) { mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); goto err_close; } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { - dev->caps.num_eqs = dev_cap.max_eqs; - dev->caps.reserved_eqs = dev_cap.reserved_eqs; - dev->caps.reserved_uars = dev_cap.reserved_uars; + dev->caps.num_eqs = dev_cap->max_eqs; + dev->caps.reserved_eqs = dev_cap->reserved_eqs; + dev->caps.reserved_uars = dev_cap->reserved_uars; } } @@ -2381,14 +2389,13 @@ static int mlx4_init_hca(struct mlx4_dev *dev) * read HCA frequency by QUERY_HCA command */ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { - memset(&init_hca, 0, sizeof(init_hca)); - err = mlx4_QUERY_HCA(dev, &init_hca); + err = mlx4_QUERY_HCA(dev, init_hca); if (err) { mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; } else { dev->caps.hca_core_clock = - init_hca.hca_core_clock; + init_hca->hca_core_clock; } /* In case we got HCA frequency 0 - disable timestamping @@ -2464,7 +2471,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev) priv->eq_table.inta_pin = adapter.inta_pin; memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id)); - return 0; + err = 0; + goto out_free; unmap_bf: unmap_internal_clock(dev); @@ -2483,6 +2491,10 @@ err_free_icm: if (!mlx4_is_slave(dev)) mlx4_free_icms(dev); +out_free: + kfree(dev_cap); + kfree(init_hca); + return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index ce1be2a84231..0807992090b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -351,6 +351,7 @@ enum { MLX5E_SQ_STATE_IPSEC, MLX5E_SQ_STATE_AM, MLX5E_SQ_STATE_TLS, + MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, }; struct mlx5e_sq_wqe_info { @@ -475,8 +476,6 @@ struct mlx5e_xdp_mpwqe { struct mlx5e_tx_wqe *wqe; u8 ds_count; u8 pkt_count; - u8 max_ds_count; - u8 complete; u8 inline_on; }; @@ -1128,7 +1127,6 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params); void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, u16 num_channels); -u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev); void mlx5e_rx_dim_work(struct work_struct *work); void mlx5e_tx_dim_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index be5961ff24cc..4518ce19112e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -10,6 +10,8 @@ enum { }; struct mlx5e_tc_table { + /* protects flow table */ + struct mutex t_lock; struct mlx5_flow_table *t; struct rhashtable ht; @@ -132,12 +134,17 @@ struct mlx5e_ethtool_steering { void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv); void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv); -int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd); -int mlx5e_get_rxnfc(struct net_device *dev, - struct ethtool_rxnfc *info, u32 *rule_locs); +int mlx5e_ethtool_set_rxnfc(struct net_device *dev, 
struct ethtool_rxnfc *cmd); +int mlx5e_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rule_locs); #else static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { } static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { } +static inline int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ return -EOPNOTSUPP; } +static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ return -EOPNOTSUPP; } #endif /* CONFIG_MLX5_EN_RXNFC */ #ifdef CONFIG_MLX5_EN_ARFS diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h index e78e92753d73..ed7a3881d2c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h @@ -4,7 +4,6 @@ #ifndef __MLX5E_EN_REPORTER_H #define __MLX5E_EN_REPORTER_H -#include <linux/mlx5/driver.h> #include "en.h" int mlx5e_tx_reporter_create(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index f3d98748b211..6e54fefea410 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2019 Mellanox Technologies. */ -#include <net/devlink.h> #include "reporter.h" #include "lib/eq.h" @@ -117,7 +116,7 @@ static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter, char *err_str, struct mlx5e_tx_err_ctx *err_ctx) { - if (IS_ERR_OR_NULL(tx_reporter)) { + if (!tx_reporter) { netdev_err(err_ctx->sq->channel->netdev, err_str); return err_ctx->recover(err_ctx->sq); } @@ -289,23 +288,27 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = { int mlx5e_tx_reporter_create(struct mlx5e_priv *priv) { + struct devlink_health_reporter *reporter; struct mlx5_core_dev *mdev = priv->mdev; struct devlink *devlink = priv_to_devlink(mdev); - priv->tx_reporter = + reporter = devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops, MLX5_REPORTER_TX_GRACEFUL_PERIOD, true, priv); - if (IS_ERR(priv->tx_reporter)) + if (IS_ERR(reporter)) { netdev_warn(priv->netdev, "Failed to create tx reporter, err = %ld\n", - PTR_ERR(priv->tx_reporter)); - return IS_ERR_OR_NULL(priv->tx_reporter); + PTR_ERR(reporter)); + return PTR_ERR(reporter); + } + priv->tx_reporter = reporter; + return 0; } void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv) { - if (IS_ERR_OR_NULL(priv->tx_reporter)) + if (!priv->tx_reporter) return; devlink_health_reporter_destroy(priv->tx_reporter); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index a6a52806be45..4c4620db3d31 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -31,29 +31,36 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, real_dev = is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : dev; uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); - uplink_upper = netdev_master_upper_dev_get(uplink_dev); + + rcu_read_lock(); + uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev); + /* mlx5_lag_is_sriov() is a blocking function which can't be called + * while holding rcu read lock. Take the net_device for correctness + * sake. 
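For reference, the locking pattern the tc_tun.c hunk introduces is: resolve the upper device inside an RCU read-side section, pin it with dev_hold() before dropping the lock, and only then call anything that may sleep. Roughly (a sketch against the in-kernel netdev API, error handling trimmed):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

/* Sketch: resolve a netdev's master under RCU, then do work that may
 * sleep. The reference pins the upper device so it cannot be freed
 * between the RCU section and the later use. */
static bool upper_is_lag_master(struct net_device *dev)
{
    struct net_device *upper;
    bool is_lag;

    rcu_read_lock();
    upper = netdev_master_upper_dev_get_rcu(dev);
    if (upper)
        dev_hold(upper);    /* pin before leaving the RCU section */
    rcu_read_unlock();

    /* Safe to block here; 'upper' is pinned by the reference. */
    is_lag = upper && netif_is_lag_master(upper);

    if (upper)
        dev_put(upper);
    return is_lag;
}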
+ */ + if (uplink_upper) + dev_hold(uplink_upper); + rcu_read_unlock(); + dst_is_lag_dev = (uplink_upper && netif_is_lag_master(uplink_upper) && real_dev == uplink_upper && mlx5_lag_is_sriov(priv->mdev)); + if (uplink_upper) + dev_put(uplink_upper); /* if the egress device isn't on the same HW e-switch or * it's a LAG device, use the uplink */ + *route_dev = dev; if (!netdev_port_same_parent_id(priv->netdev, real_dev) || - dst_is_lag_dev) { - *route_dev = dev; + dst_is_lag_dev || is_vlan_dev(*route_dev)) *out_dev = uplink_dev; - } else { - *route_dev = dev; - if (is_vlan_dev(*route_dev)) - *out_dev = uplink_dev; - else if (mlx5e_eswitch_rep(dev) && - mlx5e_is_valid_eswitch_fwd_dev(priv, dev)) - *out_dev = *route_dev; - else - return -EOPNOTSUPP; - } + else if (mlx5e_eswitch_rep(dev) && + mlx5e_is_valid_eswitch_fwd_dev(priv, dev)) + *out_dev = *route_dev; + else + return -EOPNOTSUPP; if (!(mlx5e_eswitch_rep(*out_dev) && mlx5e_is_uplink_rep(netdev_priv(*out_dev)))) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index ddfe19adb3d9..87be96747902 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -6,7 +6,7 @@ #include "en.h" -#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS +#define MLX5E_SQ_NOPS_ROOM (MLX5_SEND_WQE_MAX_WQEBBS - 1) #define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\ MLX5E_SQ_NOPS_ROOM) @@ -117,9 +117,27 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map, mlx5_write64((__be32 *)ctrl, uar_map); } -static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5e_tx_wqe *wqe) +static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg) { - return !!wqe->ctrl.tisn; + return cseg && !!cseg->tisn; +} + +static inline u8 +mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg, + struct sk_buff *skb) +{ + u8 mode; + + if (mlx5e_transport_inline_tx_wqe(cseg)) + return MLX5_INLINE_MODE_TCP_UDP; + + mode = sq->min_inline_mode; + + if (skb_vlan_tag_present(skb) && + test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state)) + mode = max_t(u8, MLX5_INLINE_MODE_L2, mode); + + return mode; } static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index b0b982cf69bb..1ed5c33e022f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -179,33 +179,19 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; struct mlx5e_xdpsq_stats *stats = sq->stats; struct mlx5_wq_cyc *wq = &sq->wq; - u8 wqebbs; - u16 pi; - - mlx5e_xdpsq_fetch_wqe(sq, &session->wqe); - - prefetchw(session->wqe->data); - session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT; - session->pkt_count = 0; - session->complete = 0; + u16 pi, contig_wqebbs; pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); -/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS - * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment. - * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a - * full-session WQE be cache-aligned. 
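The mlx5e_tx_wqe_inline_mode() helper added to txrx.h above reduces to a max() policy: start from the SQ's configured minimum inline mode and raise it to at least L2 when a VLAN tag has to be inserted in software. A userspace sketch of just that policy (the enum values below are illustrative; only their ordering matters):

#include <assert.h>
#include <stdbool.h>

/* Inline modes, ordered so that a larger value inlines more headers. */
enum inline_mode { MODE_NONE, MODE_L2, MODE_IP, MODE_TCP_UDP };

/* Start from the SQ's configured minimum and raise it to at least L2
 * when a VLAN tag must be inserted by software. */
static enum inline_mode wqe_inline_mode(enum inline_mode sq_min,
                                        bool vlan_present,
                                        bool need_l2_for_vlan)
{
    enum inline_mode mode = sq_min;

    if (vlan_present && need_l2_for_vlan && mode < MODE_L2)
        mode = MODE_L2;
    return mode;
}

int main(void)
{
    assert(wqe_inline_mode(MODE_NONE, true, true) == MODE_L2);
    assert(wqe_inline_mode(MODE_IP, true, true) == MODE_IP);
    assert(wqe_inline_mode(MODE_NONE, false, true) == MODE_NONE);
    return 0;
}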
- */ -#if L1_CACHE_BYTES < 128 -#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1) -#else -#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2) -#endif + if (unlikely(contig_wqebbs < MLX5_SEND_WQE_MAX_WQEBBS)) + mlx5e_fill_xdpsq_frag_edge(sq, wq, pi, contig_wqebbs); - wqebbs = min_t(u16, mlx5_wq_cyc_get_contig_wqebbs(wq, pi), - MLX5E_XDP_MPW_MAX_WQEBBS); + session->wqe = mlx5e_xdpsq_fetch_wqe(sq, &pi); - session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs; + prefetchw(session->wqe->data); + session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT; + session->pkt_count = 0; mlx5e_xdp_update_inline_state(sq); @@ -244,7 +230,7 @@ static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq) { if (unlikely(!sq->mpwqe.wqe)) { if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, - MLX5_SEND_WQE_MAX_WQEBBS))) { + MLX5E_XDPSQ_STOP_ROOM))) { /* SQ is full, ring doorbell */ mlx5e_xmit_xdp_doorbell(sq); sq->stats->full++; @@ -285,8 +271,8 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats); - if (unlikely(session->complete || - session->ds_count == session->max_ds_count)) + if (unlikely(mlx5e_xdp_no_room_for_inline_pkt(session) || + session->ds_count == MLX5E_XDP_MPW_MAX_NUM_DS)) mlx5e_xdp_mpwqe_complete(sq); mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index b90923932668..36ac1e3816b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -40,6 +40,26 @@ (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) #define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */) +#define MLX5E_XDPSQ_STOP_ROOM (MLX5E_SQ_STOP_ROOM) + +#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg)) +#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \ + DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS) + +/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS + * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment. + * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a + * full-session WQE be cache-aligned. 
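The arithmetic behind the bound restated above is worth spelling out; the asserts below use the same numbers (16 WQEBBs per WQE, 4 DS entries per WQEBB, a 6-bit ds_cnt field) and nothing driver-specific:

#include <assert.h>

#define SEND_WQE_MAX_WQEBBS 16  /* max WQEBBs in one WQE */
#define SEND_WQEBB_NUM_DS    4  /* 64B WQEBB / 16B DS entry */
#define DS_FIELD_BITS        6  /* ds_cnt field in the ctrl segment */

int main(void)
{
    /* A full 16-WQEBB session would need 64 DS entries ... */
    assert(SEND_WQE_MAX_WQEBBS * SEND_WQEBB_NUM_DS == 64);
    /* ... but a 6-bit field tops out at 63, so 16 WQEBBs is out. */
    assert((1 << DS_FIELD_BITS) - 1 == 63);
    /* 15 WQEBBs (or 14 on 128B cachelines) keeps ds_cnt in range
     * and lets a full session stay cache-aligned. */
    assert((SEND_WQE_MAX_WQEBBS - 1) * SEND_WQEBB_NUM_DS <= 63);
    return 0;
}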
+ */ +#if L1_CACHE_BYTES < 128 +#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1) +#else +#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2) +#endif + +#define MLX5E_XDP_MPW_MAX_NUM_DS \ + (MLX5E_XDP_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS) + struct mlx5e_xsk_param; int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, @@ -114,6 +134,30 @@ static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq) session->inline_on = 1; } +static inline bool +mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session) +{ + return session->inline_on && + session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS; +} + +static inline void +mlx5e_fill_xdpsq_frag_edge(struct mlx5e_xdpsq *sq, struct mlx5_wq_cyc *wq, + u16 pi, u16 nnops) +{ + struct mlx5e_xdp_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi]; + + edge_wi = wi + nnops; + /* fill sq frag edge with nops to avoid wqe wrapping two pages */ + for (; wi < edge_wi; wi++) { + wi->num_wqebbs = 1; + wi->num_pkts = 0; + mlx5e_post_nop(wq, sq->sqn, &sq->pc); + } + + sq->stats->nops += nnops; +} + static inline void mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd, @@ -126,20 +170,12 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, session->pkt_count++; -#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg)) - if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) { struct mlx5_wqe_inline_seg *inline_dseg = (struct mlx5_wqe_inline_seg *)dseg; u16 ds_len = sizeof(*inline_dseg) + dma_len; u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS); - if (unlikely(session->ds_count + ds_cnt > session->max_ds_count)) { - /* Not enough space for inline wqe, send with memory pointer */ - session->complete = true; - goto no_inline; - } - inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG); memcpy(inline_dseg->data, xdptxd->data, dma_len); @@ -148,21 +184,23 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, return; } -no_inline: dseg->addr = cpu_to_be64(xdptxd->dma_addr); dseg->byte_count = cpu_to_be32(dma_len); dseg->lkey = sq->mkey_be; session->ds_count++; } -static inline void mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, - struct mlx5e_tx_wqe **wqe) +static inline struct mlx5e_tx_wqe * +mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, u16 *pi) { struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + struct mlx5e_tx_wqe *wqe; + + *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + wqe = mlx5_wq_cyc_get_wqe(wq, *pi); + memset(wqe, 0, sizeof(*wqe)); - *wqe = mlx5_wq_cyc_get_wqe(wq, pi); - memset(*wqe, 0, sizeof(**wqe)); + return wqe; } static inline void diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index aaffa6f68dc0..f701e4f3c076 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -60,24 +60,28 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, struct xdp_umem *umem, struct mlx5e_channel *c) { - struct mlx5e_channel_param cparam = {}; + struct mlx5e_channel_param *cparam; struct dim_cq_moder icocq_moder = {}; int err; if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev)) return -EINVAL; - mlx5e_build_xsk_cparam(priv, params, xsk, &cparam); + cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL); + if (!cparam) 
+ return -ENOMEM; - err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam.rx_cq, &c->xskrq.cq); + mlx5e_build_xsk_cparam(priv, params, xsk, cparam); + + err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->xskrq.cq); if (unlikely(err)) - return err; + goto err_free_cparam; - err = mlx5e_open_rq(c, params, &cparam.rq, xsk, umem, &c->xskrq); + err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq); if (unlikely(err)) goto err_close_rx_cq; - err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam.tx_cq, &c->xsksq.cq); + err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xsksq.cq); if (unlikely(err)) goto err_close_rq; @@ -87,21 +91,23 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, * is disabled and then reenabled, but the SQ continues receiving CQEs * from the old UMEM. */ - err = mlx5e_open_xdpsq(c, params, &cparam.xdp_sq, umem, &c->xsksq, true); + err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true); if (unlikely(err)) goto err_close_tx_cq; - err = mlx5e_open_cq(c, icocq_moder, &cparam.icosq_cq, &c->xskicosq.cq); + err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->xskicosq.cq); if (unlikely(err)) goto err_close_sq; /* Create a dedicated SQ for posting NOPs whenever we need an IRQ to be * triggered and NAPI to be called on the correct CPU. */ - err = mlx5e_open_icosq(c, params, &cparam.icosq, &c->xskicosq); + err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->xskicosq); if (unlikely(err)) goto err_close_icocq; + kvfree(cparam); + spin_lock_init(&c->xskicosq_lock); set_bit(MLX5E_CHANNEL_STATE_XSK, c->state); @@ -123,6 +129,9 @@ err_close_rq: err_close_rx_cq: mlx5e_close_cq(&c->xskrq.cq); +err_free_cparam: + kvfree(cparam); + return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 1539cf3de5dc..f7890e0ce96c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -180,15 +180,3 @@ out: return err; } - -u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev) -{ - u8 min_inline_mode; - - mlx5_query_min_inline(mdev, &min_inline_mode); - if (min_inline_mode == MLX5_INLINE_MODE_NONE && - !MLX5_CAP_ETH(mdev, wqe_vlan_insert)) - min_inline_mode = MLX5_INLINE_MODE_L2; - - return min_inline_mode; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 8dd31b5c740c..01f2918063af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1101,7 +1101,7 @@ void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv) static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv, struct mlx5e_params *params) { - params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev); + mlx5_query_min_inline(priv->mdev, ¶ms->tx_min_inline_mode); if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP && params->tx_min_inline_mode == MLX5_INLINE_MODE_L2) params->tx_min_inline_mode = MLX5_INLINE_MODE_IP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 03bed714bac3..02530b50609c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1913,21 +1913,27 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev) return priv->channels.params.pflags; } 
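The mlx4_init_hca() and mlx5e_open_xsk() hunks above apply the same fix: a large parameter struct moves off the kernel stack onto the heap, with one exit path freeing it on success and failure alike. The shape of the pattern, reduced to a sketch (struct big_param and open_thing() are illustrative, not driver code):

#include <linux/mm.h>      /* kvzalloc()/kvfree() */
#include <linux/errno.h>

struct big_param { unsigned long blob[512]; };  /* illustrative size */

static int open_thing(void)
{
    struct big_param *p;
    int err;

    /* Too big for a kernel stack frame; allocate it instead. */
    p = kvzalloc(sizeof(*p), GFP_KERNEL);
    if (!p)
        return -ENOMEM;

    err = 0;    /* ... use *p to build the real objects ... */

    kvfree(p);  /* freed on success and failure alike */
    return err;
}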
-#ifndef CONFIG_MLX5_EN_RXNFC -/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS - * otherwise this function will be defined from en_fs_ethtool.c - */ static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) { struct mlx5e_priv *priv = netdev_priv(dev); - if (info->cmd != ETHTOOL_GRXRINGS) - return -EOPNOTSUPP; - /* ring_count is needed by ethtool -x */ - info->data = priv->channels.params.num_channels; - return 0; + /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part + * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc, + * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc + * is compiled out via CONFIG_MLX5_EN_RXNFC=n. + */ + if (info->cmd == ETHTOOL_GRXRINGS) { + info->data = priv->channels.params.num_channels; + return 0; + } + + return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs); +} + +static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + return mlx5e_ethtool_set_rxnfc(dev, cmd); } -#endif const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, @@ -1948,9 +1954,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_rxfh = mlx5e_get_rxfh, .set_rxfh = mlx5e_set_rxfh, .get_rxnfc = mlx5e_get_rxnfc, -#ifdef CONFIG_MLX5_EN_RXNFC .set_rxnfc = mlx5e_set_rxnfc, -#endif .get_tunable = mlx5e_get_tunable, .set_tunable = mlx5e_set_tunable, .get_pauseparam = mlx5e_get_pauseparam, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 94304abc49e9..eed7101e8bb7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -888,10 +888,10 @@ static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv, return 0; } -int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) { - int err = 0; struct mlx5e_priv *priv = netdev_priv(dev); + int err = 0; switch (cmd->cmd) { case ETHTOOL_SRXCLSRLINS: @@ -911,16 +911,13 @@ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return err; } -int mlx5e_get_rxnfc(struct net_device *dev, - struct ethtool_rxnfc *info, u32 *rule_locs) +int mlx5e_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rule_locs) { struct mlx5e_priv *priv = netdev_priv(dev); int err = 0; switch (info->cmd) { - case ETHTOOL_GRXRINGS: - info->data = priv->channels.params.num_channels; - break; case ETHTOOL_GRXCLSRLCNT: info->rule_cnt = priv->fs.ethtool.tot_num_rules; break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 6c712c5be4d8..9a2fcef6e7f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1130,6 +1130,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; sq->stop_room = MLX5E_SQ_STOP_ROOM; INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); + if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert)) + set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); if (mlx5_accel_is_tls_device(c->priv->mdev)) { @@ -2322,7 +2324,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, goto err_close_channels; } - if (!IS_ERR_OR_NULL(priv->tx_reporter)) + if (priv->tx_reporter) 
devlink_health_reporter_state_update(priv->tx_reporter, DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); @@ -3423,7 +3425,7 @@ out: #ifdef CONFIG_MLX5_ESWITCH static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, struct flow_cls_offload *cls_flower, - int flags) + unsigned long flags) { switch (cls_flower->command) { case FLOW_CLS_REPLACE: @@ -3443,12 +3445,12 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { + unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD); struct mlx5e_priv *priv = cb_priv; switch (type) { case TC_SETUP_CLSFLOWER: - return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS | - MLX5E_TC_NIC_OFFLOAD); + return mlx5e_setup_tc_cls_flower(priv, type_data, flags); default: return -EOPNOTSUPP; } @@ -3641,7 +3643,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); - if (!enable && mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD)) { + if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) { netdev_err(netdev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); return -EINVAL; @@ -3782,9 +3784,10 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n"); } if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) { - features &= ~NETIF_F_LRO; - if (params->lro_en) + if (features & NETIF_F_LRO) { netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n"); + features &= ~NETIF_F_LRO; + } } if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { @@ -3951,7 +3954,8 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: /* Disable CQE compression */ - netdev_warn(priv->netdev, "Disabling cqe compression"); + if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS)) + netdev_warn(priv->netdev, "Disabling RX cqe compression\n"); err = mlx5e_modify_rx_cqe_compression_locked(priv, false); if (err) { netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err); @@ -4769,7 +4773,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); /* TX inline */ - params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev); + mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); /* RSS */ mlx5e_build_rss_params(rss_params, params->num_channels); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index d0684fdb69e1..fbb9de633578 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -659,8 +659,8 @@ mlx5e_rep_indr_offload(struct net_device *netdev, struct flow_cls_offload *flower, struct mlx5e_rep_indr_block_priv *indr_priv) { + unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD); struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev); - int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD; int err = 0; switch (flower->command) { @@ -722,10 +722,6 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, if (indr_priv) return -EEXIST; - if (flow_block_cb_is_busy(mlx5e_rep_indr_setup_block_cb, - indr_priv, &mlx5e_block_cb_list)) - return -EBUSY; - indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL); if 
(!indr_priv) return -ENOMEM; @@ -1160,15 +1156,34 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv, } } +static +int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma) +{ + switch (ma->command) { + case TC_CLSMATCHALL_REPLACE: + return mlx5e_tc_configure_matchall(priv, ma); + case TC_CLSMATCHALL_DESTROY: + return mlx5e_tc_delete_matchall(priv, ma); + case TC_CLSMATCHALL_STATS: + mlx5e_tc_stats_matchall(priv, ma); + return 0; + default: + return -EOPNOTSUPP; + } +} + static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { + unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD); struct mlx5e_priv *priv = cb_priv; switch (type) { case TC_SETUP_CLSFLOWER: - return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS | - MLX5E_TC_ESW_OFFLOAD); + return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags); + case TC_SETUP_CLSMATCHALL: + return mlx5e_rep_setup_tc_cls_matchall(priv, type_data); default: return -EOPNOTSUPP; } @@ -1564,6 +1579,7 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { uplink_priv = &rpriv->uplink_priv; + mutex_init(&uplink_priv->unready_flows_lock); INIT_LIST_HEAD(&uplink_priv->unready_flows); /* init shared tc flow table */ @@ -1608,6 +1624,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) /* delete shared tc flow table */ mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht); + mutex_destroy(&rpriv->uplink_priv.unready_flows_lock); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index c56e6ee4350c..43eeebe9c8d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -75,6 +75,8 @@ struct mlx5_rep_uplink_priv { struct mlx5_tun_entropy tun_entropy; + /* protects unready_flows */ + struct mutex unready_flows_lock; struct list_head unready_flows; struct work_struct reoffload_flows_work; }; @@ -86,6 +88,7 @@ struct mlx5e_rep_priv { struct mlx5_flow_handle *vport_rx_rule; struct list_head vport_sqs_list; struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */ + struct rtnl_link_stats64 prev_vf_vport_stats; struct devlink_port dl_port; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index ac6e586d403d..60570b442fff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -859,13 +859,24 @@ tail_padding_csum(struct sk_buff *skb, int offset, } static void -mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto, - struct mlx5e_rq_stats *stats) +mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto, + struct mlx5e_rq_stats *stats) { struct ipv6hdr *ip6; struct iphdr *ip4; int pkt_len; + /* Fixup vlan headers, if any */ + if (network_depth > ETH_HLEN) + /* CQE csum is calculated from the IP header and does + * not cover VLAN headers (if present). This will add + * the checksum manually. 
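Since the partial sums behind CHECKSUM_COMPLETE are plain one's-complement additions, and addition is associative, bytes the hardware skipped can be added in after the fact; that is all the csum_partial() call below does. A toy userspace version of that fixup (byte-order details simplified; this mirrors the partial sum, not the final complemented checksum):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Toy csum_partial(): fold a byte span into an existing 16-bit
 * one's-complement sum. Bytes a device skipped (e.g. VLAN headers
 * between the Ethernet and IP headers) can simply be added later. */
static uint32_t csum_add(uint32_t csum, const uint8_t *buf, size_t len)
{
    uint32_t sum = csum;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)buf[i] << 8 | buf[i + 1];
    if (len & 1)
        sum += (uint32_t)buf[len - 1] << 8;
    while (sum >> 16)                   /* fold carries back in */
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

int main(void)
{
    uint8_t vlan[] = { 0x81, 0x00, 0x00, 0x2a };  /* a VLAN tag the HW skipped */
    uint32_t hw_csum = 0x1234;  /* what the NIC summed from the IP header on */

    printf("fixed-up csum: 0x%04x\n", csum_add(hw_csum, vlan, sizeof(vlan)));
    return 0;
}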
+ */ + skb->csum = csum_partial(skb->data + ETH_HLEN, + network_depth - ETH_HLEN, + skb->csum); + + /* Fixup tail padding, if any */ switch (proto) { case htons(ETH_P_IP): ip4 = (struct iphdr *)(skb->data + network_depth); @@ -931,16 +942,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; /* CQE csum covers all received bytes */ /* csum might need some fixups ...*/ - if (network_depth > ETH_HLEN) - /* CQE csum is calculated from the IP header and does - * not cover VLAN headers (if present). This will add - * the checksum manually. - */ - skb->csum = csum_partial(skb->data + ETH_HLEN, - network_depth - ETH_HLEN, - skb->csum); - - mlx5e_skb_padding_csum(skb, network_depth, proto, stats); + mlx5e_skb_csum_fixup(skb, network_depth, proto, stats); return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 57f9f346d213..94a32c76c182 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -74,6 +74,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, @@ -90,6 +91,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) }, @@ -200,6 +202,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_xdp_tx_xmit += xdpsq_stats->xmit; s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe; s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw; + s->rx_xdp_tx_nops += xdpsq_stats->nops; s->rx_xdp_tx_full += xdpsq_stats->full; s->rx_xdp_tx_err += xdpsq_stats->err; s->rx_xdp_tx_cqe += xdpsq_stats->cqes; @@ -227,6 +230,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_xdp_xmit += xdpsq_red_stats->xmit; s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe; s->tx_xdp_inlnw += xdpsq_red_stats->inlnw; + s->tx_xdp_nops += xdpsq_red_stats->nops; s->tx_xdp_full += xdpsq_red_stats->full; s->tx_xdp_err += xdpsq_red_stats->err; s->tx_xdp_cqes += xdpsq_red_stats->cqes; @@ -1331,6 +1335,7 @@ static const struct counter_desc rq_xdpsq_stats_desc[] = { { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, @@ -1340,6 +1345,7 @@ static const struct counter_desc xdpsq_stats_desc[] = { { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) }, { 
MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 76ac111e14d0..bf645d42c833 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -81,6 +81,7 @@ struct mlx5e_sw_stats { u64 rx_xdp_tx_xmit; u64 rx_xdp_tx_mpwqe; u64 rx_xdp_tx_inlnw; + u64 rx_xdp_tx_nops; u64 rx_xdp_tx_full; u64 rx_xdp_tx_err; u64 rx_xdp_tx_cqe; @@ -97,6 +98,7 @@ struct mlx5e_sw_stats { u64 tx_xdp_xmit; u64 tx_xdp_mpwqe; u64 tx_xdp_inlnw; + u64 tx_xdp_nops; u64 tx_xdp_full; u64 tx_xdp_err; u64 tx_xdp_cqes; @@ -288,6 +290,7 @@ struct mlx5e_xdpsq_stats { u64 xmit; u64 mpwqe; u64 inlnw; + u64 nops; u64 full; u64 err; /* dirtied @completion */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 7ecfc53cf5f6..4d97cc47835f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -38,6 +38,7 @@ #include <linux/mlx5/fs.h> #include <linux/mlx5/device.h> #include <linux/rhashtable.h> +#include <linux/refcount.h> #include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_vlan.h> #include <net/tc_act/tc_tunnel_key.h> @@ -65,19 +66,20 @@ struct mlx5_nic_flow_attr { struct mlx5_fc *counter; }; -#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1) +#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1) enum { - MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS, - MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS, - MLX5E_TC_FLOW_ESWITCH = MLX5E_TC_ESW_OFFLOAD, - MLX5E_TC_FLOW_NIC = MLX5E_TC_NIC_OFFLOAD, - MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE), - MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 1), - MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2), - MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 3), - MLX5E_TC_FLOW_DUP = BIT(MLX5E_TC_FLOW_BASE + 4), - MLX5E_TC_FLOW_NOT_READY = BIT(MLX5E_TC_FLOW_BASE + 5), + MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT, + MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT, + MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, + MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT, + MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE, + MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1, + MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2, + MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3, + MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4, + MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5, + MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6, }; #define MLX5E_TC_MAX_SPLITS 1 @@ -108,7 +110,7 @@ struct mlx5e_tc_flow { struct rhash_head node; struct mlx5e_priv *priv; u64 cookie; - u16 flags; + unsigned long flags; struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1]; /* Flow can be associated with multiple encap IDs. 
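The flag rework above switches mlx5e_tc_flow::flags from a u16 of BIT() masks to bit numbers held in an unsigned long, which is what lets set_bit()/test_bit() update individual flags atomically without a lock. In outline (a sketch, with made-up flag names):

#include <linux/bitops.h>

/* Flag *numbers*, not masks: the atomic bitops take a bit index. */
enum { FLOW_OFFLOADED, FLOW_DELETED, FLOW_NBITS };

struct flow { unsigned long flags; };

static void flow_mark_offloaded(struct flow *f)
{
    /* Atomic RMW on one bit; concurrent writers to other bits in the
     * same word no longer lose updates, unlike f->flags |= mask. */
    set_bit(FLOW_OFFLOADED, &f->flags);
}

static bool flow_is_deleted(const struct flow *f)
{
    return test_bit(FLOW_DELETED, &f->flags);
}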
* The number of encaps is bounded by the number of supported @@ -120,6 +122,8 @@ struct mlx5e_tc_flow { struct list_head hairpin; /* flows sharing the same hairpin */ struct list_head peer; /* flows with peer flow */ struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */ + refcount_t refcnt; + struct rcu_head rcu_head; union { struct mlx5_esw_flow_attr esw_attr[0]; struct mlx5_nic_flow_attr nic_attr[0]; @@ -184,6 +188,77 @@ struct mlx5e_mod_hdr_entry { #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) +static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow); + +static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow) +{ + if (!flow || !refcount_inc_not_zero(&flow->refcnt)) + return ERR_PTR(-EINVAL); + return flow; +} + +static void mlx5e_flow_put(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + if (refcount_dec_and_test(&flow->refcnt)) { + mlx5e_tc_del_flow(priv, flow); + kfree_rcu(flow, rcu_head); + } +} + +static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag) +{ + /* Complete all memory stores before setting bit. */ + smp_mb__before_atomic(); + set_bit(flag, &flow->flags); +} + +#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag) + +static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow, + unsigned long flag) +{ + /* test_and_set_bit() provides all necessary barriers */ + return test_and_set_bit(flag, &flow->flags); +} + +#define flow_flag_test_and_set(flow, flag) \ + __flow_flag_test_and_set(flow, \ + MLX5E_TC_FLOW_FLAG_##flag) + +static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag) +{ + /* Complete all memory stores before clearing bit. */ + smp_mb__before_atomic(); + clear_bit(flag, &flow->flags); +} + +#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \ + MLX5E_TC_FLOW_FLAG_##flag) + +static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag) +{ + bool ret = test_bit(flag, &flow->flags); + + /* Read fields of flow structure only after checking flags. 
*/ + smp_mb__after_atomic(); + return ret; +} + +#define flow_flag_test(flow, flag) __flow_flag_test(flow, \ + MLX5E_TC_FLOW_FLAG_##flag) + +static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow) +{ + return flow_flag_test(flow, ESWITCH); +} + +static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow) +{ + return flow_flag_test(flow, OFFLOADED); +} + static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key) { return jhash(key->actions, @@ -205,9 +280,9 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv, { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int num_actions, actions_size, namespace, err; + bool found = false, is_eswitch_flow; struct mlx5e_mod_hdr_entry *mh; struct mod_hdr_key key; - bool found = false; u32 hash_key; num_actions = parse_attr->num_mod_hdr_actions; @@ -218,7 +293,8 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv, hash_key = hash_mod_hdr_info(&key); - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { + is_eswitch_flow = mlx5e_is_eswitch_flow(flow); + if (is_eswitch_flow) { namespace = MLX5_FLOW_NAMESPACE_FDB; hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh, mod_hdr_hlist, hash_key) { @@ -257,14 +333,14 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv, if (err) goto out_err; - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + if (is_eswitch_flow) hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key); else hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key); attach_flow: list_add(&flow->mod_hdr, &mh->flows); - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + if (is_eswitch_flow) flow->esw_attr->mod_hdr_id = mh->mod_hdr_id; else flow->nic_attr->mod_hdr_id = mh->mod_hdr_id; @@ -281,6 +357,10 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv, { struct list_head *next = flow->mod_hdr.next; + /* flow wasn't fully initialized */ + if (list_empty(&flow->mod_hdr)) + return; + list_del(&flow->mod_hdr); if (list_empty(next)) { @@ -675,7 +755,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, attach_flow: if (hpe->hp->num_channels > 1) { - flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS; + flow_flag_set(flow, HAIRPIN_RSS); flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t; } else { flow->nic_attr->hairpin_tirn = hpe->hp->tirn; @@ -694,6 +774,10 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv, { struct list_head *next = flow->hairpin.next; + /* flow wasn't fully initialized */ + if (list_empty(&flow->hairpin)) + return; + list_del(&flow->hairpin); /* no more hairpin flows for us, release the hairpin pair */ @@ -727,18 +811,17 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, .flags = FLOW_ACT_NO_APPEND, }; struct mlx5_fc *counter = NULL; - bool table_created = false; int err, dest_ix = 0; flow_context->flags |= FLOW_CONTEXT_HAS_TAG; flow_context->flow_tag = attr->flow_tag; - if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) { + if (flow_flag_test(flow, HAIRPIN)) { err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack); - if (err) { - goto err_add_hairpin_flow; - } - if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) { + if (err) + return err; + + if (flow_flag_test(flow, HAIRPIN_RSS)) { dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest[dest_ix].ft = attr->hairpin_ft; } else { @@ -754,10 +837,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { counter = mlx5_fc_create(dev, true); - if (IS_ERR(counter)) { - err = PTR_ERR(counter); - goto err_fc_create; - } + if (IS_ERR(counter)) + return PTR_ERR(counter); + dest[dest_ix].type = 
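/* The flow_flag helpers above pair set_bit()/clear_bit()/test_bit()
 * with explicit barriers so that a reader observing a flag (e.g.
 * OFFLOADED) also observes every store made to the flow before the
 * flag was set. A condensed sketch of the idiom, assuming a
 * hypothetical object with an unsigned long flags word:
 *
 *	enum { OBJ_PUBLISHED };		// bit number, not a mask
 *
 *	static void obj_publish(struct obj *o)
 *	{
 *		o->payload = compute_payload();	// plain stores first
 *		smp_mb__before_atomic();	// order stores before set_bit()
 *		set_bit(OBJ_PUBLISHED, &o->flags);
 *	}
 *
 *	static bool obj_is_published(struct obj *o)
 *	{
 *		bool ret = test_bit(OBJ_PUBLISHED, &o->flags);
 *
 *		smp_mb__after_atomic();	// flag read before payload reads
 *		return ret;
 *	}
 */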
MLX5_FLOW_DESTINATION_TYPE_COUNTER; dest[dest_ix].counter_id = mlx5_fc_id(counter); dest_ix++; @@ -769,9 +851,10 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, flow_act.modify_id = attr->mod_hdr_id; kfree(parse_attr->mod_hdr_actions); if (err) - goto err_create_mod_hdr_id; + return err; } + mutex_lock(&priv->fs.tc.t_lock); if (IS_ERR_OR_NULL(priv->fs.tc.t)) { int tc_grp_size, tc_tbl_size; u32 max_flow_counter; @@ -791,15 +874,13 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, MLX5E_TC_TABLE_NUM_GROUPS, MLX5E_TC_FT_LEVEL, 0); if (IS_ERR(priv->fs.tc.t)) { + mutex_unlock(&priv->fs.tc.t_lock); NL_SET_ERR_MSG_MOD(extack, "Failed to create tc offload table\n"); netdev_err(priv->netdev, "Failed to create tc offload table\n"); - err = PTR_ERR(priv->fs.tc.t); - goto err_create_ft; + return PTR_ERR(priv->fs.tc.t); } - - table_created = true; } if (attr->match_level != MLX5_MATCH_NONE) @@ -807,29 +888,12 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec, &flow_act, dest, dest_ix); + mutex_unlock(&priv->fs.tc.t_lock); - if (IS_ERR(flow->rule[0])) { - err = PTR_ERR(flow->rule[0]); - goto err_add_rule; - } + if (IS_ERR(flow->rule[0])) + return PTR_ERR(flow->rule[0]); return 0; - -err_add_rule: - if (table_created) { - mlx5_destroy_flow_table(priv->fs.tc.t); - priv->fs.tc.t = NULL; - } -err_create_ft: - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - mlx5e_detach_mod_hdr(priv, flow); -err_create_mod_hdr_id: - mlx5_fc_destroy(dev, counter); -err_fc_create: - if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) - mlx5e_hairpin_flow_del(priv, flow); -err_add_hairpin_flow: - return err; } static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, @@ -839,18 +903,21 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, struct mlx5_fc *counter = NULL; counter = attr->counter; - mlx5_del_flow_rules(flow->rule[0]); + if (!IS_ERR_OR_NULL(flow->rule[0])) + mlx5_del_flow_rules(flow->rule[0]); mlx5_fc_destroy(priv->mdev, counter); - if (!mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD) && priv->fs.tc.t) { + mutex_lock(&priv->fs.tc.t_lock); + if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) { mlx5_destroy_flow_table(priv->fs.tc.t); priv->fs.tc.t = NULL; } + mutex_unlock(&priv->fs.tc.t_lock); if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) mlx5e_detach_mod_hdr(priv, flow); - if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) + if (flow_flag_test(flow, HAIRPIN)) mlx5e_hairpin_flow_del(priv, flow); } @@ -885,7 +952,6 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, } } - flow->flags |= MLX5E_TC_FLOW_OFFLOADED; return rule; } @@ -894,7 +960,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, struct mlx5e_tc_flow *flow, struct mlx5_esw_flow_attr *attr) { - flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; + flow_flag_clear(flow, OFFLOADED); if (attr->split_count) mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr); @@ -917,7 +983,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr); if (!IS_ERR(rule)) - flow->flags |= MLX5E_TC_FLOW_SLOW; + flow_flag_set(flow, SLOW); return rule; } @@ -932,7 +998,26 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, slow_attr->split_count = 0; slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN; mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr); - flow->flags &= ~MLX5E_TC_FLOW_SLOW; + flow_flag_clear(flow, SLOW); +} + +/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this + * function. 
+ */ +static void unready_flow_add(struct mlx5e_tc_flow *flow, + struct list_head *unready_flows) +{ + flow_flag_set(flow, NOT_READY); + list_add_tail(&flow->unready, unready_flows); +} + +/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this + * function. + */ +static void unready_flow_del(struct mlx5e_tc_flow *flow) +{ + list_del(&flow->unready); + flow_flag_clear(flow, NOT_READY); } static void add_unready_flow(struct mlx5e_tc_flow *flow) @@ -945,14 +1030,24 @@ static void add_unready_flow(struct mlx5e_tc_flow *flow) rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); uplink_priv = &rpriv->uplink_priv; - flow->flags |= MLX5E_TC_FLOW_NOT_READY; - list_add_tail(&flow->unready, &uplink_priv->unready_flows); + mutex_lock(&uplink_priv->unready_flows_lock); + unready_flow_add(flow, &uplink_priv->unready_flows); + mutex_unlock(&uplink_priv->unready_flows_lock); } static void remove_unready_flow(struct mlx5e_tc_flow *flow) { - list_del(&flow->unready); - flow->flags &= ~MLX5E_TC_FLOW_NOT_READY; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *rpriv; + struct mlx5_eswitch *esw; + + esw = flow->priv->mdev->priv.eswitch; + rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &rpriv->uplink_priv; + + mutex_lock(&uplink_priv->unready_flows_lock); + unready_flow_del(flow); + mutex_unlock(&uplink_priv->unready_flows_lock); } static int @@ -980,14 +1075,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, if (attr->chain > max_chain) { NL_SET_ERR_MSG(extack, "Requested chain is out of supported range"); - err = -EOPNOTSUPP; - goto err_max_prio_chain; + return -EOPNOTSUPP; } if (attr->prio > max_prio) { NL_SET_ERR_MSG(extack, "Requested priority is out of supported range"); - err = -EOPNOTSUPP; - goto err_max_prio_chain; + return -EOPNOTSUPP; } for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { @@ -1002,7 +1095,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, err = mlx5e_attach_encap(priv, flow, out_dev, out_index, extack, &encap_dev, &encap_valid); if (err) - goto err_attach_encap; + return err; out_priv = netdev_priv(encap_dev); rpriv = out_priv->ppriv; @@ -1012,21 +1105,19 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, err = mlx5_eswitch_add_vlan_action(esw, attr); if (err) - goto err_add_vlan; + return err; if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); kfree(parse_attr->mod_hdr_actions); if (err) - goto err_mod_hdr; + return err; } if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { counter = mlx5_fc_create(attr->counter_dev, true); - if (IS_ERR(counter)) { - err = PTR_ERR(counter); - goto err_create_counter; - } + if (IS_ERR(counter)) + return PTR_ERR(counter); attr->counter = counter; } @@ -1044,27 +1135,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr); } - if (IS_ERR(flow->rule[0])) { - err = PTR_ERR(flow->rule[0]); - goto err_add_rule; - } + if (IS_ERR(flow->rule[0])) + return PTR_ERR(flow->rule[0]); + else + flow_flag_set(flow, OFFLOADED); return 0; - -err_add_rule: - mlx5_fc_destroy(attr->counter_dev, counter); -err_create_counter: - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - mlx5e_detach_mod_hdr(priv, flow); -err_mod_hdr: - mlx5_eswitch_del_vlan_action(esw, attr); -err_add_vlan: - for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) - if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) - mlx5e_detach_encap(priv, flow, out_index); 
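/* The error-label unwinds being deleted here become unnecessary once
 * teardown tolerates a partially initialized flow: every list_head is
 * INIT_LIST_HEAD()-ed at allocation and each detach helper (see the
 * "flow wasn't fully initialized" guards above) bails out on
 * list_empty(), so a single put can release whatever was actually
 * attached. Schematic version of such a guard, names hypothetical:
 *
 *	static void obj_detach_resource(struct obj *o)
 *	{
 *		if (list_empty(&o->res_list))	// never attached
 *			return;
 *		list_del(&o->res_list);
 *		obj_release_backing(o);
 *	}
 *
 * Construction paths can then simply "return err" at any point and let
 * the final mlx5e_flow_put() run all detach helpers unconditionally.
 */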
-err_attach_encap: -err_max_prio_chain: - return err; } static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow) @@ -1088,14 +1164,14 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr slow_attr; int out_index; - if (flow->flags & MLX5E_TC_FLOW_NOT_READY) { + if (flow_flag_test(flow, NOT_READY)) { remove_unready_flow(flow); kvfree(attr->parse_attr); return; } - if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { - if (flow->flags & MLX5E_TC_FLOW_SLOW) + if (mlx5e_is_offloaded_flow(flow)) { + if (flow_flag_test(flow, SLOW)) mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr); else mlx5e_tc_unoffload_fdb_rules(esw, flow, attr); @@ -1123,9 +1199,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_esw_flow_attr slow_attr, *esw_attr; + struct encap_flow_item *efi, *tmp; struct mlx5_flow_handle *rule; struct mlx5_flow_spec *spec; - struct encap_flow_item *efi; struct mlx5e_tc_flow *flow; int err; @@ -1142,11 +1218,14 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, e->flags |= MLX5_ENCAP_ENTRY_VALID; mlx5e_rep_queue_neigh_stats_work(priv); - list_for_each_entry(efi, &e->flows, list) { + list_for_each_entry_safe(efi, tmp, &e->flows, list) { bool all_flow_encaps_valid = true; int i; flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); + if (IS_ERR(mlx5e_flow_get(flow))) + continue; + esw_attr = flow->esw_attr; spec = &esw_attr->parse_attr->spec; @@ -1166,19 +1245,23 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, } /* Do not offload flows with unresolved neighbors */ if (!all_flow_encaps_valid) - continue; + goto loop_cont; /* update from slow path rule to encap rule */ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr); if (IS_ERR(rule)) { err = PTR_ERR(rule); mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", err); - continue; + goto loop_cont; } mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr); - flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */ flow->rule[0] = rule; + /* was unset when slow path rule removed */ + flow_flag_set(flow, OFFLOADED); + +loop_cont: + mlx5e_flow_put(priv, flow); } } @@ -1187,14 +1270,17 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_esw_flow_attr slow_attr; + struct encap_flow_item *efi, *tmp; struct mlx5_flow_handle *rule; struct mlx5_flow_spec *spec; - struct encap_flow_item *efi; struct mlx5e_tc_flow *flow; int err; - list_for_each_entry(efi, &e->flows, list) { + list_for_each_entry_safe(efi, tmp, &e->flows, list) { flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); + if (IS_ERR(mlx5e_flow_get(flow))) + continue; + spec = &flow->esw_attr->parse_attr->spec; /* update from encap rule to slow path rule */ @@ -1206,12 +1292,16 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, err = PTR_ERR(rule); mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n", err); - continue; + goto loop_cont; } mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr); - flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */ flow->rule[0] = rule; + /* was unset when fast path rule removed */ + flow_flag_set(flow, OFFLOADED); + +loop_cont: + mlx5e_flow_put(priv, flow); } /* we know that the encap is valid */ @@ -1221,7 +1311,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, static struct mlx5_fc 
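/* mlx5e_tc_encap_flows_add()/_del() above now iterate with
 * list_for_each_entry_safe() plus a get/put per entry: the temporary
 * reference keeps each flow alive across the loop body even if a
 * concurrent delete drops the last user reference, and the _safe
 * variant allows the entry to vanish from the list once it is put.
 * The shape, straight from the code above:
 *
 *	list_for_each_entry_safe(efi, tmp, &e->flows, list) {
 *		flow = container_of(efi, struct mlx5e_tc_flow,
 *				    encaps[efi->index]);
 *		if (IS_ERR(mlx5e_flow_get(flow)))	// dying: skip it
 *			continue;
 *		...	// flow cannot be freed while we hold the ref
 *		mlx5e_flow_put(priv, flow);	// may free it right here
 *	}
 */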
*mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow) { - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + if (mlx5e_is_eswitch_flow(flow)) return flow->esw_attr->counter; else return flow->nic_attr->counter; @@ -1248,20 +1338,26 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) return; list_for_each_entry(e, &nhe->encap_list, encap_list) { - struct encap_flow_item *efi; + struct encap_flow_item *efi, *tmp; if (!(e->flags & MLX5_ENCAP_ENTRY_VALID)) continue; - list_for_each_entry(efi, &e->flows, list) { + list_for_each_entry_safe(efi, tmp, &e->flows, list) { flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); - if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { + if (IS_ERR(mlx5e_flow_get(flow))) + continue; + + if (mlx5e_is_offloaded_flow(flow)) { counter = mlx5e_tc_get_counter(flow); lastuse = mlx5_fc_query_lastuse(counter); if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { + mlx5e_flow_put(netdev_priv(e->out_dev), flow); neigh_used = true; break; } } + + mlx5e_flow_put(netdev_priv(e->out_dev), flow); } if (neigh_used) break; @@ -1287,6 +1383,10 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv, { struct list_head *next = flow->encaps[out_index].list.next; + /* flow wasn't fully initialized */ + if (list_empty(&flow->encaps[out_index].list)) + return; + list_del(&flow->encaps[out_index].list); if (list_empty(next)) { struct mlx5e_encap_entry *e; @@ -1307,15 +1407,15 @@ static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch; - if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) || - !(flow->flags & MLX5E_TC_FLOW_DUP)) + if (!flow_flag_test(flow, ESWITCH) || + !flow_flag_test(flow, DUP)) return; mutex_lock(&esw->offloads.peer_mutex); list_del(&flow->peer); mutex_unlock(&esw->offloads.peer_mutex); - flow->flags &= ~MLX5E_TC_FLOW_DUP; + flow_flag_clear(flow, DUP); mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow); kvfree(flow->peer_flow); @@ -1339,7 +1439,7 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow) static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { + if (mlx5e_is_eswitch_flow(flow)) { mlx5e_tc_del_fdb_peer_flow(flow); mlx5e_tc_del_fdb_flow(priv, flow); } else { @@ -1837,11 +1937,13 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5e_rep_priv *rpriv = priv->ppriv; u8 match_level, tunnel_match_level = MLX5_MATCH_NONE; struct mlx5_eswitch_rep *rep; + bool is_eswitch_flow; int err; err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level); - if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { + is_eswitch_flow = mlx5e_is_eswitch_flow(flow); + if (!err && is_eswitch_flow) { rep = rpriv->rep; if (rep->vport != MLX5_VPORT_UPLINK && (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && @@ -1855,7 +1957,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, } } - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { + if (is_eswitch_flow) { flow->esw_attr->match_level = match_level; flow->esw_attr->tunnel_match_level = tunnel_match_level; } else { @@ -2376,14 +2478,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv, { u32 actions; - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + if (mlx5e_is_eswitch_flow(flow)) actions = flow->esw_attr->action; else actions = flow->nic_attr->action; - if (flow->flags & MLX5E_TC_FLOW_EGRESS && + if (flow_flag_test(flow, EGRESS) && !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) || - (actions 
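/* Lifetime rule behind the mlx5e_flow_get()/mlx5e_flow_put() pair used
 * in the loops above: refcount_inc_not_zero() refuses to resurrect an
 * object whose count already reached zero, and kfree_rcu() defers the
 * actual free until concurrent RCU readers (the rhashtable lookups
 * further down) are done with the memory. Generic form, names
 * illustrative:
 *
 *	struct obj *obj_get(struct obj *o)
 *	{
 *		if (!o || !refcount_inc_not_zero(&o->refcnt))
 *			return ERR_PTR(-EINVAL);
 *		return o;
 *	}
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (refcount_dec_and_test(&o->refcnt)) {
 *			obj_teardown(o);	// HW rules, lists, counters
 *			kfree_rcu(o, rcu_head);	// free after grace period
 *		}
 *	}
 */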
& MLX5_FLOW_CONTEXT_ACTION_VLAN_POP))) + (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || + (actions & MLX5_FLOW_CONTEXT_ACTION_DROP))) return false; if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) @@ -2533,7 +2636,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, if (priv->netdev->netdev_ops == peer_dev->netdev_ops && same_hw_devs(priv, netdev_priv(peer_dev))) { parse_attr->mirred_ifindex[0] = peer_dev->ifindex; - flow->flags |= MLX5E_TC_FLOW_HAIRPIN; + flow_flag_set(flow, HAIRPIN); action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; } else { @@ -2881,12 +2984,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, if (netdev_port_same_parent_id(priv->netdev, out_dev)) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); - struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev); + struct net_device *uplink_upper; + rcu_read_lock(); + uplink_upper = + netdev_master_upper_dev_get_rcu(uplink_dev); if (uplink_upper && netif_is_lag_master(uplink_upper) && uplink_upper == out_dev) out_dev = uplink_dev; + rcu_read_unlock(); if (is_vlan_dev(out_dev)) { err = add_vlan_push_action(priv, attr, @@ -3057,19 +3164,19 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return 0; } -static void get_flags(int flags, u16 *flow_flags) +static void get_flags(int flags, unsigned long *flow_flags) { - u16 __flow_flags = 0; + unsigned long __flow_flags = 0; - if (flags & MLX5E_TC_INGRESS) - __flow_flags |= MLX5E_TC_FLOW_INGRESS; - if (flags & MLX5E_TC_EGRESS) - __flow_flags |= MLX5E_TC_FLOW_EGRESS; + if (flags & MLX5_TC_FLAG(INGRESS)) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS); + if (flags & MLX5_TC_FLAG(EGRESS)) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS); - if (flags & MLX5E_TC_ESW_OFFLOAD) - __flow_flags |= MLX5E_TC_FLOW_ESWITCH; - if (flags & MLX5E_TC_NIC_OFFLOAD) - __flow_flags |= MLX5E_TC_FLOW_NIC; + if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH); + if (flags & MLX5_TC_FLAG(NIC_OFFLOAD)) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC); *flow_flags = __flow_flags; } @@ -3081,12 +3188,13 @@ static const struct rhashtable_params tc_ht_params = { .automatic_shrinking = true, }; -static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags) +static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, + unsigned long flags) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_rep_priv *uplink_rpriv; - if (flags & MLX5E_TC_ESW_OFFLOAD) { + if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) { uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); return &uplink_rpriv->uplink_priv.tc_ht; } else /* NIC offload */ @@ -3097,7 +3205,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow) { struct mlx5_esw_flow_attr *attr = flow->esw_attr; bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK && - flow->flags & MLX5E_TC_FLOW_INGRESS; + flow_flag_test(flow, INGRESS); bool act_is_encap = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT); bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom, @@ -3116,13 +3224,13 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow) static int mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, - struct flow_cls_offload *f, u16 flow_flags, + struct flow_cls_offload *f, unsigned long flow_flags, struct mlx5e_tc_flow_parse_attr **__parse_attr, struct mlx5e_tc_flow **__flow) { struct mlx5e_tc_flow_parse_attr 
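/* parse_tc_fdb_actions() above queries the uplink's bonding master
 * under rcu_read_lock() rather than RTNL:
 * netdev_master_upper_dev_get_rcu() needs only the RCU read side, so
 * the offload path can drop its dependence on rtnl_lock for this
 * check. The pattern:
 *
 *	rcu_read_lock();
 *	upper = netdev_master_upper_dev_get_rcu(dev);
 *	if (upper && netif_is_lag_master(upper))
 *		...;	// 'upper' is valid only inside the read section
 *	rcu_read_unlock();
 */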
*parse_attr; struct mlx5e_tc_flow *flow; - int err; + int out_index, err; flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL); parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL); @@ -3134,6 +3242,11 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, flow->cookie = f->cookie; flow->flags = flow_flags; flow->priv = priv; + for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) + INIT_LIST_HEAD(&flow->encaps[out_index].list); + INIT_LIST_HEAD(&flow->mod_hdr); + INIT_LIST_HEAD(&flow->hairpin); + refcount_set(&flow->refcnt, 1); *__flow = flow; *__parse_attr = parse_attr; @@ -3173,7 +3286,7 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr, static struct mlx5e_tc_flow * __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f, - u16 flow_flags, + unsigned long flow_flags, struct net_device *filter_dev, struct mlx5_eswitch_rep *in_rep, struct mlx5_core_dev *in_mdev) @@ -3184,7 +3297,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow; int attr_size, err; - flow_flags |= MLX5E_TC_FLOW_ESWITCH; + flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH); attr_size = sizeof(struct mlx5_esw_flow_attr); err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags, &parse_attr, &flow); @@ -3216,15 +3329,14 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, return flow; err_free: - kfree(flow); - kvfree(parse_attr); + mlx5e_flow_put(priv, flow); out: return ERR_PTR(err); } static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f, struct mlx5e_tc_flow *flow, - u16 flow_flags) + unsigned long flow_flags) { struct mlx5e_priv *priv = flow->priv, *peer_priv; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw; @@ -3262,7 +3374,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f, } flow->peer_flow = peer_flow; - flow->flags |= MLX5E_TC_FLOW_DUP; + flow_flag_set(flow, DUP); mutex_lock(&esw->offloads.peer_mutex); list_add_tail(&flow->peer, &esw->offloads.peer_flows); mutex_unlock(&esw->offloads.peer_mutex); @@ -3275,7 +3387,7 @@ out: static int mlx5e_add_fdb_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f, - u16 flow_flags, + unsigned long flow_flags, struct net_device *filter_dev, struct mlx5e_tc_flow **__flow) { @@ -3309,7 +3421,7 @@ out: static int mlx5e_add_nic_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f, - u16 flow_flags, + unsigned long flow_flags, struct net_device *filter_dev, struct mlx5e_tc_flow **__flow) { @@ -3323,7 +3435,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv, if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common)) return -EOPNOTSUPP; - flow_flags |= MLX5E_TC_FLOW_NIC; + flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC); attr_size = sizeof(struct mlx5_nic_flow_attr); err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags, &parse_attr, &flow); @@ -3344,14 +3456,14 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv, if (err) goto err_free; - flow->flags |= MLX5E_TC_FLOW_OFFLOADED; + flow_flag_set(flow, OFFLOADED); kvfree(parse_attr); *__flow = flow; return 0; err_free: - kfree(flow); + mlx5e_flow_put(priv, flow); kvfree(parse_attr); out: return err; @@ -3360,12 +3472,12 @@ out: static int mlx5e_tc_add_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f, - int flags, + unsigned long flags, struct net_device *filter_dev, struct mlx5e_tc_flow **flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - u16 flow_flags; + unsigned long flow_flags; int err; get_flags(flags, &flow_flags); @@ -3384,14 +3496,16 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv, } int 
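/* mlx5e_alloc_flow() above leaves the flow in a state where
 * mlx5e_flow_put() is always a legal error path: the refcount starts
 * at 1 and every list_head is initialized up front, so the err_free
 * labels can drop the reference instead of open-coding teardown plus
 * kfree(). Condensed from the code above:
 *
 *	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
 *	...
 *	INIT_LIST_HEAD(&flow->mod_hdr);
 *	INIT_LIST_HEAD(&flow->hairpin);
 *	refcount_set(&flow->refcnt, 1);	// caller owns the first ref
 *	...
 *	err_free:
 *		mlx5e_flow_put(priv, flow);	// safe even if nothing attached
 */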
mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags) + struct flow_cls_offload *f, unsigned long flags) { struct netlink_ext_ack *extack = f->common.extack; struct rhashtable *tc_ht = get_tc_ht(priv, flags); struct mlx5e_tc_flow *flow; int err = 0; - flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); + rcu_read_lock(); + flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params); + rcu_read_unlock(); if (flow) { NL_SET_ERR_MSG_MOD(extack, "flow cookie already exists, ignoring"); @@ -3406,51 +3520,62 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, if (err) goto out; - err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params); + err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params); if (err) goto err_free; return 0; err_free: - mlx5e_tc_del_flow(priv, flow); - kfree(flow); + mlx5e_flow_put(priv, flow); out: return err; } -#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS) -#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS) - static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags) { - if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK)) - return true; + bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS)); + bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS)); - return false; + return flow_flag_test(flow, INGRESS) == dir_ingress && + flow_flag_test(flow, EGRESS) == dir_egress; } int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags) + struct flow_cls_offload *f, unsigned long flags) { struct rhashtable *tc_ht = get_tc_ht(priv, flags); struct mlx5e_tc_flow *flow; + int err; + rcu_read_lock(); flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); - if (!flow || !same_flow_direction(flow, flags)) - return -EINVAL; + if (!flow || !same_flow_direction(flow, flags)) { + err = -EINVAL; + goto errout; + } + /* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag + * set. 
+ */ + if (flow_flag_test_and_set(flow, DELETED)) { + err = -EINVAL; + goto errout; + } rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params); + rcu_read_unlock(); - mlx5e_tc_del_flow(priv, flow); - - kfree(flow); + mlx5e_flow_put(priv, flow); return 0; + +errout: + rcu_read_unlock(); + return err; } int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags) + struct flow_cls_offload *f, unsigned long flags) { struct mlx5_devcom *devcom = priv->mdev->priv.devcom; struct rhashtable *tc_ht = get_tc_ht(priv, flags); @@ -3460,15 +3585,24 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, u64 lastuse = 0; u64 packets = 0; u64 bytes = 0; + int err = 0; - flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); - if (!flow || !same_flow_direction(flow, flags)) - return -EINVAL; + rcu_read_lock(); + flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie, + tc_ht_params)); + rcu_read_unlock(); + if (IS_ERR(flow)) + return PTR_ERR(flow); + + if (!same_flow_direction(flow, flags)) { + err = -EINVAL; + goto errout; + } - if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { + if (mlx5e_is_offloaded_flow(flow)) { counter = mlx5e_tc_get_counter(flow); if (!counter) - return 0; + goto errout; mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); } @@ -3480,8 +3614,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, if (!peer_esw) goto out; - if ((flow->flags & MLX5E_TC_FLOW_DUP) && - (flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) { + if (flow_flag_test(flow, DUP) && + flow_flag_test(flow->peer_flow, OFFLOADED)) { u64 bytes2; u64 packets2; u64 lastuse2; @@ -3500,10 +3634,111 @@ no_peer_counter: mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); out: flow_stats_update(&f->stats, bytes, packets, lastuse); +errout: + mlx5e_flow_put(priv, flow); + return err; +} + +static int apply_police_params(struct mlx5e_priv *priv, u32 rate, + struct netlink_ext_ack *extack) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch *esw; + u16 vport_num; + u32 rate_mbps; + int err; + + esw = priv->mdev->priv.eswitch; + /* rate is given in bytes/sec. + * First convert to bits/sec and then round to the nearest mbit/secs. + * mbit means million bits. + * Moreover, if rate is non zero we choose to configure to a minimum of + * 1 mbit/sec. + */ + rate_mbps = rate ? 
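/* Worked example of the conversion this expression performs: a police
 * rate of 1,250,000 bytes/sec is 10,000,000 bits/sec, and
 * (10000000 + 500000) / 1000000 rounds to 10 mbit/sec. A rate of
 * 1 byte/sec yields (8 + 500000) / 1000000 == 0, which max_t() clamps
 * to the 1 mbit/sec minimum described in the comment above.
 */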
max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0; + vport_num = rpriv->rep->vport; + + err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); + if (err) + NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); + + return err; +} + +static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, + struct netlink_ext_ack *extack) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + const struct flow_action_entry *act; + int err; + int i; + + if (!flow_action_has_entries(flow_action)) { + NL_SET_ERR_MSG_MOD(extack, "matchall called with no action"); + return -EINVAL; + } + + if (!flow_offload_has_one_action(flow_action)) { + NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action"); + return -EOPNOTSUPP; + } + + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_POLICE: + err = apply_police_params(priv, act->police.rate_bytes_ps, extack); + if (err) + return err; + + rpriv->prev_vf_vport_stats = priv->stats.vf_vport; + break; + default: + NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall"); + return -EOPNOTSUPP; + } + } return 0; } +int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma) +{ + struct netlink_ext_ack *extack = ma->common.extack; + int prio = TC_H_MAJ(ma->common.prio) >> 16; + + if (prio != 1) { + NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported"); + return -EINVAL; + } + + return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack); +} + +int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma) +{ + struct netlink_ext_ack *extack = ma->common.extack; + + return apply_police_params(priv, 0, extack); +} + +void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct rtnl_link_stats64 cur_stats; + u64 dbytes; + u64 dpkts; + + cur_stats = priv->stats.vf_vport; + dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; + dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; + rpriv->prev_vf_vport_stats = cur_stats; + flow_stats_update(&ma->stats, dpkts, dbytes, jiffies); +} + static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) { @@ -3555,6 +3790,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) struct mlx5e_tc_table *tc = &priv->fs.tc; int err; + mutex_init(&tc->t_lock); hash_init(tc->mod_hdr_tbl); hash_init(tc->hairpin_tbl); @@ -3593,6 +3829,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) mlx5_destroy_flow_table(tc->t); tc->t = NULL; } + mutex_destroy(&tc->t_lock); } int mlx5e_tc_esw_init(struct rhashtable *tc_ht) @@ -3605,7 +3842,7 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); } -int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags) +int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags) { struct rhashtable *tc_ht = get_tc_ht(priv, flags); @@ -3627,10 +3864,10 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work) reoffload_flows_work); struct mlx5e_tc_flow *flow, *tmp; - rtnl_lock(); + mutex_lock(&rpriv->unready_flows_lock); list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) { if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL)) - remove_unready_flow(flow); + unready_flow_del(flow); } - rtnl_unlock(); + mutex_unlock(&rpriv->unready_flows_lock); } diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 3ab39275ca7d..20f045e96c92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -40,13 +40,15 @@ #ifdef CONFIG_MLX5_ESWITCH enum { - MLX5E_TC_INGRESS = BIT(0), - MLX5E_TC_EGRESS = BIT(1), - MLX5E_TC_NIC_OFFLOAD = BIT(2), - MLX5E_TC_ESW_OFFLOAD = BIT(3), - MLX5E_TC_LAST_EXPORTED_BIT = 3, + MLX5E_TC_FLAG_INGRESS_BIT, + MLX5E_TC_FLAG_EGRESS_BIT, + MLX5E_TC_FLAG_NIC_OFFLOAD_BIT, + MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, + MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, }; +#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT) + int mlx5e_tc_nic_init(struct mlx5e_priv *priv); void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv); @@ -54,12 +56,19 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht); void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht); int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags); + struct flow_cls_offload *f, unsigned long flags); int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags); + struct flow_cls_offload *f, unsigned long flags); int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags); + struct flow_cls_offload *f, unsigned long flags); + +int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *f); +int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *f); +void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma); struct mlx5e_encap_entry; void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, @@ -70,7 +79,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, struct mlx5e_neigh_hash_entry; void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe); -int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags); +int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags); void mlx5e_tc_reoffload_flows_work(struct work_struct *work); @@ -80,7 +89,11 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, #else /* CONFIG_MLX5_ESWITCH */ static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} -static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags) { return 0; } +static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv, + unsigned long flags) +{ + return 0; +} #endif #endif /* __MLX5_EN_TC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 600e92cb629a..d3a67a9b4eba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -210,7 +210,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; int fsz = skb_frag_size(frag); dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, @@ -292,8 +292,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; stats->packets += skb_shinfo(skb)->gso_segs; } else { - u8 mode = mlx5e_transport_inline_tx_wqe(wqe) ? 
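/* MLX5_TC_FLAG() above (and the flow_flag_*() wrappers earlier in
 * en_tc.c) use token pasting so call sites can name flags symbolically
 * while the enum stores plain bit numbers, which is what set_bit() and
 * friends expect, rather than masks. Minimal illustration with
 * hypothetical names:
 *
 *	enum { OBJ_FLAG_READY_BIT, OBJ_FLAG_BUSY_BIT };
 *
 *	#define OBJ_FLAG(f)	   BIT(OBJ_FLAG_##f##_BIT)	// mask form
 *	#define obj_flag_set(o, f) set_bit(OBJ_FLAG_##f##_BIT, &(o)->flags)
 *
 *	// obj_flag_set(o, READY) -> set_bit(OBJ_FLAG_READY_BIT, &o->flags)
 */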
- MLX5_INLINE_MODE_TCP_UDP : sq->min_inline_mode; + u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb); opcode = MLX5_OPCODE_SEND; mss = 0; @@ -608,9 +607,11 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; stats->packets += skb_shinfo(skb)->gso_segs; } else { + u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb); + opcode = MLX5_OPCODE_SEND; mss = 0; - ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); + ihs = mlx5e_calc_min_inline(mode, skb); num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); stats->packets++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 41f25ea2e8d9..2df9aaa421c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -215,11 +215,7 @@ static int mlx5_eq_async_int(struct notifier_block *nb, */ dma_rmb(); - if (likely(eqe->type < MLX5_EVENT_TYPE_MAX)) - atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe); - else - mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type); - + atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe); atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe); ++eq->cons_index; @@ -945,9 +941,6 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb) { struct mlx5_eq_table *eqt = dev->priv.eq_table; - if (nb->event_type >= MLX5_EVENT_TYPE_MAX) - return -EINVAL; - return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb); } EXPORT_SYMBOL(mlx5_eq_notifier_register); @@ -956,9 +949,6 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb) { struct mlx5_eq_table *eqt = dev->priv.eq_table; - if (nb->event_type >= MLX5_EVENT_TYPE_MAX) - return -EINVAL; - return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb); } EXPORT_SYMBOL(mlx5_eq_notifier_unregister); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 1f3891fde2eb..5fbebee7254d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -58,20 +58,9 @@ struct vport_addr { bool mc_promisc; }; -enum { - UC_ADDR_CHANGE = BIT(0), - MC_ADDR_CHANGE = BIT(1), - PROMISC_CHANGE = BIT(3), -}; - static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw); static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw); -/* Vport context events */ -#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \ - MC_ADDR_CHANGE | \ - PROMISC_CHANGE) - struct mlx5_vport *__must_check mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num) { @@ -108,13 +97,13 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1); - if (events_mask & UC_ADDR_CHANGE) + if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE) MLX5_SET(nic_vport_context, nic_vport_ctx, event_on_uc_address_change, 1); - if (events_mask & MC_ADDR_CHANGE) + if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE) MLX5_SET(nic_vport_context, nic_vport_ctx, event_on_mc_address_change, 1); - if (events_mask & PROMISC_CHANGE) + if (events_mask & MLX5_VPORT_PROMISC_CHANGE) MLX5_SET(nic_vport_context, nic_vport_ctx, event_on_promisc_change, 1); @@ -463,6 +452,22 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw) return err; } +#define MLX5_LEGACY_SRIOV_VPORT_EVENTS 
(MLX5_VPORT_UC_ADDR_CHANGE | \ + MLX5_VPORT_MC_ADDR_CHANGE | \ + MLX5_VPORT_PROMISC_CHANGE) + +static int esw_legacy_enable(struct mlx5_eswitch *esw) +{ + int ret; + + ret = esw_create_legacy_table(esw); + if (ret) + return ret; + + mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); + return 0; +} + static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) { esw_cleanup_vepa_rules(esw); @@ -470,6 +475,19 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) esw_destroy_legacy_vepa_table(esw); } +static void esw_legacy_disable(struct mlx5_eswitch *esw) +{ + struct esw_mc_addr *mc_promisc; + + mlx5_eswitch_disable_pf_vf_vports(esw); + + mc_promisc = &esw->mc_promisc; + if (mc_promisc->uplink_rule) + mlx5_del_flow_rules(mc_promisc->uplink_rule); + + esw_destroy_legacy_table(esw); +} + /* E-Switch vport UC/MC lists management */ typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, struct vport_addr *vaddr); @@ -901,21 +919,21 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport) esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n", vport->vport, mac); - if (vport->enabled_events & UC_ADDR_CHANGE) { + if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) { esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC); esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC); } - if (vport->enabled_events & MC_ADDR_CHANGE) + if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE) esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC); - if (vport->enabled_events & PROMISC_CHANGE) { + if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) { esw_update_vport_rx_mode(esw, vport); if (!IS_ERR_OR_NULL(vport->allmulti_rule)) esw_update_vport_mc_promisc(esw, vport); } - if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) + if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE)) esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC); esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport); @@ -1393,18 +1411,49 @@ out: return err; } +static bool element_type_supported(struct mlx5_eswitch *esw, int type) +{ + struct mlx5_core_dev *dev = esw->dev; + + switch (type) { + case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR: + return MLX5_CAP_QOS(dev, esw_element_type) & + ELEMENT_TYPE_CAP_MASK_TASR; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT: + return MLX5_CAP_QOS(dev, esw_element_type) & + ELEMENT_TYPE_CAP_MASK_VPORT; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC: + return MLX5_CAP_QOS(dev, esw_element_type) & + ELEMENT_TYPE_CAP_MASK_VPORT_TC; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC: + return MLX5_CAP_QOS(dev, esw_element_type) & + ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC; + } + return false; +} + /* Vport QoS management */ -static int esw_create_tsar(struct mlx5_eswitch *esw) +static void esw_create_tsar(struct mlx5_eswitch *esw) { u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; struct mlx5_core_dev *dev = esw->dev; + __be32 *attr; int err; if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) - return 0; + return; + + if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR)) + return; if (esw->qos.enabled) - return -EEXIST; + return; + + MLX5_SET(scheduling_context, tsar_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); + + attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes); + *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16); err = mlx5_create_scheduling_element_cmd(dev, 
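/* element_type_supported() above is a plain capability-bit probe:
 * MLX5_CAP_QOS(dev, esw_element_type) returns a bitmask advertised by
 * firmware, and each scheduling element type maps to one
 * ELEMENT_TYPE_CAP_MASK_* bit (ELEMENT_TYPE_CAP_MASK_TASR is the
 * pre-existing spelling of the TSAR mask). Generic shape:
 *
 *	static bool cap_supported(u32 cap_mask, u32 wanted)
 *	{
 *		return cap_mask & wanted;	// zero means "not offered"
 *	}
 *
 * With the check in place, esw_create_tsar() becomes void and quietly
 * skips TSAR creation on firmware that does not offer it, instead of
 * returning an error the caller could only warn about.
 */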
SCHEDULING_HIERARCHY_E_SWITCH, @@ -1412,11 +1461,10 @@ static int esw_create_tsar(struct mlx5_eswitch *esw) &esw->qos.root_tsar_id); if (err) { esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err); - return err; + return; } esw->qos.enabled = true; - return 0; } static void esw_destroy_tsar(struct mlx5_eswitch *esw) @@ -1537,6 +1585,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, return 0; } +int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, + u32 rate_mbps) +{ + u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_vport *vport; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); + + return mlx5_modify_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + ctx, + vport->qos.esw_tsar_ix, + MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW); +} + static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) { ((u8 *)node_guid)[7] = mac[0]; @@ -1619,7 +1683,7 @@ static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport) } static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, - int enable_events) + enum mlx5_eswitch_vport_event enabled_events) { u16 vport_num = vport->vport; @@ -1641,7 +1705,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num); /* Sync with current vport context */ - vport->enabled_events = enable_events; + vport->enabled_events = enabled_events; vport->enabled = true; /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well @@ -1770,11 +1834,46 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw) /* Public E-Switch API */ #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev)) -int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) +/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs + * whichever are present on the eswitch. + */ +void +mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, + enum mlx5_eswitch_vport_event enabled_events) { struct mlx5_vport *vport; + int i; + + /* Enable PF vport */ + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); + esw_enable_vport(esw, vport, enabled_events); + + /* Enable ECPF vports */ + if (mlx5_ecpf_vport_exists(esw->dev)) { + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); + esw_enable_vport(esw, vport, enabled_events); + } + + /* Enable VF vports */ + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) + esw_enable_vport(esw, vport, enabled_events); +} + +/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs + * whichever are previously enabled on the eswitch. 
+ */ +void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw) +{ + struct mlx5_vport *vport; + int i; + + mlx5_esw_for_all_vports_reverse(esw, i, vport) + esw_disable_vport(esw, vport); +} + +int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) +{ int err; - int i, enabled_events; if (!ESW_ALLOWED(esw) || !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { @@ -1788,44 +1887,23 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support)) esw_warn(esw->dev, "engress ACL is not supported by FW\n"); + esw_create_tsar(esw); + esw->mode = mode; mlx5_lag_update(esw->dev); if (mode == MLX5_ESWITCH_LEGACY) { - err = esw_create_legacy_table(esw); - if (err) - goto abort; + err = esw_legacy_enable(esw); } else { mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - err = esw_offloads_init(esw); + err = esw_offloads_enable(esw); } if (err) goto abort; - err = esw_create_tsar(esw); - if (err) - esw_warn(esw->dev, "Failed to create eswitch TSAR"); - - enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? SRIOV_VPORT_EVENTS : - UC_ADDR_CHANGE; - - /* Enable PF vport */ - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); - esw_enable_vport(esw, vport, enabled_events); - - /* Enable ECPF vports */ - if (mlx5_ecpf_vport_exists(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); - esw_enable_vport(esw, vport, enabled_events); - } - - /* Enable VF vports */ - mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) - esw_enable_vport(esw, vport, enabled_events); - mlx5_eswitch_event_handlers_register(esw); esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n", @@ -1847,10 +1925,7 @@ abort: void mlx5_eswitch_disable(struct mlx5_eswitch *esw) { - struct esw_mc_addr *mc_promisc; - struct mlx5_vport *vport; int old_mode; - int i; if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE) return; @@ -1859,21 +1934,14 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw) esw->mode == MLX5_ESWITCH_LEGACY ? 
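/* mlx5_eswitch_disable_pf_vf_vports() above walks the vport array with
 * mlx5_esw_for_all_vports_reverse(), so vports come down in the
 * opposite order from mlx5_eswitch_enable_pf_vf_vports() (PF first,
 * then ECPF, then VFs on the way up). Mirroring setup order in
 * teardown is the usual way to keep ordering dependencies safe;
 * schematically:
 *
 *	for (i = 0; i < n; i++)		// setup: 0 .. n-1
 *		enable(&vports[i]);
 *	...
 *	for (i = n - 1; i >= 0; i--)	// teardown: n-1 .. 0
 *		disable(&vports[i]);
 */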
"LEGACY" : "OFFLOADS", esw->esw_funcs.num_vfs, esw->enabled_vports); - mc_promisc = &esw->mc_promisc; mlx5_eswitch_event_handlers_unregister(esw); - mlx5_esw_for_all_vports(esw, i, vport) - esw_disable_vport(esw, vport); - - if (mc_promisc && mc_promisc->uplink_rule) - mlx5_del_flow_rules(mc_promisc->uplink_rule); - - esw_destroy_tsar(esw); - if (esw->mode == MLX5_ESWITCH_LEGACY) - esw_destroy_legacy_table(esw); + esw_legacy_disable(esw); else if (esw->mode == MLX5_ESWITCH_OFFLOADS) - esw_offloads_cleanup(esw); + esw_offloads_disable(esw); + + esw_destroy_tsar(esw); old_mode = esw->mode; esw->mode = MLX5_ESWITCH_NONE; @@ -1933,6 +2001,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) hash_init(esw->offloads.encap_tbl); hash_init(esw->offloads.mod_hdr_tbl); + atomic64_set(&esw->offloads.num_flows, 0); mutex_init(&esw->state_lock); mlx5_esw_for_all_vports(esw, i, vport) { @@ -2085,23 +2154,19 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, if (vlan > 4095 || qos > 7) return -EINVAL; - mutex_lock(&esw->state_lock); - err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags); if (err) - goto unlock; + return err; evport->info.vlan = vlan; evport->info.qos = qos; if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) { err = esw_vport_ingress_config(esw, evport); if (err) - goto unlock; + return err; err = esw_vport_egress_config(esw, evport); } -unlock: - mutex_unlock(&esw->state_lock); return err; } @@ -2109,11 +2174,16 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, u16 vport, u16 vlan, u8 qos) { u8 set_flags = 0; + int err; if (vlan || qos) set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; - return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags); + mutex_lock(&esw->state_lock); + err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags); + mutex_unlock(&esw->state_lock); + + return err; } int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index a38e8a3c7c9a..804912e38dee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -35,6 +35,7 @@ #include <linux/if_ether.h> #include <linux/if_link.h> +#include <linux/atomic.h> #include <net/devlink.h> #include <linux/mlx5/device.h> #include <linux/mlx5/eswitch.h> @@ -101,6 +102,13 @@ struct mlx5_vport_info { bool trusted; }; +/* Vport context events */ +enum mlx5_eswitch_vport_event { + MLX5_VPORT_UC_ADDR_CHANGE = BIT(0), + MLX5_VPORT_MC_ADDR_CHANGE = BIT(1), + MLX5_VPORT_PROMISC_CHANGE = BIT(3), +}; + struct mlx5_vport { struct mlx5_core_dev *dev; int vport; @@ -122,7 +130,7 @@ struct mlx5_vport { } qos; bool enabled; - u16 enabled_events; + enum mlx5_eswitch_vport_event enabled_events; }; enum offloads_fdb_flags { @@ -179,7 +187,7 @@ struct mlx5_esw_offload { struct mutex termtbl_mutex; /* protects termtbl hash */ const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES]; u8 inline_mode; - u64 num_flows; + atomic64_t num_flows; enum devlink_eswitch_encap_mode encap; }; @@ -207,8 +215,11 @@ enum { struct mlx5_eswitch { struct mlx5_core_dev *dev; struct mlx5_nb nb; + /* legacy data structures */ struct mlx5_eswitch_fdb fdb_table; struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE]; + struct esw_mc_addr mc_promisc; + /* end of legacy */ struct workqueue_struct *work_queue; struct mlx5_vport *vports; u32 flags; @@ -218,7 +229,6 @@ struct mlx5_eswitch { * and async SRIOV admin state changes */ 
struct mutex state_lock; - struct esw_mc_addr mc_promisc; struct { bool enabled; @@ -233,8 +243,8 @@ struct mlx5_eswitch { struct mlx5_esw_functions esw_funcs; }; -void esw_offloads_cleanup(struct mlx5_eswitch *esw); -int esw_offloads_init(struct mlx5_eswitch *esw); +void esw_offloads_disable(struct mlx5_eswitch *esw); +int esw_offloads_enable(struct mlx5_eswitch *esw); void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); int esw_offloads_init_reps(struct mlx5_eswitch *esw); void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, @@ -251,6 +261,8 @@ void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, struct mlx5_vport *vport); void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, + u32 rate_mbps); /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); @@ -513,6 +525,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); (vport) = &(esw)->vports[i], \ (i) < (esw)->total_vports; (i)++) +#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \ + for ((i) = (esw)->total_vports - 1; \ + (vport) = &(esw)->vports[i], \ + (i) >= MLX5_VPORT_PF; (i)--) + #define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \ for ((i) = MLX5_VPORT_FIRST_VF; \ (vport) = &(esw)->vports[(i)], \ @@ -574,6 +591,11 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num); void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs); int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data); +void +mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, + enum mlx5_eswitch_vport_event enabled_events); +void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw); + #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 089ae4d48a82..8fe5dddf18d0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -233,7 +233,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (IS_ERR(rule)) goto err_add_rule; else - esw->offloads.num_flows++; + atomic64_inc(&esw->offloads.num_flows); return rule; @@ -298,7 +298,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, if (IS_ERR(rule)) goto add_err; - esw->offloads.num_flows++; + atomic64_inc(&esw->offloads.num_flows); return rule; add_err: @@ -326,7 +326,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl); } - esw->offloads.num_flows--; + atomic64_dec(&esw->offloads.num_flows); if (fwd_rule) { esw_put_prio_table(esw, attr->chain, attr->prio, 1); @@ -442,9 +442,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && !attr->dest_chain); + mutex_lock(&esw->state_lock); + err = esw_add_vlan_action_check(attr, push, pop, fwd); if (err) - return err; + goto unlock; attr->vlan_handled = false; @@ -457,11 +459,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, attr->vlan_handled = true; } - return 0; + goto unlock; } if (!push && !pop) - return 0; + goto unlock; if (!(offloads->vlan_push_pop_refcount)) { /* it's the 1st vlan rule, apply global vlan pop policy */ @@ -486,6 +488,8 @@ 
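/* mlx5_eswitch_add_vlan_action()/del_vlan_action() above now bracket
 * their bodies with esw->state_lock because vlan_push_pop_refcount and
 * the per-vport vlan_refcount are shared first-user/last-user
 * counters: the first rule needing VLAN pop installs the global pop
 * policy and the last one out removes it. Shape of the idiom, with an
 * illustrative apply_global_policy():
 *
 *	mutex_lock(&esw->state_lock);
 *	if (!refcnt++)			// 0 -> 1: first user sets policy
 *		err = apply_global_policy(esw, true);
 *	mutex_unlock(&esw->state_lock);
 *	...
 *	mutex_lock(&esw->state_lock);
 *	if (!--refcnt)			// 1 -> 0: last user clears policy
 *		err = apply_global_policy(esw, false);
 *	mutex_unlock(&esw->state_lock);
 */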
skip_set_push: out: if (!err) attr->vlan_handled = true; +unlock: + mutex_unlock(&esw->state_lock); return err; } @@ -508,6 +512,8 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); + mutex_lock(&esw->state_lock); + vport = esw_vlan_action_get_vport(attr, push, pop); if (!push && !pop && fwd) { @@ -515,7 +521,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) vport->vlan_refcount--; - return 0; + goto out; } if (push) { @@ -533,12 +539,13 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, skip_unset_push: offloads->vlan_push_pop_refcount--; if (offloads->vlan_push_pop_refcount) - return 0; + goto out; /* no more vlan rules, stop global vlan pop policy */ err = esw_set_global_vlan_pop(esw, 0); out: + mutex_unlock(&esw->state_lock); return err; } @@ -587,38 +594,15 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule) mlx5_del_flow_rules(rule); } -static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw) +static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable) { u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {}; u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; u8 fdb_to_vport_reg_c_id; int err; - err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport, - out, sizeof(out)); - if (err) - return err; - - fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out, - esw_vport_context.fdb_to_vport_reg_c_id); - - fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0; - MLX5_SET(modify_esw_vport_context_in, in, - esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id); - - MLX5_SET(modify_esw_vport_context_in, in, - field_select.fdb_to_vport_reg_c_id, 1); - - return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport, - in, sizeof(in)); -} - -static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw) -{ - u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {}; - u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; - u8 fdb_to_vport_reg_c_id; - int err; + if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) + return 0; err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport, out, sizeof(out)); @@ -628,7 +612,10 @@ static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw) fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out, esw_vport_context.fdb_to_vport_reg_c_id); - fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0; + if (enable) + fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0; + else + fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0; MLX5_SET(modify_esw_vport_context_in, in, esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id); @@ -2124,7 +2111,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type return NOTIFY_OK; } -int esw_offloads_init(struct mlx5_eswitch *esw) +int esw_offloads_enable(struct mlx5_eswitch *esw) { int err; @@ -2138,11 +2125,11 @@ int esw_offloads_init(struct mlx5_eswitch *esw) if (err) return err; - if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { - err = mlx5_eswitch_enable_passing_vport_metadata(esw); - if (err) - goto err_vport_metadata; - } + err = esw_set_passing_vport_metadata(esw, true); + if (err) + goto err_vport_metadata; + + mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); err = 
esw_offloads_load_all_reps(esw); if (err) @@ -2156,8 +2143,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw) return 0; err_reps: - if (mlx5_eswitch_vport_match_metadata_enabled(esw)) - mlx5_eswitch_disable_passing_vport_metadata(esw); + mlx5_eswitch_disable_pf_vf_vports(esw); + esw_set_passing_vport_metadata(esw, false); err_vport_metadata: esw_offloads_steering_cleanup(esw); return err; @@ -2182,13 +2169,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, return err; } -void esw_offloads_cleanup(struct mlx5_eswitch *esw) +void esw_offloads_disable(struct mlx5_eswitch *esw) { mlx5_rdma_disable_roce(esw->dev); esw_offloads_devcom_cleanup(esw); esw_offloads_unload_all_reps(esw); - if (mlx5_eswitch_vport_match_metadata_enabled(esw)) - mlx5_eswitch_disable_passing_vport_metadata(esw); + mlx5_eswitch_disable_pf_vf_vports(esw); + esw_set_passing_vport_metadata(esw, false); esw_offloads_steering_cleanup(esw); esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; } @@ -2349,7 +2336,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, break; } - if (esw->offloads.num_flows > 0) { + if (atomic64_read(&esw->offloads.num_flows) > 0) { NL_SET_ERR_MSG_MOD(extack, "Can't set inline mode when flows are configured"); return -EOPNOTSUPP; @@ -2459,7 +2446,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, if (esw->offloads.encap == encap) return 0; - if (esw->offloads.num_flows > 0) { + if (atomic64_read(&esw->offloads.num_flows) > 0) { NL_SET_ERR_MSG_MOD(extack, "Can't set encapsulation when flows are configured"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 7ac1249eadc3..b84a225bbe86 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -566,7 +566,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id) +int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, + enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask, + u32 *id) { u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0}; u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0}; @@ -574,6 +576,7 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id) MLX5_SET(alloc_flow_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_FLOW_COUNTER); + MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (!err) @@ -581,6 +584,11 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id) return err; } +int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id) +{ + return mlx5_cmd_fc_bulk_alloc(dev, 0, id); +} + int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id) { u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0}; @@ -615,67 +623,24 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id, return 0; } -struct mlx5_cmd_fc_bulk { - u32 id; - int num; - int outlen; - u32 out[0]; -}; - -struct mlx5_cmd_fc_bulk * -mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num) -{ - struct mlx5_cmd_fc_bulk *b; - int outlen = - MLX5_ST_SZ_BYTES(query_flow_counter_out) + - MLX5_ST_SZ_BYTES(traffic_counter) * num; - - b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL); - if (!b) - return NULL; - - b->id = id; - b->num = num; - b->outlen = outlen; - - return b; -} - -void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b) +int 
mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len) { - kfree(b); + return MLX5_ST_SZ_BYTES(query_flow_counter_out) + + MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len; } -int -mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b) +int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len, + u32 *out) { + int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len); u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0}; MLX5_SET(query_flow_counter_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_COUNTER); MLX5_SET(query_flow_counter_in, in, op_mod, 0); - MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id); - MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num); - return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen); -} - -void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, - struct mlx5_cmd_fc_bulk *b, u32 id, - u64 *packets, u64 *bytes) -{ - int index = id - b->id; - void *stats; - - if (index < 0 || index >= b->num) { - mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n", - id, b->id, b->id + b->num - 1); - return; - } - - stats = MLX5_ADDR_OF(query_flow_counter_out, b->out, - flow_statistics[index]); - *packets = MLX5_GET64(traffic_counter, stats, packets); - *bytes = MLX5_GET64(traffic_counter, stats, octets); + MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id); + MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index e340f9af2f5a..bc4606306009 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -78,20 +78,16 @@ struct mlx5_flow_cmds { }; int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id); +int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, + enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask, + u32 *id); int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id); int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id, u64 *packets, u64 *bytes); -struct mlx5_cmd_fc_bulk; - -struct mlx5_cmd_fc_bulk * -mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num); -void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b); -int -mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b); -void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, - struct mlx5_cmd_fc_bulk *b, u32 id, - u64 *packets, u64 *bytes); +int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len); +int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len, + u32 *out); const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 1834d9f3aa1c..1804cf3c3814 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -40,6 +40,8 @@ #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) /* Max number of counters to query in bulk read is 32K */ #define MLX5_SW_MAX_COUNTERS_BULK BIT(15) +#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18) +#define MLX5_FC_POOL_USED_BUFF_RATIO 10 struct mlx5_fc_cache { u64 packets; @@ -58,12 +60,18 @@ struct mlx5_fc { u64 lastpackets; u64 lastbytes; + struct mlx5_fc_bulk *bulk; u32 id; bool aging; struct mlx5_fc_cache cache 
____cacheline_aligned_in_smp; }; +static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev); +static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool); +static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool); +static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc); + /* locking scheme: * * It is the responsibility of the user to prevent concurrent calls or bad @@ -75,7 +83,7 @@ struct mlx5_fc { * access to counter list: * - create (user context) * - mlx5_fc_create() only adds to an addlist to be used by - * mlx5_fc_stats_query_work(). addlist is a lockless single linked list + * mlx5_fc_stats_work(). addlist is a lockless single linked list * that doesn't require any additional synchronization when adding single * node. * - spawn thread to do the actual destroy @@ -136,81 +144,87 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev, spin_unlock(&fc_stats->counters_idr_lock); } -/* The function returns the last counter that was queried so the caller - * function can continue calling it till all counters are queried. - */ -static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev, - struct mlx5_fc *first, - u32 last_id) +static int get_max_bulk_query_len(struct mlx5_core_dev *dev) { - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - struct mlx5_fc *counter = NULL; - struct mlx5_cmd_fc_bulk *b; - bool more = false; - u32 afirst_id; - int num; - int err; + return min_t(int, MLX5_SW_MAX_COUNTERS_BULK, + (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); +} - int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK, - (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); +static void update_counter_cache(int index, u32 *bulk_raw_data, + struct mlx5_fc_cache *cache) +{ + void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data, + flow_statistics[index]); + u64 packets = MLX5_GET64(traffic_counter, stats, packets); + u64 bytes = MLX5_GET64(traffic_counter, stats, octets); - /* first id must be aligned to 4 when using bulk query */ - afirst_id = first->id & ~0x3; + if (cache->packets == packets) + return; - /* number of counters to query inc. the last counter */ - num = ALIGN(last_id - afirst_id + 1, 4); - if (num > max_bulk) { - num = max_bulk; - last_id = afirst_id + num - 1; - } + cache->packets = packets; + cache->bytes = bytes; + cache->lastuse = jiffies; +} - b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num); - if (!b) { - mlx5_core_err(dev, "Error allocating resources for bulk query\n"); - return NULL; - } +static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev, + struct mlx5_fc *first, + u32 last_id) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + bool query_more_counters = (first->id <= last_id); + int max_bulk_len = get_max_bulk_query_len(dev); + u32 *data = fc_stats->bulk_query_out; + struct mlx5_fc *counter = first; + u32 bulk_base_id; + int bulk_len; + int err; - err = mlx5_cmd_fc_bulk_query(dev, b); - if (err) { - mlx5_core_err(dev, "Error doing bulk query: %d\n", err); - goto out; - } + while (query_more_counters) { + /* first id must be aligned to 4 when using bulk query */ + bulk_base_id = counter->id & ~0x3; - counter = first; - list_for_each_entry_from(counter, &fc_stats->counters, list) { - struct mlx5_fc_cache *c = &counter->cache; - u64 packets; - u64 bytes; + /* number of counters to query inc. 
the last counter */ + bulk_len = min_t(int, max_bulk_len, + ALIGN(last_id - bulk_base_id + 1, 4)); - if (counter->id > last_id) { - more = true; - break; + err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len, + data); + if (err) { + mlx5_core_err(dev, "Error doing bulk query: %d\n", err); + return; } + query_more_counters = false; - mlx5_cmd_fc_bulk_get(dev, b, - counter->id, &packets, &bytes); + list_for_each_entry_from(counter, &fc_stats->counters, list) { + int counter_index = counter->id - bulk_base_id; + struct mlx5_fc_cache *cache = &counter->cache; - if (c->packets == packets) - continue; + if (counter->id >= bulk_base_id + bulk_len) { + query_more_counters = true; + break; + } - c->packets = packets; - c->bytes = bytes; - c->lastuse = jiffies; + update_counter_cache(counter_index, data, cache); + } } - -out: - mlx5_cmd_fc_bulk_free(b); - - return more ? counter : NULL; } -static void mlx5_free_fc(struct mlx5_core_dev *dev, - struct mlx5_fc *counter) +static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter) { mlx5_cmd_fc_free(dev, counter->id); kfree(counter); } +static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + if (counter->bulk) + mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter); + else + mlx5_fc_free(dev, counter); +} + static void mlx5_fc_stats_work(struct work_struct *work) { struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, @@ -234,7 +248,7 @@ static void mlx5_fc_stats_work(struct work_struct *work) llist_for_each_entry_safe(counter, tmp, dellist, dellist) { mlx5_fc_stats_remove(dev, counter); - mlx5_free_fc(dev, counter); + mlx5_fc_release(dev, counter); } if (time_before(now, fc_stats->next_query) || @@ -244,32 +258,62 @@ static void mlx5_fc_stats_work(struct work_struct *work) counter = list_first_entry(&fc_stats->counters, struct mlx5_fc, list); - while (counter) - counter = mlx5_fc_stats_query(dev, counter, last->id); + if (counter) + mlx5_fc_stats_query_counter_range(dev, counter, last->id); fc_stats->next_query = now + fc_stats->sampling_interval; } -struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) +static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev) { - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; struct mlx5_fc *counter; int err; counter = kzalloc(sizeof(*counter), GFP_KERNEL); if (!counter) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(&counter->list); err = mlx5_cmd_fc_alloc(dev, &counter->id); - if (err) - goto err_out; + if (err) { + kfree(counter); + return ERR_PTR(err); + } + + return counter; +} + +static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc *counter; + + if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) { + counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool); + if (!IS_ERR(counter)) + return counter; + } + + return mlx5_fc_single_alloc(dev); +} + +struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) +{ + struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging); + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + int err; + + if (IS_ERR(counter)) + return counter; + + INIT_LIST_HEAD(&counter->list); + counter->aging = aging; if (aging) { u32 id = counter->id; counter->cache.lastuse = jiffies; - counter->aging = true; + counter->lastbytes = counter->cache.bytes; + counter->lastpackets = counter->cache.packets; 
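The rewritten query path above replaces the old one-bulk-per-call helper with a single loop: each firmware query starts at a 4-aligned base ID, covers at most the device's maximum bulk length, and the sorted counter list is consumed window by window. A compact, compilable model of that windowing (MAX_BULK and the IDs are invented for illustration):

#include <stdio.h>

#define MAX_BULK 8	/* stand-in for the log_max_flow_counter_bulk cap */

static void query_range(const int *ids, int n)
{
	int i = 0;

	while (i < n) {
		int base = ids[i] & ~0x3;	/* 4-aligned base ID */
		int len = ids[n - 1] - base + 1;

		len = (len + 3) & ~0x3;		/* ALIGN(len, 4) */
		if (len > MAX_BULK)
			len = MAX_BULK;

		printf("bulk query: base=%d len=%d\n", base, len);

		/* consume every counter that falls inside this window */
		while (i < n && ids[i] < base + len)
			i++;
	}
}

int main(void)
{
	int ids[] = { 2, 3, 9, 10, 11, 25 };

	query_range(ids, 6);	/* three queries: base 0, 8 and 24 */
	return 0;
}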
idr_preload(GFP_KERNEL); spin_lock(&fc_stats->counters_idr_lock); @@ -290,10 +334,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) return counter; err_out_alloc: - mlx5_cmd_fc_free(dev, counter->id); -err_out: - kfree(counter); - + mlx5_fc_release(dev, counter); return ERR_PTR(err); } EXPORT_SYMBOL(mlx5_fc_create); @@ -317,13 +358,15 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter) return; } - mlx5_free_fc(dev, counter); + mlx5_fc_release(dev, counter); } EXPORT_SYMBOL(mlx5_fc_destroy); int mlx5_init_fc_stats(struct mlx5_core_dev *dev) { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + int max_bulk_len; + int max_out_len; spin_lock_init(&fc_stats->counters_idr_lock); idr_init(&fc_stats->counters_idr); @@ -331,14 +374,25 @@ init_llist_head(&fc_stats->addlist); init_llist_head(&fc_stats->dellist); + max_bulk_len = get_max_bulk_query_len(dev); + max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len); + fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL); + if (!fc_stats->bulk_query_out) + return -ENOMEM; + fc_stats->wq = create_singlethread_workqueue("mlx5_fc"); if (!fc_stats->wq) - return -ENOMEM; + goto err_wq_create; fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD; INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work); + mlx5_fc_pool_init(&fc_stats->fc_pool, dev); return 0; + +err_wq_create: + kfree(fc_stats->bulk_query_out); + return -ENOMEM; } void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) @@ -348,18 +402,21 @@ struct mlx5_fc *counter; struct mlx5_fc *tmp; + mlx5_fc_pool_cleanup(&fc_stats->fc_pool); cancel_delayed_work_sync(&dev->priv.fc_stats.work); destroy_workqueue(dev->priv.fc_stats.wq); dev->priv.fc_stats.wq = NULL; + kfree(fc_stats->bulk_query_out); + idr_destroy(&fc_stats->counters_idr); tmplist = llist_del_all(&fc_stats->addlist); llist_for_each_entry_safe(counter, tmp, tmplist, addlist) - mlx5_free_fc(dev, counter); + mlx5_fc_release(dev, counter); list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list) - mlx5_free_fc(dev, counter); + mlx5_fc_release(dev, counter); } int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, @@ -406,3 +463,243 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev, fc_stats->sampling_interval = min_t(unsigned long, interval, fc_stats->sampling_interval); } + +/* Flow counter bulks */ + +struct mlx5_fc_bulk { + struct list_head pool_list; + u32 base_id; + int bulk_len; + unsigned long *bitmask; + struct mlx5_fc fcs[0]; +}; + +static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk, + u32 id) +{ + counter->bulk = bulk; + counter->id = id; +} + +static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk) +{ + return bitmap_weight(bulk->bitmask, bulk->bulk_len); +} + +static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev) +{ + enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask; + struct mlx5_fc_bulk *bulk; + int err = -ENOMEM; + int bulk_len; + u32 base_id; + int i; + + alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc); + bulk_len = alloc_bitmask > 0 ? 
MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1; + + bulk = kzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc), + GFP_KERNEL); + if (!bulk) + goto err_alloc_bulk; + + bulk->bitmask = kcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long), + GFP_KERNEL); + if (!bulk->bitmask) + goto err_alloc_bitmask; + + err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id); + if (err) + goto err_mlx5_cmd_bulk_alloc; + + bulk->base_id = base_id; + bulk->bulk_len = bulk_len; + for (i = 0; i < bulk_len; i++) { + mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i); + set_bit(i, bulk->bitmask); + } + + return bulk; + +err_mlx5_cmd_bulk_alloc: + kfree(bulk->bitmask); +err_alloc_bitmask: + kfree(bulk); +err_alloc_bulk: + return ERR_PTR(err); +} + +static int +mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk) +{ + if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) { + mlx5_core_err(dev, "Freeing bulk before all counters were released\n"); + return -EBUSY; + } + + mlx5_cmd_fc_free(dev, bulk->base_id); + kfree(bulk->bitmask); + kfree(bulk); + + return 0; +} + +static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk) +{ + int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len); + + if (free_fc_index >= bulk->bulk_len) + return ERR_PTR(-ENOSPC); + + clear_bit(free_fc_index, bulk->bitmask); + return &bulk->fcs[free_fc_index]; +} + +static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc) +{ + int fc_index = fc->id - bulk->base_id; + + if (test_bit(fc_index, bulk->bitmask)) + return -EINVAL; + + set_bit(fc_index, bulk->bitmask); + return 0; +} + +/* Flow counters pool API */ + +static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev) +{ + fc_pool->dev = dev; + mutex_init(&fc_pool->pool_lock); + INIT_LIST_HEAD(&fc_pool->fully_used); + INIT_LIST_HEAD(&fc_pool->partially_used); + INIT_LIST_HEAD(&fc_pool->unused); + fc_pool->available_fcs = 0; + fc_pool->used_fcs = 0; + fc_pool->threshold = 0; +} + +static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool) +{ + struct mlx5_core_dev *dev = fc_pool->dev; + struct mlx5_fc_bulk *bulk; + struct mlx5_fc_bulk *tmp; + + list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list) + mlx5_fc_bulk_destroy(dev, bulk); + list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list) + mlx5_fc_bulk_destroy(dev, bulk); + list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list) + mlx5_fc_bulk_destroy(dev, bulk); +} + +static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool) +{ + fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD, + fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO); +} + +static struct mlx5_fc_bulk * +mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool) +{ + struct mlx5_core_dev *dev = fc_pool->dev; + struct mlx5_fc_bulk *new_bulk; + + new_bulk = mlx5_fc_bulk_create(dev); + if (!IS_ERR(new_bulk)) + fc_pool->available_fcs += new_bulk->bulk_len; + mlx5_fc_pool_update_threshold(fc_pool); + return new_bulk; +} + +static void +mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk) +{ + struct mlx5_core_dev *dev = fc_pool->dev; + + fc_pool->available_fcs -= bulk->bulk_len; + mlx5_fc_bulk_destroy(dev, bulk); + mlx5_fc_pool_update_threshold(fc_pool); +} + +static struct mlx5_fc * +mlx5_fc_pool_acquire_from_list(struct list_head *src_list, + struct list_head *next_list, + bool move_non_full_bulk) +{ + struct mlx5_fc_bulk *bulk; + struct mlx5_fc *fc; + + if (list_empty(src_list)) + 
return ERR_PTR(-ENODATA); + + bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list); + fc = mlx5_fc_bulk_acquire_fc(bulk); + if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0) + list_move(&bulk->pool_list, next_list); + return fc; +} + +static struct mlx5_fc * +mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool) +{ + struct mlx5_fc_bulk *new_bulk; + struct mlx5_fc *fc; + + mutex_lock(&fc_pool->pool_lock); + + fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used, + &fc_pool->fully_used, false); + if (IS_ERR(fc)) + fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused, + &fc_pool->partially_used, + true); + if (IS_ERR(fc)) { + new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool); + if (IS_ERR(new_bulk)) { + fc = ERR_CAST(new_bulk); + goto out; + } + fc = mlx5_fc_bulk_acquire_fc(new_bulk); + list_add(&new_bulk->pool_list, &fc_pool->partially_used); + } + fc_pool->available_fcs--; + fc_pool->used_fcs++; + +out: + mutex_unlock(&fc_pool->pool_lock); + return fc; +} + +static void +mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc) +{ + struct mlx5_core_dev *dev = fc_pool->dev; + struct mlx5_fc_bulk *bulk = fc->bulk; + int bulk_free_fcs_amount; + + mutex_lock(&fc_pool->pool_lock); + + if (mlx5_fc_bulk_release_fc(bulk, fc)) { + mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n"); + goto unlock; + } + + fc_pool->available_fcs++; + fc_pool->used_fcs--; + + bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk); + if (bulk_free_fcs_amount == 1) + list_move_tail(&bulk->pool_list, &fc_pool->partially_used); + if (bulk_free_fcs_amount == bulk->bulk_len) { + list_del(&bulk->pool_list); + if (fc_pool->available_fcs > fc_pool->threshold) + mlx5_fc_pool_free_bulk(fc_pool, bulk); + else + list_add(&bulk->pool_list, &fc_pool->unused); + } + +unlock: + mutex_unlock(&fc_pool->pool_lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index b15b27a497fc..fa0e991f1983 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1217,8 +1217,10 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) { int err = 0; - if (cleanup) + if (cleanup) { + mlx5_unregister_device(dev); mlx5_drain_health_wq(dev); + } mutex_lock(&dev->intf_state_mutex); if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { @@ -1369,7 +1371,6 @@ static void remove_one(struct pci_dev *pdev) mlx5_crdump_disable(dev); mlx5_devlink_unregister(devlink); - mlx5_unregister_device(dev); if (mlx5_unload_one(dev, true)) { mlx5_core_err(dev, "mlx5_unload_one failed\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index c912d82ca64b..30f7848a6f88 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -122,12 +122,13 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline_mode) { switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) { + case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: + if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode)) + break; + /* fall through */ case MLX5_CAP_INLINE_MODE_L2: *min_inline_mode = MLX5_INLINE_MODE_L2; break; - case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: - mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode); - break; case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: *min_inline_mode = MLX5_INLINE_MODE_NONE; break; diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index eda9c23e87b2..5a8e94c0a95a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1625,7 +1625,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, } flow_block_cb_incref(block_cb); err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block, - mlxsw_sp_port, ingress); + mlxsw_sp_port, ingress, f->extack); if (err) goto err_block_bind; @@ -5026,6 +5026,26 @@ static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) return 0; } +static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + struct devlink_resource_size_params kvd_size_params; + u32 kvd_size; + + if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) + return -EIO; + + kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); + devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, + MLXSW_SP_KVD_GRANULARITY, + DEVLINK_RESOURCE_UNIT_ENTRY); + + return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, + kvd_size, MLXSW_SP_RESOURCE_KVD, + DEVLINK_RESOURCE_ID_PARENT_TOP, + &kvd_size_params); +} + static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) { return mlxsw_sp1_resources_kvd_register(mlxsw_core); @@ -5033,7 +5053,7 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) { - return 0; + return mlxsw_sp2_resources_kvd_register(mlxsw_core); } static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 6664119fb0c8..db17ba35ec84 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -623,7 +623,8 @@ struct mlxsw_sp_acl_rule_info { unsigned int priority; struct mlxsw_afk_element_values values; struct mlxsw_afa_block *act_block; - u8 action_created:1; + u8 action_created:1, + egress_bind_blocker:1; unsigned int counter_index; }; @@ -642,6 +643,7 @@ struct mlxsw_sp_acl_block { struct mlxsw_sp *mlxsw_sp; unsigned int rule_count; unsigned int disable_count; + unsigned int egress_blocker_rule_count; struct net *net; }; @@ -657,7 +659,8 @@ void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block); int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, struct mlxsw_sp_port *mlxsw_sp_port, - bool ingress); + bool ingress, + struct netlink_ext_ack *extack); int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, struct mlxsw_sp_port *mlxsw_sp_port, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index e8ac90564dbe..1aaab8446270 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -239,7 +239,8 @@ mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block, int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, struct mlxsw_sp_port *mlxsw_sp_port, - bool ingress) + bool ingress, + struct netlink_ext_ack *extack) { struct mlxsw_sp_acl_block_binding *binding; int err; @@ -247,6 +248,11 @@ int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp, if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress))) return 
-EEXIST; + if (!ingress && block->egress_blocker_rule_count) { + NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules"); + return -EOPNOTSUPP; + } + binding = kzalloc(sizeof(*binding), GFP_KERNEL); if (!binding) return -ENOMEM; @@ -672,6 +678,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl_block *block = ruleset->ht_key.block; int err; err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei); @@ -689,14 +696,14 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, * one, to be directly bound to device. The rest of the * rulesets are bound by "Goto action set". */ - err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, - ruleset->ht_key.block); + err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block); if (err) goto err_ruleset_block_bind; } list_add_tail(&rule->list, &mlxsw_sp->acl->rules); - ruleset->ht_key.block->rule_count++; + block->rule_count++; + block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker; return 0; err_ruleset_block_bind: @@ -712,7 +719,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl_block *block = ruleset->ht_key.block; + block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker; ruleset->ht_key.block->rule_count--; list_del(&rule->list); if (!ruleset->ht_key.chain_index && diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 202e9a246019..0ad1a24abfc6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -78,6 +78,16 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid; u16 fid_index; + if (mlxsw_sp_acl_block_is_egress_bound(block)) { + NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress"); + return -EOPNOTSUPP; + } + + /* Forbid block with this rulei to be bound + * to egress in future. + */ + rulei->egress_bind_blocker = 1; + fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp); fid_index = mlxsw_sp_fid_index(fid); err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei, @@ -257,6 +267,12 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp, flow_rule_match_tcp(rule, &match); + if (match.mask->flags & htons(0x0E00)) { + NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits"); + dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n"); + return -EINVAL; + } + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS, ntohs(match.key->flags), ntohs(match.mask->flags)); @@ -390,6 +406,12 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress"); return -EOPNOTSUPP; } + + /* Forbid block with this rulei to be bound + * to egress in future. 
+ */ + rulei->egress_bind_blocker = 1; + if (match.mask->vlan_id != 0) mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VID, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index e618be7ce6c6..a330b369e899 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2943,7 +2943,7 @@ static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed) val = nh_grp->count; for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; - val ^= nh->ifindex; + val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed); } return jhash(&val, sizeof(val), seed); default: @@ -2961,7 +2961,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed) list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev; - val ^= dev->ifindex; + val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed); } return jhash(&val, sizeof(val), seed); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 13e6bf13ac4d..15a8be6bad27 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1434,7 +1434,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, } static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx, - const struct skb_frag_struct *fragment, + const skb_frag_t *fragment, unsigned int frame_length) { /* called only from within lan743x_tx_xmit_frame @@ -1607,9 +1607,8 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, goto finish; for (j = 0; j < nr_frags; j++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]); - frag = &(skb_shinfo(skb)->frags[j]); if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) { /* upon error no need to call * lan743x_tx_frame_end diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index d8b7fba96d58..61fe92719982 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1286,7 +1286,7 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb) { u8 *va; struct vlan_ethhdr *veh; - struct skb_frag_struct *frag; + skb_frag_t *frag; __wsum vsum; va = addr; @@ -1306,8 +1306,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb) skb->len -= VLAN_HLEN; skb->data_len -= VLAN_HLEN; frag = skb_shinfo(skb)->frags; - frag->page_offset += VLAN_HLEN; - skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN); + skb_frag_off_add(frag, VLAN_HLEN); + skb_frag_size_sub(frag, VLAN_HLEN); } } @@ -1318,7 +1318,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) { struct myri10ge_priv *mgp = ss->mgp; struct sk_buff *skb; - struct skb_frag_struct *rx_frags; + skb_frag_t *rx_frags; struct myri10ge_rx_buf *rx; int i, idx, remainder, bytes; struct pci_dev *pdev = mgp->pdev; @@ -1351,7 +1351,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) return 0; } rx_frags = skb_shinfo(skb)->frags; - /* Fill skb_frag_struct(s) with data from our receive */ + /* Fill skb_frag_t(s) with data from our receive */ for (i = 0, remainder = len; remainder > 0; i++) { myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes); skb_fill_page_desc(skb, i, rx->info[idx].page, @@ -1364,8 +1364,8 @@ 
myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) } /* remove padding */ - rx_frags[0].page_offset += MXGEFW_PAD; - rx_frags[0].size -= MXGEFW_PAD; + skb_frag_off_add(&rx_frags[0], MXGEFW_PAD); + skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD); len -= MXGEFW_PAD; skb->len = len; @@ -2628,7 +2628,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb, struct myri10ge_slice_state *ss; struct mcp_kreq_ether_send *req; struct myri10ge_tx_buf *tx; - struct skb_frag_struct *frag; + skb_frag_t *frag; struct netdev_queue *netdev_queue; dma_addr_t bus; u32 low; @@ -3037,7 +3037,6 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr) static int myri10ge_change_mtu(struct net_device *dev, int new_mtu) { struct myri10ge_priv *mgp = netdev_priv(dev); - int error = 0; netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu); if (mgp->running) { @@ -3049,7 +3048,7 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu) } else dev->mtu = new_mtu; - return error; + return 0; } /* diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 5a54fe848de4..1b019fdfcd97 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -2,10 +2,12 @@ /* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> +#include <linux/mpls.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_csum.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> +#include <net/tc_act/tc_mpls.h> #include <net/tc_act/tc_pedit.h> #include <net/tc_act/tc_vlan.h> #include <net/tc_act/tc_tunnel_key.h> @@ -25,6 +27,80 @@ NFP_FL_TUNNEL_KEY | \ NFP_FL_TUNNEL_GENEVE_OPT) +static int +nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + size_t act_size = sizeof(struct nfp_fl_push_mpls); + u32 mpls_lse = 0; + + push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS; + push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ; + + /* BOS is optional in the TC action but required for offload. */ + if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) { + mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT; + } else { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push"); + return -EOPNOTSUPP; + } + + /* Leave MPLS TC as a default value of 0 if not explicitly set. */ + if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET) + mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT; + + /* Proto, label and TTL are enforced and verified for MPLS push. 
*/ + mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT; + mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT; + push_mpls->ethtype = act->mpls_push.proto; + push_mpls->lse = cpu_to_be32(mpls_lse); + + return 0; +} + +static void +nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls, + const struct flow_action_entry *act) +{ + size_t act_size = sizeof(struct nfp_fl_pop_mpls); + + pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS; + pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ; + pop_mpls->ethtype = act->mpls_pop.proto; +} + +static void +nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls, + const struct flow_action_entry *act) +{ + size_t act_size = sizeof(struct nfp_fl_set_mpls); + u32 mpls_lse = 0, mpls_mask = 0; + + set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS; + set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ; + + if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) { + mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT; + mpls_mask |= MPLS_LS_LABEL_MASK; + } + if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) { + mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT; + mpls_mask |= MPLS_LS_TC_MASK; + } + if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) { + mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT; + mpls_mask |= MPLS_LS_S_MASK; + } + if (act->mpls_mangle.ttl) { + mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT; + mpls_mask |= MPLS_LS_TTL_MASK; + } + + set_mpls->lse = cpu_to_be32(mpls_lse); + set_mpls->lse_mask = cpu_to_be32(mpls_mask); +} + static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan) { size_t act_size = sizeof(struct nfp_fl_pop_vlan); @@ -97,7 +173,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, struct nfp_fl_payload *nfp_flow, bool last, struct net_device *in_dev, enum nfp_flower_tun_type tun_type, int *tun_out_cnt, - struct netlink_ext_ack *extack) + bool pkt_host, struct netlink_ext_ack *extack) { size_t act_size = sizeof(struct nfp_fl_output); struct nfp_flower_priv *priv = app->priv; @@ -142,6 +218,20 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, return gid; } output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid); + } else if (nfp_flower_internal_port_can_offload(app, out_dev)) { + if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware"); + return -EOPNOTSUPP; + } + + if (nfp_flow->pre_tun_rule.dev || !pkt_host) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action"); + return -EOPNOTSUPP; + } + + nfp_flow->pre_tun_rule.dev = out_dev; + + return 0; } else { /* Set action output parameters. 
*/ output->flags = cpu_to_be16(tmp_flags); @@ -809,7 +899,7 @@ nfp_flower_output_action(struct nfp_app *app, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, bool last, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, - int *out_cnt, u32 *csum_updated, + int *out_cnt, u32 *csum_updated, bool pkt_host, struct netlink_ext_ack *extack) { struct nfp_flower_priv *priv = app->priv; @@ -831,7 +921,7 @@ nfp_flower_output_action(struct nfp_app *app, output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len]; err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type, - tun_out_cnt, extack); + tun_out_cnt, pkt_host, extack); if (err) return err; @@ -863,30 +953,37 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, struct net_device *netdev, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, int *out_cnt, u32 *csum_updated, - struct nfp_flower_pedit_acts *set_act, + struct nfp_flower_pedit_acts *set_act, bool *pkt_host, struct netlink_ext_ack *extack, int act_idx) { struct nfp_fl_set_ipv4_tun *set_tun; struct nfp_fl_pre_tunnel *pre_tun; struct nfp_fl_push_vlan *psh_v; + struct nfp_fl_push_mpls *psh_m; struct nfp_fl_pop_vlan *pop_v; + struct nfp_fl_pop_mpls *pop_m; + struct nfp_fl_set_mpls *set_m; int err; switch (act->id) { case FLOW_ACTION_DROP: nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP); break; + case FLOW_ACTION_REDIRECT_INGRESS: case FLOW_ACTION_REDIRECT: err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev, true, tun_type, tun_out_cnt, - out_cnt, csum_updated, extack); + out_cnt, csum_updated, *pkt_host, + extack); if (err) return err; break; + case FLOW_ACTION_MIRRED_INGRESS: case FLOW_ACTION_MIRRED: err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev, false, tun_type, tun_out_cnt, - out_cnt, csum_updated, extack); + out_cnt, csum_updated, *pkt_host, + extack); if (err) return err; break; @@ -975,6 +1072,54 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, */ *csum_updated &= ~act->csum_flags; break; + case FLOW_ACTION_MPLS_PUSH: + if (*a_len + + sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS"); + return -EOPNOTSUPP; + } + + psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len]; + nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); + + err = nfp_fl_push_mpls(psh_m, act, extack); + if (err) + return err; + *a_len += sizeof(struct nfp_fl_push_mpls); + break; + case FLOW_ACTION_MPLS_POP: + if (*a_len + + sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS"); + return -EOPNOTSUPP; + } + + pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len]; + nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); + + nfp_fl_pop_mpls(pop_m, act); + *a_len += sizeof(struct nfp_fl_pop_mpls); + break; + case FLOW_ACTION_MPLS_MANGLE: + if (*a_len + + sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS"); + return -EOPNOTSUPP; + } + + set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len]; + nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); + + nfp_fl_set_mpls(set_m, act); + *a_len += sizeof(struct nfp_fl_set_mpls); + break; + case FLOW_ACTION_PTYPE: + /* TC ptype skbedit sets PACKET_HOST for ingress redirect. 
*/ + if (act->ptype != PACKET_HOST) + return -EOPNOTSUPP; + + *pkt_host = true; + break; default: /* Currently we do not handle any other actions. */ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list"); @@ -1030,6 +1175,7 @@ int nfp_flower_compile_action(struct nfp_app *app, struct nfp_flower_pedit_acts set_act; enum nfp_flower_tun_type tun_type; struct flow_action_entry *act; + bool pkt_host = false; u32 csum_updated = 0; memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); @@ -1046,7 +1192,7 @@ int nfp_flower_compile_action(struct nfp_app *app, err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len, netdev, &tun_type, &tun_out_cnt, &out_cnt, &csum_updated, - &set_act, extack, i); + &set_act, &pkt_host, extack, i); if (err) return err; act_cnt++; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 0f1706ae5bfc..7eb2ec8969c3 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -68,8 +68,11 @@ #define NFP_FL_ACTION_OPCODE_OUTPUT 0 #define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1 #define NFP_FL_ACTION_OPCODE_POP_VLAN 2 +#define NFP_FL_ACTION_OPCODE_PUSH_MPLS 3 +#define NFP_FL_ACTION_OPCODE_POP_MPLS 4 #define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6 #define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7 +#define NFP_FL_ACTION_OPCODE_SET_MPLS 8 #define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9 #define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10 #define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11 @@ -217,7 +220,8 @@ struct nfp_fl_set_ipv4_tun { __be16 tun_flags; u8 ttl; u8 tos; - __be32 extra; + __be16 outer_vlan_tpid; + __be16 outer_vlan_tci; u8 tun_len; u8 res2; __be16 tun_proto; @@ -232,6 +236,24 @@ struct nfp_fl_push_geneve { u8 opt_data[]; }; +struct nfp_fl_push_mpls { + struct nfp_fl_act_head head; + __be16 ethtype; + __be32 lse; +}; + +struct nfp_fl_pop_mpls { + struct nfp_fl_act_head head; + __be16 ethtype; +}; + +struct nfp_fl_set_mpls { + struct nfp_fl_act_head head; + __be16 reserved; + __be32 lse_mask; + __be32 lse; +}; + /* Metadata with L2 (1W/4B) * ---------------------------------------------------------------- * 3 2 1 @@ -462,6 +484,7 @@ enum nfp_flower_cmsg_type_port { NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18, NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19, NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20, + NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21, NFP_FLOWER_CMSG_TYPE_MAX = 32, }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index eb846133943b..7a20447cca19 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -781,6 +781,7 @@ static int nfp_flower_init(struct nfp_app *app) INIT_LIST_HEAD(&app_priv->indr_block_cb_priv); INIT_LIST_HEAD(&app_priv->non_repr_priv); + app_priv->pre_tun_rule_cnt = 0; return 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index af9441d5787f..31d94592a7c0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -42,6 +42,7 @@ struct nfp_app; #define NFP_FL_FEATS_VLAN_PCP BIT(3) #define NFP_FL_FEATS_VF_RLIM BIT(4) #define NFP_FL_FEATS_FLOW_MOD BIT(5) +#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6) #define NFP_FL_FEATS_FLOW_MERGE BIT(30) #define NFP_FL_FEATS_LAG BIT(31) @@ -162,6 +163,7 @@ struct nfp_fl_internal_ports { * @qos_stats_work: Workqueue for qos stats processing * 
@qos_rate_limiters: Current active qos rate limiters * @qos_stats_lock: Lock on qos stats updates + * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded */ struct nfp_flower_priv { struct nfp_app *app; @@ -193,6 +195,7 @@ struct nfp_flower_priv { struct delayed_work qos_stats_work; unsigned int qos_rate_limiters; spinlock_t qos_stats_lock; /* Protect the qos stats */ + int pre_tun_rule_cnt; }; /** @@ -218,6 +221,7 @@ struct nfp_fl_qos { * @block_shared: Flag indicating if offload applies to shared blocks * @mac_list: List entry of reprs that share the same offloaded MAC * @qos_table: Stored info on filters implementing qos + * @on_bridge: Indicates if the repr is attached to a bridge */ struct nfp_flower_repr_priv { struct nfp_repr *nfp_repr; @@ -227,6 +231,7 @@ struct nfp_flower_repr_priv { bool block_shared; struct list_head mac_list; struct nfp_fl_qos qos_table; + bool on_bridge; }; /** @@ -280,6 +285,11 @@ struct nfp_fl_payload { char *action_data; struct list_head linked_flows; bool in_hw; + struct { + struct net_device *dev; + __be16 vlan_tci; + __be16 port_idx; + } pre_tun_rule; }; struct nfp_fl_payload_link { @@ -333,6 +343,11 @@ static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay) return flow_pay->tc_flower_cookie == (unsigned long)flow_pay; } +static inline bool nfp_flower_is_supported_bridge(struct net_device *netdev) +{ + return netif_is_ovs_master(netdev); +} + int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, unsigned int host_ctx_split); void nfp_flower_metadata_cleanup(struct nfp_app *app); @@ -415,4 +430,8 @@ void nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev); u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app, struct net_device *netdev); +int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, + struct nfp_fl_payload *flow); +int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app, + struct nfp_fl_payload *flow); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index e209f150c5f2..ff8a9f1a38f8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -61,6 +61,11 @@ NFP_FLOWER_LAYER_IPV4 | \ NFP_FLOWER_LAYER_IPV6) +#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \ + (NFP_FLOWER_LAYER_PORT | \ + NFP_FLOWER_LAYER_MAC | \ + NFP_FLOWER_LAYER_IPV4) + struct nfp_flower_merge_check { union { struct { @@ -489,6 +494,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) flow_pay->meta.flags = 0; INIT_LIST_HEAD(&flow_pay->linked_flows); flow_pay->in_hw = false; + flow_pay->pre_tun_rule.dev = NULL; return flow_pay; @@ -732,28 +738,62 @@ nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len, return act_off; } -static int nfp_fl_verify_post_tun_acts(char *acts, int len) +static int +nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan) { struct nfp_fl_act_head *a; unsigned int act_off = 0; while (act_off < len) { a = (struct nfp_fl_act_head *)&acts[act_off]; - if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) + + if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off) + *vlan = (struct nfp_fl_push_vlan *)a; + else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) return -EOPNOTSUPP; act_off += a->len_lw << NFP_FL_LW_SIZ; } + /* Ensure any VLAN push also has an egress action. 
*/ + if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan)) + return -EOPNOTSUPP; + return 0; } static int +nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan) +{ + struct nfp_fl_set_ipv4_tun *tun; + struct nfp_fl_act_head *a; + unsigned int act_off = 0; + + while (act_off < len) { + a = (struct nfp_fl_act_head *)&acts[act_off]; + + if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) { + tun = (struct nfp_fl_set_ipv4_tun *)a; + tun->outer_vlan_tpid = vlan->vlan_tpid; + tun->outer_vlan_tci = vlan->vlan_tci; + + return 0; + } + + act_off += a->len_lw << NFP_FL_LW_SIZ; + } + + /* Return error if no tunnel action is found. */ + return -EOPNOTSUPP; +} + +static int nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1, struct nfp_fl_payload *sub_flow2, struct nfp_fl_payload *merge_flow) { unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2; + struct nfp_fl_push_vlan *post_tun_push_vlan = NULL; bool tunnel_act = false; char *merge_act; int err; @@ -790,18 +830,36 @@ nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1, sub2_act_len -= pre_off2; /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes - * a tunnel, sub_flow 2 can only have output actions for a valid merge. + * a tunnel, there are restrictions on what sub_flow 2 actions lead to a + * valid merge. */ if (tunnel_act) { char *post_tun_acts = &sub_flow2->action_data[pre_off2]; - err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len); + err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len, + &post_tun_push_vlan); if (err) return err; + + if (post_tun_push_vlan) { + pre_off2 += sizeof(*post_tun_push_vlan); + sub2_act_len -= sizeof(*post_tun_push_vlan); + } } /* Copy remaining actions from sub_flows 1 and 2. */ memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len); + + if (post_tun_push_vlan) { + /* Update tunnel action in merge to include VLAN push. */ + err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len, + post_tun_push_vlan); + if (err) + return err; + + merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan); + } + merge_act += sub1_act_len; memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len); @@ -945,6 +1003,106 @@ err_destroy_merge_flow: } /** + * nfp_flower_validate_pre_tun_rule() + * @app: Pointer to the APP handle + * @flow: Pointer to NFP flow representation of rule + * @extack: Netlink extended ACK report + * + * Verifies the flow as a pre-tunnel rule. + * + * Return: negative value on error, 0 if verified. 
+ */ +static int +nfp_flower_validate_pre_tun_rule(struct nfp_app *app, + struct nfp_fl_payload *flow, + struct netlink_ext_ack *extack) +{ + struct nfp_flower_meta_tci *meta_tci; + struct nfp_flower_mac_mpls *mac; + struct nfp_fl_act_head *act; + u8 *mask = flow->mask_data; + bool vlan = false; + int act_offset; + u8 key_layer; + + meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data; + if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) { + u16 vlan_tci = be16_to_cpu(meta_tci->tci); + + vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; + flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); + vlan = true; + } else { + flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); + } + + key_layer = meta_tci->nfp_flow_key_layer; + if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields"); + return -EOPNOTSUPP; + } + + if (!(key_layer & NFP_FLOWER_LAYER_MAC)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required"); + return -EOPNOTSUPP; + } + + /* Skip fields known to exist. */ + mask += sizeof(struct nfp_flower_meta_tci); + mask += sizeof(struct nfp_flower_in_port); + + /* Ensure destination MAC address is fully matched. */ + mac = (struct nfp_flower_mac_mpls *)mask; + if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked"); + return -EOPNOTSUPP; + } + + if (key_layer & NFP_FLOWER_LAYER_IPV4) { + int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags); + int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto); + int i; + + mask += sizeof(struct nfp_flower_mac_mpls); + + /* Ensure proto and flags are the only IP layer fields. */ + for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++) + if (mask[i] && i != ip_flags && i != ip_proto) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header"); + return -EOPNOTSUPP; + } + } + + /* Action must be a single egress or pop_vlan and egress. */ + act_offset = 0; + act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; + if (vlan) { + if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action"); + return -EOPNOTSUPP; + } + + act_offset += act->len_lw << NFP_FL_LW_SIZ; + act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; + } + + if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected"); + return -EOPNOTSUPP; + } + + act_offset += act->len_lw << NFP_FL_LW_SIZ; + + /* Ensure there are no more actions after egress. */ + if (act_offset != flow->meta.act_len) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action"); + return -EOPNOTSUPP; + } + + return 0; +} + +/** * nfp_flower_add_offload() - Adds a new flow to hardware. * @app: Pointer to the APP handle * @netdev: netdev structure. 
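Besides the match-layer checks, nfp_flower_validate_pre_tun_rule() above enforces a fixed action-list shape: when the rule matches a VLAN, pop_vlan must be the first action, and in all cases exactly one egress (output) action must follow and be the last entry. A standalone sketch of that walk over packed {opcode, length} action headers (the opcode values and the 4-byte word size are illustrative, not the firmware ABI):

#include <stdint.h>
#include <stdio.h>

struct act_head {
	uint8_t jump_id;	/* action opcode */
	uint8_t len_lw;		/* action length in 4-byte words */
};

#define OP_OUTPUT   0
#define OP_POP_VLAN 2

static int validate_pre_tun_actions(const uint8_t *acts, int len, int has_vlan)
{
	const struct act_head *a = (const struct act_head *)acts;
	int off = 0;

	if (has_vlan) {			/* VLAN match => pop_vlan comes first */
		if (a->jump_id != OP_POP_VLAN)
			return -1;
		off += a->len_lw << 2;
		a = (const struct act_head *)(acts + off);
	}
	if (a->jump_id != OP_OUTPUT)	/* then a single egress action */
		return -1;
	off += a->len_lw << 2;
	return off == len ? 0 : -1;	/* egress must be the last action */
}

int main(void)
{
	/* pop_vlan (1 word) followed by output (2 words) = 12 bytes */
	uint8_t acts[12] = { OP_POP_VLAN, 1, 0, 0, OP_OUTPUT, 2 };

	printf("valid: %d\n", validate_pre_tun_actions(acts, 12, 1));
	return 0;
}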
@@ -994,6 +1152,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_destroy_flow; + if (flow_pay->pre_tun_rule.dev) { + err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack); + if (err) + goto err_destroy_flow; + } + err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack); if (err) goto err_destroy_flow; @@ -1006,8 +1170,11 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, goto err_release_metadata; } - err = nfp_flower_xmit_flow(app, flow_pay, - NFP_FLOWER_CMSG_TYPE_FLOW_ADD); + if (flow_pay->pre_tun_rule.dev) + err = nfp_flower_xmit_pre_tun_flow(app, flow_pay); + else + err = nfp_flower_xmit_flow(app, flow_pay, + NFP_FLOWER_CMSG_TYPE_FLOW_ADD); if (err) goto err_remove_rhash; @@ -1149,8 +1316,11 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, goto err_free_merge_flow; } - err = nfp_flower_xmit_flow(app, nfp_flow, - NFP_FLOWER_CMSG_TYPE_FLOW_DEL); + if (nfp_flow->pre_tun_rule.dev) + err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow); + else + err = nfp_flower_xmit_flow(app, nfp_flow, + NFP_FLOWER_CMSG_TYPE_FLOW_DEL); /* Fall through on error. */ err_free_merge_flow: diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index a7a80f4b722a..def8c198b016 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -15,6 +15,24 @@ #define NFP_FL_MAX_ROUTES 32 +#define NFP_TUN_PRE_TUN_RULE_LIMIT 32 +#define NFP_TUN_PRE_TUN_RULE_DEL 0x1 +#define NFP_TUN_PRE_TUN_IDX_BIT 0x8 + +/** + * struct nfp_tun_pre_tun_rule - rule matched before decap + * @flags: options for the rule offload + * @port_idx: index of destination MAC address for the rule + * @vlan_tci: VLAN info associated with MAC + * @host_ctx_id: stats context of rule to update + */ +struct nfp_tun_pre_tun_rule { + __be32 flags; + __be16 port_idx; + __be16 vlan_tci; + __be32 host_ctx_id; +}; + /** * struct nfp_tun_active_tuns - periodic message of active tunnels * @seq: sequence number of the message @@ -124,11 +142,12 @@ enum nfp_flower_mac_offload_cmd { /** * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC - * @ht_node: Hashtable entry - * @addr: Offloaded MAC address - * @index: Offloaded index for given MAC address - * @ref_count: Number of devs using this MAC address - * @repr_list: List of reprs sharing this MAC address + * @ht_node: Hashtable entry + * @addr: Offloaded MAC address + * @index: Offloaded index for given MAC address + * @ref_count: Number of devs using this MAC address + * @repr_list: List of reprs sharing this MAC address + * @bridge_count: Number of bridge/internal devs with MAC */ struct nfp_tun_offloaded_mac { struct rhash_head ht_node; @@ -136,6 +155,7 @@ struct nfp_tun_offloaded_mac { u16 index; int ref_count; struct list_head repr_list; + int bridge_count; }; static const struct rhashtable_params offloaded_macs_params = { @@ -556,6 +576,8 @@ nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry, list_del(&repr_priv->mac_list); list_add_tail(&repr_priv->mac_list, &entry->repr_list); + } else if (nfp_flower_is_supported_bridge(netdev)) { + entry->bridge_count++; } entry->ref_count++; @@ -572,20 +594,35 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) { -
nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod); - return 0; + if (entry->bridge_count || + !nfp_flower_is_supported_bridge(netdev)) { + nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, + netdev, mod); + return 0; + } + + /* MAC is global but matches need to go to pre_tun table. */ + nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT; } - /* Assign a global index if non-repr or MAC address is now shared. */ - if (entry || !port) { - ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0, - NFP_MAX_MAC_INDEX, GFP_KERNEL); - if (ida_idx < 0) - return ida_idx; + if (!nfp_mac_idx) { + /* Assign a global index if non-repr or MAC is now shared. */ + if (entry || !port) { + ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0, + NFP_MAX_MAC_INDEX, GFP_KERNEL); + if (ida_idx < 0) + return ida_idx; - nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx); - } else { - nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port); + nfp_mac_idx = + nfp_tunnel_get_global_mac_idx_from_ida(ida_idx); + + if (nfp_flower_is_supported_bridge(netdev)) + nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT; + + } else { + nfp_mac_idx = + nfp_tunnel_get_mac_idx_from_phy_port_id(port); + } } if (!entry) { @@ -654,6 +691,25 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, list_del(&repr_priv->mac_list); } + if (nfp_flower_is_supported_bridge(netdev)) { + entry->bridge_count--; + + if (!entry->bridge_count && entry->ref_count) { + u16 nfp_mac_idx; + + nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT; + if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, + false)) { + nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n", + netdev_name(netdev)); + return 0; + } + + entry->index = nfp_mac_idx; + return 0; + } + } + /* If MAC is now used by 1 repr set the offloaded MAC index to port. */ if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) { u16 nfp_mac_idx; @@ -713,6 +769,9 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev, return 0; repr_priv = repr->app_priv; + if (repr_priv->on_bridge) + return 0; + mac_offloaded = &repr_priv->mac_offloaded; off_mac = &repr_priv->offloaded_mac_addr[0]; port = nfp_repr_get_port_id(netdev); @@ -828,10 +887,119 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app, if (err) nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n", netdev_name(netdev)); + } else if (event == NETDEV_CHANGEUPPER) { + /* If a repr is attached to a bridge then tunnel packets + * entering the physical port are directed through the bridge + * datapath and cannot be directly detunneled. Therefore, + * associated offloaded MACs and indexes should not be used + * by fw for detunneling. 
+ */ + struct netdev_notifier_changeupper_info *info = ptr; + struct net_device *upper = info->upper_dev; + struct nfp_flower_repr_priv *repr_priv; + struct nfp_repr *repr; + + if (!nfp_netdev_is_nfp_repr(netdev) || + !nfp_flower_is_supported_bridge(upper)) + return NOTIFY_OK; + + repr = netdev_priv(netdev); + if (repr->app != app) + return NOTIFY_OK; + + repr_priv = repr->app_priv; + + if (info->linking) { + if (nfp_tunnel_offload_mac(app, netdev, + NFP_TUNNEL_MAC_OFFLOAD_DEL)) + nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n", + netdev_name(netdev)); + repr_priv->on_bridge = true; + } else { + repr_priv->on_bridge = false; + + if (!(netdev->flags & IFF_UP)) + return NOTIFY_OK; + + if (nfp_tunnel_offload_mac(app, netdev, + NFP_TUNNEL_MAC_OFFLOAD_ADD)) + nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n", + netdev_name(netdev)); + } } return NOTIFY_OK; } +int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, + struct nfp_fl_payload *flow) +{ + struct nfp_flower_priv *app_priv = app->priv; + struct nfp_tun_offloaded_mac *mac_entry; + struct nfp_tun_pre_tun_rule payload; + struct net_device *internal_dev; + int err; + + if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT) + return -ENOSPC; + + memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule)); + + internal_dev = flow->pre_tun_rule.dev; + payload.vlan_tci = flow->pre_tun_rule.vlan_tci; + payload.host_ctx_id = flow->meta.host_ctx_id; + + /* Lookup MAC index for the pre-tunnel rule egress device. + * Note that because the device is always an internal port, it will + * have a constant global index so does not need to be tracked. + */ + mac_entry = nfp_tunnel_lookup_offloaded_macs(app, + internal_dev->dev_addr); + if (!mac_entry) + return -ENOENT; + + payload.port_idx = cpu_to_be16(mac_entry->index); + + /* Copy mac id and vlan to flow - dev may not exist at delete time. 
*/ + flow->pre_tun_rule.vlan_tci = payload.vlan_tci; + flow->pre_tun_rule.port_idx = payload.port_idx; + + err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE, + sizeof(struct nfp_tun_pre_tun_rule), + (unsigned char *)&payload, GFP_KERNEL); + if (err) + return err; + + app_priv->pre_tun_rule_cnt++; + + return 0; +} + +int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app, + struct nfp_fl_payload *flow) +{ + struct nfp_flower_priv *app_priv = app->priv; + struct nfp_tun_pre_tun_rule payload; + u32 tmp_flags = 0; + int err; + + memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule)); + + tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL; + payload.flags = cpu_to_be32(tmp_flags); + payload.vlan_tci = flow->pre_tun_rule.vlan_tci; + payload.port_idx = flow->pre_tun_rule.port_idx; + + err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE, + sizeof(struct nfp_tun_pre_tun_rule), + (unsigned char *)&payload, GFP_KERNEL); + if (err) + return err; + + app_priv->pre_tun_rule_cnt--; + + return 0; +} + int nfp_tunnel_config_start(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 9903805717da..6f97b554f7da 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -975,7 +975,7 @@ static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle) static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - const struct skb_frag_struct *frag; + const skb_frag_t *frag; int f, nr_frags, wr_idx, md_bytes; struct nfp_net_tx_ring *tx_ring; struct nfp_net_r_vector *r_vec; @@ -1155,7 +1155,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget) todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p); while (todo--) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag; struct nfp_net_tx_buf *tx_buf; struct sk_buff *skb; int fidx, nr_frags; @@ -1270,7 +1270,7 @@ static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) static void nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag; struct netdev_queue *nd_q; while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) { diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 3d73970b3a2e..219b0b863c89 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -993,14 +993,12 @@ static int w90p910_ether_probe(struct platform_device *pdev) ether->txirq = platform_get_irq(pdev, 0); if (ether->txirq < 0) { - dev_err(&pdev->dev, "failed to get ether tx irq\n"); error = -ENXIO; goto failed_free_io; } ether->rxirq = platform_get_irq(pdev, 1); if (ether->rxirq < 0) { - dev_err(&pdev->dev, "failed to get ether rx irq\n"); error = -ENXIO; goto failed_free_io; } diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index b327b29f5d57..ecca794c55e2 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -6126,8 +6126,7 @@ static void nv_remove(struct pci_dev *pci_dev) #ifdef CONFIG_PM_SLEEP static int nv_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = 
dev_get_drvdata(device); struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); int i; diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index a391cf6ee4b2..55a29ec76680 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -66,15 +66,6 @@ config QLCNIC_HWMON This data is available via the hwmon sysfs interface. -config QLGE - tristate "QLogic QLGE 10Gb Ethernet Driver Support" - depends on PCI - ---help--- - This driver supports QLogic ISP8XXX 10Gb Ethernet cards. - - To compile this driver as a module, choose M here: the module - will be called qlge. - config NETXEN_NIC tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC" depends on PCI diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile index 6cd2e333a5fc..1ae4a0743bd5 100644 --- a/drivers/net/ethernet/qlogic/Makefile +++ b/drivers/net/ethernet/qlogic/Makefile @@ -5,7 +5,6 @@ obj-$(CONFIG_QLA3XXX) += qla3xxx.o obj-$(CONFIG_QLCNIC) += qlcnic/ -obj-$(CONFIG_QLGE) += qlge/ obj-$(CONFIG_NETXEN_NIC) += netxen/ obj-$(CONFIG_QED) += qed/ obj-$(CONFIG_QEDE)+= qede/ diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 58e2eaf77014..c692a41e4548 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1980,7 +1980,7 @@ netxen_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, struct netxen_cmd_buffer *pbuf) { struct netxen_skb_frag *nf; - struct skb_frag_struct *frag; + skb_frag_t *frag; int i, nr_frags; dma_addr_t map; @@ -2043,7 +2043,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) struct pci_dev *pdev; int i, k; int delta = 0; - struct skb_frag_struct *frag; + skb_frag_t *frag; u32 producer; int frag_count; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 9f36e7948222..1a5fc2ae351c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1631,10 +1631,9 @@ static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn, } } -static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats, - u16 statistics_bin) +static noinline_for_stack void +__qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) { struct eth_pstorm_per_queue_stat pstats; u32 pstats_addr = 0, pstats_len = 0; @@ -1661,10 +1660,9 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, HILO_64_REGPAIR(pstats.error_drop_pkts); } -static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats, - u16 statistics_bin) +static noinline_for_stack void +__qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) { struct tstorm_per_port_stat tstats; u32 tstats_addr, tstats_len; @@ -1709,10 +1707,9 @@ static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, } } -static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats, - u16 statistics_bin) +static noinline_for_stack +void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) { struct eth_ustorm_per_queue_stat ustats; u32 ustats_addr = 0, 
ustats_len = 0; @@ -1751,10 +1748,9 @@ static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, } } -static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats, - u16 statistics_bin) +static noinline_for_stack void +__qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) { struct eth_mstorm_per_queue_stat mstats; u32 mstats_addr = 0, mstats_len = 0; @@ -1780,9 +1776,9 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); } -static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats) +static noinline_for_stack void +__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats) { struct qed_eth_stats_common *p_common = &p_stats->common; struct port_stats port_stats; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 14f26bf3b388..ac61f614de37 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -581,7 +581,7 @@ static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf) { struct qlcnic_skb_frag *nf; - struct skb_frag_struct *frag; + skb_frag_t *frag; int i, nr_frags; dma_addr_t map; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 707665b62eb7..bebe38d74d66 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1385,15 +1385,13 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt, } for (i = 0; i < nr_frags; i++) { - struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); - tpbuf->length = frag->size; - tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent, - frag->page.p, frag->page_offset, - tpbuf->length, DMA_TO_DEVICE); + tpbuf->length = skb_frag_size(frag); + tpbuf->dma_addr = skb_frag_dma_map(adpt->netdev->dev.parent, + frag, 0, tpbuf->length, + DMA_TO_DEVICE); ret = dma_mapping_error(adpt->netdev->dev.parent, tpbuf->dma_addr); if (ret) diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 59c2349b59df..c84ab052ef26 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -544,7 +544,6 @@ static int emac_probe_resources(struct platform_device *pdev, struct emac_adapter *adpt) { struct net_device *netdev = adpt->netdev; - struct resource *res; char maddr[ETH_ALEN]; int ret = 0; @@ -556,22 +555,17 @@ static int emac_probe_resources(struct platform_device *pdev, /* Core 0 interrupt */ ret = platform_get_irq(pdev, 0); - if (ret < 0) { - dev_err(&pdev->dev, - "error: missing core0 irq resource (error=%i)\n", ret); + if (ret < 0) return ret; - } adpt->irq.irq = ret; /* base register address */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - adpt->base = devm_ioremap_resource(&pdev->dev, res); + adpt->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(adpt->base)) return PTR_ERR(adpt->base); /* CSR register address */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - adpt->csr = devm_ioremap_resource(&pdev->dev, res); + adpt->csr = 
devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(adpt->csr)) return PTR_ERR(adpt->csr); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index e1dd6ea60d67..fa6eae2e7ed8 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -61,7 +61,7 @@ /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. */ -static const int multicast_filter_limit = 32; +#define MC_FILTER_LIMIT 32 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ @@ -271,7 +271,6 @@ enum rtl_registers { Config3 = 0x54, Config4 = 0x55, Config5 = 0x56, - MultiIntr = 0x5c, PHYAR = 0x60, PHYstatus = 0x6c, RxMaxSize = 0xda, @@ -539,11 +538,11 @@ enum rtl_tx_desc_bit_1 { TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */ TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */ #define GTTCPHO_SHIFT 18 -#define GTTCPHO_MAX 0x7fU +#define GTTCPHO_MAX 0x7f /* Second doubleword. */ #define TCPHO_SHIFT 18 -#define TCPHO_MAX 0x3ffU +#define TCPHO_MAX 0x3ff #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */ TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */ @@ -569,6 +568,11 @@ enum rtl_rx_desc_bit { #define RsvdMask 0x3fffc000 +#define RTL_GSO_MAX_SIZE_V1 32000 +#define RTL_GSO_MAX_SEGS_V1 24 +#define RTL_GSO_MAX_SIZE_V2 64000 +#define RTL_GSO_MAX_SEGS_V2 64 + struct TxDesc { __le32 opts1; __le32 opts2; @@ -846,6 +850,14 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) return RTL_R32(tp, OCPDR); } +static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask, + u16 set) +{ + u16 data = r8168_mac_ocp_read(tp, reg); + + r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); +} + #define OCP_STD_PHY_BASE 0xa400 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) @@ -1414,18 +1426,22 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) } switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: options = RTL_R8(tp, Config1) & ~PMEnable; if (wolopts) options |= PMEnable; RTL_W8(tp, Config1, options); break; - default: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_51: options = RTL_R8(tp, Config2) & ~PME_SIGNAL; if (wolopts) options |= PME_SIGNAL; RTL_W8(tp, Config2, options); break; + default: + break; } rtl_lock_config_regs(tp); @@ -4146,54 +4162,46 @@ static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_versi static void rtl_set_rx_mode(struct net_device *dev) { + u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast; + /* Multicast hash filter */ + u32 mc_filter[2] = { 0xffffffff, 0xffffffff }; struct rtl8169_private *tp = netdev_priv(dev); - u32 mc_filter[2]; /* Multicast hash filter */ - int rx_mode; - u32 tmp = 0; + u32 tmp; if (dev->flags & IFF_PROMISC) { /* Unconditionally log net taps. */ netif_notice(tp, link, dev, "Promiscuous mode enabled\n"); - rx_mode = - AcceptBroadcast | AcceptMulticast | AcceptMyPhys | - AcceptAllPhys; - mc_filter[1] = mc_filter[0] = 0xffffffff; - } else if ((netdev_mc_count(dev) > multicast_filter_limit) || - (dev->flags & IFF_ALLMULTI)) { - /* Too many to filter perfectly -- accept all multicasts. 
*/ - rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; - mc_filter[1] = mc_filter[0] = 0xffffffff; + rx_mode |= AcceptAllPhys; + } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || + dev->flags & IFF_ALLMULTI || + tp->mac_version == RTL_GIGA_MAC_VER_35) { + /* accept all multicasts */ + } else if (netdev_mc_empty(dev)) { + rx_mode &= ~AcceptMulticast; } else { struct netdev_hw_addr *ha; - rx_mode = AcceptBroadcast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; netdev_for_each_mc_addr(ha, dev) { - int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; - mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); - rx_mode |= AcceptMulticast; + u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31); + } + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + tmp = mc_filter[0]; + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(tmp); } } if (dev->features & NETIF_F_RXALL) rx_mode |= (AcceptErr | AcceptRunt); - tmp = (RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; - - if (tp->mac_version > RTL_GIGA_MAC_VER_06) { - u32 data = mc_filter[0]; - - mc_filter[0] = swab32(mc_filter[1]); - mc_filter[1] = swab32(data); - } - - if (tp->mac_version == RTL_GIGA_MAC_VER_35) - mc_filter[1] = mc_filter[0] = 0xffffffff; - RTL_W32(tp, MAR0 + 4, mc_filter[1]); RTL_W32(tp, MAR0 + 0, mc_filter[0]); - RTL_W32(tp, RxConfig, tmp); + tmp = RTL_R32(tp, RxConfig); + RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_MASK) | rx_mode); } DECLARE_RTL_COND(rtl_csiar_cond) @@ -4407,7 +4415,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) { static const struct ephy_info e_info_8168c_2[] = { { 0x01, 0, 0x0001 }, - { 0x03, 0x0400, 0x0220 } + { 0x03, 0x0400, 0x0020 } }; rtl_set_def_aspm_entry_latency(tp); @@ -4454,7 +4462,8 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) static const struct ephy_info e_info_8168d_4[] = { { 0x0b, 0x0000, 0x0048 }, { 0x19, 0x0020, 0x0050 }, - { 0x0c, 0x0100, 0x0020 } + { 0x0c, 0x0100, 0x0020 }, + { 0x10, 0x0004, 0x0000 }, }; rtl_set_def_aspm_entry_latency(tp); @@ -4504,7 +4513,9 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) { static const struct ephy_info e_info_8168e_2[] = { { 0x09, 0x0000, 0x0080 }, - { 0x19, 0x0000, 0x0224 } + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0004 }, + { 0x0c, 0x3df0, 0x0200 }, }; rtl_set_def_aspm_entry_latency(tp); @@ -4566,7 +4577,9 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) { 0x06, 0x00c0, 0x0020 }, { 0x08, 0x0001, 0x0002 }, { 0x09, 0x0000, 0x0080 }, - { 0x19, 0x0000, 0x0224 } + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0004 }, + { 0x0c, 0x3df0, 0x0200 }, }; rtl_hw_start_8168f(tp); @@ -4581,8 +4594,9 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp) static const struct ephy_info e_info_8168f_1[] = { { 0x06, 0x00c0, 0x0020 }, { 0x0f, 0xffff, 0x5200 }, - { 0x1e, 0x0000, 0x4000 }, - { 0x19, 0x0000, 0x0224 } + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0004 }, + { 0x0c, 0x3df0, 0x0200 }, }; rtl_hw_start_8168f(tp); @@ -4621,8 +4635,8 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) { static const struct ephy_info e_info_8168g_1[] = { - { 0x00, 0x0000, 0x0008 }, - { 0x0c, 0x37d0, 0x0820 }, + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, { 0x1e, 0x0000, 0x0001 }, { 0x19, 0x8000, 0x0000 } }; @@ -4638,10 +4652,15 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) { static const 
struct ephy_info e_info_8168g_2[] = { - { 0x00, 0x0000, 0x0008 }, - { 0x0c, 0x3df0, 0x0200 }, - { 0x19, 0xffff, 0xfc00 }, - { 0x1e, 0xffff, 0x20eb } + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x19, 0xffff, 0x7c00 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x0d, 0xffff, 0x1666 }, + { 0x00, 0xffff, 0x10a3 }, + { 0x06, 0xffff, 0xf050 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x4000, 0x0000 }, }; rtl_hw_start_8168g(tp); @@ -4655,11 +4674,16 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) static void rtl_hw_start_8411_2(struct rtl8169_private *tp) { static const struct ephy_info e_info_8411_2[] = { - { 0x00, 0x0000, 0x0008 }, - { 0x0c, 0x3df0, 0x0200 }, - { 0x0f, 0xffff, 0x5200 }, - { 0x19, 0x0020, 0x0000 }, - { 0x1e, 0x0000, 0x2000 } + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x37d0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x00, 0x0000, 0x0080 }, + { 0x06, 0x0000, 0x0010 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x0000, 0x4000 }, }; rtl_hw_start_8168g(tp); @@ -4809,16 +4833,15 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp) static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) { - int rg_saw_cnt; - u32 data; static const struct ephy_info e_info_8168h_1[] = { { 0x1e, 0x0800, 0x0001 }, { 0x1d, 0x0000, 0x0800 }, { 0x05, 0xffff, 0x2089 }, { 0x06, 0xffff, 0x5881 }, - { 0x04, 0xffff, 0x154a }, + { 0x04, 0xffff, 0x854a }, { 0x01, 0xffff, 0x068b } }; + int rg_saw_cnt; /* disable aspm and clock request before access ephy */ rtl_hw_aspm_clkreq_enable(tp, false); @@ -4863,31 +4886,13 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) sw_cnt_1ms_ini = 16000000/rg_saw_cnt; sw_cnt_1ms_ini &= 0x0fff; - data = r8168_mac_ocp_read(tp, 0xd412); - data &= ~0x0fff; - data |= sw_cnt_1ms_ini; - r8168_mac_ocp_write(tp, 0xd412, data); + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); } - data = r8168_mac_ocp_read(tp, 0xe056); - data &= ~0xf0; - data |= 0x70; - r8168_mac_ocp_write(tp, 0xe056, data); - - data = r8168_mac_ocp_read(tp, 0xe052); - data &= ~0x6000; - data |= 0x8008; - r8168_mac_ocp_write(tp, 0xe052, data); - - data = r8168_mac_ocp_read(tp, 0xe0d6); - data &= ~0x01ff; - data |= 0x017f; - r8168_mac_ocp_write(tp, 0xe0d6, data); - - data = r8168_mac_ocp_read(tp, 0xd420); - data &= ~0x0fff; - data |= 0x047f; - r8168_mac_ocp_write(tp, 0xd420, data); + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008); + r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); r8168_mac_ocp_write(tp, 0xe63e, 0x0001); r8168_mac_ocp_write(tp, 0xe63e, 0x0000); @@ -4969,12 +4974,11 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) { - u32 data; static const struct ephy_info e_info_8168ep_3[] = { - { 0x00, 0xffff, 0x10a3 }, - { 0x19, 0xffff, 0x7c00 }, - { 0x1e, 0xffff, 0x20eb }, - { 0x0d, 0xffff, 0x1666 } + { 0x00, 0x0000, 0x0080 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, }; /* disable aspm and clock request before access ephy */ @@ -4986,18 +4990,9 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); - data = r8168_mac_ocp_read(tp, 0xd3e2); - data &= 0xf000; - data |= 0x0271; - r8168_mac_ocp_write(tp, 0xd3e2, data); - - data = r8168_mac_ocp_read(tp, 0xd3e4); - 
data &= 0xff00; - r8168_mac_ocp_write(tp, 0xd3e4, data); - - data = r8168_mac_ocp_read(tp, 0xe860); - data |= 0x0080; - r8168_mac_ocp_write(tp, 0xe860, data); + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); rtl_hw_aspm_clkreq_enable(tp, true); } @@ -5240,10 +5235,7 @@ static void rtl_hw_start(struct rtl8169_private *tp) RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); rtl_init_rxcfg(tp); rtl_set_tx_config_registers(tp); - rtl_set_rx_mode(tp->dev); - /* no early-rx interrupts */ - RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); rtl_irq_enable(tp); } @@ -5507,44 +5499,6 @@ static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; } -static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, - struct net_device *dev); -/* r8169_csum_workaround() - * The hw limites the value the transport offset. When the offset is out of the - * range, calculate the checksum by sw. - */ -static void r8169_csum_workaround(struct rtl8169_private *tp, - struct sk_buff *skb) -{ - if (skb_is_gso(skb)) { - netdev_features_t features = tp->dev->features; - struct sk_buff *segs, *nskb; - - features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6); - segs = skb_gso_segment(skb, features); - if (IS_ERR(segs) || !segs) - goto drop; - - do { - nskb = segs; - segs = segs->next; - nskb->next = NULL; - rtl8169_start_xmit(nskb, tp->dev); - } while (segs); - - dev_consume_skb_any(skb); - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (skb_checksum_help(skb) < 0) - goto drop; - - rtl8169_start_xmit(skb, tp->dev); - } else { -drop: - tp->dev->stats.tx_dropped++; - dev_kfree_skb_any(skb); - } -} - /* msdn_giant_send_check() * According to the document of microsoft, the TCP Pseudo Header excludes the * packet length for IPv6 TCP large packets. 
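The deleted r8169_csum_workaround() segmented oversized GSO packets inside the driver's xmit path. The replacement approach, the rtl8169_features_check() hook added further down, instead clears offload feature bits in .ndo_features_check, so the core stack falls back to software GSO and checksumming before the skb ever reaches the driver. A condensed sketch of that pattern for a hypothetical NIC, with the limit name EXAMPLE_TCPHO_MAX assumed rather than taken from this driver:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define EXAMPLE_TCPHO_MAX 0x3ff	/* assumed hardware transport-offset limit */

/* Clear offload bits the hardware cannot honour for this particular skb;
 * the stack then segments/checksums in software before calling xmit.
 */
static netdev_features_t example_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	if (skb_transport_offset(skb) > EXAMPLE_TCPHO_MAX)
		features &= ~(NETIF_F_ALL_TSO | NETIF_F_CSUM_MASK);

	return vlan_features_check(skb, features);
}

The per-skb check keeps the fast path clean: the driver no longer needs to re-inject software-segmented packets into its own transmit routine.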
@@ -5594,13 +5548,6 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, u32 mss = skb_shinfo(skb)->gso_size; if (mss) { - if (transport_offset > GTTCPHO_MAX) { - netif_warn(tp, tx_err, tp->dev, - "Invalid transport offset 0x%x for TSO\n", - transport_offset); - return false; - } - switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts[0] |= TD1_GTSENV4; @@ -5623,16 +5570,6 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, } else if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 ip_protocol; - if (unlikely(rtl_test_hw_pad_bug(tp, skb))) - return !(skb_checksum_help(skb) || eth_skb_pad(skb)); - - if (transport_offset > TCPHO_MAX) { - netif_warn(tp, tx_err, tp->dev, - "Invalid transport offset 0x%x\n", - transport_offset); - return false; - } - switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts[1] |= TD1_IPv4_CS; @@ -5695,6 +5632,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, struct device *d = tp_to_dev(tp); dma_addr_t mapping; u32 opts[2], len; + bool stop_queue; + bool door_bell; int frags; if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { @@ -5709,10 +5648,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, opts[0] = DescOwn; if (rtl_chip_supports_csum_v2(tp)) { - if (!rtl8169_tso_csum_v2(tp, skb, opts)) { - r8169_csum_workaround(tp, skb); - return NETDEV_TX_OK; - } + if (!rtl8169_tso_csum_v2(tp, skb, opts)) + goto err_dma_0; } else { rtl8169_tso_csum_v1(skb, opts); } @@ -5740,13 +5677,13 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, txd->opts2 = cpu_to_le32(opts[1]); - netdev_sent_queue(dev, skb->len); - skb_tx_timestamp(skb); /* Force memory writes to complete before releasing descriptor */ dma_wmb(); + door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); + txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry); /* Force all memory writes to complete before notifying device */ @@ -5754,14 +5691,19 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, tp->cur_tx += frags + 1; - RTL_W8(tp, TxPoll, NPQ); - - if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) { + stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); + if (unlikely(stop_queue)) { /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must * not miss a ring update when it notices a stopped queue. */ smp_wmb(); netif_stop_queue(dev); + } + + if (door_bell) + RTL_W8(tp, TxPoll, NPQ); + + if (unlikely(stop_queue)) { /* Sync with rtl_tx: * - publish queue status and cur_tx ring index (write barrier) * - refresh dirty_tx ring index (read barrier). 
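The transmit rework above also defers the TxPoll doorbell: __netdev_sent_queue() accounts the bytes for BQL and returns true when the hardware should be kicked now, while netdev_xmit_more() tells the driver the stack has more packets queued for the same ring. The general shape of this batching in an xmit handler, with a hypothetical example_ring_kick() standing in for the device-specific doorbell write:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_ring_kick(struct net_device *dev)
{
	/* hypothetical: write the device's TX doorbell register */
}

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	bool door_bell;

	/* ... map the skb and publish descriptors here ... */

	skb_tx_timestamp(skb);

	/* Order descriptor writes before the doorbell decision. */
	dma_wmb();

	door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());
	if (door_bell)
		example_ring_kick(dev);

	return NETDEV_TX_OK;
}

Coalescing doorbells this way cuts expensive MMIO writes when the stack submits a burst of packets back to back.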
@@ -5789,6 +5731,39 @@ err_stop_0: return NETDEV_TX_BUSY; } +static netdev_features_t rtl8169_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + int transport_offset = skb_transport_offset(skb); + struct rtl8169_private *tp = netdev_priv(dev); + + if (skb_is_gso(skb)) { + if (transport_offset > GTTCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_ALL_TSO; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->len < ETH_ZLEN) { + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_34: + features &= ~NETIF_F_CSUM_MASK; + break; + default: + break; + } + } + + if (transport_offset > TCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_CSUM_MASK; + } + + return vlan_features_check(skb, features); +} + static void rtl8169_pcierr_interrupt(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -5908,23 +5883,6 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) skb_checksum_none_assert(skb); } -static struct sk_buff *rtl8169_try_rx_copy(void *data, - struct rtl8169_private *tp, - int pkt_size, - dma_addr_t addr) -{ - struct sk_buff *skb; - struct device *d = tp_to_dev(tp); - - dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); - prefetch(data); - skb = napi_alloc_skb(&tp->napi, pkt_size); - if (skb) - skb_copy_to_linear_data(skb, data, pkt_size); - - return skb; -} - static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget) { unsigned int cur_rx, rx_left; @@ -5960,17 +5918,13 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget goto process_pkt; } } else { + unsigned int pkt_size; struct sk_buff *skb; - dma_addr_t addr; - int pkt_size; process_pkt: - addr = le64_to_cpu(desc->addr); + pkt_size = status & GENMASK(13, 0); if (likely(!(dev->features & NETIF_F_RXFCS))) - pkt_size = (status & 0x00003fff) - 4; - else - pkt_size = status & 0x00003fff; - + pkt_size -= ETH_FCS_LEN; /* * The driver does not support incoming fragmented * frames. They are seen as a symptom of over-mtu @@ -5982,15 +5936,23 @@ process_pkt: goto release_descriptor; } - skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], - tp, pkt_size, addr); - if (!skb) { + dma_sync_single_for_cpu(tp_to_dev(tp), + le64_to_cpu(desc->addr), + pkt_size, DMA_FROM_DEVICE); + + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (unlikely(!skb)) { dev->stats.rx_dropped++; goto release_descriptor; } + prefetch(tp->Rx_databuff[entry]); + skb_copy_to_linear_data(skb, tp->Rx_databuff[entry], + pkt_size); + skb->tail += pkt_size; + skb->len = pkt_size; + rtl8169_rx_csum(skb, status); - skb_put(skb, pkt_size); skb->protocol = eth_type_trans(skb, dev); rtl8169_rx_vlan_tag(desc, skb); @@ -6331,7 +6293,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->multicast = dev->stats.multicast; /* - * Fetch additonal counter values missing in stats collected by driver + * Fetch additional counter values missing in stats collected by driver * from tally counters. 
*/ if (pm_runtime_active(&pdev->dev)) @@ -6555,6 +6517,7 @@ static const struct net_device_ops rtl_netdev_ops = { .ndo_stop = rtl8169_close, .ndo_get_stats64 = rtl8169_get_stats64, .ndo_start_xmit = rtl8169_start_xmit, + .ndo_features_check = rtl8169_features_check, .ndo_tx_timeout = rtl8169_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = rtl8169_change_mtu, @@ -6691,8 +6654,6 @@ static int r8169_mdio_register(struct rtl8169_private *tp) static void rtl_hw_init_8168g(struct rtl8169_private *tp) { - u32 data; - tp->ocp_base = OCP_STD_PHY_BASE; RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN); @@ -6707,16 +6668,12 @@ static void rtl_hw_init_8168g(struct rtl8169_private *tp) msleep(1); RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); - data = r8168_mac_ocp_read(tp, 0xe8de); - data &= ~(1 << 14); - r8168_mac_ocp_write(tp, 0xe8de, data); + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) return; - data = r8168_mac_ocp_read(tp, 0xe8de); - data |= (1 << 15); - r8168_mac_ocp_write(tp, 0xe8de, data); + r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15)); rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42); } @@ -6916,11 +6873,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); - /* don't enable SG, IP_CSUM and TSO by default - it might not work - * properly for all devices */ - dev->features |= NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - + dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; @@ -6938,8 +6893,22 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Disallow toggling */ dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; - if (rtl_chip_supports_csum_v2(tp)) + if (rtl_chip_supports_csum_v2(tp)) { dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + dev->features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + dev->gso_max_size = RTL_GSO_MAX_SIZE_V2; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2; + } else { + dev->gso_max_size = RTL_GSO_MAX_SIZE_V1; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1; + } + + /* RTL8168e-vl has a HW issue with TSO */ + if (tp->mac_version == RTL_GIGA_MAC_VER_34) { + dev->vlan_features &= ~NETIF_F_ALL_TSO; + dev->hw_features &= ~NETIF_F_ALL_TSO; + dev->features &= ~NETIF_F_ALL_TSO; + } dev->hw_features |= NETIF_F_RXALL; dev->hw_features |= NETIF_F_RXFCS; diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 16d6952c312a..0ec13f520e90 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -508,7 +508,7 @@ static ssize_t efx_ef10_show_link_control_flag(struct device *dev, struct device_attribute *attr, char *buf) { - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); return sprintf(buf, "%d\n", ((efx->mcdi->fn_flags) & @@ -520,7 +520,7 @@ static ssize_t efx_ef10_show_primary_flag(struct device *dev, struct device_attribute *attr, char *buf) { - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); return sprintf(buf, "%d\n", ((efx->mcdi->fn_flags) & diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index ab58b837df47..2fef7402233e 
100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2517,7 +2517,7 @@ static struct notifier_block efx_netdev_notifier = { static ssize_t show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) { - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); return sprintf(buf, "%d\n", efx->phy_type); } static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); @@ -2526,7 +2526,7 @@ static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr, char *buf) { - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); struct efx_mcdi_iface *mcdi = efx_mcdi(efx); return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled); @@ -2534,7 +2534,7 @@ static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr, static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); struct efx_mcdi_iface *mcdi = efx_mcdi(efx); bool enable = count > 0 && *buf != '0'; @@ -3654,7 +3654,7 @@ static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs) static int efx_pm_freeze(struct device *dev) { - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); rtnl_lock(); @@ -3675,7 +3675,7 @@ static int efx_pm_freeze(struct device *dev) static int efx_pm_thaw(struct device *dev) { int rc; - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); rtnl_lock(); diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 9b15c39ac670..eecc348b1c32 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2256,7 +2256,7 @@ static struct notifier_block ef4_netdev_notifier = { static ssize_t show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) { - struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct ef4_nic *efx = dev_get_drvdata(dev); return sprintf(buf, "%d\n", efx->phy_type); } static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); @@ -2999,7 +2999,7 @@ static int ef4_pci_probe(struct pci_dev *pci_dev, static int ef4_pm_freeze(struct device *dev) { - struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct ef4_nic *efx = dev_get_drvdata(dev); rtnl_lock(); @@ -3020,7 +3020,7 @@ static int ef4_pm_freeze(struct device *dev) static int ef4_pm_thaw(struct device *dev) { int rc; - struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct ef4_nic *efx = dev_get_drvdata(dev); rtnl_lock(); diff --git a/drivers/net/ethernet/sfc/falcon/falcon_boards.c b/drivers/net/ethernet/sfc/falcon/falcon_boards.c index 839189dab98e..605f486fa675 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon_boards.c +++ b/drivers/net/ethernet/sfc/falcon/falcon_boards.c @@ -357,7 +357,7 @@ fail_on: static ssize_t show_phy_flash_cfg(struct device *dev, struct device_attribute *attr, char *buf) { - struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct ef4_nic *efx = dev_get_drvdata(dev); return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL)); } @@ -365,7 +365,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct ef4_nic *efx = 
pci_get_drvdata(to_pci_dev(dev)); + struct ef4_nic *efx = dev_get_drvdata(dev); enum ef4_phy_mode old_mode, new_mode; int err; @@ -454,13 +454,13 @@ static int sfe4001_init(struct ef4_nic *efx) #if IS_ENABLED(CONFIG_SENSORS_LM90) board->hwmon_client = - i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info); + i2c_new_client_device(&board->i2c_adap, &sfe4001_hwmon_info); #else board->hwmon_client = - i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr); + i2c_new_dummy_device(&board->i2c_adap, sfe4001_hwmon_info.addr); #endif - if (!board->hwmon_client) - return -EIO; + if (IS_ERR(board->hwmon_client)) + return PTR_ERR(board->hwmon_client); /* Raise board/PHY high limit from 85 to 90 degrees Celsius */ rc = i2c_smbus_write_byte_data(board->hwmon_client, @@ -468,9 +468,9 @@ static int sfe4001_init(struct ef4_nic *efx) if (rc) goto fail_hwmon; - board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539); - if (!board->ioexp_client) { - rc = -EIO; + board->ioexp_client = i2c_new_dummy_device(&board->i2c_adap, PCA9539); + if (IS_ERR(board->ioexp_client)) { + rc = PTR_ERR(board->ioexp_client); goto fail_hwmon; } diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 31ec56091a5d..65e81ec1b314 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -274,7 +274,7 @@ static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb, vaddr = kmap_atomic(skb_frag_page(f)); - efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset, + efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f), skb_frag_size(f), copy_buf); kunmap_atomic(vaddr); } diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 6e07f5ebacfc..85eaccbbbac1 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -191,6 +191,8 @@ struct sis900_private { unsigned int tx_full; /* The Tx queue is full. 
*/ u8 host_bridge_rev; u8 chipset_rev; + /* EEPROM data */ + int eeprom_size; }; MODULE_AUTHOR("Jim Huang <cmhuang@sis.com.tw>, Ollie Lho <ollie@sis.com.tw>"); @@ -475,6 +477,8 @@ static int sis900_probe(struct pci_dev *pci_dev, sis_priv->pci_dev = pci_dev; spin_lock_init(&sis_priv->lock); + sis_priv->eeprom_size = 24; + pci_set_drvdata(pci_dev, net_dev); ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma); @@ -2122,6 +2126,68 @@ static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *w wol->supported = (WAKE_PHY | WAKE_MAGIC); } +static int sis900_get_eeprom_len(struct net_device *dev) +{ + struct sis900_private *sis_priv = netdev_priv(dev); + + return sis_priv->eeprom_size; +} + +static int sis900_read_eeprom(struct net_device *net_dev, u8 *buf) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + int wait, ret = -EAGAIN; + u16 signature; + u16 *ebuf = (u16 *)buf; + int i; + + if (sis_priv->chipset_rev == SIS96x_900_REV) { + sw32(mear, EEREQ); + for (wait = 0; wait < 2000; wait++) { + if (sr32(mear) & EEGNT) { + /* read 16 bits, and index by 16 bits */ + for (i = 0; i < sis_priv->eeprom_size / 2; i++) + ebuf[i] = (u16)read_eeprom(ioaddr, i); + ret = 0; + break; + } + udelay(1); + } + sw32(mear, EEDONE); + } else { + signature = (u16)read_eeprom(ioaddr, EEPROMSignature); + if (signature != 0xffff && signature != 0x0000) { + /* read 16 bits, and index by 16 bits */ + for (i = 0; i < sis_priv->eeprom_size / 2; i++) + ebuf[i] = (u16)read_eeprom(ioaddr, i); + ret = 0; + } + } + return ret; +} + +#define SIS900_EEPROM_MAGIC 0xBABE +static int sis900_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) +{ + struct sis900_private *sis_priv = netdev_priv(dev); + u8 *eebuf; + int res; + + eebuf = kmalloc(sis_priv->eeprom_size, GFP_KERNEL); + if (!eebuf) + return -ENOMEM; + + eeprom->magic = SIS900_EEPROM_MAGIC; + spin_lock_irq(&sis_priv->lock); + res = sis900_read_eeprom(dev, eebuf); + spin_unlock_irq(&sis_priv->lock); + if (!res) + memcpy(data, eebuf + eeprom->offset, eeprom->len); + kfree(eebuf); + return res; +} + static const struct ethtool_ops sis900_ethtool_ops = { .get_drvinfo = sis900_get_drvinfo, .get_msglevel = sis900_get_msglevel, @@ -2132,6 +2198,8 @@ static const struct ethtool_ops sis900_ethtool_ops = { .set_wol = sis900_set_wol, .get_link_ksettings = sis900_get_link_ksettings, .set_link_ksettings = sis900_set_link_ksettings, + .get_eeprom_len = sis900_get_eeprom_len, + .get_eeprom = sis900_get_eeprom, }; /** diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 51a7b48db4bc..87ab0b5da91e 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1573,10 +1573,8 @@ static int ave_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "IRQ not found\n"); + if (irq < 0) return irq; - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(dev, res); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 3a14cdd01f5f..f2197b066ed1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -333,6 +333,9 @@ static void *tegra_eqos_probe(struct platform_device *pdev, usleep_range(2000, 4000); gpiod_set_value(eqos->reset, 0); + /* MDIO bus was already reset 
just above */ + data->mdio_bus_data->needs_reset = false; + eqos->rst = devm_reset_control_get(&pdev->dev, "eqos"); if (IS_ERR(eqos->rst)) { err = PTR_ERR(eqos->rst); @@ -428,13 +431,8 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) * resource initialization is done in the glue logic. */ stmmac_res.irq = platform_get_irq(pdev, 0); - if (stmmac_res.irq < 0) { - if (stmmac_res.irq != -EPROBE_DEFER) - dev_err(&pdev->dev, - "IRQ configuration information not found\n"); - + if (stmmac_res.irq < 0) return stmmac_res.irq; - } stmmac_res.wol_irq = stmmac_res.irq; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 4304c1abc5d1..40c42637ad75 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -348,7 +348,9 @@ int stmmac_mdio_register(struct net_device *ndev) max_addr = PHY_MAX_ADDR; } - new_bus->reset = &stmmac_mdio_reset; + if (mdio_bus_data->needs_reset) + new_bus->reset = &stmmac_mdio_reset; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", new_bus->name, priv->plat->bus_id); new_bus->priv = ndev; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 86f9c07a38cf..d5d08e11c353 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -63,6 +63,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat) plat->has_gmac = 1; plat->force_sf_dma_mode = 1; + plat->mdio_bus_data->needs_reset = true; plat->mdio_bus_data->phy_mask = 0; /* Set default value for multicast hash bins */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 154daf4d1072..eaf8f08f2e91 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -342,10 +342,16 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, mdio = true; } - if (mdio) + if (mdio) { plat->mdio_bus_data = devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data), GFP_KERNEL); + if (!plat->mdio_bus_data) + return -ENOMEM; + + plat->mdio_bus_data->needs_reset = true; + } + return 0; } @@ -522,13 +528,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) } /* clock setup */ - plat->stmmac_clk = devm_clk_get(&pdev->dev, - STMMAC_RESOURCE_NAME); - if (IS_ERR(plat->stmmac_clk)) { - dev_warn(&pdev->dev, "Cannot get CSR clock\n"); - plat->stmmac_clk = NULL; + if (!of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) { + plat->stmmac_clk = devm_clk_get(&pdev->dev, + STMMAC_RESOURCE_NAME); + if (IS_ERR(plat->stmmac_clk)) { + dev_warn(&pdev->dev, "Cannot get CSR clock\n"); + plat->stmmac_clk = NULL; + } + clk_prepare_enable(plat->stmmac_clk); } - clk_prepare_enable(plat->stmmac_clk); plat->pclk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR(plat->pclk)) { @@ -609,13 +617,8 @@ int stmmac_get_platform_resources(struct platform_device *pdev, * probe if needed before we went too far with resource allocation. */ stmmac_res->irq = platform_get_irq_byname(pdev, "macirq"); - if (stmmac_res->irq < 0) { - if (stmmac_res->irq != -EPROBE_DEFER) { - dev_err(&pdev->dev, - "MAC IRQ configuration information not found\n"); - } + if (stmmac_res->irq < 0) return stmmac_res->irq; - } /* On some platforms e.g. 
SPEAr the wake up irq differs from the mac irq * The external wake up irq can be passed through the platform code diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 6fc05c106afc..c91876f8c536 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -2034,7 +2034,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, __skb_frag_set_page(frag, page->buffer); __skb_frag_ref(frag); - frag->page_offset = off; + skb_frag_off_set(frag, off); skb_frag_size_set(frag, hlen - swivel); /* any more data? */ @@ -2058,7 +2058,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, __skb_frag_set_page(frag, page->buffer); __skb_frag_ref(frag); - frag->page_offset = 0; + skb_frag_off_set(frag, 0); skb_frag_size_set(frag, hlen); RX_USED_ADD(page, hlen + cp->crc_size); } @@ -2816,7 +2816,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len, DMA_TO_DEVICE); - tabort = cas_calc_tabort(cp, fragp->page_offset, len); + tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len); if (unlikely(tabort)) { void *addr; @@ -2827,7 +2827,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, addr = cas_page_map(skb_frag_page(fragp)); memcpy(tx_tiny_buf(cp, ring, entry), - addr + fragp->page_offset + len - tabort, + addr + skb_frag_off(fragp) + len - tabort, tabort); cas_page_unmap(addr); mapping = tx_tiny_map(cp, ring, entry, tentry); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 0bc5863bffeb..f5fd1f3c07cc 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6695,7 +6695,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, len = skb_frag_size(frag); mapping = np->ops->map_page(np->device, skb_frag_page(frag), - frag->page_offset, len, + skb_frag_off(frag), len, DMA_TO_DEVICE); rp->tx_buffs[prod].skb = NULL; diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index baa3088b475c..646e67236b65 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -1088,7 +1088,7 @@ static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb, vaddr = kmap_atomic(skb_frag_page(f)); blen = skb_frag_size(f); blen += 8 - (blen & 7); - err = ldc_map_single(lp, vaddr + f->page_offset, + err = ldc_map_single(lp, vaddr + skb_frag_off(f), blen, cookies + nc, ncookies - nc, map_perm); kunmap_atomic(vaddr); @@ -1124,7 +1124,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *f = &skb_shinfo(skb)->frags[i]; - docopy |= f->page_offset & 7; + docopy |= skb_frag_off(f) & 7; } if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP || skb_tailroom(skb) < pad || diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c index 031cf9c3435a..8c4195a9a2cc 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c @@ -503,7 +503,7 @@ static int xlgmac_map_tx_skb(struct xlgmac_channel *channel, struct xlgmac_desc_data *desc_data; unsigned int offset, datalen, len; struct xlgmac_pkt_info *pkt_info; - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned int tso, vlan; dma_addr_t skb_dma; unsigned int i; diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 
b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c index 1f8e9601592a..a1f5a1e61040 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c @@ -116,7 +116,7 @@ static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata, struct sk_buff *skb, struct xlgmac_pkt_info *pkt_info) { - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned int context_desc; unsigned int len; unsigned int i; diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 5d6960fe3309..0f8a924fc60c 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1501,7 +1501,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb, bdx_tx_db_inc_wptr(db); for (i = 0; i < nr_frags; i++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag; frag = &skb_shinfo(skb)->frags[i]; db->wptr->len = skb_frag_size(frag); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 642843945031..1b2702f74455 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1116,7 +1116,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; struct page *page = skb_frag_page(frag); - u32 page_offset = frag->page_offset; + u32 page_offset = skb_frag_off(frag); u32 buf_len = skb_frag_size(frag); dma_addr_t desc_dma; u32 desc_dma_32; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 3544e1991579..86884c863013 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -435,7 +435,7 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, skb_frag_t *frag = skb_shinfo(skb)->frags + i; slots_used += fill_pg_buf(skb_frag_page(frag), - frag->page_offset, + skb_frag_off(frag), skb_frag_size(frag), &pb[slots_used]); } return slots_used; @@ -449,7 +449,7 @@ static int count_skb_frag_slots(struct sk_buff *skb) for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; unsigned long size = skb_frag_size(frag); - unsigned long offset = frag->page_offset; + unsigned long offset = skb_frag_off(frag); /* Skip unused frames from start of page */ offset &= ~PAGE_MASK; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 20f14c5fbb7e..48ca213c0ada 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -21,6 +21,19 @@ config MDIO_BUS if MDIO_BUS +config MDIO_ASPEED + tristate "ASPEED MDIO bus controller" + depends on ARCH_ASPEED || COMPILE_TEST + depends on OF_MDIO && HAS_IOMEM + help + This module provides a driver for the independent MDIO bus + controllers found in the ASPEED AST2600 SoC. This is a driver for the + third revision of the ASPEED MDIO register interface - the first two + revisions are the "old" and "new" interfaces found in the AST2400 and + AST2500, embedded in the MAC. For legacy reasons, the FTGMAC100 driver + continues to drive the embedded MDIO controller for the AST2400 and + AST2500 SoCs, so say N if AST2600 support is not required. 
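The mdio-aspeed driver added below plugs into the standard phylib MDIO core: it allocates a struct mii_bus, wires read/write accessors to its MMIO block, and registers the bus so child PHY nodes are scanned from the device tree. A minimal sketch of that registration pattern, with stub accessors standing in for real controller I/O (names and return values here are illustrative, not taken from the driver):

#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

/* Hypothetical accessors; a real driver programs its MMIO registers. */
static int example_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	return 0xffff;	/* stub: behave as if no PHY responded */
}

static int example_mdio_write(struct mii_bus *bus, int addr, int regnum,
			      u16 val)
{
	return 0;	/* stub */
}

static int example_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(&pdev->dev);
	if (!bus)
		return -ENOMEM;

	bus->name = "example-mdio";
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));
	bus->parent = &pdev->dev;
	bus->read = example_mdio_read;
	bus->write = example_mdio_write;

	/* Register the bus and scan child PHY nodes from the device tree. */
	return of_mdiobus_register(bus, pdev->dev.of_node);
}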
+ config MDIO_BCM_IPROC tristate "Broadcom iProc MDIO bus controller" depends on ARCH_BCM_IPROC || COMPILE_TEST @@ -159,8 +172,8 @@ config MDIO_MSCC_MIIM config MDIO_OCTEON tristate "Octeon and some ThunderX SOCs MDIO buses" - depends on 64BIT - depends on HAS_IOMEM && OF_MDIO + depends on (64BIT && OF_MDIO) || COMPILE_TEST + depends on HAS_IOMEM select MDIO_CAVIUM help This module provides a driver for the Octeon and ThunderX MDIO diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 839acb292c38..ba07c27e4208 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -22,6 +22,7 @@ libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o obj-$(CONFIG_PHYLINK) += phylink.o obj-$(CONFIG_PHYLIB) += libphy.o +obj-$(CONFIG_MDIO_ASPEED) += mdio-aspeed.o obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o diff --git a/drivers/net/phy/mdio-aspeed.c b/drivers/net/phy/mdio-aspeed.c new file mode 100644 index 000000000000..cad820568f75 --- /dev/null +++ b/drivers/net/phy/mdio-aspeed.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (C) 2019 IBM Corp. */ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/iopoll.h> +#include <linux/mdio.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/phy.h> +#include <linux/platform_device.h> + +#define DRV_NAME "mdio-aspeed" + +#define ASPEED_MDIO_CTRL 0x0 +#define ASPEED_MDIO_CTRL_FIRE BIT(31) +#define ASPEED_MDIO_CTRL_ST BIT(28) +#define ASPEED_MDIO_CTRL_ST_C45 0 +#define ASPEED_MDIO_CTRL_ST_C22 1 +#define ASPEED_MDIO_CTRL_OP GENMASK(27, 26) +#define MDIO_C22_OP_WRITE 0b01 +#define MDIO_C22_OP_READ 0b10 +#define ASPEED_MDIO_CTRL_PHYAD GENMASK(25, 21) +#define ASPEED_MDIO_CTRL_REGAD GENMASK(20, 16) +#define ASPEED_MDIO_CTRL_MIIWDATA GENMASK(15, 0) + +#define ASPEED_MDIO_DATA 0x4 +#define ASPEED_MDIO_DATA_MDC_THRES GENMASK(31, 24) +#define ASPEED_MDIO_DATA_MDIO_EDGE BIT(23) +#define ASPEED_MDIO_DATA_MDIO_LATCH GENMASK(22, 20) +#define ASPEED_MDIO_DATA_IDLE BIT(16) +#define ASPEED_MDIO_DATA_MIIRDATA GENMASK(15, 0) + +#define ASPEED_MDIO_INTERVAL_US 100 +#define ASPEED_MDIO_TIMEOUT_US (ASPEED_MDIO_INTERVAL_US * 10) + +struct aspeed_mdio { + void __iomem *base; +}; + +static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) +{ + struct aspeed_mdio *ctx = bus->priv; + u32 ctrl; + u32 data; + int rc; + + dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr, + regnum); + + /* Just clause 22 for the moment */ + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + ctrl = ASPEED_MDIO_CTRL_FIRE + | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22) + | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_READ) + | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr) + | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum); + + iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL); + + rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data, + data & ASPEED_MDIO_DATA_IDLE, + ASPEED_MDIO_INTERVAL_US, + ASPEED_MDIO_TIMEOUT_US); + if (rc < 0) + return rc; + + return FIELD_GET(ASPEED_MDIO_DATA_MIIRDATA, data); +} + +static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) +{ + struct aspeed_mdio *ctx = bus->priv; + u32 ctrl; + + dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n", + __func__, addr, regnum, val); + + /* Just clause 22 for the moment */ + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + ctrl = ASPEED_MDIO_CTRL_FIRE + 
| FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22) + | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_WRITE) + | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr) + | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum) + | FIELD_PREP(ASPEED_MDIO_CTRL_MIIWDATA, val); + + iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL); + + return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, + !(ctrl & ASPEED_MDIO_CTRL_FIRE), + ASPEED_MDIO_INTERVAL_US, + ASPEED_MDIO_TIMEOUT_US); +} + +static int aspeed_mdio_probe(struct platform_device *pdev) +{ + struct aspeed_mdio *ctx; + struct mii_bus *bus; + int rc; + + bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*ctx)); + if (!bus) + return -ENOMEM; + + ctx = bus->priv; + ctx->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ctx->base)) + return PTR_ERR(ctx->base); + + bus->name = DRV_NAME; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id); + bus->parent = &pdev->dev; + bus->read = aspeed_mdio_read; + bus->write = aspeed_mdio_write; + + rc = of_mdiobus_register(bus, pdev->dev.of_node); + if (rc) { + dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); + return rc; + } + + platform_set_drvdata(pdev, bus); + + return 0; +} + +static int aspeed_mdio_remove(struct platform_device *pdev) +{ + mdiobus_unregister(platform_get_drvdata(pdev)); + + return 0; +} + +static const struct of_device_id aspeed_mdio_of_match[] = { + { .compatible = "aspeed,ast2600-mdio", }, + { }, +}; + +static struct platform_driver aspeed_mdio_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = aspeed_mdio_of_match, + }, + .probe = aspeed_mdio_probe, + .remove = aspeed_mdio_remove, +}; + +module_platform_driver(aspeed_mdio_driver); + +MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/phy/mdio-cavium.h index ed5f9bb5448d..b7f89ad27465 100644 --- a/drivers/net/phy/mdio-cavium.h +++ b/drivers/net/phy/mdio-cavium.h @@ -108,6 +108,8 @@ static inline u64 oct_mdio_readq(u64 addr) return cvmx_read_csr(addr); } #else +#include <linux/io-64-nonatomic-lo-hi.h> + #define oct_mdio_writeq(val, addr) writeq(val, (void *)addr) #define oct_mdio_readq(addr) readq((void *)addr) #endif diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c index 717cc2a056e8..34990eaa3298 100644 --- a/drivers/net/phy/mdio-xgene.c +++ b/drivers/net/phy/mdio-xgene.c @@ -328,7 +328,6 @@ static int xgene_mdio_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct mii_bus *mdio_bus; const struct of_device_id *of_id; - struct resource *res; struct xgene_mdio_pdata *pdata; void __iomem *csr_base; int mdio_id = 0, ret = 0; @@ -355,8 +354,7 @@ static int xgene_mdio_probe(struct platform_device *pdev) pdata->mdio_id = mdio_id; pdata->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - csr_base = devm_ioremap_resource(dev, res); + csr_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(csr_base)) return PTR_ERR(csr_base); pdata->mac_csr_addr = csr_base; diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index fcf31335a8b6..dacb4f680fd4 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -1005,7 +1005,7 @@ static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num, const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num]; *len = skb_frag_size(frag); - return kmap_atomic(skb_frag_page(frag)) + frag->page_offset; + return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); } static netdev_tx_t 
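The mdio-xgene hunk above (and the ptp_dte one later in this series) adopts devm_platform_ioremap_resource(), which folds the platform_get_resource() plus devm_ioremap_resource() pair into one call; mdio-aspeed uses it from day one. A sketch of a probe built on it, with a hypothetical driver:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* Replaces platform_get_resource(pdev, IORESOURCE_MEM, 0)
	 * followed by devm_ioremap_resource(); the helper already
	 * logs on failure, so no extra dev_err() is needed.
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}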
tbnet_start_xmit(struct sk_buff *skb, diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index b39ee714fb01..e39f41efda3e 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -221,6 +221,7 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, int tailroom = skb_tailroom(skb); u32 packet_len; u32 padbytes = 0xffff0000; + void *ptr; padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4; @@ -256,13 +257,11 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, } packet_len = ((skb->len ^ 0x0000ffff) << 16) + skb->len; - skb_push(skb, 4); - cpu_to_le32s(&packet_len); - skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len)); + ptr = skb_push(skb, 4); + put_unaligned_le32(packet_len, ptr); if (padlen) { - cpu_to_le32s(&padbytes); - memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); + put_unaligned_le32(padbytes, skb_tail_pointer(skb)); skb_put(skb, sizeof(padbytes)); } diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 0bc457ba8574..daa54486ab09 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1366,8 +1366,7 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) return 0; skb_trim(skb, skb->len - 4); - memcpy(&rx_hdr, skb_tail_pointer(skb), 4); - le32_to_cpus(&rx_hdr); + rx_hdr = get_unaligned_le32(skb_tail_pointer(skb)); pkt_cnt = (u16)rx_hdr; hdr_off = (u16)(rx_hdr >> 16); @@ -1422,6 +1421,7 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) int frame_size = dev->maxpacket; int mss = skb_shinfo(skb)->gso_size; int headroom; + void *ptr; tx_hdr1 = skb->len; tx_hdr2 = mss; @@ -1436,13 +1436,9 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) return NULL; } - skb_push(skb, 4); - cpu_to_le32s(&tx_hdr2); - skb_copy_to_linear_data(skb, &tx_hdr2, 4); - - skb_push(skb, 4); - cpu_to_le32s(&tx_hdr1); - skb_copy_to_linear_data(skb, &tx_hdr1, 4); + ptr = skb_push(skb, 8); + put_unaligned_le32(tx_hdr1, ptr); + put_unaligned_le32(tx_hdr2, ptr + 4); return skb; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 3d92ea6fcc02..769bb262fbec 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1258,8 +1258,7 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb) return; } - memcpy(&intdata, urb->transfer_buffer, 4); - le32_to_cpus(&intdata); + intdata = get_unaligned_le32(urb->transfer_buffer); if (intdata & INT_ENP_PHY_INT) { netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata); @@ -2730,6 +2729,7 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags) { u32 tx_cmd_a, tx_cmd_b; + void *ptr; if (skb_cow_head(skb, TX_OVERHEAD)) { dev_kfree_skb_any(skb); @@ -2758,13 +2758,9 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_; } - skb_push(skb, 4); - cpu_to_le32s(&tx_cmd_b); - memcpy(skb->data, &tx_cmd_b, 4); - - skb_push(skb, 4); - cpu_to_le32s(&tx_cmd_a); - memcpy(skb->data, &tx_cmd_a, 4); + ptr = skb_push(skb, 8); + put_unaligned_le32(tx_cmd_a, ptr); + put_unaligned_le32(tx_cmd_b, ptr + 4); return skb; } @@ -3105,16 +3101,13 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) struct sk_buff *skb2; unsigned char *packet; - memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a)); - le32_to_cpus(&rx_cmd_a); + rx_cmd_a = get_unaligned_le32(skb->data); skb_pull(skb, sizeof(rx_cmd_a)); 
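The asix, ax88179 and lan78xx conversions here (smsc75xx and sr9800 follow the same pattern below) collapse each memcpy() plus cpu_to_le32s() pair into a single unaligned-safe accessor. The idiom, sketched on hypothetical header words:

#include <asm/unaligned.h>
#include <linux/skbuff.h>

/* Prepend two hypothetical little-endian command words to an skb. */
static void example_push_tx_header(struct sk_buff *skb, u32 cmd_a, u32 cmd_b)
{
	void *ptr = skb_push(skb, 8);

	/* One store each, valid for any alignment of skb->data. */
	put_unaligned_le32(cmd_a, ptr);
	put_unaligned_le32(cmd_b, ptr + 4);
}

/* Read one hypothetical little-endian word from the head of an skb. */
static u32 example_pull_rx_word(struct sk_buff *skb)
{
	u32 word = get_unaligned_le32(skb->data);

	skb_pull(skb, sizeof(word));
	return word;
}

Besides being shorter, this avoids byte-swapping a stack variable in place and then copying it out, a sequence that was easy to get wrong on big-endian hosts.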
- memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); - le32_to_cpus(&rx_cmd_b); + rx_cmd_b = get_unaligned_le32(skb->data); skb_pull(skb, sizeof(rx_cmd_b)); - memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c)); - le16_to_cpus(&rx_cmd_c); + rx_cmd_c = get_unaligned_le16(skb->data); skb_pull(skb, sizeof(rx_cmd_c)); packet = skb->data; diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 1417a22962a1..9556d431885f 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -661,8 +661,7 @@ static void smsc75xx_status(struct usbnet *dev, struct urb *urb) return; } - memcpy(&intdata, urb->transfer_buffer, 4); - le32_to_cpus(&intdata); + intdata = get_unaligned_le32(urb->transfer_buffer); netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata); @@ -2181,12 +2180,10 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) struct sk_buff *ax_skb; unsigned char *packet; - memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a)); - le32_to_cpus(&rx_cmd_a); + rx_cmd_a = get_unaligned_le32(skb->data); skb_pull(skb, 4); - memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); - le32_to_cpus(&rx_cmd_b); + rx_cmd_b = get_unaligned_le32(skb->data); skb_pull(skb, 4 + RXW_PADDING); packet = skb->data; @@ -2258,6 +2255,7 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { u32 tx_cmd_a, tx_cmd_b; + void *ptr; if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) { dev_kfree_skb_any(skb); @@ -2278,13 +2276,9 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev, tx_cmd_b = 0; } - skb_push(skb, 4); - cpu_to_le32s(&tx_cmd_b); - memcpy(skb->data, &tx_cmd_b, 4); - - skb_push(skb, 4); - cpu_to_le32s(&tx_cmd_a); - memcpy(skb->data, &tx_cmd_a, 4); + ptr = skb_push(skb, 8); + put_unaligned_le32(tx_cmd_a, ptr); + put_unaligned_le32(tx_cmd_b, ptr + 4); return skb; } diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index 35f39f23d881..c5d4a0060124 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -115,6 +115,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb, u32 padbytes = 0xffff0000; u32 packet_len; int padlen; + void *ptr; padlen = ((skb->len + 4) % (dev->maxpacket - 1)) ? 
0 : 4; @@ -133,14 +134,12 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb, return NULL; } - skb_push(skb, 4); + ptr = skb_push(skb, 4); packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4); - cpu_to_le32s(&packet_len); - skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len)); + put_unaligned_le32(packet_len, ptr); if (padlen) { - cpu_to_le32s(&padbytes); - memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); + put_unaligned_le32(padbytes, skb_tail_pointer(skb)); skb_put(skb, sizeof(padbytes)); } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 72514c46b478..58952a79b05f 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1324,11 +1324,11 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb) total_len += skb_headlen(skb); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - struct skb_frag_struct *f = &skb_shinfo(skb)->frags[i]; + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; total_len += skb_frag_size(f); - sg_set_page(&urb->sg[i + s], f->page.p, f->size, - f->page_offset); + sg_set_page(&urb->sg[i + s], skb_frag_page(f), skb_frag_size(f), + skb_frag_off(f)); } urb->transfer_buffer_length = total_len; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 2a1918f25e47..216acf37ca7c 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -657,13 +657,12 @@ static void vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, struct vmxnet3_rx_buf_info *rbi) { - struct skb_frag_struct *frag = skb_shinfo(skb)->frags + - skb_shinfo(skb)->nr_frags; + skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags; BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); __skb_frag_set_page(frag, rbi->page); - frag->page_offset = 0; + skb_frag_off_set(frag, 0); skb_frag_size_set(frag, rcd->len); skb->data_len += rcd->len; skb->truesize += PAGE_SIZE; @@ -755,7 +754,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; u32 buf_size; buf_offset = 0; @@ -956,7 +955,7 @@ static int txd_estimate(const struct sk_buff *skb) int i; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; count += VMXNET3_TXD_NEEDED(skb_frag_size(frag)); } diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 0606416dc971..12dad659bf68 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -6970,7 +6970,8 @@ exit: return ret; } -static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) +static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index f23cb2f3d296..34121fbf32e3 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -2392,7 +2392,8 @@ out: return ret; } -static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw) +static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); diff --git 
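usbnet's build_dma_sg() above applies the same accessor rule to scatterlist setup. A standalone sketch; the function name is hypothetical:

#include <linux/scatterlist.h>
#include <linux/skbuff.h>

/* Describe one skb fragment in a scatterlist entry via the accessors. */
static void example_sg_from_frag(struct scatterlist *sg,
				 const skb_frag_t *frag)
{
	sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag),
		    skb_frag_off(frag));
}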
a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 74834131cf7c..fd3b2b3d1b5c 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1052,8 +1052,7 @@ static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb) if (nr_frags) { seq_printf(s, " nr_frags = %d\n", nr_frags); for (i = 0; i < nr_frags; i++) { - const struct skb_frag_struct *frag = - &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; len = skb_frag_size(frag); p = skb_frag_address_safe(frag); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index eae00aafaa88..8b01ef8269da 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1657,7 +1657,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, len); } else { frag = &skb_shinfo(skb)->frags[f]; - len = frag->size; + len = skb_frag_size(frag); wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len); } @@ -1678,8 +1678,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, if (!headlen) { pa = skb_frag_dma_map(dev, frag, - frag->size - len, lenmss, - DMA_TO_DEVICE); + skb_frag_size(frag) - len, + lenmss, DMA_TO_DEVICE); vring->ctx[i].mapped_as = wil_mapped_as_page; } else { pa = dma_map_single(dev, @@ -1900,8 +1900,7 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif, /* middle segments */ for (; f < nr_frags; f++) { - const struct skb_frag_struct *frag = - &skb_shinfo(skb)->frags[f]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; int len = skb_frag_size(frag); *_d = *d; diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index dc040cd4ab06..71b7ad4b6454 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -1471,7 +1471,7 @@ static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil, /* Rest of the descriptors are from the SKB fragments */ for (f = 0; f < nr_frags; f++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; - int len = frag->size; + int len = skb_frag_size(frag); wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f, len, descs_used); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index b1e5d64ca60d..74229fcb63a9 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -3256,28 +3256,16 @@ static void rs_add_debugfs(void *priv, void *priv_sta, struct dentry *dir) { struct iwl_lq_sta *lq_sta = priv_sta; - lq_sta->rs_sta_dbgfs_scale_table_file = - debugfs_create_file("rate_scale_table", 0600, dir, - lq_sta, &rs_sta_dbgfs_scale_table_ops); - lq_sta->rs_sta_dbgfs_stats_table_file = - debugfs_create_file("rate_stats_table", 0400, dir, - lq_sta, &rs_sta_dbgfs_stats_table_ops); - lq_sta->rs_sta_dbgfs_rate_scale_data_file = - debugfs_create_file("rate_scale_data", 0400, dir, - lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); - lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = - debugfs_create_u8("tx_agg_tid_enable", 0600, dir, - &lq_sta->tx_agg_tid_en); -} + debugfs_create_file("rate_scale_table", 0600, dir, lq_sta, + &rs_sta_dbgfs_scale_table_ops); + debugfs_create_file("rate_stats_table", 0400, dir, lq_sta, + &rs_sta_dbgfs_stats_table_ops); + debugfs_create_file("rate_scale_data", 0400, dir, lq_sta, + &rs_sta_dbgfs_rate_scale_data_ops); + 
debugfs_create_u8("tx_agg_tid_enable", 0600, dir, + &lq_sta->tx_agg_tid_en); -static void rs_remove_debugfs(void *priv, void *priv_sta) -{ - struct iwl_lq_sta *lq_sta = priv_sta; - debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); } #endif @@ -3303,7 +3291,6 @@ static const struct rate_control_ops rs_ops = { .free_sta = rs_free_sta, #ifdef CONFIG_MAC80211_DEBUGFS .add_sta_debugfs = rs_add_debugfs, - .remove_sta_debugfs = rs_remove_debugfs, #endif }; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h index b7a1854cd202..68a840d739e8 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h @@ -356,10 +356,6 @@ struct iwl_lq_sta { struct iwl_traffic_load load[IWL_MAX_TID_COUNT]; u8 tx_agg_tid_en; #ifdef CONFIG_MAC80211_DEBUGFS - struct dentry *rs_sta_dbgfs_scale_table_file; - struct dentry *rs_sta_dbgfs_stats_table_file; - struct dentry *rs_sta_dbgfs_rate_scale_data_file; - struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; u32 dbg_fixed_rate; #endif struct iwl_priv *drv; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 1c904b5226aa..b74bd58f3f45 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -4035,7 +4035,8 @@ out_unlock: return ret; } -static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw) +static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index d3f04acfbacb..e4415e58fa78 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -4127,10 +4127,6 @@ static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta, MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, 0600); } - -void rs_remove_sta_debugfs(void *mvm, void *mvm_sta) -{ -} #endif /* @@ -4158,7 +4154,6 @@ static const struct rate_control_ops rs_mvm_ops_drv = { .rate_update = rs_drv_rate_update, #ifdef CONFIG_MAC80211_DEBUGFS .add_sta_debugfs = rs_drv_add_sta_debugfs, - .remove_sta_debugfs = rs_remove_sta_debugfs, #endif .capa = RATE_CTRL_CAPA_VHT_EXT_NSS_BW, }; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 772e54f0696f..f86c2891310a 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2216,7 +2216,8 @@ static int mac80211_hwsim_roc(struct ieee80211_hw *hw, return 0; } -static int mac80211_hwsim_croc(struct ieee80211_hw *hw) +static int mac80211_hwsim_croc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct mac80211_hwsim_data *hwsim = hw->priv; @@ -2594,7 +2595,7 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz = { }, }; -static void mac80211_hswim_he_capab(struct ieee80211_supported_band *sband) +static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband) { if (sband->band == NL80211_BAND_2GHZ) sband->iftype_data = @@ -2805,12 +2806,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, TDLS_WIDER_BW); - - /* We only have SW crypto and only implement the 
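The iwlwifi rate-scaling hunks stop caching what debugfs_create_*() returns and delete the remove callbacks outright: the per-station debugfs directory is torn down recursively by its owner, so tracking individual dentries buys nothing. The resulting pattern, with hypothetical names:

#include <linux/debugfs.h>

static u8 example_flag;

static void example_add_debugfs(struct dentry *dir)
{
	/* No dentry is stored: removing "dir" recursively later
	 * (debugfs_remove_recursive()) deletes this file too.
	 */
	debugfs_create_u8("example_flag", 0600, dir, &example_flag);
}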
A-MPDU API - * (but don't really build A-MPDUs) so can have extended key - * support - */ - ieee80211_hw_set(hw, EXT_KEY_ID_NATIVE); if (rctbl) ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); @@ -2897,7 +2892,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, sband->ht_cap.mcs.rx_mask[1] = 0xff; sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; - mac80211_hswim_he_capab(sband); + mac80211_hwsim_he_capab(sband); hw->wiphy->bands[band] = sband; } @@ -3233,6 +3228,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, { struct mac80211_hwsim_data *data2; struct ieee80211_rx_status rx_status; + struct ieee80211_hdr *hdr; const u8 *dst; int frame_data_len; void *frame_data; @@ -3299,6 +3295,12 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]); rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); + hdr = (void *)skb->data; + + if (ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control)) + rx_status.boottime_ns = ktime_get_boottime_ns(); + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); data2->rx_pkts++; data2->rx_bytes += skb->len; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c index b920be1f5718..c6c1ce69bcbc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c @@ -17,10 +17,8 @@ mt76_wmac_probe(struct platform_device *pdev) int ret; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "Failed to get device IRQ\n"); + if (irq < 0) return irq; - } mem_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(mem_base)) { diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 49df3bb08d41..ce5e92d82efc 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -1818,7 +1818,8 @@ out: return status; } -static int rsi_mac80211_cancel_roc(struct ieee80211_hw *hw) +static int rsi_mac80211_cancel_roc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index b74dc8bc9755..547ad538d8b6 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -5749,7 +5749,8 @@ static void wlcore_roc_complete_work(struct work_struct *work) ieee80211_remain_on_channel_expired(wl->hw); } -static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw) +static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct wl1271 *wl = hw->priv; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 1d9940d4e8c7..3ef07b63613e 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -136,12 +136,12 @@ static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf) static u16 frag_get_pending_idx(skb_frag_t *frag) { - return (u16)frag->page_offset; + return (u16)skb_frag_off(frag); } static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx) { - frag->page_offset = pending_idx; + skb_frag_off_set(frag, pending_idx); } static inline pending_ring_idx_t pending_index(unsigned i) @@ -1055,7 +1055,7 @@ static int 
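Alongside the cancel_remain_on_channel() signature change (it now receives the vif), mac80211_hwsim starts stamping rx_status.boottime_ns on beacons and probe responses so the stack can age scan results. The stamping, lifted into a hypothetical helper:

#include <linux/timekeeping.h>
#include <net/mac80211.h>

static void example_stamp_boottime(struct sk_buff *skb,
				   struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;

	/* Only frames used for BSS discovery need the boot-time stamp. */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		rx_status->boottime_ns = ktime_get_boottime_ns();
}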
xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s int j; skb->truesize += skb->data_len; for (j = 0; j < i; j++) - put_page(frags[j].page.p); + put_page(skb_frag_page(&frags[j])); return -ENOMEM; } @@ -1067,8 +1067,8 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s BUG(); offset += len; - frags[i].page.p = page; - frags[i].page_offset = 0; + __skb_frag_set_page(&frags[i], page); + skb_frag_off_set(&frags[i], 0); skb_frag_size_set(&frags[i], len); } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8d33970a2950..b930d5f95222 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -531,7 +531,7 @@ static int xennet_count_skb_slots(struct sk_buff *skb) for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; unsigned long size = skb_frag_size(frag); - unsigned long offset = frag->page_offset; + unsigned long offset = skb_frag_off(frag); /* Skip unused frames from start of page */ offset &= ~PAGE_MASK; @@ -674,8 +674,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev /* Requests for all the frags. */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - tx = xennet_make_txreqs(queue, tx, skb, - skb_frag_page(frag), frag->page_offset, + tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag), + skb_frag_off(frag), skb_frag_size(frag)); } @@ -1040,7 +1040,7 @@ err: if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; - skb_shinfo(skb)->frags[0].page_offset = rx->offset; + skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset); skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); skb->data_len = rx->status; skb->len += rx->status; diff --git a/drivers/nfc/nxp-nci/Kconfig b/drivers/nfc/nxp-nci/Kconfig index 12df2c8cc51d..e1f71deab6fc 100644 --- a/drivers/nfc/nxp-nci/Kconfig +++ b/drivers/nfc/nxp-nci/Kconfig @@ -2,10 +2,9 @@ config NFC_NXP_NCI tristate "NXP-NCI NFC driver" depends on NFC_NCI - default n ---help--- - Generic core driver for NXP NCI chips such as the NPC100 - or PN7150 families. + Generic core driver for NXP NCI chips such as the NPC100 (PN547), + NPC300 (PN548) or PN7150 families. This is a driver based on the NCI NFC kernel layers and will thus not work with NXP libnfc library. @@ -23,4 +22,4 @@ config NFC_NXP_NCI_I2C To compile this driver as a module, choose m here. The module will be called nxp_nci_i2c. - Say Y if unsure. + Say N if unsure. diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c index 8dafc696719f..a0ce95a287c5 100644 --- a/drivers/nfc/nxp-nci/core.c +++ b/drivers/nfc/nxp-nci/core.c @@ -11,10 +11,8 @@ */ #include <linux/delay.h> -#include <linux/gpio.h> #include <linux/module.h> #include <linux/nfc.h> -#include <linux/platform_data/nxp-nci.h> #include <net/nfc/nci_core.h> diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index 4aeb3861b409..307bd2afbe05 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -12,8 +12,6 @@ * Copyright (C) 2012 Intel Corporation. All rights reserved. 
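The nxp-nci I2C rework that follows drops integer GPIO numbers and platform data in favour of GPIO descriptors: devm_gpiod_get() looks the line up by connection ID, and the gpiod_*() calls honour active-low flags from firmware. A minimal consumer sketch; "enable" mirrors the driver's binding, the function itself is hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_get_enable(struct device *dev)
{
	struct gpio_desc *en;

	/* Request the "enable" line, initially driven to logic 0. */
	en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(en))
		return PTR_ERR(en);

	/* Logical value: polarity inversion is handled by gpiolib. */
	gpiod_set_value(en, 1);
	return 0;
}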
*/ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/acpi.h> #include <linux/delay.h> #include <linux/i2c.h> @@ -21,9 +19,6 @@ #include <linux/module.h> #include <linux/nfc.h> #include <linux/gpio/consumer.h> -#include <linux/of_gpio.h> -#include <linux/of_irq.h> -#include <linux/platform_data/nxp-nci.h> #include <asm/unaligned.h> #include <net/nfc/nfc.h> @@ -38,8 +33,8 @@ struct nxp_nci_i2c_phy { struct i2c_client *i2c_dev; struct nci_dev *ndev; - unsigned int gpio_en; - unsigned int gpio_fw; + struct gpio_desc *gpiod_en; + struct gpio_desc *gpiod_fw; int hard_fault; /* * < 0 if hardware error occurred (e.g. i2c err) @@ -52,8 +47,8 @@ static int nxp_nci_i2c_set_mode(void *phy_id, { struct nxp_nci_i2c_phy *phy = (struct nxp_nci_i2c_phy *) phy_id; - gpio_set_value(phy->gpio_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0); - gpio_set_value(phy->gpio_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0); + gpiod_set_value(phy->gpiod_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0); + gpiod_set_value(phy->gpiod_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0); usleep_range(10000, 15000); if (mode == NXP_NCI_MODE_COLD) @@ -250,116 +245,55 @@ exit_irq_none: return IRQ_NONE; } -static int nxp_nci_i2c_parse_devtree(struct i2c_client *client) -{ - struct nxp_nci_i2c_phy *phy = i2c_get_clientdata(client); - struct device_node *pp; - int r; - - pp = client->dev.of_node; - if (!pp) - return -ENODEV; - - r = of_get_named_gpio(pp, "enable-gpios", 0); - if (r == -EPROBE_DEFER) - r = of_get_named_gpio(pp, "enable-gpios", 0); - if (r < 0) { - nfc_err(&client->dev, "Failed to get EN gpio, error: %d\n", r); - return r; - } - phy->gpio_en = r; - - r = of_get_named_gpio(pp, "firmware-gpios", 0); - if (r == -EPROBE_DEFER) - r = of_get_named_gpio(pp, "firmware-gpios", 0); - if (r < 0) { - nfc_err(&client->dev, "Failed to get FW gpio, error: %d\n", r); - return r; - } - phy->gpio_fw = r; - - return 0; -} - -static int nxp_nci_i2c_acpi_config(struct nxp_nci_i2c_phy *phy) -{ - struct i2c_client *client = phy->i2c_dev; - struct gpio_desc *gpiod_en, *gpiod_fw; +static const struct acpi_gpio_params firmware_gpios = { 1, 0, false }; +static const struct acpi_gpio_params enable_gpios = { 2, 0, false }; - gpiod_en = devm_gpiod_get_index(&client->dev, NULL, 2, GPIOD_OUT_LOW); - gpiod_fw = devm_gpiod_get_index(&client->dev, NULL, 1, GPIOD_OUT_LOW); - - if (IS_ERR(gpiod_en) || IS_ERR(gpiod_fw)) { - nfc_err(&client->dev, "No GPIOs\n"); - return -EINVAL; - } - - phy->gpio_en = desc_to_gpio(gpiod_en); - phy->gpio_fw = desc_to_gpio(gpiod_fw); - - return 0; -} +static const struct acpi_gpio_mapping acpi_nxp_nci_gpios[] = { + { "enable-gpios", &enable_gpios, 1 }, + { "firmware-gpios", &firmware_gpios, 1 }, + { } +}; static int nxp_nci_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { + struct device *dev = &client->dev; struct nxp_nci_i2c_phy *phy; - struct nxp_nci_nfc_platform_data *pdata; int r; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { nfc_err(&client->dev, "Need I2C_FUNC_I2C\n"); - r = -ENODEV; - goto probe_exit; + return -ENODEV; } phy = devm_kzalloc(&client->dev, sizeof(struct nxp_nci_i2c_phy), GFP_KERNEL); - if (!phy) { - r = -ENOMEM; - goto probe_exit; - } + if (!phy) + return -ENOMEM; phy->i2c_dev = client; i2c_set_clientdata(client, phy); - pdata = client->dev.platform_data; - - if (!pdata && client->dev.of_node) { - r = nxp_nci_i2c_parse_devtree(client); - if (r < 0) { - nfc_err(&client->dev, "Failed to get DT data\n"); - goto probe_exit; - } - } else if (pdata) { - phy->gpio_en = pdata->gpio_en; - 
phy->gpio_fw = pdata->gpio_fw; - } else if (ACPI_HANDLE(&client->dev)) { - r = nxp_nci_i2c_acpi_config(phy); - if (r < 0) - goto probe_exit; - goto nci_probe; - } else { - nfc_err(&client->dev, "No platform data\n"); - r = -EINVAL; - goto probe_exit; - } + r = devm_acpi_dev_add_driver_gpios(dev, acpi_nxp_nci_gpios); + if (r) + return r; - r = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_en, - GPIOF_OUT_INIT_LOW, "nxp_nci_en"); - if (r < 0) - goto probe_exit; + phy->gpiod_en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(phy->gpiod_en)) { + nfc_err(dev, "Failed to get EN gpio\n"); + return PTR_ERR(phy->gpiod_en); + } - r = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_fw, - GPIOF_OUT_INIT_LOW, "nxp_nci_fw"); - if (r < 0) - goto probe_exit; + phy->gpiod_fw = devm_gpiod_get(dev, "firmware", GPIOD_OUT_LOW); + if (IS_ERR(phy->gpiod_fw)) { + nfc_err(dev, "Failed to get FW gpio\n"); + return PTR_ERR(phy->gpiod_fw); + } -nci_probe: r = nxp_nci_probe(phy, &client->dev, &i2c_phy_ops, NXP_NCI_I2C_MAX_PAYLOAD, &phy->ndev); if (r < 0) - goto probe_exit; + return r; r = request_threaded_irq(client->irq, NULL, nxp_nci_i2c_irq_thread_fn, @@ -368,7 +302,6 @@ nci_probe: if (r < 0) nfc_err(&client->dev, "Unable to register IRQ handler\n"); -probe_exit: return r; } @@ -390,14 +323,15 @@ MODULE_DEVICE_TABLE(i2c, nxp_nci_i2c_id_table); static const struct of_device_id of_nxp_nci_i2c_match[] = { { .compatible = "nxp,nxp-nci-i2c", }, - {}, + {} }; MODULE_DEVICE_TABLE(of, of_nxp_nci_i2c_match); #ifdef CONFIG_ACPI -static struct acpi_device_id acpi_id[] = { +static const struct acpi_device_id acpi_id[] = { + { "NXP1001" }, { "NXP7471" }, - { }, + { } }; MODULE_DEVICE_TABLE(acpi, acpi_id); #endif @@ -406,7 +340,7 @@ static struct i2c_driver nxp_nci_i2c_driver = { .driver = { .name = NXP_NCI_I2C_DRIVER_NAME, .acpi_match_table = ACPI_PTR(acpi_id), - .of_match_table = of_match_ptr(of_nxp_nci_i2c_match), + .of_match_table = of_nxp_nci_i2c_match, }, .probe = nxp_nci_i2c_probe, .id_table = nxp_nci_i2c_id_table, diff --git a/drivers/nfc/nxp-nci/nxp-nci.h b/drivers/nfc/nxp-nci/nxp-nci.h index 6fe7c45544bf..ae3fb2735a4e 100644 --- a/drivers/nfc/nxp-nci/nxp-nci.h +++ b/drivers/nfc/nxp-nci/nxp-nci.h @@ -14,7 +14,6 @@ #include <linux/completion.h> #include <linux/firmware.h> #include <linux/nfc.h> -#include <linux/platform_data/nxp-nci.h> #include <net/nfc/nci_core.h> diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c index 5b6393e3ea27..0dcfdc806f57 100644 --- a/drivers/ptp/ptp_dte.c +++ b/drivers/ptp/ptp_dte.c @@ -248,11 +248,8 @@ static int ptp_dte_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ptp_dte->regs = devm_ioremap_resource(dev, res); - if (IS_ERR(ptp_dte->regs)) { - dev_err(dev, - "%s: io remap failed\n", __func__); + if (IS_ERR(ptp_dte->regs)) return PTR_ERR(ptp_dte->regs); - } spin_lock_init(&ptp_dte->lock); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 4d0caeebc802..5aa0f1268bca 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3515,7 +3515,7 @@ static int qeth_get_elements_for_frags(struct sk_buff *skb) int cnt, elements = 0; for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; elements += qeth_get_elements_for_range( (addr_t)skb_frag_address(frag), diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 
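One detail of the probe rewrite above is worth calling out: registering an acpi_gpio_mapping table lets the same devm_gpiod_get(dev, "enable", ...) call work on ACPI systems, where _CRS GpioIo entries are otherwise anonymous and index-based. A sketch of the registration, with hypothetical indices:

#include <linux/acpi.h>
#include <linux/gpio/consumer.h>

static const struct acpi_gpio_params example_enable = { 0, 0, false };

static const struct acpi_gpio_mapping example_gpios[] = {
	{ "enable-gpios", &example_enable, 1 },
	{ }
};

static int example_map_gpios(struct device *dev)
{
	/* Maps the GpioIo resource at _CRS index 0 onto the "enable"
	 * connection ID, so gpiod lookups by name succeed under ACPI.
	 */
	return devm_acpi_dev_add_driver_gpios(dev, example_gpios);
}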
7796799bf04a..9ff9429395eb 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -346,7 +346,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp) return -ENOMEM; } frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; - cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset; + cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); } else { cp = skb_put(skb, tlen); } diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 00dd47bcbb1e..587d4bbb7d22 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1522,8 +1522,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) return -ENOMEM; } frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; - cp = kmap_atomic(skb_frag_page(frag)) - + frag->page_offset; + cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); } else { cp = skb_put(skb, tlen); } diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index ba4603d76284..a20ddc301c89 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -308,7 +308,7 @@ EXPORT_SYMBOL_GPL(fcoe_get_wwn); u32 fcoe_fc_crc(struct fc_frame *fp) { struct sk_buff *skb = fp_skb(fp); - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned char *data; unsigned long off, len, clen; u32 crc; @@ -318,7 +318,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; - off = frag->page_offset; + off = skb_frag_off(frag); len = skb_frag_size(frag); while (len > 0) { clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index a42babde036d..42542720962f 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -1077,7 +1077,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) return -ENOMEM; } frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; - cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset; + cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); } else { cp = skb_put(skb, tlen); } diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 7c96a01eef6c..0b8a614be11e 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -120,4 +120,6 @@ source "drivers/staging/kpc2000/Kconfig" source "drivers/staging/isdn/Kconfig" +source "drivers/staging/qlge/Kconfig" + endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index fcaac9693b83..741152511a10 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -50,3 +50,4 @@ obj-$(CONFIG_EROFS_FS) += erofs/ obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/ obj-$(CONFIG_KPC2000) += kpc2000/ obj-$(CONFIG_ISDN_CAPI) += isdn/ +obj-$(CONFIG_QLGE) += qlge/ diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig index 1e3012b9991c..5319909eb2f6 100644 --- a/drivers/staging/octeon/Kconfig +++ b/drivers/staging/octeon/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 config OCTEON_ETHERNET tristate "Cavium Networks Octeon Ethernet support" - depends on CAVIUM_OCTEON_SOC && NETDEVICES + depends on CAVIUM_OCTEON_SOC || COMPILE_TEST + depends on NETDEVICES select PHYLIB select MDIO_OCTEON help diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h index 1e114422993a..ef9e767b0e2e 100644 --- a/drivers/staging/octeon/ethernet-defines.h +++ 
b/drivers/staging/octeon/ethernet-defines.h @@ -21,8 +21,6 @@ #ifndef __ETHERNET_DEFINES_H__ #define __ETHERNET_DEFINES_H__ -#include <asm/octeon/cvmx-config.h> - #ifdef CONFIG_NETFILTER #define REUSE_SKBUFFS_WITHOUT_FREE 0 #else diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index 2aee64fdaec5..ffac0c4b3f5c 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c @@ -13,15 +13,11 @@ #include <generated/utsrelease.h> #include <net/dst.h> -#include <asm/octeon/octeon.h> - -#include "ethernet-defines.h" #include "octeon-ethernet.h" +#include "ethernet-defines.h" #include "ethernet-mdio.h" #include "ethernet-util.h" -#include <asm/octeon/cvmx-gmxx-defs.h> - static void cvm_oct_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c index 0d26c4a93ec1..532594957ebc 100644 --- a/drivers/staging/octeon/ethernet-mem.c +++ b/drivers/staging/octeon/ethernet-mem.c @@ -9,13 +9,10 @@ #include <linux/netdevice.h> #include <linux/slab.h> -#include <asm/octeon/octeon.h> - +#include "octeon-ethernet.h" #include "ethernet-mem.h" #include "ethernet-defines.h" -#include <asm/octeon/cvmx-fpa.h> - /** * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs * @pool: Pool to allocate an skbuff for diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c index c15376d33891..d91fd5ce9e68 100644 --- a/drivers/staging/octeon/ethernet-rgmii.c +++ b/drivers/staging/octeon/ethernet-rgmii.c @@ -12,19 +12,11 @@ #include <linux/ratelimit.h> #include <net/dst.h> -#include <asm/octeon/octeon.h> - -#include "ethernet-defines.h" #include "octeon-ethernet.h" +#include "ethernet-defines.h" #include "ethernet-util.h" #include "ethernet-mdio.h" -#include <asm/octeon/cvmx-helper.h> - -#include <asm/octeon/cvmx-ipd-defs.h> -#include <asm/octeon/cvmx-npi-defs.h> -#include <asm/octeon/cvmx-gmxx-defs.h> - static DEFINE_SPINLOCK(global_register_lock); static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable) diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 5e271245273c..0e65955c746b 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -23,23 +23,12 @@ #include <net/xfrm.h> #endif /* CONFIG_XFRM */ -#include <asm/octeon/octeon.h> - +#include "octeon-ethernet.h" #include "ethernet-defines.h" #include "ethernet-mem.h" #include "ethernet-rx.h" -#include "octeon-ethernet.h" #include "ethernet-util.h" -#include <asm/octeon/cvmx-helper.h> -#include <asm/octeon/cvmx-wqe.h> -#include <asm/octeon/cvmx-fau.h> -#include <asm/octeon/cvmx-pow.h> -#include <asm/octeon/cvmx-pip.h> -#include <asm/octeon/cvmx-scratch.h> - -#include <asm/octeon/cvmx-gmxx-defs.h> - static atomic_t oct_rx_ready = ATOMIC_INIT(0); static struct oct_rx_group { diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h index 096553d8fc99..ff6482fa20d6 100644 --- a/drivers/staging/octeon/ethernet-rx.h +++ b/drivers/staging/octeon/ethernet-rx.h @@ -5,8 +5,6 @@ * Copyright (c) 2003-2007 Cavium Networks */ -#include <asm/octeon/cvmx-fau.h> - void cvm_oct_poll_controller(struct net_device *dev); void cvm_oct_rx_initialize(void); void cvm_oct_rx_shutdown(void); diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c index a4a8f094e2b4..d7fbd9159302 100644 --- 
a/drivers/staging/octeon/ethernet-sgmii.c +++ b/drivers/staging/octeon/ethernet-sgmii.c @@ -11,17 +11,11 @@ #include <linux/ratelimit.h> #include <net/dst.h> -#include <asm/octeon/octeon.h> - -#include "ethernet-defines.h" #include "octeon-ethernet.h" +#include "ethernet-defines.h" #include "ethernet-util.h" #include "ethernet-mdio.h" -#include <asm/octeon/cvmx-helper.h> - -#include <asm/octeon/cvmx-gmxx-defs.h> - int cvm_oct_sgmii_open(struct net_device *dev) { return cvm_oct_common_open(dev, cvm_oct_link_poll); diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c index 01efdf2a2c20..c582403e6a1f 100644 --- a/drivers/staging/octeon/ethernet-spi.c +++ b/drivers/staging/octeon/ethernet-spi.c @@ -10,18 +10,10 @@ #include <linux/interrupt.h> #include <net/dst.h> -#include <asm/octeon/octeon.h> - -#include "ethernet-defines.h" #include "octeon-ethernet.h" +#include "ethernet-defines.h" #include "ethernet-util.h" -#include <asm/octeon/cvmx-spi.h> - -#include <asm/octeon/cvmx-npi-defs.h> -#include <asm/octeon/cvmx-spxx-defs.h> -#include <asm/octeon/cvmx-stxx-defs.h> - static int number_spi_ports; static int need_retrain[2] = { 0, 0 }; diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index 20f513fbaa85..c64728fc21f2 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -22,21 +22,11 @@ #include <linux/atomic.h> #include <net/sch_generic.h> -#include <asm/octeon/octeon.h> - -#include "ethernet-defines.h" #include "octeon-ethernet.h" +#include "ethernet-defines.h" #include "ethernet-tx.h" #include "ethernet-util.h" -#include <asm/octeon/cvmx-wqe.h> -#include <asm/octeon/cvmx-fau.h> -#include <asm/octeon/cvmx-pip.h> -#include <asm/octeon/cvmx-pko.h> -#include <asm/octeon/cvmx-helper.h> - -#include <asm/octeon/cvmx-gmxx-defs.h> - #define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb)) /* @@ -280,12 +270,11 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) hw_buffer.s.size = skb_headlen(skb); CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i; + skb_frag_t *fs = skb_shinfo(skb)->frags + i; hw_buffer.s.addr = - XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + - fs->page_offset)); - hw_buffer.s.size = fs->size; + XKPHYS_TO_PHYS((u64)skb_frag_address(fs)); + hw_buffer.s.size = skb_frag_size(fs); CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64; } hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb)); diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h index 31a82873e15c..2af83a12ca78 100644 --- a/drivers/staging/octeon/ethernet-util.h +++ b/drivers/staging/octeon/ethernet-util.h @@ -5,10 +5,6 @@ * Copyright (c) 2003-2007 Cavium Networks */ -#include <asm/octeon/cvmx-pip.h> -#include <asm/octeon/cvmx-helper.h> -#include <asm/octeon/cvmx-helper-util.h> - /** * cvm_oct_get_buffer_ptr - convert packet data address to pointer * @packet_ptr: Packet data hardware address diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 8847a11c212f..8889494adf1f 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -19,24 +19,14 @@ #include <net/dst.h> -#include <asm/octeon/octeon.h> - -#include "ethernet-defines.h" #include "octeon-ethernet.h" +#include "ethernet-defines.h" #include "ethernet-mem.h" #include "ethernet-rx.h" #include "ethernet-tx.h" #include "ethernet-mdio.h" #include 
"ethernet-util.h" -#include <asm/octeon/cvmx-pip.h> -#include <asm/octeon/cvmx-pko.h> -#include <asm/octeon/cvmx-fau.h> -#include <asm/octeon/cvmx-ipd.h> -#include <asm/octeon/cvmx-helper.h> -#include <asm/octeon/cvmx-asxx-defs.h> -#include <asm/octeon/cvmx-gmxx-defs.h> - #define OCTEON_MAX_MTU 65392 static int num_packet_buffers = 1024; diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h index be570d33685a..a8a864b40913 100644 --- a/drivers/staging/octeon/octeon-ethernet.h +++ b/drivers/staging/octeon/octeon-ethernet.h @@ -13,7 +13,34 @@ #include <linux/of.h> #include <linux/phy.h> -#include <asm/octeon/cvmx-helper-board.h> + +#ifdef CONFIG_MIPS + +#include <asm/octeon/octeon.h> + +#include <asm/octeon/cvmx-asxx-defs.h> +#include <asm/octeon/cvmx-config.h> +#include <asm/octeon/cvmx-fau.h> +#include <asm/octeon/cvmx-gmxx-defs.h> +#include <asm/octeon/cvmx-helper.h> +#include <asm/octeon/cvmx-helper-util.h> +#include <asm/octeon/cvmx-ipd.h> +#include <asm/octeon/cvmx-ipd-defs.h> +#include <asm/octeon/cvmx-npi-defs.h> +#include <asm/octeon/cvmx-pip.h> +#include <asm/octeon/cvmx-pko.h> +#include <asm/octeon/cvmx-pow.h> +#include <asm/octeon/cvmx-scratch.h> +#include <asm/octeon/cvmx-spi.h> +#include <asm/octeon/cvmx-spxx-defs.h> +#include <asm/octeon/cvmx-stxx-defs.h> +#include <asm/octeon/cvmx-wqe.h> + +#else + +#include "octeon-stubs.h" + +#endif /** * This is the definition of the Ethernet driver's private diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h new file mode 100644 index 000000000000..a4ac3bfb62a8 --- /dev/null +++ b/drivers/staging/octeon/octeon-stubs.h @@ -0,0 +1,1429 @@ +#define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 512 +#define XKPHYS_TO_PHYS(p) (p) + +#define OCTEON_IRQ_WORKQ0 0 +#define OCTEON_IRQ_RML 0 +#define OCTEON_IRQ_TIMER1 0 +#define OCTEON_IS_MODEL(x) 0 +#define octeon_has_feature(x) 0 +#define octeon_get_clock_rate() 0 + +#define CVMX_SYNCIOBDMA do { } while(0) + +#define CVMX_HELPER_INPUT_TAG_TYPE 0 +#define CVMX_HELPER_FIRST_MBUFF_SKIP 7 +#define CVMX_FAU_REG_END (2048) +#define CVMX_FPA_OUTPUT_BUFFER_POOL (2) +#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 16 +#define CVMX_FPA_PACKET_POOL (0) +#define CVMX_FPA_PACKET_POOL_SIZE 16 +#define CVMX_FPA_WQE_POOL (1) +#define CVMX_FPA_WQE_POOL_SIZE 16 +#define CVMX_GMXX_RXX_ADR_CAM_EN(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_ADR_CTL(a, b) ((a)+(b)) +#define CVMX_GMXX_PRTX_CFG(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_FRM_MAX(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_JABBER(a, b) ((a)+(b)) +#define CVMX_IPD_CTL_STATUS 0 +#define CVMX_PIP_FRM_LEN_CHKX(a) (a) +#define CVMX_PIP_NUM_INPUT_PORTS 1 +#define CVMX_SCR_SCRATCH 0 +#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 2 +#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 2 +#define CVMX_IPD_SUB_PORT_FCS 0 +#define CVMX_SSO_WQ_IQ_DIS 0 +#define CVMX_SSO_WQ_INT 0 +#define CVMX_POW_WQ_INT 0 +#define CVMX_SSO_WQ_INT_PC 0 +#define CVMX_NPI_RSL_INT_BLOCKS 0 +#define CVMX_POW_WQ_INT_PC 0 + +typedef union { + uint64_t u64; + struct { + uint64_t bufs:8; + uint64_t ip_offset:8; + uint64_t vlan_valid:1; + uint64_t vlan_stacked:1; + uint64_t unassigned:1; + uint64_t vlan_cfi:1; + uint64_t vlan_id:12; + uint64_t pr:4; + uint64_t unassigned2:8; + uint64_t dec_ipcomp:1; + uint64_t tcp_or_udp:1; + uint64_t dec_ipsec:1; + uint64_t is_v6:1; + uint64_t software:1; + uint64_t L4_error:1; + uint64_t is_frag:1; + uint64_t IP_exc:1; + uint64_t is_bcast:1; + uint64_t is_mcast:1; + uint64_t not_IP:1; + uint64_t rcv_error:1; + uint64_t 
err_code:8; + } s; + struct { + uint64_t bufs:8; + uint64_t ip_offset:8; + uint64_t vlan_valid:1; + uint64_t vlan_stacked:1; + uint64_t unassigned:1; + uint64_t vlan_cfi:1; + uint64_t vlan_id:12; + uint64_t port:12; + uint64_t dec_ipcomp:1; + uint64_t tcp_or_udp:1; + uint64_t dec_ipsec:1; + uint64_t is_v6:1; + uint64_t software:1; + uint64_t L4_error:1; + uint64_t is_frag:1; + uint64_t IP_exc:1; + uint64_t is_bcast:1; + uint64_t is_mcast:1; + uint64_t not_IP:1; + uint64_t rcv_error:1; + uint64_t err_code:8; + } s_cn68xx; + + struct { + uint64_t unused1:16; + uint64_t vlan:16; + uint64_t unused2:32; + } svlan; + struct { + uint64_t bufs:8; + uint64_t unused:8; + uint64_t vlan_valid:1; + uint64_t vlan_stacked:1; + uint64_t unassigned:1; + uint64_t vlan_cfi:1; + uint64_t vlan_id:12; + uint64_t pr:4; + uint64_t unassigned2:12; + uint64_t software:1; + uint64_t unassigned3:1; + uint64_t is_rarp:1; + uint64_t is_arp:1; + uint64_t is_bcast:1; + uint64_t is_mcast:1; + uint64_t not_IP:1; + uint64_t rcv_error:1; + uint64_t err_code:8; + } snoip; + +} cvmx_pip_wqe_word2; + +union cvmx_pip_wqe_word0 { + struct { + uint64_t next_ptr:40; + uint8_t unused; + uint16_t hw_chksum; + } cn38xx; + struct { + uint64_t pknd:6; /* 0..5 */ + uint64_t unused2:2; /* 6..7 */ + uint64_t bpid:6; /* 8..13 */ + uint64_t unused1:18; /* 14..31 */ + uint64_t l2ptr:8; /* 32..39 */ + uint64_t l3ptr:8; /* 40..47 */ + uint64_t unused0:8; /* 48..55 */ + uint64_t l4ptr:8; /* 56..63 */ + } cn68xx; +}; + +union cvmx_wqe_word0 { + uint64_t u64; + union cvmx_pip_wqe_word0 pip; +}; + +union cvmx_wqe_word1 { + uint64_t u64; + struct { + uint64_t tag:32; + uint64_t tag_type:2; + uint64_t varies:14; + uint64_t len:16; + }; + struct { + uint64_t tag:32; + uint64_t tag_type:2; + uint64_t zero_2:3; + uint64_t grp:6; + uint64_t zero_1:1; + uint64_t qos:3; + uint64_t zero_0:1; + uint64_t len:16; + } cn68xx; + struct { + uint64_t tag:32; + uint64_t tag_type:2; + uint64_t zero_2:1; + uint64_t grp:4; + uint64_t qos:3; + uint64_t ipprt:6; + uint64_t len:16; + } cn38xx; +}; + +union cvmx_buf_ptr { + void *ptr; + uint64_t u64; + struct { + uint64_t i:1; + uint64_t back:4; + uint64_t pool:3; + uint64_t size:16; + uint64_t addr:40; + } s; +}; + +typedef struct { + union cvmx_wqe_word0 word0; + union cvmx_wqe_word1 word1; + cvmx_pip_wqe_word2 word2; + union cvmx_buf_ptr packet_ptr; + uint8_t packet_data[96]; +} cvmx_wqe_t; + +typedef union { + uint64_t u64; + struct { + uint64_t reserved_20_63:44; + uint64_t link_up:1; /**< Is the physical link up? 
*/ + uint64_t full_duplex:1; /**< 1 if the link is full duplex */ + uint64_t speed:18; /**< Speed of the link in Mbps */ + } s; +} cvmx_helper_link_info_t; + +typedef enum { + CVMX_FAU_REG_32_START = 0, +} cvmx_fau_reg_32_t; + +typedef enum { + CVMX_FAU_OP_SIZE_8 = 0, + CVMX_FAU_OP_SIZE_16 = 1, + CVMX_FAU_OP_SIZE_32 = 2, + CVMX_FAU_OP_SIZE_64 = 3 +} cvmx_fau_op_size_t; + +typedef enum { + CVMX_SPI_MODE_UNKNOWN = 0, + CVMX_SPI_MODE_TX_HALFPLEX = 1, + CVMX_SPI_MODE_RX_HALFPLEX = 2, + CVMX_SPI_MODE_DUPLEX = 3 +} cvmx_spi_mode_t; + +typedef enum { + CVMX_HELPER_INTERFACE_MODE_DISABLED, + CVMX_HELPER_INTERFACE_MODE_RGMII, + CVMX_HELPER_INTERFACE_MODE_GMII, + CVMX_HELPER_INTERFACE_MODE_SPI, + CVMX_HELPER_INTERFACE_MODE_PCIE, + CVMX_HELPER_INTERFACE_MODE_XAUI, + CVMX_HELPER_INTERFACE_MODE_SGMII, + CVMX_HELPER_INTERFACE_MODE_PICMG, + CVMX_HELPER_INTERFACE_MODE_NPI, + CVMX_HELPER_INTERFACE_MODE_LOOP, +} cvmx_helper_interface_mode_t; + +typedef enum { + CVMX_POW_WAIT = 1, + CVMX_POW_NO_WAIT = 0, +} cvmx_pow_wait_t; + +typedef enum { + CVMX_PKO_LOCK_NONE = 0, + CVMX_PKO_LOCK_ATOMIC_TAG = 1, + CVMX_PKO_LOCK_CMD_QUEUE = 2, +} cvmx_pko_lock_t; + +typedef enum { + CVMX_PKO_SUCCESS, + CVMX_PKO_INVALID_PORT, + CVMX_PKO_INVALID_QUEUE, + CVMX_PKO_INVALID_PRIORITY, + CVMX_PKO_NO_MEMORY, + CVMX_PKO_PORT_ALREADY_SETUP, + CVMX_PKO_CMD_QUEUE_INIT_ERROR +} cvmx_pko_status_t; + +enum cvmx_pow_tag_type { + CVMX_POW_TAG_TYPE_ORDERED = 0L, + CVMX_POW_TAG_TYPE_ATOMIC = 1L, + CVMX_POW_TAG_TYPE_NULL = 2L, + CVMX_POW_TAG_TYPE_NULL_NULL = 3L +}; + +union cvmx_ipd_ctl_status { + uint64_t u64; + struct cvmx_ipd_ctl_status_s { + uint64_t reserved_18_63:46; + uint64_t use_sop:1; + uint64_t rst_done:1; + uint64_t clken:1; + uint64_t no_wptr:1; + uint64_t pq_apkt:1; + uint64_t pq_nabuf:1; + uint64_t ipd_full:1; + uint64_t pkt_off:1; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } s; + struct cvmx_ipd_ctl_status_cn30xx { + uint64_t reserved_10_63:54; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn30xx; + struct cvmx_ipd_ctl_status_cn38xxp2 { + uint64_t reserved_9_63:55; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn38xxp2; + struct cvmx_ipd_ctl_status_cn50xx { + uint64_t reserved_15_63:49; + uint64_t no_wptr:1; + uint64_t pq_apkt:1; + uint64_t pq_nabuf:1; + uint64_t ipd_full:1; + uint64_t pkt_off:1; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn50xx; + struct cvmx_ipd_ctl_status_cn58xx { + uint64_t reserved_12_63:52; + uint64_t ipd_full:1; + uint64_t pkt_off:1; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn58xx; + struct cvmx_ipd_ctl_status_cn63xxp1 { + uint64_t reserved_16_63:48; + uint64_t clken:1; + uint64_t no_wptr:1; + uint64_t pq_apkt:1; + uint64_t pq_nabuf:1; + uint64_t ipd_full:1; + uint64_t pkt_off:1; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + 
uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn63xxp1; +}; + +union cvmx_ipd_sub_port_fcs { + uint64_t u64; + struct cvmx_ipd_sub_port_fcs_s { + uint64_t port_bit:32; + uint64_t reserved_32_35:4; + uint64_t port_bit2:4; + uint64_t reserved_40_63:24; + } s; + struct cvmx_ipd_sub_port_fcs_cn30xx { + uint64_t port_bit:3; + uint64_t reserved_3_63:61; + } cn30xx; + struct cvmx_ipd_sub_port_fcs_cn38xx { + uint64_t port_bit:32; + uint64_t reserved_32_63:32; + } cn38xx; +}; + +union cvmx_ipd_sub_port_qos_cnt { + uint64_t u64; + struct cvmx_ipd_sub_port_qos_cnt_s { + uint64_t cnt:32; + uint64_t port_qos:9; + uint64_t reserved_41_63:23; + } s; +}; +typedef struct { + uint32_t dropped_octets; + uint32_t dropped_packets; + uint32_t pci_raw_packets; + uint32_t octets; + uint32_t packets; + uint32_t multicast_packets; + uint32_t broadcast_packets; + uint32_t len_64_packets; + uint32_t len_65_127_packets; + uint32_t len_128_255_packets; + uint32_t len_256_511_packets; + uint32_t len_512_1023_packets; + uint32_t len_1024_1518_packets; + uint32_t len_1519_max_packets; + uint32_t fcs_align_err_packets; + uint32_t runt_packets; + uint32_t runt_crc_packets; + uint32_t oversize_packets; + uint32_t oversize_crc_packets; + uint32_t inb_packets; + uint64_t inb_octets; + uint16_t inb_errors; +} cvmx_pip_port_status_t; + +typedef struct { + uint32_t packets; + uint64_t octets; + uint64_t doorbell; +} cvmx_pko_port_status_t; + +union cvmx_pip_frm_len_chkx { + uint64_t u64; + struct cvmx_pip_frm_len_chkx_s { + uint64_t reserved_32_63:32; + uint64_t maxlen:16; + uint64_t minlen:16; + } s; +}; + +union cvmx_gmxx_rxx_frm_ctl { + uint64_t u64; + struct cvmx_gmxx_rxx_frm_ctl_s { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t vlan_len:1; + uint64_t pad_len:1; + uint64_t pre_align:1; + uint64_t null_dis:1; + uint64_t reserved_11_11:1; + uint64_t ptp_mode:1; + uint64_t reserved_13_63:51; + } s; + struct cvmx_gmxx_rxx_frm_ctl_cn30xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t vlan_len:1; + uint64_t pad_len:1; + uint64_t reserved_9_63:55; + } cn30xx; + struct cvmx_gmxx_rxx_frm_ctl_cn31xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t vlan_len:1; + uint64_t reserved_8_63:56; + } cn31xx; + struct cvmx_gmxx_rxx_frm_ctl_cn50xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t reserved_7_8:2; + uint64_t pre_align:1; + uint64_t null_dis:1; + uint64_t reserved_11_63:53; + } cn50xx; + struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t reserved_7_8:2; + uint64_t pre_align:1; + uint64_t reserved_10_63:54; + } cn56xxp1; + struct cvmx_gmxx_rxx_frm_ctl_cn58xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t vlan_len:1; + uint64_t pad_len:1; + uint64_t pre_align:1; + uint64_t null_dis:1; + uint64_t 
reserved_11_63:53; + } cn58xx; + struct cvmx_gmxx_rxx_frm_ctl_cn61xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t reserved_7_8:2; + uint64_t pre_align:1; + uint64_t null_dis:1; + uint64_t reserved_11_11:1; + uint64_t ptp_mode:1; + uint64_t reserved_13_63:51; + } cn61xx; +}; + +union cvmx_gmxx_rxx_int_reg { + uint64_t u64; + struct cvmx_gmxx_rxx_int_reg_s { + uint64_t minerr:1; + uint64_t carext:1; + uint64_t maxerr:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t alnerr:1; + uint64_t lenerr:1; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t niberr:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t phy_link:1; + uint64_t phy_spd:1; + uint64_t phy_dupx:1; + uint64_t pause_drp:1; + uint64_t loc_fault:1; + uint64_t rem_fault:1; + uint64_t bad_seq:1; + uint64_t bad_term:1; + uint64_t unsop:1; + uint64_t uneop:1; + uint64_t undat:1; + uint64_t hg2fld:1; + uint64_t hg2cc:1; + uint64_t reserved_29_63:35; + } s; + struct cvmx_gmxx_rxx_int_reg_cn30xx { + uint64_t minerr:1; + uint64_t carext:1; + uint64_t maxerr:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t alnerr:1; + uint64_t lenerr:1; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t niberr:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t phy_link:1; + uint64_t phy_spd:1; + uint64_t phy_dupx:1; + uint64_t reserved_19_63:45; + } cn30xx; + struct cvmx_gmxx_rxx_int_reg_cn50xx { + uint64_t reserved_0_0:1; + uint64_t carext:1; + uint64_t reserved_2_2:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t alnerr:1; + uint64_t reserved_6_6:1; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t niberr:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t phy_link:1; + uint64_t phy_spd:1; + uint64_t phy_dupx:1; + uint64_t pause_drp:1; + uint64_t reserved_20_63:44; + } cn50xx; + struct cvmx_gmxx_rxx_int_reg_cn52xx { + uint64_t reserved_0_0:1; + uint64_t carext:1; + uint64_t reserved_2_2:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t reserved_5_6:2; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t reserved_9_9:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t reserved_16_18:3; + uint64_t pause_drp:1; + uint64_t loc_fault:1; + uint64_t rem_fault:1; + uint64_t bad_seq:1; + uint64_t bad_term:1; + uint64_t unsop:1; + uint64_t uneop:1; + uint64_t undat:1; + uint64_t hg2fld:1; + uint64_t hg2cc:1; + uint64_t reserved_29_63:35; + } cn52xx; + struct cvmx_gmxx_rxx_int_reg_cn56xxp1 { + uint64_t reserved_0_0:1; + uint64_t carext:1; + uint64_t reserved_2_2:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t reserved_5_6:2; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t reserved_9_9:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t reserved_16_18:3; + uint64_t pause_drp:1; + uint64_t loc_fault:1; + uint64_t rem_fault:1; + uint64_t bad_seq:1; + uint64_t bad_term:1; + uint64_t unsop:1; + uint64_t uneop:1; + uint64_t undat:1; + uint64_t reserved_27_63:37; + } cn56xxp1; + struct cvmx_gmxx_rxx_int_reg_cn58xx { + uint64_t minerr:1; + uint64_t carext:1; + uint64_t 
maxerr:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t alnerr:1; + uint64_t lenerr:1; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t niberr:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t phy_link:1; + uint64_t phy_spd:1; + uint64_t phy_dupx:1; + uint64_t pause_drp:1; + uint64_t reserved_20_63:44; + } cn58xx; + struct cvmx_gmxx_rxx_int_reg_cn61xx { + uint64_t minerr:1; + uint64_t carext:1; + uint64_t reserved_2_2:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t reserved_5_6:2; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t reserved_9_9:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t reserved_16_18:3; + uint64_t pause_drp:1; + uint64_t loc_fault:1; + uint64_t rem_fault:1; + uint64_t bad_seq:1; + uint64_t bad_term:1; + uint64_t unsop:1; + uint64_t uneop:1; + uint64_t undat:1; + uint64_t hg2fld:1; + uint64_t hg2cc:1; + uint64_t reserved_29_63:35; + } cn61xx; +}; + +union cvmx_gmxx_prtx_cfg { + uint64_t u64; + struct cvmx_gmxx_prtx_cfg_s { + uint64_t reserved_22_63:42; + uint64_t pknd:6; + uint64_t reserved_14_15:2; + uint64_t tx_idle:1; + uint64_t rx_idle:1; + uint64_t reserved_9_11:3; + uint64_t speed_msb:1; + uint64_t reserved_4_7:4; + uint64_t slottime:1; + uint64_t duplex:1; + uint64_t speed:1; + uint64_t en:1; + } s; + struct cvmx_gmxx_prtx_cfg_cn30xx { + uint64_t reserved_4_63:60; + uint64_t slottime:1; + uint64_t duplex:1; + uint64_t speed:1; + uint64_t en:1; + } cn30xx; + struct cvmx_gmxx_prtx_cfg_cn52xx { + uint64_t reserved_14_63:50; + uint64_t tx_idle:1; + uint64_t rx_idle:1; + uint64_t reserved_9_11:3; + uint64_t speed_msb:1; + uint64_t reserved_4_7:4; + uint64_t slottime:1; + uint64_t duplex:1; + uint64_t speed:1; + uint64_t en:1; + } cn52xx; +}; + +union cvmx_gmxx_rxx_adr_ctl { + uint64_t u64; + struct cvmx_gmxx_rxx_adr_ctl_s { + uint64_t reserved_4_63:60; + uint64_t cam_mode:1; + uint64_t mcst:2; + uint64_t bcst:1; + } s; +}; + +union cvmx_pip_prt_tagx { + uint64_t u64; + struct cvmx_pip_prt_tagx_s { + uint64_t reserved_54_63:10; + uint64_t portadd_en:1; + uint64_t inc_hwchk:1; + uint64_t reserved_50_51:2; + uint64_t grptagbase_msb:2; + uint64_t reserved_46_47:2; + uint64_t grptagmask_msb:2; + uint64_t reserved_42_43:2; + uint64_t grp_msb:2; + uint64_t grptagbase:4; + uint64_t grptagmask:4; + uint64_t grptag:1; + uint64_t grptag_mskip:1; + uint64_t tag_mode:2; + uint64_t inc_vs:2; + uint64_t inc_vlan:1; + uint64_t inc_prt_flag:1; + uint64_t ip6_dprt_flag:1; + uint64_t ip4_dprt_flag:1; + uint64_t ip6_sprt_flag:1; + uint64_t ip4_sprt_flag:1; + uint64_t ip6_nxth_flag:1; + uint64_t ip4_pctl_flag:1; + uint64_t ip6_dst_flag:1; + uint64_t ip4_dst_flag:1; + uint64_t ip6_src_flag:1; + uint64_t ip4_src_flag:1; + uint64_t tcp6_tag_type:2; + uint64_t tcp4_tag_type:2; + uint64_t ip6_tag_type:2; + uint64_t ip4_tag_type:2; + uint64_t non_tag_type:2; + uint64_t grp:4; + } s; + struct cvmx_pip_prt_tagx_cn30xx { + uint64_t reserved_40_63:24; + uint64_t grptagbase:4; + uint64_t grptagmask:4; + uint64_t grptag:1; + uint64_t reserved_30_30:1; + uint64_t tag_mode:2; + uint64_t inc_vs:2; + uint64_t inc_vlan:1; + uint64_t inc_prt_flag:1; + uint64_t ip6_dprt_flag:1; + uint64_t ip4_dprt_flag:1; + uint64_t ip6_sprt_flag:1; + uint64_t ip4_sprt_flag:1; + uint64_t ip6_nxth_flag:1; + uint64_t ip4_pctl_flag:1; + uint64_t ip6_dst_flag:1; + uint64_t ip4_dst_flag:1; + uint64_t ip6_src_flag:1; + uint64_t 
ip4_src_flag:1; + uint64_t tcp6_tag_type:2; + uint64_t tcp4_tag_type:2; + uint64_t ip6_tag_type:2; + uint64_t ip4_tag_type:2; + uint64_t non_tag_type:2; + uint64_t grp:4; + } cn30xx; + struct cvmx_pip_prt_tagx_cn50xx { + uint64_t reserved_40_63:24; + uint64_t grptagbase:4; + uint64_t grptagmask:4; + uint64_t grptag:1; + uint64_t grptag_mskip:1; + uint64_t tag_mode:2; + uint64_t inc_vs:2; + uint64_t inc_vlan:1; + uint64_t inc_prt_flag:1; + uint64_t ip6_dprt_flag:1; + uint64_t ip4_dprt_flag:1; + uint64_t ip6_sprt_flag:1; + uint64_t ip4_sprt_flag:1; + uint64_t ip6_nxth_flag:1; + uint64_t ip4_pctl_flag:1; + uint64_t ip6_dst_flag:1; + uint64_t ip4_dst_flag:1; + uint64_t ip6_src_flag:1; + uint64_t ip4_src_flag:1; + uint64_t tcp6_tag_type:2; + uint64_t tcp4_tag_type:2; + uint64_t ip6_tag_type:2; + uint64_t ip4_tag_type:2; + uint64_t non_tag_type:2; + uint64_t grp:4; + } cn50xx; +}; + +union cvmx_spxx_int_reg { + uint64_t u64; + struct cvmx_spxx_int_reg_s { + uint64_t reserved_32_63:32; + uint64_t spf:1; + uint64_t reserved_12_30:19; + uint64_t calerr:1; + uint64_t syncerr:1; + uint64_t diperr:1; + uint64_t tpaovr:1; + uint64_t rsverr:1; + uint64_t drwnng:1; + uint64_t clserr:1; + uint64_t spiovr:1; + uint64_t reserved_2_3:2; + uint64_t abnorm:1; + uint64_t prtnxa:1; + } s; +}; + +union cvmx_spxx_int_msk { + uint64_t u64; + struct cvmx_spxx_int_msk_s { + uint64_t reserved_12_63:52; + uint64_t calerr:1; + uint64_t syncerr:1; + uint64_t diperr:1; + uint64_t tpaovr:1; + uint64_t rsverr:1; + uint64_t drwnng:1; + uint64_t clserr:1; + uint64_t spiovr:1; + uint64_t reserved_2_3:2; + uint64_t abnorm:1; + uint64_t prtnxa:1; + } s; +}; + +union cvmx_pow_wq_int { + uint64_t u64; + struct cvmx_pow_wq_int_s { + uint64_t wq_int:16; + uint64_t iq_dis:16; + uint64_t reserved_32_63:32; + } s; +}; + +union cvmx_sso_wq_int_thrx { + uint64_t u64; + struct { + uint64_t iq_thr:12; + uint64_t reserved_12_13:2; + uint64_t ds_thr:12; + uint64_t reserved_26_27:2; + uint64_t tc_thr:4; + uint64_t tc_en:1; + uint64_t reserved_33_63:31; + } s; +}; + +union cvmx_stxx_int_reg { + uint64_t u64; + struct cvmx_stxx_int_reg_s { + uint64_t reserved_9_63:55; + uint64_t syncerr:1; + uint64_t frmerr:1; + uint64_t unxfrm:1; + uint64_t nosync:1; + uint64_t diperr:1; + uint64_t datovr:1; + uint64_t ovrbst:1; + uint64_t calpar1:1; + uint64_t calpar0:1; + } s; +}; + +union cvmx_stxx_int_msk { + uint64_t u64; + struct cvmx_stxx_int_msk_s { + uint64_t reserved_8_63:56; + uint64_t frmerr:1; + uint64_t unxfrm:1; + uint64_t nosync:1; + uint64_t diperr:1; + uint64_t datovr:1; + uint64_t ovrbst:1; + uint64_t calpar1:1; + uint64_t calpar0:1; + } s; +}; + +union cvmx_pow_wq_int_pc { + uint64_t u64; + struct cvmx_pow_wq_int_pc_s { + uint64_t reserved_0_7:8; + uint64_t pc_thr:20; + uint64_t reserved_28_31:4; + uint64_t pc:28; + uint64_t reserved_60_63:4; + } s; +}; + +union cvmx_pow_wq_int_thrx { + uint64_t u64; + struct cvmx_pow_wq_int_thrx_s { + uint64_t reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_23_23:1; + uint64_t ds_thr:11; + uint64_t reserved_11_11:1; + uint64_t iq_thr:11; + } s; + struct cvmx_pow_wq_int_thrx_cn30xx { + uint64_t reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_18_23:6; + uint64_t ds_thr:6; + uint64_t reserved_6_11:6; + uint64_t iq_thr:6; + } cn30xx; + struct cvmx_pow_wq_int_thrx_cn31xx { + uint64_t reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_20_23:4; + uint64_t ds_thr:8; + uint64_t reserved_8_11:4; + uint64_t iq_thr:8; + } 
cn31xx; + struct cvmx_pow_wq_int_thrx_cn52xx { + uint64_t reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_21_23:3; + uint64_t ds_thr:9; + uint64_t reserved_9_11:3; + uint64_t iq_thr:9; + } cn52xx; + struct cvmx_pow_wq_int_thrx_cn63xx { + uint64_t reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_22_23:2; + uint64_t ds_thr:10; + uint64_t reserved_10_11:2; + uint64_t iq_thr:10; + } cn63xx; +}; + +union cvmx_npi_rsl_int_blocks { + uint64_t u64; + struct cvmx_npi_rsl_int_blocks_s { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t reserved_28_29:2; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + uint64_t reserved_13_14:2; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } s; + struct cvmx_npi_rsl_int_blocks_cn30xx { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t rint_29:1; + uint64_t rint_28:1; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + uint64_t rint_14:1; + uint64_t usb:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn30xx; + struct cvmx_npi_rsl_int_blocks_cn38xx { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t rint_29:1; + uint64_t rint_28:1; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + uint64_t rint_14:1; + uint64_t rint_13:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn38xx; + struct cvmx_npi_rsl_int_blocks_cn50xx { + uint64_t reserved_31_63:33; + uint64_t iob:1; + uint64_t lmc1:1; + uint64_t agl:1; + uint64_t reserved_24_27:4; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t reserved_21_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t reserved_15_15:1; + uint64_t rad:1; + uint64_t usb:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t reserved_8_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn50xx; +}; + +typedef union { + uint64_t u64; + struct { + uint64_t total_bytes:16; + uint64_t segs:6; + uint64_t dontfree:1; + uint64_t ignore_i:1; + uint64_t ipoffp1:7; + uint64_t gather:1; + uint64_t rsp:1; + uint64_t wqp:1; + uint64_t n2:1; + uint64_t le:1; + uint64_t reg0:11; + uint64_t subone0:1; + uint64_t 
reg1:11; + uint64_t subone1:1; + uint64_t size0:2; + uint64_t size1:2; + } s; +} cvmx_pko_command_word0_t; + +union cvmx_ciu_timx { + uint64_t u64; + struct cvmx_ciu_timx_s { + uint64_t reserved_37_63:27; + uint64_t one_shot:1; + uint64_t len:36; + } s; +}; + +union cvmx_gmxx_rxx_rx_inbnd { + uint64_t u64; + struct cvmx_gmxx_rxx_rx_inbnd_s { + uint64_t status:1; + uint64_t speed:2; + uint64_t duplex:1; + uint64_t reserved_4_63:60; + } s; +}; + +static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg, + int32_t value) +{ + return value; +} + +static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value) +{ } + +static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value) +{ } + +static inline uint64_t cvmx_scratch_read64(uint64_t address) +{ + return 0; +} + +static inline void cvmx_scratch_write64(uint64_t address, uint64_t value) +{ } + +static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work) +{ + return 0; +} + +static inline void *cvmx_phys_to_ptr(uint64_t physical_address) +{ + return (void *)(physical_address); +} + +static inline uint64_t cvmx_ptr_to_phys(void *ptr) +{ + return (unsigned long)ptr; +} + +static inline int cvmx_helper_get_interface_num(int ipd_port) +{ + return ipd_port; +} + +static inline int cvmx_helper_get_interface_index_num(int ipd_port) +{ + return ipd_port; +} + +static inline void cvmx_fpa_enable(void) +{ } + +static inline uint64_t cvmx_read_csr(uint64_t csr_addr) +{ + return 0; +} + +static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val) +{ } + +static inline int cvmx_helper_setup_red(int pass_thresh, int drop_thresh) +{ + return 0; +} + +static inline void *cvmx_fpa_alloc(uint64_t pool) +{ + return NULL; +} + +static inline void cvmx_fpa_free(void *ptr, uint64_t pool, + uint64_t num_cache_lines) +{ } + +static inline int octeon_is_simulation(void) +{ + return 1; +} + +static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear, + cvmx_pip_port_status_t *status) +{ } + +static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear, + cvmx_pko_port_status_t *status) +{ } + +static inline cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int + interface) +{ + return 0; +} + +static inline cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port) +{ + cvmx_helper_link_info_t ret = { .u64 = 0 }; + + return ret; +} + +static inline int cvmx_helper_link_set(int ipd_port, + cvmx_helper_link_info_t link_info) +{ + return 0; +} + +static inline int cvmx_helper_initialize_packet_io_global(void) +{ + return 0; +} + +static inline int cvmx_helper_get_number_of_interfaces(void) +{ + return 2; +} + +static inline int cvmx_helper_ports_on_interface(int interface) +{ + return 1; +} + +static inline int cvmx_helper_get_ipd_port(int interface, int port) +{ + return 0; +} + +static inline int cvmx_helper_ipd_and_packet_input_enable(void) +{ + return 0; +} + +static inline void cvmx_ipd_disable(void) +{ } + +static inline void cvmx_ipd_free_ptr(void) +{ } + +static inline void cvmx_pko_disable(void) +{ } + +static inline void cvmx_pko_shutdown(void) +{ } + +static inline int cvmx_pko_get_base_queue_per_core(int port, int core) +{ + return port; +} + +static inline int cvmx_pko_get_base_queue(int port) +{ + return port; +} + +static inline int cvmx_pko_get_num_queues(int port) +{ + return port; +} + +static inline unsigned int cvmx_get_core_num(void) +{ + return 0; +} + +static inline void cvmx_pow_work_request_async_nocheck(int scr_addr, + cvmx_pow_wait_t wait) +{ } + +static 
inline void cvmx_pow_work_request_async(int scr_addr, + cvmx_pow_wait_t wait) +{ } + +static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr) +{ + cvmx_wqe_t *wqe = (void *)(unsigned long)scr_addr; + + return wqe; +} + +static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait) +{ + return (void *)(unsigned long)wait; +} + +static inline int cvmx_spi_restart_interface(int interface, + cvmx_spi_mode_t mode, int timeout) +{ + return 0; +} + +static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr, + cvmx_fau_reg_32_t reg, + int32_t value) +{ } + +static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed( + int interface, + int port) +{ + union cvmx_gmxx_rxx_rx_inbnd r; + r.u64 = 0; + return r; +} + +static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue, + cvmx_pko_lock_t use_locking) +{ } + +static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t port, + uint64_t queue, cvmx_pko_command_word0_t pko_command, + union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking) +{ + cvmx_pko_status_t ret = 0; + + return ret; +} + +static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port) +{ } + +static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos) +{ } + +static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work) +{ + return 0; +} + +static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp) +{ } + +static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag, + enum cvmx_pow_tag_type tag_type, + uint64_t qos, uint64_t grp) +{ } + +#define CVMX_ASXX_RX_CLK_SETX(a, b) ((a)+(b)) +#define CVMX_ASXX_TX_CLK_SETX(a, b) ((a)+(b)) +#define CVMX_CIU_TIMX(a) (a) +#define CVMX_GMXX_RXX_ADR_CAM0(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_ADR_CAM1(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_ADR_CAM2(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_ADR_CAM3(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_ADR_CAM4(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_ADR_CAM5(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_FRM_CTL(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_INT_REG(a, b) ((a)+(b)) +#define CVMX_GMXX_SMACX(a, b) ((a)+(b)) +#define CVMX_PIP_PRT_TAGX(a) (a) +#define CVMX_POW_PP_GRP_MSKX(a) (a) +#define CVMX_POW_WQ_INT_THRX(a) (a) +#define CVMX_SPXX_INT_MSK(a) (a) +#define CVMX_SPXX_INT_REG(a) (a) +#define CVMX_SSO_PPX_GRP_MSK(a) (a) +#define CVMX_SSO_WQ_INT_THRX(a) (a) +#define CVMX_STXX_INT_MSK(a) (a) +#define CVMX_STXX_INT_REG(a) (a) diff --git a/drivers/staging/qlge/Kconfig b/drivers/staging/qlge/Kconfig new file mode 100644 index 000000000000..a3cb25a3ab80 --- /dev/null +++ b/drivers/staging/qlge/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +config QLGE + tristate "QLogic QLGE 10Gb Ethernet Driver Support" + depends on ETHERNET && PCI + help + This driver supports QLogic ISP8XXX 10Gb Ethernet cards. + + To compile this driver as a module, choose M here. The module will be + called qlge. diff --git a/drivers/net/ethernet/qlogic/qlge/Makefile b/drivers/staging/qlge/Makefile index 1dc2568e820c..1dc2568e820c 100644 --- a/drivers/net/ethernet/qlogic/qlge/Makefile +++ b/drivers/staging/qlge/Makefile diff --git a/drivers/staging/qlge/TODO b/drivers/staging/qlge/TODO new file mode 100644 index 000000000000..51c509084e80 --- /dev/null +++ b/drivers/staging/qlge/TODO @@ -0,0 +1,46 @@ +* reception stalls permanently (until admin intervention) if the rx buffer + queues become empty because of allocation failures (ex. 
under memory + pressure) +* commit 7c734359d350 ("qlge: Size RX buffers based on MTU.", v2.6.33-rc1) + introduced dead code in the receive routines, which should be rewritten + anyway, by the author's own admission; see the comment above + ql_build_rx_skb(). That function is now used exclusively to handle packets + that underwent header splitting, but it still contains code to handle the + non-split cases. +* truesize accounting is incorrect (ex: a 9000B frame has skb->truesize 10280 + while containing two frags of order-1 allocations, i.e. >16K) +* while in that area, using two 8k buffers to store one 9k frame is a poor + choice of buffer size. +* in the "chain of large buffers" case, the driver uses an skb allocated with + head room but only puts data in the frags. +* rename "rx" queues to "completion" queues. Calling tx completion queues "rx + queues" is confusing. +* struct rx_ring is used for rx and tx completions, with some members relevant + to one case only +* there is an inordinate amount of disparate debugging code, most of which is + of questionable value. In particular, qlge_dbg.c has hundreds of lines of + code bitrotting away in ifdef land (doesn't compile since commit + 18c49b91777c ("qlge: do vlan cleanup", v3.1-rc1), 8 years ago). +* triggering an ethtool regdump will hexdump a 176k struct to dmesg depending + on some module parameters. +* the flow control implementation in firmware is buggy (sends a flood of pause + frames, resets the link, device and driver buffer queues become + desynchronized), disable it by default +* some structures are initialized redundantly (ex. memset 0 after + alloc_etherdev()) +* the driver has a habit of using runtime checks where compile-time checks are + possible (ex. ql_free_rx_buffers(), ql_alloc_rx_buffers()) +* reorder struct members to avoid holes if it doesn't impact performance +* in terms of namespace, the driver uses either qlge_, ql_ (used by + other qlogic drivers, with clashes, ex: ql_sem_spinlock) or nothing (with + clashes, ex: struct ob_mac_iocb_req). Rename everything to use the "qlge_" + prefix. +* avoid legacy/deprecated APIs (ex. replace pci_dma_*, replace pci_enable_msi, + use pci_iomap) +* some "while" loops could be rewritten with simple "for", ex. + ql_wait_reg_rdy(), ql_start_rx_ring() +* remove duplicate and useless comments +* fix weird line wrapping (all over, ex. the ql_set_routing_reg() calls in + qlge_set_multicast_list()). +* fix weird indentation (all over, ex. 
the for loops in qlge_get_stats()) +* fix checkpatch issues diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/staging/qlge/qlge.h index ad7c5eb8a3b6..ad7c5eb8a3b6 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index 31389ab8bdf7..31389ab8bdf7 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c index a6886cc5654c..a6886cc5654c 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/staging/qlge/qlge_ethtool.c diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 6cae33072496..6cae33072496 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c index 957c72985a06..957c72985a06 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c +++ b/drivers/staging/qlge/qlge_mpi.c diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c index 9d4f1dab0968..6fa7726185de 100644 --- a/drivers/staging/unisys/visornic/visornic_main.c +++ b/drivers/staging/unisys/visornic/visornic_main.c @@ -284,9 +284,9 @@ static int visor_copy_fragsinfo_from_skb(struct sk_buff *skb, for (frag = 0; frag < numfrags; frag++) { count = add_physinfo_entries(page_to_pfn( skb_frag_page(&skb_shinfo(skb)->frags[frag])), - skb_shinfo(skb)->frags[frag].page_offset, - skb_shinfo(skb)->frags[frag].size, count, - frags_max, frags); + skb_frag_off(&skb_shinfo(skb)->frags[frag]), + skb_frag_size(&skb_shinfo(skb)->frags[frag]), + count, frags_max, frags); /* add_physinfo_entries only returns * zero if the frags array is out of room * That should never happen because we diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index 24309d937d8c..fcdc4211e3c2 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -899,9 +899,9 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx]; sg_init_table(&ccmd->sg, 1); - sg_set_page(&ccmd->sg, dfrag->page.p, skb_frag_size(dfrag), - dfrag->page_offset); - get_page(dfrag->page.p); + sg_set_page(&ccmd->sg, skb_frag_page(dfrag), + skb_frag_size(dfrag), skb_frag_off(dfrag)); + get_page(skb_frag_page(dfrag)); cmd->se_cmd.t_data_sg = &ccmd->sg; cmd->se_cmd.t_data_nents = 1; @@ -1403,7 +1403,8 @@ static void cxgbit_lro_skb_dump(struct sk_buff *skb) pdu_cb->ddigest, pdu_cb->frags); for (i = 0; i < ssi->nr_frags; i++) pr_info("skb 0x%p, frag %d, off %u, sz %u.\n", - skb, i, ssi->frags[i].page_offset, ssi->frags[i].size); + skb, i, skb_frag_off(&ssi->frags[i]), + skb_frag_size(&ssi->frags[i])); } static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk) @@ -1447,7 +1448,7 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx) hpdu_cb->frags++; hpdu_cb->hfrag_idx = hfrag_idx; - len = hssi->frags[hfrag_idx].size; + len = skb_frag_size(&hssi->frags[hfrag_idx]); hskb->len += len; hskb->data_len += len; hskb->truesize += len; @@ -1467,7 +1468,7 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx) get_page(skb_frag_page(&hssi->frags[dfrag_idx])); - len += 
hssi->frags[dfrag_idx].size; + len += skb_frag_size(&hssi->frags[dfrag_idx]); hssi->nr_frags++; hpdu_cb->frags++; diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 6a50e1d0529c..9f57736fe15e 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -102,7 +102,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, struct iov_iter iov_iter; unsigned out, in; size_t nbytes; - size_t len; + size_t iov_len, payload_len; int head; spin_lock_bh(&vsock->send_pkt_list_lock); @@ -147,8 +147,24 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, break; } - len = iov_length(&vq->iov[out], in); - iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len); + iov_len = iov_length(&vq->iov[out], in); + if (iov_len < sizeof(pkt->hdr)) { + virtio_transport_free_pkt(pkt); + vq_err(vq, "Buffer len [%zu] too small\n", iov_len); + break; + } + + iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len); + payload_len = pkt->len - pkt->off; + + /* If the packet is greater than the space available in the + * buffer, we split it using multiple buffers. + */ + if (payload_len > iov_len - sizeof(pkt->hdr)) + payload_len = iov_len - sizeof(pkt->hdr); + + /* Set the correct length in the header */ + pkt->hdr.len = cpu_to_le32(payload_len); nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter); if (nbytes != sizeof(pkt->hdr)) { @@ -157,33 +173,47 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, break; } - nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter); - if (nbytes != pkt->len) { + nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len, + &iov_iter); + if (nbytes != payload_len) { virtio_transport_free_pkt(pkt); vq_err(vq, "Faulted on copying pkt buf\n"); break; } - vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len); + vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); added = true; - if (pkt->reply) { - int val; - - val = atomic_dec_return(&vsock->queued_replies); - - /* Do we have resources to resume tx processing? */ - if (val + 1 == tx_vq->num) - restart_tx = true; - } - /* Deliver to monitoring devices all correctly transmitted * packets. */ virtio_transport_deliver_tap_pkt(pkt); - total_len += pkt->len; - virtio_transport_free_pkt(pkt); + pkt->off += payload_len; + total_len += payload_len; + + /* If we didn't send all the payload we can requeue the packet + * to send it with the next available buffer. + */ + if (pkt->off < pkt->len) { + spin_lock_bh(&vsock->send_pkt_list_lock); + list_add(&pkt->list, &vsock->send_pkt_list); + spin_unlock_bh(&vsock->send_pkt_list_lock); + } else { + if (pkt->reply) { + int val; + + val = atomic_dec_return(&vsock->queued_replies); + + /* Do we have resources to resume tx + * processing? 
+ */ + if (val + 1 == tx_vq->num) + restart_tx = true; + } + + virtio_transport_free_pkt(pkt); + } + } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len))); if (added) vhost_signal(&vsock->dev, vq); @@ -329,6 +359,8 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq, return NULL; } + pkt->buf_len = pkt->len; + nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter); if (nbytes != pkt->len) { vq_err(vq, "Expected %u byte payload, got %zu bytes\n", diff --git a/include/linux/can/core.h b/include/linux/can/core.h index 6099bc18bd0c..708c10d3417a 100644 --- a/include/linux/can/core.h +++ b/include/linux/can/core.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * linux/can/core.h * @@ -57,6 +57,5 @@ extern void can_rx_unregister(struct net *net, struct net_device *dev, void *data); extern int can_send(struct sk_buff *skb, int loop); -extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); #endif /* !_CAN_CORE_H */ diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index b3379a97245c..a954def26c0d 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * linux/can/skb.h * diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 8511fadc0935..7d3f2ced92d1 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -881,6 +881,14 @@ struct ieee80211_tpc_report_ie { u8 link_margin; } __packed; +#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK GENMASK(2, 1) +#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_SHIFT 1 +#define IEEE80211_ADDBA_EXT_NO_FRAG BIT(0) + +struct ieee80211_addba_ext_ie { + u8 data; +} __packed; + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -973,6 +981,8 @@ struct ieee80211_mgmt { __le16 capab; __le16 timeout; __le16 start_seq_num; + /* followed by BA Extension */ + u8 variable[0]; } __packed addba_req; struct{ u8 action_code; @@ -1626,6 +1636,18 @@ struct ieee80211_he_operation { } __packed; /** + * struct ieee80211_he_spr - HE spatial reuse element + * + * This structure is the "HE spatial reuse element" element as + * described in P802.11ax_D4.0 section 9.4.2.241 + */ +struct ieee80211_he_spr { + u8 he_sr_control; + /* Optional 0 to 19 bytes: depends on @he_sr_control */ + u8 optional[0]; +} __packed; + +/** * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field * * This structure is the "MU AC Parameter Record" fields as @@ -2033,8 +2055,8 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size * @he_oper_ie: byte data of the HE Operations IE, starting from the byte * after the ext ID byte. It is assumed that he_oper_ie has at least - * sizeof(struct ieee80211_he_operation) bytes, checked already in - * ieee802_11_parse_elems_crc() + * sizeof(struct ieee80211_he_operation) bytes, the caller must have + * validated this. * @return the actual size of the IE data (not including header), or 0 on error */ static inline u8 @@ -2063,6 +2085,42 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) return oper_len; } +/* HE Spatial Reuse defines */ +#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT 0x4 +#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT 0x8 + +/* + * ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size + * @he_spr_ie: byte data of the HE Spatial Reuse IE, starting from the byte + * after the ext ID byte. It is assumed that he_spr_ie has at least + * sizeof(struct ieee80211_he_spr) bytes, the caller must have validated + * this + * @return the actual size of the IE data (not including header), or 0 on error + */ +static inline u8 +ieee80211_he_spr_size(const u8 *he_spr_ie) +{ + struct ieee80211_he_spr *he_spr = (void *)he_spr_ie; + u8 spr_len = sizeof(struct ieee80211_he_spr); + u8 he_spr_params; + + /* Make sure the input is not NULL */ + if (!he_spr_ie) + return 0; + + /* Calc required length; he_sr_control is a single byte, so it is + * read directly rather than through le32_to_cpu(). + */ + he_spr_params = he_spr->he_sr_control; + if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) + spr_len++; + if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) + spr_len += 18; + + /* Add the first byte (extension ID) to the total length */ + spr_len++; + + return spr_len; +} + /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 #define WLAN_AUTH_SHARED_KEY 1 @@ -2485,6 +2543,7 @@ enum ieee80211_eid_ext { WLAN_EID_EXT_HE_OPERATION = 36, WLAN_EID_EXT_UORA = 37, WLAN_EID_EXT_HE_MU_EDCA = 38, + WLAN_EID_EXT_HE_SPR = 39, WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME = 52, WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION = 55, WLAN_EID_EXT_NON_INHERITANCE = 56, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 0e6da1840c7d..d8f348ef9c33 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -477,6 +477,17 @@ struct mlx5_core_sriov { u16 max_vfs; }; +struct mlx5_fc_pool { + struct mlx5_core_dev *dev; + struct mutex pool_lock; /* protects pool lists */ + struct list_head fully_used; + struct list_head partially_used; + struct list_head unused; + int available_fcs; + int used_fcs; + int threshold; +}; + struct mlx5_fc_stats { spinlock_t counters_idr_lock; /* protects counters_idr */ struct idr counters_idr; @@ -488,6 +499,8 @@ struct mlx5_fc_stats { struct delayed_work work; unsigned long next_query; unsigned long sampling_interval; /* jiffies */ + u32 *bulk_query_out; + struct mlx5_fc_pool fc_pool; }; struct mlx5_events; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index ec571fd7fcf8..da5e7eaed438 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1040,6 +1040,21 @@ enum { MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1, }; +#define MLX5_FC_BULK_SIZE_FACTOR 128 + +enum mlx5_fc_bulk_alloc_bitmask { + MLX5_FC_BULK_128 = (1 << 0), + MLX5_FC_BULK_256 = (1 << 1), + MLX5_FC_BULK_512 = (1 << 2), + MLX5_FC_BULK_1024 = (1 << 3), + MLX5_FC_BULK_2048 = (1 << 4), + MLX5_FC_BULK_4096 = (1 << 5), + MLX5_FC_BULK_8192 = (1 << 6), + MLX5_FC_BULK_16384 = (1 << 7), +}; + +#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum)) + struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_0[0x30]; u8 vhca_id[0x10]; @@ -1244,7 +1259,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_2e0[0x7]; u8 max_qp_mcg[0x19]; - u8 reserved_at_300[0x18]; + u8 reserved_at_300[0x10]; + u8 flow_counter_bulk_alloc[0x8]; u8 log_max_mcg[0x8]; u8 reserved_at_320[0x3]; @@ -2766,7 +2782,7 @@ struct 
mlx5_ifc_traffic_counter_bits { struct mlx5_ifc_tisc_bits { u8 strict_lag_tx_port_affinity[0x1]; u8 tls_en[0x1]; - u8 reserved_at_1[0x2]; + u8 reserved_at_2[0x2]; u8 lag_tx_port_affinity[0x04]; u8 reserved_at_8[0x4]; @@ -2941,6 +2957,13 @@ enum { SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3, }; +enum { + ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0, + ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1, + ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2, + ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3, +}; + struct mlx5_ifc_scheduling_context_bits { u8 element_type[0x8]; u8 reserved_at_8[0x18]; @@ -7817,7 +7840,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x38]; + u8 flow_counter_bulk[0x8]; }; struct mlx5_ifc_add_vxlan_udp_dport_out_bits { diff --git a/include/linux/platform_data/nxp-nci.h b/include/linux/platform_data/nxp-nci.h deleted file mode 100644 index 97827ad468e2..000000000000 --- a/include/linux/platform_data/nxp-nci.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Generic platform data for the NXP NCI NFC chips. - * - * Copyright (C) 2014 NXP Semiconductors All rights reserved. - * - * Authors: Clément Perrochaud <clement.perrochaud@nxp.com> - */ - -#ifndef _NXP_NCI_H_ -#define _NXP_NCI_H_ - -struct nxp_nci_nfc_platform_data { - unsigned int gpio_en; - unsigned int gpio_fw; - unsigned int irq; -}; - -#endif /* _NXP_NCI_H_ */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d8af86d995d6..3aef8d82ea59 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -14,6 +14,7 @@ #include <linux/compiler.h> #include <linux/time.h> #include <linux/bug.h> +#include <linux/bvec.h> #include <linux/cache.h> #include <linux/rbtree.h> #include <linux/socket.h> @@ -308,58 +309,45 @@ extern int sysctl_max_skb_frags; */ #define GSO_BY_FRAGS 0xFFFF -typedef struct skb_frag_struct skb_frag_t; - -struct skb_frag_struct { - struct { - struct page *p; - } page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 page_offset; - __u32 size; -#else - __u16 page_offset; - __u16 size; -#endif -}; +typedef struct bio_vec skb_frag_t; /** - * skb_frag_size - Returns the size of a skb fragment + * skb_frag_size() - Returns the size of a skb fragment * @frag: skb fragment */ static inline unsigned int skb_frag_size(const skb_frag_t *frag) { - return frag->size; + return frag->bv_len; } /** - * skb_frag_size_set - Sets the size of a skb fragment + * skb_frag_size_set() - Sets the size of a skb fragment * @frag: skb fragment * @size: size of fragment */ static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) { - frag->size = size; + frag->bv_len = size; } /** - * skb_frag_size_add - Incrementes the size of a skb fragment by %delta + * skb_frag_size_add() - Increments the size of a skb fragment by @delta * @frag: skb fragment * @delta: value to add */ static inline void skb_frag_size_add(skb_frag_t *frag, int delta) { - frag->size += delta; + frag->bv_len += delta; } /** - * skb_frag_size_sub - Decrements the size of a skb fragment by %delta + * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta * @frag: skb fragment * @delta: value to subtract */ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) { - frag->size -= delta; + frag->bv_len -= delta; } /** @@ -379,7 +367,7 @@ static inline bool skb_frag_must_loop(struct page *p) * skb_frag_foreach_page - loop over pages in a fragment * * @f: skb frag to operate on - * @f_off: offset 
from start of f->page.p + * @f_off: offset from start of f->bv_page * @f_len: length from f_off to loop over * @p: (temp var) current page * @p_off: (temp var) offset from start of current page, @@ -2089,8 +2077,8 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, * that not all callers have unique ownership of the page but rely * on page_is_pfmemalloc doing the right thing(tm). */ - frag->page.p = page; - frag->page_offset = off; + frag->bv_page = page; + frag->bv_offset = off; skb_frag_size_set(frag, size); page = compound_head(page); @@ -2870,6 +2858,46 @@ static inline void skb_propagate_pfmemalloc(struct page *page, } /** + * skb_frag_off() - Returns the offset of a skb fragment + * @frag: the paged fragment + */ +static inline unsigned int skb_frag_off(const skb_frag_t *frag) +{ + return frag->bv_offset; +} + +/** + * skb_frag_off_add() - Increments the offset of a skb fragment by @delta + * @frag: skb fragment + * @delta: value to add + */ +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->bv_offset += delta; +} + +/** + * skb_frag_off_set() - Sets the offset of a skb fragment + * @frag: skb fragment + * @offset: offset of fragment + */ +static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset) +{ + frag->bv_offset = offset; +} + +/** + * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment + * @fragto: skb fragment where offset is set + * @fragfrom: skb fragment offset is copied from + */ +static inline void skb_frag_off_copy(skb_frag_t *fragto, + const skb_frag_t *fragfrom) +{ + fragto->bv_offset = fragfrom->bv_offset; +} + +/** * skb_frag_page - retrieve the page referred to by a paged fragment * @frag: the paged fragment * @@ -2877,7 +2905,7 @@ static inline void skb_propagate_pfmemalloc(struct page *page, */ static inline struct page *skb_frag_page(const skb_frag_t *frag) { - return frag->page.p; + return frag->bv_page; } /** @@ -2935,7 +2963,7 @@ static inline void skb_frag_unref(struct sk_buff *skb, int f) */ static inline void *skb_frag_address(const skb_frag_t *frag) { - return page_address(skb_frag_page(frag)) + frag->page_offset; + return page_address(skb_frag_page(frag)) + skb_frag_off(frag); } /** @@ -2951,7 +2979,18 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag) if (unlikely(!ptr)) return NULL; - return ptr + frag->page_offset; + return ptr + skb_frag_off(frag); +} + +/** + * skb_frag_page_copy() - sets the page in a fragment from another fragment + * @fragto: skb fragment where page is set + * @fragfrom: skb fragment page is copied from + */ +static inline void skb_frag_page_copy(skb_frag_t *fragto, + const skb_frag_t *fragfrom) +{ + fragto->bv_page = fragfrom->bv_page; } /** @@ -2963,7 +3002,7 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag) */ static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) { - frag->page.p = page; + frag->bv_page = page; } /** @@ -2999,7 +3038,7 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev, enum dma_data_direction dir) { return dma_map_page(dev, skb_frag_page(frag), - frag->page_offset + offset, size, dir); + skb_frag_off(frag) + offset, size, dir); } static inline struct sk_buff *pskb_copy(struct sk_buff *skb, @@ -3166,10 +3205,10 @@ static inline bool skb_can_coalesce(struct sk_buff *skb, int i, if (skb_zcopy(skb)) return false; if (i) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; return 
page == skb_frag_page(frag) && - off == frag->page_offset + skb_frag_size(frag); + off == skb_frag_off(frag) + skb_frag_size(frag); } return false; } diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 7d06241582dd..7b3e354bcd3c 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -81,6 +81,7 @@ struct stmmac_mdio_bus_data { unsigned int phy_mask; int *irqs; int probed_phy_irq; + bool needs_reset; }; struct stmmac_dma_cfg { diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index e223e2632edd..4c7781f4b29b 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -35,13 +35,14 @@ struct virtio_vsock_sock { /* Protected by tx_lock */ u32 tx_cnt; - u32 buf_alloc; u32 peer_fwd_cnt; u32 peer_buf_alloc; /* Protected by rx_lock */ u32 fwd_cnt; + u32 last_fwd_cnt; u32 rx_bytes; + u32 buf_alloc; struct list_head rx_queue; }; @@ -52,6 +53,7 @@ struct virtio_vsock_pkt { /* socket refcnt not held, only use for cancellation */ struct vsock_sock *vsk; void *buf; + u32 buf_len; u32 len; u32 off; bool reply; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 26e2ad2c7027..8140c4837122 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -247,6 +247,19 @@ struct ieee80211_rate { }; /** + * struct ieee80211_he_obss_pd - AP settings for spatial reuse + * + * @enable: is the feature enabled. + * @min_offset: minimal tx power offset an associated station shall use + * @max_offset: maximum tx power offset an associated station shall use + */ +struct ieee80211_he_obss_pd { + bool enable; + u8 min_offset; + u8 max_offset; +}; + +/** * struct ieee80211_sta_ht_cap - STA's HT capabilities * * This structure describes most essential parameters needed @@ -896,6 +909,7 @@ enum cfg80211_ap_settings_flags { * @vht_required: stations must support VHT * @twt_responder: Enable Target Wait Time * @flags: flags, as defined in enum cfg80211_ap_settings_flags + * @he_obss_pd: OBSS Packet Detection settings */ struct cfg80211_ap_settings { struct cfg80211_chan_def chandef; @@ -923,6 +937,7 @@ struct cfg80211_ap_settings { bool ht_required, vht_required; bool twt_responder; u32 flags; + struct ieee80211_he_obss_pd he_obss_pd; }; /** diff --git a/include/net/dsa.h b/include/net/dsa.h index 1e8650fa8acc..147b757ef8ea 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -41,6 +41,7 @@ struct phylink_link_state; #define DSA_TAG_PROTO_TRAILER_VALUE 11 #define DSA_TAG_PROTO_8021Q_VALUE 12 #define DSA_TAG_PROTO_SJA1105_VALUE 13 +#define DSA_TAG_PROTO_KSZ8795_VALUE 14 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, @@ -57,6 +58,7 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_TRAILER = DSA_TAG_PROTO_TRAILER_VALUE, DSA_TAG_PROTO_8021Q = DSA_TAG_PROTO_8021Q_VALUE, DSA_TAG_PROTO_SJA1105 = DSA_TAG_PROTO_SJA1105_VALUE, + DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE, }; struct packet_type; diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index b16d21636d69..d3b12bc8a114 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -117,6 +117,8 @@ enum flow_action_id { FLOW_ACTION_GOTO, FLOW_ACTION_REDIRECT, FLOW_ACTION_MIRRED, + FLOW_ACTION_REDIRECT_INGRESS, + FLOW_ACTION_MIRRED_INGRESS, FLOW_ACTION_VLAN_PUSH, FLOW_ACTION_VLAN_POP, FLOW_ACTION_VLAN_MANGLE, @@ -126,11 +128,15 @@ enum flow_action_id { FLOW_ACTION_ADD, FLOW_ACTION_CSUM, FLOW_ACTION_MARK, + FLOW_ACTION_PTYPE, FLOW_ACTION_WAKE, FLOW_ACTION_QUEUE, FLOW_ACTION_SAMPLE, FLOW_ACTION_POLICE, FLOW_ACTION_CT, + 
FLOW_ACTION_MPLS_PUSH, + FLOW_ACTION_MPLS_POP, + FLOW_ACTION_MPLS_MANGLE, +}; /* This is mirroring enum pedit_header_type definition for easy mapping between @@ -165,6 +171,7 @@ struct flow_action_entry { const struct ip_tunnel_info *tunnel; /* FLOW_ACTION_TUNNEL_ENCAP */ u32 csum_flags; /* FLOW_ACTION_CSUM */ u32 mark; /* FLOW_ACTION_MARK */ + u16 ptype; /* FLOW_ACTION_PTYPE */ struct { /* FLOW_ACTION_QUEUE */ u32 ctx; u32 index; @@ -184,6 +191,22 @@ struct flow_action_entry { int action; u16 zone; } ct; + struct { /* FLOW_ACTION_MPLS_PUSH */ + u32 label; + __be16 proto; + u8 tc; + u8 bos; + u8 ttl; + } mpls_push; + struct { /* FLOW_ACTION_MPLS_POP */ + __be16 proto; + } mpls_pop; + struct { /* FLOW_ACTION_MPLS_MANGLE */ + u32 label; + u8 tc; + u8 bos; + u8 ttl; + } mpls_mangle; }; }; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index d26da013f7c0..6781d4637557 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -315,6 +315,7 @@ struct ieee80211_vif_chanctx_switch { * @BSS_CHANGED_FTM_RESPONDER: fine timing measurement request responder * functionality changed for this BSS (AP mode). * @BSS_CHANGED_TWT: TWT status changed + * @BSS_CHANGED_HE_OBSS_PD: OBSS Packet Detection status changed. * */ enum ieee80211_bss_change { @@ -346,6 +347,7 @@ enum ieee80211_bss_change { BSS_CHANGED_MCAST_RATE = 1<<25, BSS_CHANGED_FTM_RESPONDER = 1<<26, BSS_CHANGED_TWT = 1<<27, + BSS_CHANGED_HE_OBSS_PD = 1<<28, /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -600,6 +602,8 @@ struct ieee80211_ftm_responder_params { * nontransmitted BSSIDs * @profile_periodicity: the least number of beacon frames that need to be received * in order to discover all the nontransmitted BSSIDs in the set. + * @he_operation: HE operation information of the AP we are connected to + * @he_obss_pd: OBSS Packet Detection parameters. */ struct ieee80211_bss_conf { const u8 *bssid; @@ -661,6 +665,8 @@ struct ieee80211_bss_conf { u8 bssid_indicator; bool ema_ap; u8 profile_periodicity; + struct ieee80211_he_operation he_operation; + struct ieee80211_he_obss_pd he_obss_pd; }; /** @@ -1058,11 +1064,13 @@ struct ieee80211_tx_info { * @sta: Station that the packet was transmitted for * @info: Basic tx status information * @skb: Packet skb (can be NULL if not provided by the driver) + * @rate: The TX rate that was used when sending the packet */ struct ieee80211_tx_status { struct ieee80211_sta *sta; struct ieee80211_tx_info *info; struct sk_buff *skb; + struct rate_info *rate; }; /** @@ -1702,6 +1710,9 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif); * a TKIP key if it only requires MIC space. Do not set together with * @IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key. * @IEEE80211_KEY_FLAG_NO_AUTO_TX: Key needs explicit Tx activation. + * @IEEE80211_KEY_FLAG_GENERATE_MMIE: This flag should be set by the driver + * for an AES_CMAC key to indicate that it requires sequence number + * generation only */ enum ieee80211_key_flags { IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0), @@ -1714,6 +1725,7 @@ enum ieee80211_key_flags { IEEE80211_KEY_FLAG_RESERVE_TAILROOM = BIT(7), IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(8), IEEE80211_KEY_FLAG_NO_AUTO_TX = BIT(9), + IEEE80211_KEY_FLAG_GENERATE_MMIE = BIT(10), }; /** @@ -2268,11 +2280,9 @@ struct ieee80211_txq { * @IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID: Hardware supports multi BSSID * only for HE APs. Applies if @IEEE80211_HW_SUPPORTS_MULTI_BSSID is set. * - * @IEEE80211_HW_EXT_KEY_ID_NATIVE: Driver and hardware are supporting Extended - * Key ID and can handle two unicast keys per station for Rx and Tx. - * - * @IEEE80211_HW_NO_AMPDU_KEYBORDER_SUPPORT: The card/driver can't handle - * active Tx A-MPDU sessions with Extended Key IDs during rekey. + * @IEEE80211_HW_AMPDU_KEYBORDER_SUPPORT: The card and driver only + * aggregate MPDUs with the same keyid, allowing mac80211 to keep Tx + * A-MPDU sessions active while rekeying with Extended Key ID. * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ @@ -2325,8 +2335,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_TX_STATUS_NO_AMPDU_LEN, IEEE80211_HW_SUPPORTS_MULTI_BSSID, IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID, - IEEE80211_HW_EXT_KEY_ID_NATIVE, - IEEE80211_HW_NO_AMPDU_KEYBORDER_SUPPORT, + IEEE80211_HW_AMPDU_KEYBORDER_SUPPORT, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@ -3914,7 +3923,8 @@ struct ieee80211_ops { struct ieee80211_channel *chan, int duration, enum ieee80211_roc_type type); - int (*cancel_remain_on_channel)(struct ieee80211_hw *hw); + int (*cancel_remain_on_channel)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); int (*set_ringparam)(struct ieee80211_hw *hw, u32 tx, u32 rx); void (*get_ringparam)(struct ieee80211_hw *hw, u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max); @@ -5945,7 +5955,6 @@ struct rate_control_ops { void (*add_sta_debugfs)(void *priv, void *priv_sta, struct dentry *dir); - void (*remove_sta_debugfs)(void *priv, void *priv_sta); u32 (*get_expected_throughput)(void *priv_sta); }; @@ -6234,11 +6243,37 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); * but for the duration of the frame handling. * However, also note that while in the wake_tx_queue() method, * rcu_read_lock() is already held. + * + * softirqs must also be disabled when this function is called. + * In process context, use ieee80211_tx_dequeue_ni() instead. */ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, struct ieee80211_txq *txq); /** + * ieee80211_tx_dequeue_ni - dequeue a packet from a software tx queue + * (in process context) + * + * Like ieee80211_tx_dequeue() but can be called in process context + * (internally disables bottom halves). 
+ * + * @hw: pointer as obtained from ieee80211_alloc_hw() + * @txq: pointer obtained from station or virtual interface, or from + * ieee80211_next_txq() + */ +static inline struct sk_buff *ieee80211_tx_dequeue_ni(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct sk_buff *skb; + + local_bh_disable(); + skb = ieee80211_tx_dequeue(hw, txq); + local_bh_enable(); + + return skb; +} + +/** * ieee80211_next_txq - get next tx queue to pull packets from * * @hw: pointer as obtained from ieee80211_alloc_hw() diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 366150053043..b2f715ca0567 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -40,6 +40,7 @@ enum { ND_OPT_RDNSS = 25, /* RFC5006 */ ND_OPT_DNSSL = 31, /* RFC6106 */ ND_OPT_6CO = 34, /* RFC6775 */ + ND_OPT_CAPTIVE_PORTAL = 37, /* RFC7710 */ __ND_OPT_MAX }; diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h index c757585a05b0..1cace4c69e44 100644 --- a/include/net/tc_act/tc_mirred.h +++ b/include/net/tc_act/tc_mirred.h @@ -32,6 +32,24 @@ static inline bool is_tcf_mirred_egress_mirror(const struct tc_action *a) return false; } +static inline bool is_tcf_mirred_ingress_redirect(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + if (a->ops && a->ops->id == TCA_ID_MIRRED) + return to_mirred(a)->tcfm_eaction == TCA_INGRESS_REDIR; +#endif + return false; +} + +static inline bool is_tcf_mirred_ingress_mirror(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + if (a->ops && a->ops->id == TCA_ID_MIRRED) + return to_mirred(a)->tcfm_eaction == TCA_INGRESS_MIRROR; +#endif + return false; +} + static inline struct net_device *tcf_mirred_dev(const struct tc_action *a) { return rtnl_dereference(to_mirred(a)->tcfm_dev); diff --git a/include/net/tc_act/tc_mpls.h b/include/net/tc_act/tc_mpls.h index 4bc3d9250ef0..721de4f5733a 100644 --- a/include/net/tc_act/tc_mpls.h +++ b/include/net/tc_act/tc_mpls.h @@ -27,4 +27,79 @@ struct tcf_mpls { }; #define to_mpls(a) ((struct tcf_mpls *)a) +static inline bool is_tcf_mpls(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + if (a->ops && a->ops->id == TCA_ID_MPLS) + return true; +#endif + return false; +} + +static inline u32 tcf_mpls_action(const struct tc_action *a) +{ + u32 tcfm_action; + + rcu_read_lock(); + tcfm_action = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_action; + rcu_read_unlock(); + + return tcfm_action; +} + +static inline __be16 tcf_mpls_proto(const struct tc_action *a) +{ + __be16 tcfm_proto; + + rcu_read_lock(); + tcfm_proto = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_proto; + rcu_read_unlock(); + + return tcfm_proto; +} + +static inline u32 tcf_mpls_label(const struct tc_action *a) +{ + u32 tcfm_label; + + rcu_read_lock(); + tcfm_label = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_label; + rcu_read_unlock(); + + return tcfm_label; +} + +static inline u8 tcf_mpls_tc(const struct tc_action *a) +{ + u8 tcfm_tc; + + rcu_read_lock(); + tcfm_tc = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_tc; + rcu_read_unlock(); + + return tcfm_tc; +} + +static inline u8 tcf_mpls_bos(const struct tc_action *a) +{ + u8 tcfm_bos; + + rcu_read_lock(); + tcfm_bos = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_bos; + rcu_read_unlock(); + + return tcfm_bos; +} + +static inline u8 tcf_mpls_ttl(const struct tc_action *a) +{ + u8 tcfm_ttl; + + rcu_read_lock(); + tcfm_ttl = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_ttl; + rcu_read_unlock(); + + return tcfm_ttl; +} + #endif /* __NET_TC_MPLS_H */ diff --git a/include/net/tc_act/tc_skbedit.h 
b/include/net/tc_act/tc_skbedit.h index 4c04e2985508..b22a1f641f02 100644 --- a/include/net/tc_act/tc_skbedit.h +++ b/include/net/tc_act/tc_skbedit.h @@ -54,4 +54,31 @@ static inline u32 tcf_skbedit_mark(const struct tc_action *a) return mark; } +/* Return true iff action is ptype */ +static inline bool is_tcf_skbedit_ptype(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + u32 flags; + + if (a->ops && a->ops->id == TCA_ID_SKBEDIT) { + rcu_read_lock(); + flags = rcu_dereference(to_skbedit(a)->params)->flags; + rcu_read_unlock(); + return flags == SKBEDIT_F_PTYPE; + } +#endif + return false; +} + +static inline u32 tcf_skbedit_ptype(const struct tc_action *a) +{ + u16 ptype; + + rcu_read_lock(); + ptype = rcu_dereference(to_skbedit(a)->params)->ptype; + rcu_read_unlock(); + + return ptype; +} + #endif /* __NET_TC_SKBEDIT_H */ diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 773e476a8e54..1b3c2b643a02 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -237,6 +237,7 @@ struct br_mdb_entry { #define MDB_PERMANENT 1 __u8 state; #define MDB_FLAGS_OFFLOAD (1 << 0) +#define MDB_FLAGS_FAST_LEAVE (1 << 1) __u8 flags; __u16 vid; struct { diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index beb9a9d0c00a..822851d369ab 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -657,7 +657,9 @@ * is used during CSA period. * @NL80211_CMD_FRAME_WAIT_CANCEL: When an off-channel TX was requested, this * command may be used with the corresponding cookie to cancel the wait - * time if it is known that it is no longer necessary. + * time if it is known that it is no longer necessary. This command is + * also sent as an event whenever the driver has completed the off-channel + * wait time. * @NL80211_CMD_ACTION: Alias for @NL80211_CMD_FRAME for backward compatibility. * @NL80211_CMD_FRAME_TX_STATUS: Report TX status of a management frame * transmitted with %NL80211_CMD_FRAME. %NL80211_ATTR_COOKIE identifies @@ -2356,6 +2358,9 @@ enum nl80211_commands { * * @NL80211_ATTR_TWT_RESPONDER: Enable target wait time responder support. * + * @NL80211_ATTR_HE_OBSS_PD: nested attribute for OBSS Packet Detection + * functionality. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2813,6 +2818,8 @@ enum nl80211_attrs { NL80211_ATTR_TWT_RESPONDER, + NL80211_ATTR_HE_OBSS_PD, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -6488,4 +6495,26 @@ enum nl80211_peer_measurement_ftm_resp { NL80211_PMSR_FTM_RESP_ATTR_MAX = NUM_NL80211_PMSR_FTM_RESP_ATTR - 1 }; +/** + * enum nl80211_obss_pd_attributes - OBSS packet detection attributes + * @__NL80211_HE_OBSS_PD_ATTR_INVALID: Invalid + * + * @NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET: the OBSS PD minimum tx power offset. + * @NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET: the OBSS PD maximum tx power offset. + * + * @__NL80211_HE_OBSS_PD_ATTR_LAST: Internal + * @NL80211_HE_OBSS_PD_ATTR_MAX: highest OBSS PD attribute. 
+ */ +enum nl80211_obss_pd_attributes { + __NL80211_HE_OBSS_PD_ATTR_INVALID, + + NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET, + NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET, + + /* keep last */ + __NL80211_HE_OBSS_PD_ATTR_LAST, + NL80211_HE_OBSS_PD_ATTR_MAX = __NL80211_HE_OBSS_PD_ATTR_LAST - 1, +}; + + #endif /* __LINUX_NL80211_H */ diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index a8cb6b2e20c1..4072e9d394d6 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -953,8 +953,8 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset, if (copy > len) copy = len; vaddr = kmap_atomic(skb_frag_page(frag)); - sum = atalk_sum_partial(vaddr + frag->page_offset + - offset - start, copy, sum); + sum = atalk_sum_partial(vaddr + skb_frag_off(frag) + + offset - start, copy, sum); kunmap_atomic(vaddr); if (!(len -= copy)) diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index bf6acd34234d..428af1abf8cc 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -60,6 +60,8 @@ static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags) e->flags = 0; if (flags & MDB_PG_FLAGS_OFFLOAD) e->flags |= MDB_FLAGS_OFFLOAD; + if (flags & MDB_PG_FLAGS_FAST_LEAVE) + e->flags |= MDB_FLAGS_FAST_LEAVE; } static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index f8cac3702712..9b379e110129 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1396,7 +1396,7 @@ br_multicast_leave_group(struct net_bridge *br, del_timer(&p->timer); kfree_rcu(p, rcu); br_mdb_notify(br->dev, port, group, RTM_DELMDB, - p->flags); + p->flags | MDB_PG_FLAGS_FAST_LEAVE); if (!mp->ports && !mp->host_joined && netif_running(br->dev)) diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 646504db0220..b7a4942ff1b3 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -199,6 +199,7 @@ struct net_bridge_fdb_entry { #define MDB_PG_FLAGS_PERMANENT BIT(0) #define MDB_PG_FLAGS_OFFLOAD BIT(1) +#define MDB_PG_FLAGS_FAST_LEAVE BIT(2) struct net_bridge_port_group { struct net_bridge_port *port; diff --git a/net/can/Kconfig b/net/can/Kconfig index 0f9fe846ddef..d4319aa3e1b1 100644 --- a/net/can/Kconfig +++ b/net/can/Kconfig @@ -8,11 +8,12 @@ menuconfig CAN tristate "CAN bus subsystem support" ---help--- Controller Area Network (CAN) is a slow (up to 1Mbit/s) serial - communications protocol which was developed by Bosch in - 1991, mainly for automotive, but now widely used in marine - (NMEA2000), industrial, and medical applications. - More information on the CAN network protocol family PF_CAN - is contained in <Documentation/networking/can.rst>. + communications protocol. Development of the CAN bus started in + 1983 at Robert Bosch GmbH, and the protocol was officially + released in 1986. The CAN bus was originally intended mainly for + automotive use, but is now also widely used in marine (NMEA2000), + industrial, and medical applications. More information on the CAN + network protocol family PF_CAN is contained in <Documentation/networking/can.rst>. If you want CAN support you should say Y here and also to the specific driver for your controller(s) below. 
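The bridge hunks above plumb the per-port-group MDB_PG_FLAGS_FAST_LEAVE bit through __mdb_entry_fill_flags() and OR it into the RTM_DELMDB notification sent from br_multicast_leave_group(), so the new uapi flag MDB_FLAGS_FAST_LEAVE lets a netlink listener tell fast-leave deletions apart from ordinary timer or configuration removals. A minimal userspace sketch of testing the bit on a parsed entry; the helper name and printed strings are illustrative only, not part of the patch:

#include <stdio.h>
#include <linux/if_bridge.h>

/* Hypothetical helper: called for each br_mdb_entry carried in an
 * RTM_DELMDB notification, after the usual netlink attribute parsing.
 * MDB_FLAGS_FAST_LEAVE is set only when the port group was deleted by
 * IGMP/MLD fast leave (the br_multicast_leave_group() path above). */
static void show_mdb_del_reason(const struct br_mdb_entry *e)
{
	if (e->flags & MDB_FLAGS_FAST_LEAVE)
		printf("vid %u: group deleted by fast leave\n", e->vid);
	else
		printf("vid %u: group deleted (expiry or config)\n", e->vid);
}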
diff --git a/net/can/af_can.c b/net/can/af_can.c index 80281ef2ccbd..76cf83b2bd40 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* * af_can.c - Protocol family CAN core module * (used by different CAN protocol modules) @@ -87,15 +88,6 @@ static atomic_t skbcounter = ATOMIC_INIT(0); * af_can socket functions */ -int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) -{ - switch (cmd) { - default: - return -ENOIOCTLCMD; - } -} -EXPORT_SYMBOL(can_ioctl); - static void can_sock_destruct(struct sock *sk) { skb_queue_purge(&sk->sk_receive_queue); diff --git a/net/can/af_can.h b/net/can/af_can.h index 9cb3719632bd..ef21f7c6bc80 100644 --- a/net/can/af_can.h +++ b/net/can/af_can.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * Copyright (c) 2002-2007 Volkswagen Group Electronic Research * All rights reserved. diff --git a/net/can/bcm.c b/net/can/bcm.c index a34ee52f19ea..bf1d0bbecec8 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) /* * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content * @@ -1679,6 +1680,13 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, return size; } +int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd, + unsigned long arg) +{ + /* no ioctls for socket layer -> hand it down to NIC layer */ + return -ENOIOCTLCMD; +} + static const struct proto_ops bcm_ops = { .family = PF_CAN, .release = bcm_release, @@ -1688,7 +1696,7 @@ static const struct proto_ops bcm_ops = { .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, - .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */ + .ioctl = bcm_sock_no_ioctlcmd, .gettstamp = sock_gettstamp, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/can/gw.c b/net/can/gw.c index 72711053ebe6..ce17f836262b 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) /* * gw.c - CAN frame Gateway/Router/Bridge with netlink interface * diff --git a/net/can/proc.c b/net/can/proc.c index 70fea17bb04c..edb822c31902 100644 --- a/net/can/proc.c +++ b/net/can/proc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) /* * proc.c - procfs support for Protocol family CAN core module * diff --git a/net/can/raw.c b/net/can/raw.c index afcbff063a67..da386f1fa815 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) /* * raw.c - Raw sockets for protocol family CAN * @@ -836,6 +837,13 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, return size; } +int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd, + unsigned long arg) +{ + /* no ioctls for socket layer -> hand it down to NIC layer */ + return -ENOIOCTLCMD; +} + static const struct proto_ops raw_ops = { .family = PF_CAN, .release = raw_release, @@ -845,7 +853,7 @@ static const struct proto_ops raw_ops = { .accept = sock_no_accept, .getname = raw_getname, .poll = datagram_poll, - .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */ + .ioctl = raw_sock_no_ioctlcmd, .gettstamp = sock_gettstamp, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/core/datagram.c b/net/core/datagram.c index 45a162ef5e02..4cc8dc5db2b7 
100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -442,8 +442,8 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset, if (copy > len) copy = len; - n = cb(vaddr + frag->page_offset + - offset - start, copy, data, to); + n = cb(vaddr + skb_frag_off(frag) + offset - start, + copy, data, to); kunmap(page); offset += n; if (n != copy) @@ -573,7 +573,7 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, if (copy > len) copy = len; copied = copy_page_from_iter(skb_frag_page(frag), - frag->page_offset + offset - start, + skb_frag_off(frag) + offset - start, copy, from); if (copied != copy) goto fault; diff --git a/net/core/dev.c b/net/core/dev.c index 0891f499c1bb..af071b0ce88e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5486,7 +5486,7 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow) skb->data_len -= grow; skb->tail += grow; - pinfo->frags[0].page_offset += grow; + skb_frag_off_add(&pinfo->frags[0], grow); skb_frag_size_sub(&pinfo->frags[0], grow); if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 4ea4347f5062..4deb86f990f1 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -43,10 +43,16 @@ * netlink alerts */ static int trace_state = TRACE_OFF; -static DEFINE_MUTEX(trace_state_mutex); + +/* net_dm_mutex + * + * An overall lock guarding every operation coming from userspace. + * It also guards the global 'hw_stats_list' list. + */ +static DEFINE_MUTEX(net_dm_mutex); struct per_cpu_dm_data { - spinlock_t lock; + spinlock_t lock; /* Protects 'skb' and 'send_timer' */ struct sk_buff *skb; struct work_struct dm_alert_work; struct timer_list send_timer; @@ -235,22 +241,21 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi, rcu_read_unlock(); } -static int set_all_monitor_traces(int state) +static int set_all_monitor_traces(int state, struct netlink_ext_ack *extack) { int rc = 0; struct dm_hw_stat_delta *new_stat = NULL; struct dm_hw_stat_delta *temp; - mutex_lock(&trace_state_mutex); - if (state == trace_state) { - rc = -EAGAIN; - goto out_unlock; + NL_SET_ERR_MSG_MOD(extack, "Trace state already set to requested state"); + return -EAGAIN; } switch (state) { case TRACE_ON: if (!try_module_get(THIS_MODULE)) { + NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module"); rc = -ENODEV; break; } @@ -288,17 +293,15 @@ static int set_all_monitor_traces(int state) else rc = -EINPROGRESS; -out_unlock: - mutex_unlock(&trace_state_mutex); - return rc; } - static int net_dm_cmd_config(struct sk_buff *skb, struct genl_info *info) { - return -ENOTSUPP; + NL_SET_ERR_MSG_MOD(info->extack, "Command not supported"); + + return -EOPNOTSUPP; } static int net_dm_cmd_trace(struct sk_buff *skb, @@ -306,12 +309,12 @@ static int net_dm_cmd_trace(struct sk_buff *skb, { switch (info->genlhdr->cmd) { case NET_DM_CMD_START: - return set_all_monitor_traces(TRACE_ON); + return set_all_monitor_traces(TRACE_ON, info->extack); case NET_DM_CMD_STOP: - return set_all_monitor_traces(TRACE_OFF); + return set_all_monitor_traces(TRACE_OFF, info->extack); } - return -ENOTSUPP; + return -EOPNOTSUPP; } static int dropmon_net_event(struct notifier_block *ev_block, @@ -330,12 +333,12 @@ static int dropmon_net_event(struct notifier_block *ev_block, new_stat->dev = dev; new_stat->last_rx = jiffies; - mutex_lock(&trace_state_mutex); + mutex_lock(&net_dm_mutex); list_add_rcu(&new_stat->list, &hw_stats_list); - mutex_unlock(&trace_state_mutex); + 
mutex_unlock(&net_dm_mutex); break; case NETDEV_UNREGISTER: - mutex_lock(&trace_state_mutex); + mutex_lock(&net_dm_mutex); list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) { if (new_stat->dev == dev) { new_stat->dev = NULL; @@ -346,7 +349,7 @@ static int dropmon_net_event(struct notifier_block *ev_block, } } } - mutex_unlock(&trace_state_mutex); + mutex_unlock(&net_dm_mutex); break; } out: @@ -371,10 +374,26 @@ static const struct genl_ops dropmon_ops[] = { }, }; +static int net_dm_nl_pre_doit(const struct genl_ops *ops, + struct sk_buff *skb, struct genl_info *info) +{ + mutex_lock(&net_dm_mutex); + + return 0; +} + +static void net_dm_nl_post_doit(const struct genl_ops *ops, + struct sk_buff *skb, struct genl_info *info) +{ + mutex_unlock(&net_dm_mutex); +} + static struct genl_family net_drop_monitor_family __ro_after_init = { .hdrsize = 0, .name = "NET_DM", .version = 2, + .pre_doit = net_dm_nl_pre_doit, + .post_doit = net_dm_nl_post_doit, .module = THIS_MODULE, .ops = dropmon_ops, .n_ops = ARRAY_SIZE(dropmon_ops), @@ -421,7 +440,6 @@ static int __init init_net_drop_monitor(void) reset_per_cpu_data(data); } - goto out; out_unreg: diff --git a/net/core/neighbour.c b/net/core/neighbour.c index f79e61c570ea..5480edff0c86 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -3033,7 +3033,7 @@ static struct neighbour *neigh_get_first(struct seq_file *seq) struct net *net = seq_file_net(seq); struct neigh_hash_table *nht = state->nht; struct neighbour *n = NULL; - int bucket = state->bucket; + int bucket; state->flags &= ~NEIGH_SEQ_IS_PNEIGH; for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) { diff --git a/net/core/pktgen.c b/net/core/pktgen.c index bb9915291644..c5dbdc87342a 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2652,7 +2652,7 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb, } get_page(pkt_dev->page); skb_frag_set_page(skb, i, pkt_dev->page); - skb_shinfo(skb)->frags[i].page_offset = 0; + skb_frag_off_set(&skb_shinfo(skb)->frags[i], 0); /*last fragment, fill rest of data*/ if (i == (frags - 1)) skb_frag_size_set(&skb_shinfo(skb)->frags[i], diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 0338820ee0ec..ea8e8d332d85 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -785,7 +785,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) struct page *p; u8 *vaddr; - skb_frag_foreach_page(frag, frag->page_offset, + skb_frag_foreach_page(frag, skb_frag_off(frag), skb_frag_size(frag), p, p_off, p_len, copied) { seg_len = min_t(int, p_len, len); @@ -1375,7 +1375,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) struct page *p; u8 *vaddr; - skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f), + skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), p, p_off, p_len, copied) { u32 copy, done = 0; vaddr = kmap_atomic(p); @@ -2144,10 +2144,12 @@ pull_pages: skb_frag_unref(skb, i); eat -= size; } else { - skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; + + *frag = skb_shinfo(skb)->frags[i]; if (eat) { - skb_shinfo(skb)->frags[k].page_offset += eat; - skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); + skb_frag_off_add(frag, eat); + skb_frag_size_sub(frag, eat); if (!i) goto end; eat = 0; @@ -2219,7 +2221,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) copy = len; skb_frag_foreach_page(f, - f->page_offset + offset - start, + skb_frag_off(f) + offset - start, copy, p, 
p_off, p_len, copied) { vaddr = kmap_atomic(p); memcpy(to + copied, vaddr + p_off, p_len); @@ -2395,7 +2397,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; if (__splice_segment(skb_frag_page(f), - f->page_offset, skb_frag_size(f), + skb_frag_off(f), skb_frag_size(f), offset, len, spd, false, sk, pipe)) return true; } @@ -2485,20 +2487,20 @@ do_frag_list: for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; - if (offset < frag->size) + if (offset < skb_frag_size(frag)) break; - offset -= frag->size; + offset -= skb_frag_size(frag); } for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; - slen = min_t(size_t, len, frag->size - offset); + slen = min_t(size_t, len, skb_frag_size(frag) - offset); while (slen) { - ret = kernel_sendpage_locked(sk, frag->page.p, - frag->page_offset + offset, + ret = kernel_sendpage_locked(sk, skb_frag_page(frag), + skb_frag_off(frag) + offset, slen, MSG_DONTWAIT); if (ret <= 0) goto error; @@ -2580,7 +2582,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) copy = len; skb_frag_foreach_page(frag, - frag->page_offset + offset - start, + skb_frag_off(frag) + offset - start, copy, p, p_off, p_len, copied) { vaddr = kmap_atomic(p); memcpy(vaddr + p_off, from + copied, p_len); @@ -2660,7 +2662,7 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, copy = len; skb_frag_foreach_page(frag, - frag->page_offset + offset - start, + skb_frag_off(frag) + offset - start, copy, p, p_off, p_len, copied) { vaddr = kmap_atomic(p); csum2 = INDIRECT_CALL_1(ops->update, @@ -2759,7 +2761,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, copy = len; skb_frag_foreach_page(frag, - frag->page_offset + offset - start, + skb_frag_off(frag) + offset - start, copy, p, p_off, p_len, copied) { vaddr = kmap_atomic(p); csum2 = csum_partial_copy_nocheck(vaddr + p_off, @@ -2975,11 +2977,15 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) skb_zerocopy_clone(to, from, GFP_ATOMIC); for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { + int size; + if (!len) break; skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; - skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len); - len -= skb_shinfo(to)->frags[j].size; + size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), + len); + skb_frag_size_set(&skb_shinfo(to)->frags[j], size); + len -= size; skb_frag_ref(to, j); j++; } @@ -3230,7 +3236,7 @@ static inline void skb_split_no_header(struct sk_buff *skb, * 2. Split is accurately. We make this. 
*/ skb_frag_ref(skb, i); - skb_shinfo(skb1)->frags[0].page_offset += len - pos; + skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); skb_shinfo(skb)->nr_frags++; @@ -3293,7 +3299,7 @@ static int skb_prepare_for_shift(struct sk_buff *skb) int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) { int from, to, merge, todo; - struct skb_frag_struct *fragfrom, *fragto; + skb_frag_t *fragfrom, *fragto; BUG_ON(shiftlen > skb->len); @@ -3312,7 +3318,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) */ if (!to || !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), - fragfrom->page_offset)) { + skb_frag_off(fragfrom))) { merge = -1; } else { merge = to - 1; @@ -3329,7 +3335,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) skb_frag_size_add(fragto, shiftlen); skb_frag_size_sub(fragfrom, shiftlen); - fragfrom->page_offset += shiftlen; + skb_frag_off_add(fragfrom, shiftlen); goto onlymerged; } @@ -3360,11 +3366,11 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) } else { __skb_frag_ref(fragfrom); - fragto->page = fragfrom->page; - fragto->page_offset = fragfrom->page_offset; + skb_frag_page_copy(fragto, fragfrom); + skb_frag_off_copy(fragto, fragfrom); skb_frag_size_set(fragto, todo); - fragfrom->page_offset += todo; + skb_frag_off_add(fragfrom, todo); skb_frag_size_sub(fragfrom, todo); todo = 0; @@ -3489,7 +3495,7 @@ next_skb: if (!st->frag_data) st->frag_data = kmap_atomic(skb_frag_page(frag)); - *data = (u8 *) st->frag_data + frag->page_offset + + *data = (u8 *) st->frag_data + skb_frag_off(frag) + (abs_offset - st->stepped_offset); return block_limit - abs_offset; @@ -3625,10 +3631,10 @@ static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) struct page *page; page = virt_to_head_page(frag_skb->head); - head_frag.page.p = page; - head_frag.page_offset = frag_skb->data - - (unsigned char *)page_address(page); - head_frag.size = skb_headlen(frag_skb); + __skb_frag_set_page(&head_frag, page); + skb_frag_off_set(&head_frag, frag_skb->data - + (unsigned char *)page_address(page)); + skb_frag_size_set(&head_frag, skb_headlen(frag_skb)); return head_frag; } @@ -3871,7 +3877,7 @@ normal: size = skb_frag_size(nskb_frag); if (pos < offset) { - nskb_frag->page_offset += offset - pos; + skb_frag_off_add(nskb_frag, offset - pos); skb_frag_size_sub(nskb_frag, offset - pos); } @@ -3992,7 +3998,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) *--frag = *--frag2; } while (--i); - frag->page_offset += offset; + skb_frag_off_add(frag, offset); skb_frag_size_sub(frag, offset); /* all fragments truesize : remove (head size + sk_buff) */ @@ -4021,8 +4027,8 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; - frag->page.p = page; - frag->page_offset = first_offset; + __skb_frag_set_page(frag, page); + skb_frag_off_set(frag, first_offset); skb_frag_size_set(frag, first_size); memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); @@ -4038,7 +4044,7 @@ merge: if (offset > headlen) { unsigned int eat = offset - headlen; - skbinfo->frags[0].page_offset += eat; + skb_frag_off_add(&skbinfo->frags[0], eat); skb_frag_size_sub(&skbinfo->frags[0], eat); skb->data_len -= eat; skb->len -= eat; @@ -4163,7 +4169,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, if (copy > 
len) copy = len; sg_set_page(&sg[elt], skb_frag_page(frag), copy, - frag->page_offset+offset-start); + skb_frag_off(frag) + offset - start); elt++; if (!(len -= copy)) return elt; @@ -5834,7 +5840,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, * where splitting is expensive. * 2. Split is accurately. We make this. */ - shinfo->frags[0].page_offset += off - pos; + skb_frag_off_add(&shinfo->frags[0], off - pos); skb_frag_size_sub(&shinfo->frags[0], off - pos); } skb_frag_ref(skb, i); diff --git a/net/core/tso.c b/net/core/tso.c index 43f4eba61933..d4d5c077ad72 100644 --- a/net/core/tso.c +++ b/net/core/tso.c @@ -55,8 +55,8 @@ void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size) skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; /* Move to next segment */ - tso->size = frag->size; - tso->data = page_address(frag->page.p) + frag->page_offset; + tso->size = skb_frag_size(frag); + tso->data = skb_frag_address(frag); tso->next_frag_idx++; } } @@ -79,8 +79,8 @@ void tso_start(struct sk_buff *skb, struct tso_t *tso) skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; /* Move to next segment */ - tso->size = frag->size; - tso->data = page_address(frag->page.p) + frag->page_offset; + tso->size = skb_frag_size(frag); + tso->data = skb_frag_address(frag); tso->next_frag_idx++; } } diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index 6e942dda1bcd..2f69d4b53d46 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -84,13 +84,6 @@ config NET_DSA_TAG_KSZ Say Y if you want to enable support for tagging frames for the Microchip 9893 family of switches. -config NET_DSA_TAG_KSZ9477 - tristate "Tag driver for Microchip 9477 family of switches" - select NET_DSA_TAG_KSZ_COMMON - help - Say Y if you want to enable support for tagging frames for the - Microchip 9477 family of switches. 
- config NET_DSA_TAG_QCA tristate "Tag driver for Qualcomm Atheros QCA8K switches" help diff --git a/net/dsa/master.c b/net/dsa/master.c index 4b52f8bac5e1..a8e52c9967f4 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c @@ -8,6 +8,70 @@ #include "dsa_priv.h" +static int dsa_master_get_regs_len(struct net_device *dev) +{ + struct dsa_port *cpu_dp = dev->dsa_ptr; + const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops; + struct dsa_switch *ds = cpu_dp->ds; + int port = cpu_dp->index; + int ret = 0; + int len; + + if (ops->get_regs_len) { + len = ops->get_regs_len(dev); + if (len < 0) + return len; + ret += len; + } + + ret += sizeof(struct ethtool_drvinfo); + ret += sizeof(struct ethtool_regs); + + if (ds->ops->get_regs_len) { + len = ds->ops->get_regs_len(ds, port); + if (len < 0) + return len; + ret += len; + } + + return ret; +} + +static void dsa_master_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *data) +{ + struct dsa_port *cpu_dp = dev->dsa_ptr; + const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops; + struct dsa_switch *ds = cpu_dp->ds; + struct ethtool_drvinfo *cpu_info; + struct ethtool_regs *cpu_regs; + int port = cpu_dp->index; + int len; + + if (ops->get_regs_len && ops->get_regs) { + len = ops->get_regs_len(dev); + if (len < 0) + return; + regs->len = len; + ops->get_regs(dev, regs, data); + data += regs->len; + } + + cpu_info = (struct ethtool_drvinfo *)data; + strlcpy(cpu_info->driver, "dsa", sizeof(cpu_info->driver)); + data += sizeof(*cpu_info); + cpu_regs = (struct ethtool_regs *)data; + data += sizeof(*cpu_regs); + + if (ds->ops->get_regs_len && ds->ops->get_regs) { + len = ds->ops->get_regs_len(ds, port); + if (len < 0) + return; + cpu_regs->len = len; + ds->ops->get_regs(ds, port, cpu_regs, data); + } +} + static void dsa_master_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, uint64_t *data) @@ -147,6 +211,8 @@ static int dsa_master_ethtool_setup(struct net_device *dev) if (cpu_dp->orig_ethtool_ops) memcpy(ops, cpu_dp->orig_ethtool_ops, sizeof(*ops)); + ops->get_regs_len = dsa_master_get_regs_len; + ops->get_regs = dsa_master_get_regs; ops->get_sset_count = dsa_master_get_sset_count; ops->get_ethtool_stats = dsa_master_get_ethtool_stats; ops->get_strings = dsa_master_get_strings; diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c index b4872b87d4a6..73605bcbb385 100644 --- a/net/dsa/tag_ksz.c +++ b/net/dsa/tag_ksz.c @@ -70,6 +70,67 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb, } /* + * For Ingress (Host -> KSZ8795), 1 byte is added before FCS. + * --------------------------------------------------------------------------- + * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag(1byte)|FCS(4bytes) + * --------------------------------------------------------------------------- + * tag : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x10=port5) + * + * For Egress (KSZ8795 -> Host), 1 byte is added before FCS. 
+ * --------------------------------------------------------------------------- + * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes) + * --------------------------------------------------------------------------- + * tag0 : zero-based value represents port + * (eg, 0x00=port1, 0x02=port3, 0x06=port7) + */ + +#define KSZ8795_INGRESS_TAG_LEN 1 + +#define KSZ8795_TAIL_TAG_OVERRIDE BIT(6) +#define KSZ8795_TAIL_TAG_LOOKUP BIT(7) + +static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct dsa_port *dp = dsa_slave_to_port(dev); + struct sk_buff *nskb; + u8 *tag; + u8 *addr; + + nskb = ksz_common_xmit(skb, dev, KSZ8795_INGRESS_TAG_LEN); + if (!nskb) + return NULL; + + /* Tag encoding */ + tag = skb_put(nskb, KSZ8795_INGRESS_TAG_LEN); + addr = skb_mac_header(nskb); + + *tag = 1 << dp->index; + if (is_link_local_ether_addr(addr)) + *tag |= KSZ8795_TAIL_TAG_OVERRIDE; + + return nskb; +} + +static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt) +{ + u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; + + return ksz_common_rcv(skb, dev, tag[0] & 7, KSZ_EGRESS_TAG_LEN); +} + +static const struct dsa_device_ops ksz8795_netdev_ops = { + .name = "ksz8795", + .proto = DSA_TAG_PROTO_KSZ8795, + .xmit = ksz8795_xmit, + .rcv = ksz8795_rcv, + .overhead = KSZ8795_INGRESS_TAG_LEN, +}; + +DSA_TAG_DRIVER(ksz8795_netdev_ops); +MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ8795); + +/* * For Ingress (Host -> KSZ9477), 2 bytes are added before FCS. * --------------------------------------------------------------------------- * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|tag1(1byte)|FCS(4bytes) @@ -183,6 +244,7 @@ DSA_TAG_DRIVER(ksz9893_netdev_ops); MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ9893); static struct dsa_tag_driver *dsa_tag_driver_array[] = { + &DSA_TAG_DRIVER_NAME(ksz8795_netdev_ops), &DSA_TAG_DRIVER_NAME(ksz9477_netdev_ops), &DSA_TAG_DRIVER_NAME(ksz9893_netdev_ops), }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 776905899ac0..a0a66321c0ee 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1776,19 +1776,21 @@ static int tcp_zerocopy_receive(struct sock *sk, break; frags = skb_shinfo(skb)->frags; while (offset) { - if (frags->size > offset) + if (skb_frag_size(frags) > offset) goto out; - offset -= frags->size; + offset -= skb_frag_size(frags); frags++; } } - if (frags->size != PAGE_SIZE || frags->page_offset) { + if (skb_frag_size(frags) != PAGE_SIZE || skb_frag_off(frags)) { int remaining = zc->recv_skip_hint; + int size = skb_frag_size(frags); - while (remaining && (frags->size != PAGE_SIZE || - frags->page_offset)) { - remaining -= frags->size; + while (remaining && (size != PAGE_SIZE || + skb_frag_off(frags))) { + remaining -= size; frags++; + size = skb_frag_size(frags); } zc->recv_skip_hint -= remaining; break; @@ -3781,8 +3783,8 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, return 1; for (i = 0; i < shi->nr_frags; ++i) { - const struct skb_frag_struct *f = &shi->frags[i]; - unsigned int offset = f->page_offset; + const skb_frag_t *f = &shi->frags[i]; + unsigned int offset = skb_frag_off(f); struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT); sg_set_page(&sg, page, skb_frag_size(f), diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 6e4afc48d7bb..e6d02e05bb1c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1402,7 +1402,7 @@ static int __pskb_trim_head(struct sk_buff *skb, int len) } else { shinfo->frags[k] = shinfo->frags[i]; 
if (eat) { - shinfo->frags[k].page_offset += eat; + skb_frag_off_add(&shinfo->frags[k], eat); skb_frag_size_sub(&shinfo->frags[k], eat); eat = 0; } diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c index b358f1a4dd08..da46c4284676 100644 --- a/net/ipv6/exthdrs_core.c +++ b/net/ipv6/exthdrs_core.c @@ -197,10 +197,8 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, struct ipv6hdr _ip6, *ip6; ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); - if (!ip6 || (ip6->version != 6)) { - printk(KERN_ERR "IPv6 header not found\n"); + if (!ip6 || (ip6->version != 6)) return -EBADMSG; - } start = *offset + sizeof(struct ipv6hdr); nexthdr = ip6->nexthdr; } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 083cc1c94cd3..53caf59c591e 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -196,6 +196,7 @@ static inline int ndisc_is_useropt(const struct net_device *dev, { return opt->nd_opt_type == ND_OPT_RDNSS || opt->nd_opt_type == ND_OPT_DNSSL || + opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL || ndisc_ops_is_useropt(dev, opt->nd_opt_type); } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index fd059e08785a..7a5d331cdefa 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2725,10 +2725,9 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, rcu_read_lock(); res.f6i = rcu_dereference(rt6->from); - if (!res.f6i) { - rcu_read_unlock(); - return; - } + if (!res.f6i) + goto out_unlock; + res.fib6_flags = res.f6i->fib6_flags; res.fib6_type = res.f6i->fib6_type; @@ -2744,10 +2743,8 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, /* fib6_info uses a nexthop that does not have fib6_nh * using the dst->dev + gw. Should be impossible. */ - if (!arg.match) { - rcu_read_unlock(); - return; - } + if (!arg.match) + goto out_unlock; res.nh = arg.match; } else { @@ -2760,6 +2757,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, if (rt6_insert_exception(nrt6, &res)) dst_release_immediate(&nrt6->dst); } +out_unlock: rcu_read_unlock(); } } diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 5dbc0c48f8cb..4ff75c3a8d6e 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -635,15 +635,15 @@ do_frag_list: frag_offset = 0; do_frag: frag = &skb_shinfo(skb)->frags[fragidx]; - if (WARN_ON(!frag->size)) { + if (WARN_ON(!skb_frag_size(frag))) { ret = -EINVAL; goto out; } ret = kernel_sendpage(psock->sk->sk_socket, - frag->page.p, - frag->page_offset + frag_offset, - frag->size - frag_offset, + skb_frag_page(frag), + skb_frag_off(frag) + frag_offset, + skb_frag_size(frag) - frag_offset, MSG_DONTWAIT); if (ret <= 0) { if (ret == -EAGAIN) { @@ -678,7 +678,7 @@ do_frag: sent += ret; frag_offset += ret; KCM_STATS_ADD(psock->stats.tx_bytes, ret); - if (frag_offset < frag->size) { + if (frag_offset < skb_frag_size(frag)) { /* Not finished with this frag */ goto do_frag; } diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 01b0dad24500..4d1c335e06e5 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -178,17 +178,54 @@ static void sta_rx_agg_reorder_timer_expired(struct timer_list *t) rcu_read_unlock(); } -static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, +static void ieee80211_add_addbaext(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, + const struct ieee80211_addba_ext_ie *req) +{ + struct ieee80211_supported_band *sband; + struct ieee80211_addba_ext_ie *resp; + const struct ieee80211_sta_he_cap 
*he_cap; + u8 frag_level, cap_frag_level; + u8 *pos; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return; + he_cap = ieee80211_get_he_iftype_cap(sband, sdata->vif.type); + if (!he_cap) + return; + + pos = skb_put_zero(skb, 2 + sizeof(struct ieee80211_addba_ext_ie)); + *pos++ = WLAN_EID_ADDBA_EXT; + *pos++ = sizeof(struct ieee80211_addba_ext_ie); + resp = (struct ieee80211_addba_ext_ie *)pos; + resp->data = req->data & IEEE80211_ADDBA_EXT_NO_FRAG; + + frag_level = u32_get_bits(req->data, + IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK); + cap_frag_level = u32_get_bits(he_cap->he_cap_elem.mac_cap_info[0], + IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK); + if (frag_level > cap_frag_level) + frag_level = cap_frag_level; + resp->data |= u8_encode_bits(frag_level, + IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK); +} + +static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid, u8 dialog_token, u16 status, u16 policy, - u16 buf_size, u16 timeout) + u16 buf_size, u16 timeout, + const struct ieee80211_addba_ext_ie *addbaext) { + struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; bool amsdu = ieee80211_hw_check(&local->hw, SUPPORTS_AMSDU_IN_AMPDU); u16 capab; - skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); + skb = dev_alloc_skb(sizeof(*mgmt) + + 2 + sizeof(struct ieee80211_addba_ext_ie) + + local->hw.extra_tx_headroom); if (!skb) return; @@ -222,13 +259,17 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); + if (sta->sta.he_cap.has_he && addbaext) + ieee80211_add_addbaext(sdata, skb, addbaext); + ieee80211_tx_skb(sdata, skb); } void ___ieee80211_start_rx_ba_session(struct sta_info *sta, u8 dialog_token, u16 timeout, u16 start_seq_num, u16 ba_policy, u16 tid, - u16 buf_size, bool tx, bool auto_seq) + u16 buf_size, bool tx, bool auto_seq, + const struct ieee80211_addba_ext_ie *addbaext) { struct ieee80211_local *local = sta->sdata->local; struct tid_ampdu_rx *tid_agg_rx; @@ -410,21 +451,22 @@ end: } if (tx) - ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid, + ieee80211_send_addba_resp(sta, sta->sta.addr, tid, dialog_token, status, 1, buf_size, - timeout); + timeout, addbaext); } static void __ieee80211_start_rx_ba_session(struct sta_info *sta, u8 dialog_token, u16 timeout, u16 start_seq_num, u16 ba_policy, u16 tid, u16 buf_size, bool tx, - bool auto_seq) + bool auto_seq, + const struct ieee80211_addba_ext_ie *addbaext) { mutex_lock(&sta->ampdu_mlme.mtx); ___ieee80211_start_rx_ba_session(sta, dialog_token, timeout, start_seq_num, ba_policy, tid, - buf_size, tx, auto_seq); + buf_size, tx, auto_seq, addbaext); mutex_unlock(&sta->ampdu_mlme.mtx); } @@ -434,7 +476,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, size_t len) { u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num; + struct ieee802_11_elems elems = { 0 }; u8 dialog_token; + int ies_len; /* extract session parameters from addba request frame */ dialog_token = mgmt->u.action.u.addba_req.dialog_token; @@ -447,9 +491,19 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; + ies_len = len - offsetof(struct ieee80211_mgmt, + u.action.u.addba_req.variable); + if (ies_len) { + 
ieee802_11_parse_elems(mgmt->u.action.u.addba_req.variable, + ies_len, true, &elems, mgmt->bssid, NULL); + if (elems.parse_error) + return; + } + __ieee80211_start_rx_ba_session(sta, dialog_token, timeout, start_seq_num, ba_policy, tid, - buf_size, true, false); + buf_size, true, false, + elems.addba_ext_ie); } void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif, diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 4d458067d80d..ed56b0c6fe19 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -980,7 +980,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, BSS_CHANGED_SSID | BSS_CHANGED_P2P_PS | BSS_CHANGED_TXPOWER | - BSS_CHANGED_TWT; + BSS_CHANGED_TWT | + BSS_CHANGED_HE_OBSS_PD; int err; int prev_beacon_int; @@ -1051,6 +1052,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, sdata->vif.bss_conf.enable_beacon = true; sdata->vif.bss_conf.allow_p2p_go_ps = sdata->vif.p2p; sdata->vif.bss_conf.twt_responder = params->twt_responder; + memcpy(&sdata->vif.bss_conf.he_obss_pd, &params->he_obss_pd, + sizeof(struct ieee80211_he_obss_pd)); sdata->vif.bss_conf.ssid_len = params->ssid_len; if (params->ssid_len) @@ -1543,7 +1546,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, if (ether_addr_equal(mac, sdata->vif.addr)) return -EINVAL; - if (is_multicast_ether_addr(mac)) + if (!is_valid_ether_addr(mac)) return -EINVAL; sta = sta_info_alloc(sdata, mac, GFP_KERNEL); diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 2e7f75938c51..568b3b276931 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -271,8 +271,7 @@ static const char *hw_flag_names[] = { FLAG(TX_STATUS_NO_AMPDU_LEN), FLAG(SUPPORTS_MULTI_BSSID), FLAG(SUPPORTS_ONLY_HE_MULTI_BSSID), - FLAG(EXT_KEY_ID_NATIVE), - FLAG(NO_AMPDU_KEYBORDER_SUPPORT), + FLAG(AMPDU_KEYBORDER_SUPPORT), #undef FLAG }; diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index c2d8b5451a5e..2c9b3eb8b652 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -692,14 +692,16 @@ static inline int drv_remain_on_channel(struct ieee80211_local *local, return ret; } -static inline int drv_cancel_remain_on_channel(struct ieee80211_local *local) +static inline int +drv_cancel_remain_on_channel(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) { int ret; might_sleep(); - trace_drv_cancel_remain_on_channel(local); - ret = local->ops->cancel_remain_on_channel(&local->hw); + trace_drv_cancel_remain_on_channel(local, sdata); + ret = local->ops->cancel_remain_on_channel(&local->hw, &sdata->vif); trace_drv_return_int(local, ret); return ret; diff --git a/net/mac80211/he.c b/net/mac80211/he.c index 219650591c79..a02abfc424aa 100644 --- a/net/mac80211/he.c +++ b/net/mac80211/he.c @@ -50,3 +50,44 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, he_cap->has_he = true; } + +void +ieee80211_he_op_ie_to_bss_conf(struct ieee80211_vif *vif, + const struct ieee80211_he_operation *he_op_ie_elem) +{ + struct ieee80211_he_operation *he_operation = + &vif->bss_conf.he_operation; + + if (!he_op_ie_elem) { + memset(he_operation, 0, sizeof(*he_operation)); + return; + } + + vif->bss_conf.he_operation = *he_op_ie_elem; +} + +void +ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif, + const struct ieee80211_he_spr *he_spr_ie_elem) +{ + struct ieee80211_he_obss_pd *he_obss_pd = + &vif->bss_conf.he_obss_pd; + const u8 *data; + + memset(he_obss_pd, 0, 
sizeof(*he_obss_pd)); + + if (!he_spr_ie_elem) + return; + + data = he_spr_ie_elem->optional; + + if (he_spr_ie_elem->he_sr_control & + IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) + data++; + if (he_spr_ie_elem->he_sr_control & + IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) { + he_obss_pd->max_offset = *data++; + he_obss_pd->min_offset = *data++; + he_obss_pd->enable = true; + } +} diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index d5a500b2a448..a2e4d6b8fd98 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -359,7 +359,7 @@ void ieee80211_ba_session_work(struct work_struct *work) sta->ampdu_mlme.tid_rx_manage_offl)) ___ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid, IEEE80211_MAX_AMPDU_BUF_HT, - false, true); + false, true, NULL); if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS, sta->ampdu_mlme.tid_rx_manage_offl)) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 004e2e3adb88..791ce58d0f09 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1480,6 +1480,7 @@ struct ieee802_11_elems { const struct ieee80211_meshconf_ie *mesh_config; const u8 *he_cap; const struct ieee80211_he_operation *he_operation; + const struct ieee80211_he_spr *he_spr; const struct ieee80211_mu_edca_param_set *mu_edca_param_set; const u8 *uora_element; const u8 *mesh_id; @@ -1506,6 +1507,7 @@ struct ieee802_11_elems { u8 max_bssid_indicator; u8 dtim_count; u8 dtim_period; + const struct ieee80211_addba_ext_ie *addba_ext_ie; /* length of them, respectively */ u8 ext_capab_len; @@ -1767,7 +1769,8 @@ ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u32 info_flags); void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_supported_band *sband, - int retry_count, int shift, bool send_to_cooked); + int retry_count, int shift, bool send_to_cooked, + struct ieee80211_tx_status *status); void ieee80211_check_fast_xmit(struct sta_info *sta); void ieee80211_check_fast_xmit_all(struct ieee80211_local *local); @@ -1804,7 +1807,8 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, void ___ieee80211_start_rx_ba_session(struct sta_info *sta, u8 dialog_token, u16 timeout, u16 start_seq_num, u16 ba_policy, u16 tid, - u16 buf_size, bool tx, bool auto_seq); + u16 buf_size, bool tx, bool auto_seq, + const struct ieee80211_addba_ext_ie *addbaext); void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, enum ieee80211_agg_stop_reason reason); void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, @@ -1869,6 +1873,13 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, const u8 *he_cap_ie, u8 he_cap_len, struct sta_info *sta); +void +ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif, + const struct ieee80211_he_spr *he_spr_ie_elem); + +void +ieee80211_he_op_ie_to_bss_conf(struct ieee80211_vif *vif, + const struct ieee80211_he_operation *he_op_ie_elem); /* Spectrum management */ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, @@ -2133,9 +2144,11 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, u32 cap); u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, const struct cfg80211_chan_def *chandef); +u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype); u8 *ieee80211_ie_build_he_cap(u8 *pos, const struct ieee80211_sta_he_cap *he_cap, u8 *end); +u8 *ieee80211_ie_build_he_oper(u8 *pos); int ieee80211_parse_bitrates(struct 
cfg80211_chan_def *chandef, const struct ieee80211_supported_band *sband, const u8 *srates, int srates_len, u32 *rates); diff --git a/net/mac80211/key.c b/net/mac80211/key.c index dd60f6428049..7dfee848abac 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -270,7 +270,7 @@ int ieee80211_set_tx_key(struct ieee80211_key *key) sta->ptk_idx = key->conf.keyidx; - if (ieee80211_hw_check(&local->hw, NO_AMPDU_KEYBORDER_SUPPORT)) + if (!ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT)) clear_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_check_fast_xmit(sta); @@ -290,15 +290,15 @@ static void ieee80211_pairwise_rekey(struct ieee80211_key *old, /* Extended Key ID key install, initial one or rekey */ if (sta->ptk_idx != INVALID_PTK_KEYIDX && - ieee80211_hw_check(&local->hw, - NO_AMPDU_KEYBORDER_SUPPORT)) { + !ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT)) { /* Aggregation Sessions with Extended Key ID must not * mix MPDUs with different keyIDs within one A-MPDU. - * Tear down any running Tx aggregation and all new - * Rx/Tx aggregation request during rekey if the driver - * asks us to do so. (Blocking Tx only would be - * sufficient but WLAN_STA_BLOCK_BA gets the job done - * for the few ms we need it.) + * Tear down running Tx aggregation sessions and block + * new Rx/Tx aggregation requests during rekey to + * ensure there are no A-MPDUs when the driver does not + * support A-MPDU key borders. (Blocking Tx only + * would be sufficient but WLAN_STA_BLOCK_BA gets the + * job done for the few ms we need it.) */ set_sta_flag(sta, WLAN_STA_BLOCK_BA); mutex_lock(&sta->ampdu_mlme.mtx); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 4c2702f128f3..29b9d57df1a3 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -1048,21 +1048,15 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) } } - /* Enable Extended Key IDs when driver allowed it, or when it - * supports neither HW crypto nor A-MPDUs + /* Mac80211 and therefore all drivers using only SW crypto + * are able to handle PTK rekeys and Extended Key ID. 
*/ - if ((!local->ops->set_key && - !ieee80211_hw_check(hw, AMPDU_AGGREGATION)) || - ieee80211_hw_check(&local->hw, EXT_KEY_ID_NATIVE)) - wiphy_ext_feature_set(local->hw.wiphy, - NL80211_EXT_FEATURE_EXT_KEY_ID); - - /* Mac80211 and therefore all cards only using SW crypto are able to - * handle PTK rekeys correctly - */ - if (!local->ops->set_key) + if (!local->ops->set_key) { wiphy_ext_feature_set(local->hw.wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); + wiphy_ext_feature_set(local->hw.wiphy, + NL80211_EXT_FEATURE_EXT_KEY_ID); + } /* * Calculate scan IE length -- we need this to alloc diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 2e7fa743c892..d09b3c789314 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -532,6 +532,61 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata, return 0; } +int mesh_add_he_cap_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u8 ie_len) +{ + const struct ieee80211_sta_he_cap *he_cap; + struct ieee80211_supported_band *sband; + u8 *pos; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; + + he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT); + + if (!he_cap || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) + return 0; + + if (skb_tailroom(skb) < ie_len) + return -ENOMEM; + + pos = skb_put(skb, ie_len); + ieee80211_ie_build_he_cap(pos, he_cap, pos + ie_len); + + return 0; +} + +int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + const struct ieee80211_sta_he_cap *he_cap; + struct ieee80211_supported_band *sband; + u8 *pos; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; + + he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT); + if (!he_cap || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) + return 0; + + if (skb_tailroom(skb) < 2 + 1 + sizeof(struct ieee80211_he_operation)) + return -ENOMEM; + + pos = skb_put(skb, 2 + 1 + sizeof(struct ieee80211_he_operation)); + ieee80211_ie_build_he_oper(pos); + + return 0; +} + static void ieee80211_mesh_path_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = @@ -677,6 +732,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh) struct ieee80211_chanctx_conf *chanctx_conf; struct mesh_csa_settings *csa; enum nl80211_band band; + u8 ie_len_he_cap; u8 *pos; struct ieee80211_sub_if_data *sdata; int hdr_len = offsetofend(struct ieee80211_mgmt, u.beacon); @@ -687,6 +743,8 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh) band = chanctx_conf->def.chan->band; rcu_read_unlock(); + ie_len_he_cap = ieee80211_ie_len_he_cap(sdata, + NL80211_IFTYPE_MESH_POINT); head_len = hdr_len + 2 + /* NULL SSID */ /* Channel Switch Announcement */ @@ -706,6 +764,8 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh) 2 + sizeof(__le16) + /* awake window */ 2 + sizeof(struct ieee80211_vht_cap) + 2 + sizeof(struct ieee80211_vht_operation) + + ie_len_he_cap + + 2 + 1 + sizeof(struct ieee80211_he_operation) + ifmsh->ie_len; bcn = kzalloc(sizeof(*bcn) + head_len + tail_len, GFP_KERNEL); @@ -823,6 +883,8 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh) mesh_add_awake_window_ie(sdata, skb) || mesh_add_vht_cap_ie(sdata, skb) || 
mesh_add_vht_oper_ie(sdata, skb) || + mesh_add_he_cap_ie(sdata, skb, ie_len_he_cap) || + mesh_add_he_oper_ie(sdata, skb) || mesh_add_vendor_ies(sdata, skb)) goto out_free; diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 94d57cce70da..953f720754e8 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -218,6 +218,10 @@ int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); +int mesh_add_he_cap_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u8 ie_len); +int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); void ieee80211s_init(void); diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index dd3aefd052a9..737c5f4dbf52 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -218,9 +218,12 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, bool include_plid = false; u16 peering_proto = 0; u8 *pos, ie_len = 4; + u8 ie_len_he_cap; int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.self_prot); int err = -ENOMEM; + ie_len_he_cap = ieee80211_ie_len_he_cap(sdata, + NL80211_IFTYPE_MESH_POINT); skb = dev_alloc_skb(local->tx_headroom + hdr_len + 2 + /* capability info */ @@ -233,6 +236,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, 2 + sizeof(struct ieee80211_ht_operation) + 2 + sizeof(struct ieee80211_vht_cap) + 2 + sizeof(struct ieee80211_vht_operation) + + ie_len_he_cap + + 2 + 1 + sizeof(struct ieee80211_he_operation) + 2 + 8 + /* peering IE */ sdata->u.mesh.ie_len); if (!skb) @@ -321,7 +326,9 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, if (mesh_add_ht_cap_ie(sdata, skb) || mesh_add_ht_oper_ie(sdata, skb) || mesh_add_vht_cap_ie(sdata, skb) || - mesh_add_vht_oper_ie(sdata, skb)) + mesh_add_vht_oper_ie(sdata, skb) || + mesh_add_he_cap_ie(sdata, skb, ie_len_he_cap) || + mesh_add_he_oper_ie(sdata, skb)) goto free; } @@ -433,6 +440,9 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, elems->vht_cap_elem, sta); + ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, elems->he_cap, + elems->he_cap_len, sta); + if (bw != sta->sta.bandwidth) changed |= IEEE80211_RC_BW_CHANGED; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 4c888dc9bd81..641876982ab9 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2522,7 +2522,10 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) { ifmgd->nullfunc_failed = false; - ieee80211_send_nullfunc(sdata->local, sdata, false); + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) + ifmgd->probe_send_count--; + else + ieee80211_send_nullfunc(sdata->local, sdata, false); } else { int ssid_len; @@ -3391,6 +3394,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, if (elems.uora_element) bss_conf->uora_ocw_range = elems.uora_element[0]; + ieee80211_he_op_ie_to_bss_conf(&sdata->vif, elems.he_operation); + ieee80211_he_spr_ie_to_bss_conf(&sdata->vif, elems.he_spr); /* TODO: OPEN: what happens if BSS color disable is set? 
*/ } diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index 60ef8972b254..c710504ccf1a 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -8,6 +8,7 @@ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> + * Copyright (C) 2019 Intel Corporation */ #include <linux/export.h> #include <net/mac80211.h> @@ -732,7 +733,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, } if (local->ops->remain_on_channel) { - ret = drv_cancel_remain_on_channel(local); + ret = drv_cancel_remain_on_channel(local, roc->sdata); if (WARN_ON_ONCE(ret)) { mutex_unlock(&local->mtx); return ret; @@ -991,7 +992,7 @@ void ieee80211_roc_purge(struct ieee80211_local *local, if (roc->started) { if (local->ops->remain_on_channel) { /* can race, so ignore return value */ - drv_cancel_remain_on_channel(local); + drv_cancel_remain_on_channel(local, sdata); ieee80211_roc_notify_destroy(roc); } else { roc->abort = true; diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index 5d5348bc41ec..5397c6dad056 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h @@ -60,15 +60,6 @@ static inline void rate_control_add_sta_debugfs(struct sta_info *sta) #endif } -static inline void rate_control_remove_sta_debugfs(struct sta_info *sta) -{ -#ifdef CONFIG_MAC80211_DEBUGFS - struct rate_control_ref *ref = sta->rate_ctrl; - if (ref && ref->ops->remove_sta_debugfs) - ref->ops->remove_sta_debugfs(ref->priv, sta->rate_ctrl_priv); -#endif -} - void ieee80211_check_rate_mask(struct ieee80211_sub_if_data *sdata); /* Get a reference to the rate control algorithm. If `name' is NULL, get the diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 95eb8220e2e4..fb6614f57cbc 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1065,7 +1065,6 @@ static void __sta_info_destroy_part2(struct sta_info *sta) cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL); kfree(sinfo); - rate_control_remove_sta_debugfs(sta); ieee80211_sta_debugfs_remove(sta); cleanup_single_sta(sta); diff --git a/net/mac80211/status.c b/net/mac80211/status.c index a88e3bf17e9d..f03aa8924d23 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -254,7 +254,8 @@ static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn) tid_tx->bar_pending = true; } -static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info) +static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info, + struct ieee80211_tx_status *status) { int len = sizeof(struct ieee80211_radiotap_header); @@ -272,7 +273,14 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info) /* IEEE80211_RADIOTAP_MCS * IEEE80211_RADIOTAP_VHT */ - if (info->status.rates[0].idx >= 0) { + if (status && status->rate) { + if (status->rate->flags & RATE_INFO_FLAGS_MCS) + len += 3; + else if (status->rate->flags & RATE_INFO_FLAGS_VHT_MCS) + len = ALIGN(len, 2) + 12; + else if (status->rate->flags & RATE_INFO_FLAGS_HE_MCS) + len = ALIGN(len, 2) + 12; + } else if (info->status.rates[0].idx >= 0) { if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) len += 3; else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) @@ -286,12 +294,14 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, struct ieee80211_supported_band *sband, struct sk_buff *skb, int retry_count, - int rtap_len, int shift) + int rtap_len, int shift, + struct ieee80211_tx_status 
*status) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_radiotap_header *rthdr; unsigned char *pos; + u16 legacy_rate = 0; u16 txflags; rthdr = skb_push(skb, rtap_len); @@ -310,14 +320,22 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, */ /* IEEE80211_RADIOTAP_RATE */ - if (info->status.rates[0].idx >= 0 && - !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS | - IEEE80211_TX_RC_VHT_MCS))) { - u16 rate; + if (status && status->rate && !(status->rate->flags & + (RATE_INFO_FLAGS_MCS | + RATE_INFO_FLAGS_60G | + RATE_INFO_FLAGS_VHT_MCS | + RATE_INFO_FLAGS_HE_MCS))) + legacy_rate = status->rate->legacy; + else if (info->status.rates[0].idx >= 0 && + !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS | + IEEE80211_TX_RC_VHT_MCS))) + legacy_rate = + sband->bitrates[info->status.rates[0].idx].bitrate; + + if (legacy_rate) { rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); - rate = sband->bitrates[info->status.rates[0].idx].bitrate; - *pos = DIV_ROUND_UP(rate, 5 * (1 << shift)); + *pos = DIV_ROUND_UP(legacy_rate, 5 * (1 << shift)); /* padding for tx flags */ pos += 2; } @@ -341,7 +359,139 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, *pos = retry_count; pos++; - if (info->status.rates[0].idx < 0) + if (status && status->rate && + (status->rate->flags & RATE_INFO_FLAGS_MCS)) { + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); + pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS | + IEEE80211_RADIOTAP_MCS_HAVE_GI | + IEEE80211_RADIOTAP_MCS_HAVE_BW; + if (status->rate->flags & RATE_INFO_FLAGS_SHORT_GI) + pos[1] |= IEEE80211_RADIOTAP_MCS_SGI; + if (status->rate->bw == RATE_INFO_BW_40) + pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40; + pos[2] = status->rate->mcs; + pos += 3; + } else if (status && status->rate && + (status->rate->flags & RATE_INFO_FLAGS_VHT_MCS)) { + u16 known = local->hw.radiotap_vht_details & + (IEEE80211_RADIOTAP_VHT_KNOWN_GI | + IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH); + + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT); + + /* required alignment from rthdr */ + pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2); + + /* u16 known - IEEE80211_RADIOTAP_VHT_KNOWN_* */ + put_unaligned_le16(known, pos); + pos += 2; + + /* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */ + if (status->rate->flags & RATE_INFO_FLAGS_SHORT_GI) + *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; + pos++; + + /* u8 bandwidth */ + switch (status->rate->bw) { + case RATE_INFO_BW_160: + *pos = 11; + break; + case RATE_INFO_BW_80: + *pos = 2; + break; + case RATE_INFO_BW_40: + *pos = 1; + break; + default: + *pos = 0; + break; + } + + /* u8 mcs_nss[4] */ + *pos = (status->rate->mcs << 4) | status->rate->nss; + pos += 4; + + /* u8 coding */ + pos++; + /* u8 group_id */ + pos++; + /* u16 partial_aid */ + pos += 2; + } else if (status && status->rate && + (status->rate->flags & RATE_INFO_FLAGS_HE_MCS)) { + struct ieee80211_radiotap_he *he; + + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE); + + /* required alignment from rthdr */ + pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2); + he = (struct ieee80211_radiotap_he *)pos; + + he->data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU | + IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); + + he->data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN); + +#define HE_PREP(f, val) le16_encode_bits(val, 
IEEE80211_RADIOTAP_HE_##f) + + he->data6 |= HE_PREP(DATA6_NSTS, status->rate->nss); + +#define CHECK_GI(s) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \ + (int)NL80211_RATE_INFO_HE_GI_##s) + + CHECK_GI(0_8); + CHECK_GI(1_6); + CHECK_GI(3_2); + + he->data3 |= HE_PREP(DATA3_DATA_MCS, status->rate->mcs); + he->data3 |= HE_PREP(DATA3_DATA_DCM, status->rate->he_dcm); + + he->data5 |= HE_PREP(DATA5_GI, status->rate->he_gi); + + switch (status->rate->bw) { + case RATE_INFO_BW_20: + he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ); + break; + case RATE_INFO_BW_40: + he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ); + break; + case RATE_INFO_BW_80: + he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ); + break; + case RATE_INFO_BW_160: + he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ); + break; + case RATE_INFO_BW_HE_RU: +#define CHECK_RU_ALLOC(s) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \ + NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4) + + CHECK_RU_ALLOC(26); + CHECK_RU_ALLOC(52); + CHECK_RU_ALLOC(106); + CHECK_RU_ALLOC(242); + CHECK_RU_ALLOC(484); + CHECK_RU_ALLOC(996); + CHECK_RU_ALLOC(2x996); + + he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + status->rate->he_ru_alloc + 4); + break; + default: + WARN_ONCE(1, "Invalid SU BW %d\n", status->rate->bw); + } + + pos += sizeof(struct ieee80211_radiotap_he); + } + + if ((status && status->rate) || info->status.rates[0].idx < 0) return; /* IEEE80211_RADIOTAP_MCS @@ -645,7 +795,8 @@ static int ieee80211_tx_get_rates(struct ieee80211_hw *hw, void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_supported_band *sband, - int retry_count, int shift, bool send_to_cooked) + int retry_count, int shift, bool send_to_cooked, + struct ieee80211_tx_status *status) { struct sk_buff *skb2; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -654,14 +805,14 @@ void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, int rtap_len; /* send frame to monitor interfaces now */ - rtap_len = ieee80211_tx_radiotap_len(info); + rtap_len = ieee80211_tx_radiotap_len(info, status); if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) { pr_err("ieee80211_tx_status: headroom too small\n"); dev_kfree_skb(skb); return; } ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count, - rtap_len, shift); + rtap_len, shift, status); /* XXX: is this sufficient for BPF? 
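The VHT and HE branches above both realign pos before writing multi-octet fields: radiotap requires each field to sit at a multiple of its natural alignment, measured from the start of the radiotap header rather than from the frame. The recurring idiom, factored out as a hedged sketch:

/* Sketch of the alignment idiom used in the VHT/HE branches above. */
static u8 *radiotap_align(struct ieee80211_radiotap_header *rthdr,
			  u8 *pos, unsigned int align)
{
	return (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, align);
}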
*/ skb_reset_mac_header(skb); @@ -901,7 +1052,8 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw, } /* send to monitor interfaces */ - ieee80211_tx_monitor(local, skb, sband, retry_count, shift, send_to_cooked); + ieee80211_tx_monitor(local, skb, sband, retry_count, shift, + send_to_cooked, status); } void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 3bb4459b52c7..4768322dc202 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -1242,9 +1242,10 @@ TRACE_EVENT(drv_remain_on_channel, ) ); -DEFINE_EVENT(local_only_evt, drv_cancel_remain_on_channel, - TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) +DEFINE_EVENT(local_sdata_evt, drv_cancel_remain_on_channel, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) ); TRACE_EVENT(drv_set_ringparam, diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index f13eb2f61ccf..235c6377a203 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -3546,6 +3546,8 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, ieee80211_tx_result r; struct ieee80211_vif *vif = txq->vif; + WARN_ON_ONCE(softirq_count() == 0); + begin: spin_lock_bh(&fq->lock); @@ -4647,7 +4649,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, if (!sband) return bcn; - ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false); + ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false, + NULL); return bcn; } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index ad1e58184c4e..286c7ee35e63 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1200,6 +1200,13 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, elems->cisco_dtpc_elem = pos; break; + case WLAN_EID_ADDBA_EXT: + if (elen != sizeof(struct ieee80211_addba_ext_ie)) { + elem_parse_failed = true; + break; + } + elems->addba_ext_ie = (void *)pos; + break; case WLAN_EID_TIMEOUT_INTERVAL: if (elen >= sizeof(struct ieee80211_timeout_interval_ie)) elems->timeout_int = (void *)pos; @@ -1233,6 +1240,10 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION && elen == 3) { elems->mbssid_config_ie = (void *)&pos[1]; + } else if (pos[0] == WLAN_EID_EXT_HE_SPR && + elen >= sizeof(*elems->he_spr) && + elen >= ieee80211_he_spr_size(&pos[1])) { + elems->he_spr = (void *)&pos[1]; } break; default: @@ -2702,6 +2713,27 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, return pos; } +u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype) +{ + const struct ieee80211_sta_he_cap *he_cap; + struct ieee80211_supported_band *sband; + u8 n; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return 0; + + he_cap = ieee80211_get_he_iftype_cap(sband, iftype); + if (!he_cap) + return 0; + + n = ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem); + return 2 + 1 + + sizeof(he_cap->he_cap_elem) + n + + ieee80211_he_ppe_size(he_cap->ppe_thres[0], + he_cap->he_cap_elem.phy_cap_info); +} + u8 *ieee80211_ie_build_he_cap(u8 *pos, const struct ieee80211_sta_he_cap *he_cap, u8 *end) @@ -2891,6 +2923,34 @@ u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, return pos + sizeof(struct ieee80211_vht_operation); } +u8 *ieee80211_ie_build_he_oper(u8 *pos) +{ + struct ieee80211_he_operation *he_oper; + u32 he_oper_params; + + *pos++ = WLAN_EID_EXTENSION; + *pos++ = 1 + sizeof(struct 
ieee80211_he_operation); + *pos++ = WLAN_EID_EXT_HE_OPERATION; + + he_oper_params = 0; + he_oper_params |= u32_encode_bits(1023, /* disabled */ + IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK); + he_oper_params |= u32_encode_bits(1, + IEEE80211_HE_OPERATION_ER_SU_DISABLE); + he_oper_params |= u32_encode_bits(1, + IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED); + + he_oper = (struct ieee80211_he_operation *)pos; + he_oper->he_oper_params = cpu_to_le32(he_oper_params); + + /* don't require special HE peer rates */ + he_oper->he_mcs_nss_set = cpu_to_le16(0xffff); + + /* TODO add VHT operational and 6GHz operational subelement? */ + + return pos + sizeof(struct ieee80211_he_operation); +} + bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper, struct cfg80211_chan_def *chandef) { diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index ee72779729e5..91bf32af55e9 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -946,7 +946,8 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx) info = IEEE80211_SKB_CB(skb); - if (info->control.hw_key) + if (info->control.hw_key && + !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIE)) return TX_CONTINUE; if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) @@ -962,6 +963,9 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx) bip_ipn_set64(mmie->sequence_number, pn64); + if (info->control.hw_key) + return TX_CONTINUE; + bip_aad(skb, aad); /* diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index ea64c90b14e8..17e6ca62f1be 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -970,7 +970,8 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info) int rc; u32 idx; - if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || + !info->attrs[NFC_ATTR_TARGET_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); @@ -1018,7 +1019,8 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info) struct sk_buff *msg = NULL; u32 idx; - if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || + !info->attrs[NFC_ATTR_FIRMWARE_NAME]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index d01410e52097..65122bbccd27 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -222,6 +222,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) struct dp_stats_percpu *stats; u64 *stats_counter; u32 n_mask_hit; + int error; stats = this_cpu_ptr(dp->stats_percpu); @@ -229,7 +230,6 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit); if (unlikely(!flow)) { struct dp_upcall_info upcall; - int error; memset(&upcall, 0, sizeof(upcall)); upcall.cmd = OVS_PACKET_CMD_MISS; @@ -246,7 +246,10 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) ovs_flow_stats_update(flow, key->tp.flags, skb); sf_acts = rcu_dereference(flow->sf_acts); - ovs_execute_actions(dp, skb, sf_acts, key); + error = ovs_execute_actions(dp, skb, sf_acts, key); + if (unlikely(error)) + net_dbg_ratelimited("ovs: action execution error on datapath %s: %d\n", + ovs_dp_name(dp), error); stats_counter = &stats->n_hit; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 822f45386e31..63b26baa108a 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -226,6 +226,9 @@ struct
rxrpc_security { int (*verify_packet)(struct rxrpc_call *, struct sk_buff *, unsigned int, unsigned int, rxrpc_seq_t, u16); + /* Free crypto request on a call */ + void (*free_call_crypto)(struct rxrpc_call *); + /* Locate the data in a received packet that has been verified. */ void (*locate_data)(struct rxrpc_call *, struct sk_buff *, unsigned int *, unsigned int *); @@ -557,6 +560,7 @@ struct rxrpc_call { unsigned long expect_term_by; /* When we expect call termination by */ u32 next_rx_timo; /* Timeout for next Rx packet (jif) */ u32 next_req_timo; /* Timeout for next Rx request packet (jif) */ + struct skcipher_request *cipher_req; /* Packet cipher request buffer */ struct timer_list timer; /* Combined event timer */ struct work_struct processor; /* Event processor */ rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */ diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 217b12be9e08..60cbc81dc461 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -476,8 +476,10 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn); - if (conn) + if (conn) { rxrpc_disconnect_call(call); + conn->security->free_call_crypto(call); + } for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { rxrpc_free_skb(call->rxtx_buffer[i], diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c index a29d26c273b5..f6c59f5fae9d 100644 --- a/net/rxrpc/insecure.c +++ b/net/rxrpc/insecure.c @@ -33,6 +33,10 @@ static int none_verify_packet(struct rxrpc_call *call, struct sk_buff *skb, return 0; } +static void none_free_call_crypto(struct rxrpc_call *call) +{ +} + static void none_locate_data(struct rxrpc_call *call, struct sk_buff *skb, unsigned int *_offset, unsigned int *_len) { @@ -83,6 +87,7 @@ const struct rxrpc_security rxrpc_no_security = { .exit = none_exit, .init_connection_security = none_init_connection_security, .prime_packet_security = none_prime_packet_security, + .free_call_crypto = none_free_call_crypto, .secure_packet = none_secure_packet, .verify_packet = none_verify_packet, .locate_data = none_locate_data, diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index ae8cd8926456..dbb109da1835 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -43,6 +43,7 @@ struct rxkad_level2_hdr { * packets */ static struct crypto_sync_skcipher *rxkad_ci; +static struct skcipher_request *rxkad_ci_req; static DEFINE_MUTEX(rxkad_ci_mutex); /* @@ -99,8 +100,8 @@ error: */ static int rxkad_prime_packet_security(struct rxrpc_connection *conn) { + struct skcipher_request *req; struct rxrpc_key_token *token; - SYNC_SKCIPHER_REQUEST_ON_STACK(req, conn->cipher); struct scatterlist sg; struct rxrpc_crypt iv; __be32 *tmpbuf; @@ -115,6 +116,12 @@ static int rxkad_prime_packet_security(struct rxrpc_connection *conn) if (!tmpbuf) return -ENOMEM; + req = skcipher_request_alloc(&conn->cipher->base, GFP_NOFS); + if (!req) { + kfree(tmpbuf); + return -ENOMEM; + } + token = conn->params.key->payload.data[0]; memcpy(&iv, token->kad->session_key, sizeof(iv)); @@ -128,7 +135,7 @@ static int rxkad_prime_packet_security(struct rxrpc_connection *conn) skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv.x); crypto_skcipher_encrypt(req); - skcipher_request_zero(req); + skcipher_request_free(req); memcpy(&conn->csum_iv, tmpbuf + 2, sizeof(conn->csum_iv)); kfree(tmpbuf); @@ -137,6 +144,35 @@ static int rxkad_prime_packet_security(struct rxrpc_connection 
*conn) } /* + * Allocate and prepare the crypto request on a call. For any particular call, + * this is called serially for the packets, so no lock should be necessary. + */ +static struct skcipher_request *rxkad_get_call_crypto(struct rxrpc_call *call) +{ + struct crypto_skcipher *tfm = &call->conn->cipher->base; + struct skcipher_request *cipher_req = call->cipher_req; + + if (!cipher_req) { + cipher_req = skcipher_request_alloc(tfm, GFP_NOFS); + if (!cipher_req) + return NULL; + call->cipher_req = cipher_req; + } + + return cipher_req; +} + +/* + * Clean up the crypto on a call. + */ +static void rxkad_free_call_crypto(struct rxrpc_call *call) +{ + if (call->cipher_req) + skcipher_request_free(call->cipher_req); + call->cipher_req = NULL; +} + +/* * partially encrypt a packet (level 1 security) */ static int rxkad_secure_packet_auth(const struct rxrpc_call *call, @@ -246,7 +282,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call, void *sechdr) { struct rxrpc_skb_priv *sp; - SYNC_SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); + struct skcipher_request *req; struct rxrpc_crypt iv; struct scatterlist sg; u32 x, y; @@ -265,6 +301,10 @@ static int rxkad_secure_packet(struct rxrpc_call *call, if (ret < 0) return ret; + req = rxkad_get_call_crypto(call); + if (!req) + return -ENOMEM; + /* continue encrypting from where we left off */ memcpy(&iv, call->conn->csum_iv.x, sizeof(iv)); @@ -502,7 +542,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb, unsigned int offset, unsigned int len, rxrpc_seq_t seq, u16 expected_cksum) { - SYNC_SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); + struct skcipher_request *req; struct rxrpc_crypt iv; struct scatterlist sg; bool aborted; @@ -515,6 +555,10 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb, if (!call->conn->cipher) return 0; + req = rxkad_get_call_crypto(call); + if (!req) + return -ENOMEM; + /* continue encrypting from where we left off */ memcpy(&iv, call->conn->csum_iv.x, sizeof(iv)); @@ -747,14 +791,18 @@ static void rxkad_calc_response_checksum(struct rxkad_response *response) /* * encrypt the response packet */ -static void rxkad_encrypt_response(struct rxrpc_connection *conn, - struct rxkad_response *resp, - const struct rxkad_key *s2) +static int rxkad_encrypt_response(struct rxrpc_connection *conn, + struct rxkad_response *resp, + const struct rxkad_key *s2) { - SYNC_SKCIPHER_REQUEST_ON_STACK(req, conn->cipher); + struct skcipher_request *req; struct rxrpc_crypt iv; struct scatterlist sg[1]; + req = skcipher_request_alloc(&conn->cipher->base, GFP_NOFS); + if (!req) + return -ENOMEM; + /* continue encrypting from where we left off */ memcpy(&iv, s2->session_key, sizeof(iv)); @@ -764,7 +812,8 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn, skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x); crypto_skcipher_encrypt(req); - skcipher_request_zero(req); + skcipher_request_free(req); + return 0; } /* @@ -839,8 +888,9 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn, /* calculate the response checksum and then do the encryption */ rxkad_calc_response_checksum(resp); - rxkad_encrypt_response(conn, resp, token->kad); - ret = rxkad_send_response(conn, &sp->hdr, resp, token->kad); + ret = rxkad_encrypt_response(conn, resp, token->kad); + if (ret == 0) + ret = rxkad_send_response(conn, &sp->hdr, resp, token->kad); kfree(resp); return ret; @@ -1017,18 +1067,16 @@ 
static void rxkad_decrypt_response(struct rxrpc_connection *conn, struct rxkad_response *resp, const struct rxrpc_crypt *session_key) { - SYNC_SKCIPHER_REQUEST_ON_STACK(req, rxkad_ci); + struct skcipher_request *req = rxkad_ci_req; struct scatterlist sg[1]; struct rxrpc_crypt iv; _enter(",,%08x%08x", ntohl(session_key->n[0]), ntohl(session_key->n[1])); - ASSERT(rxkad_ci != NULL); - mutex_lock(&rxkad_ci_mutex); if (crypto_sync_skcipher_setkey(rxkad_ci, session_key->x, - sizeof(*session_key)) < 0) + sizeof(*session_key)) < 0) BUG(); memcpy(&iv, session_key, sizeof(iv)); @@ -1222,10 +1270,26 @@ static void rxkad_clear(struct rxrpc_connection *conn) */ static int rxkad_init(void) { + struct crypto_sync_skcipher *tfm; + struct skcipher_request *req; + /* pin the cipher we need so that the crypto layer doesn't invoke * keventd to go get it */ - rxkad_ci = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0); - return PTR_ERR_OR_ZERO(rxkad_ci); + tfm = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + req = skcipher_request_alloc(&tfm->base, GFP_KERNEL); + if (!req) + goto nomem_tfm; + + rxkad_ci_req = req; + rxkad_ci = tfm; + return 0; + +nomem_tfm: + crypto_free_sync_skcipher(tfm); + return -ENOMEM; } /* @@ -1233,8 +1297,8 @@ static int rxkad_init(void) */ static void rxkad_exit(void) { - if (rxkad_ci) - crypto_free_sync_skcipher(rxkad_ci); + crypto_free_sync_skcipher(rxkad_ci); + skcipher_request_free(rxkad_ci_req); } /* @@ -1249,6 +1313,7 @@ const struct rxrpc_security rxkad = { .prime_packet_security = rxkad_prime_packet_security, .secure_packet = rxkad_secure_packet, .verify_packet = rxkad_verify_packet, + .free_call_crypto = rxkad_free_call_crypto, .locate_data = rxkad_locate_data, .issue_challenge = rxkad_issue_challenge, .respond_to_challenge = rxkad_respond_to_challenge, diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index efd3cfb80a2a..9d85d3295c7c 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -36,6 +36,7 @@ #include <net/tc_act/tc_sample.h> #include <net/tc_act/tc_skbedit.h> #include <net/tc_act/tc_ct.h> +#include <net/tc_act/tc_mpls.h> extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1]; @@ -3204,6 +3205,12 @@ int tc_setup_flow_action(struct flow_action *flow_action, } else if (is_tcf_mirred_egress_mirror(act)) { entry->id = FLOW_ACTION_MIRRED; entry->dev = tcf_mirred_dev(act); + } else if (is_tcf_mirred_ingress_redirect(act)) { + entry->id = FLOW_ACTION_REDIRECT_INGRESS; + entry->dev = tcf_mirred_dev(act); + } else if (is_tcf_mirred_ingress_mirror(act)) { + entry->id = FLOW_ACTION_MIRRED_INGRESS; + entry->dev = tcf_mirred_dev(act); } else if (is_tcf_vlan(act)) { switch (tcf_vlan_action(act)) { case TCA_VLAN_ACT_PUSH: @@ -3269,6 +3276,33 @@ int tc_setup_flow_action(struct flow_action *flow_action, entry->id = FLOW_ACTION_CT; entry->ct.action = tcf_ct_action(act); entry->ct.zone = tcf_ct_zone(act); + } else if (is_tcf_mpls(act)) { + switch (tcf_mpls_action(act)) { + case TCA_MPLS_ACT_PUSH: + entry->id = FLOW_ACTION_MPLS_PUSH; + entry->mpls_push.proto = tcf_mpls_proto(act); + entry->mpls_push.label = tcf_mpls_label(act); + entry->mpls_push.tc = tcf_mpls_tc(act); + entry->mpls_push.bos = tcf_mpls_bos(act); + entry->mpls_push.ttl = tcf_mpls_ttl(act); + break; + case TCA_MPLS_ACT_POP: + entry->id = FLOW_ACTION_MPLS_POP; + entry->mpls_pop.proto = tcf_mpls_proto(act); + break; + case TCA_MPLS_ACT_MODIFY: + entry->id = FLOW_ACTION_MPLS_MANGLE; + entry->mpls_mangle.label = tcf_mpls_label(act); + entry->mpls_mangle.tc = 
tcf_mpls_tc(act); + entry->mpls_mangle.bos = tcf_mpls_bos(act); + entry->mpls_mangle.ttl = tcf_mpls_ttl(act); + break; + default: + goto err_out; + } + } else if (is_tcf_skbedit_ptype(act)) { + entry->id = FLOW_ACTION_PTYPE; + entry->ptype = tcf_skbedit_ptype(act); } else { goto err_out; } diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index d59fbcc745d1..9edd0f495001 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -45,7 +45,6 @@ struct fq_codel_flow { struct sk_buff *tail; struct list_head flowchain; int deficit; - u32 dropped; /* number of drops (or ECN marks) on this flow */ struct codel_vars cvars; }; /* please try to keep this structure <= 64 bytes */ @@ -173,7 +172,8 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets, __qdisc_drop(skb, to_free); } while (++i < max_packets && len < threshold); - flow->dropped += i; + /* Tell codel to increase its signal strength also */ + flow->cvars.count += i; q->backlogs[idx] -= len; q->memory_usage -= mem; sch->qstats.drops += i; @@ -211,7 +211,6 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, list_add_tail(&flow->flowchain, &q->new_flows); q->new_flow_count++; flow->deficit = q->quantum; - flow->dropped = 0; } get_codel_cb(skb)->mem_usage = skb->truesize; q->memory_usage += get_codel_cb(skb)->mem_usage; @@ -310,9 +309,6 @@ begin: &flow->cvars, &q->cstats, qdisc_pkt_len, codel_get_enqueue_time, drop_func, dequeue_func); - flow->dropped += q->cstats.drop_count - prev_drop_count; - flow->dropped += q->cstats.ecn_mark - prev_ecn_mark; - if (!skb) { /* force a pass through old_flows to prevent starvation */ if ((head == &q->new_flows) && !list_empty(&q->old_flows)) @@ -658,7 +654,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl, sch_tree_unlock(sch); } qs.backlog = q->backlogs[idx]; - qs.drops = flow->dropped; + qs.drops = 0; } if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0) return -1; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9d1f83b10c0a..12503e16fa96 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1044,158 +1044,161 @@ out: return err; } -/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) - * - * Common routine for handling connect() and sctp_connectx(). - * Connect will come in with just a single address. - */ -static int __sctp_connect(struct sock *sk, - struct sockaddr *kaddrs, - int addrs_size, int flags, - sctp_assoc_t *assoc_id) +static int sctp_connect_new_asoc(struct sctp_endpoint *ep, + const union sctp_addr *daddr, + const struct sctp_initmsg *init, + struct sctp_transport **tp) { + struct sctp_association *asoc; + struct sock *sk = ep->base.sk; struct net *net = sock_net(sk); - struct sctp_sock *sp; - struct sctp_endpoint *ep; - struct sctp_association *asoc = NULL; - struct sctp_association *asoc2; - struct sctp_transport *transport; - union sctp_addr to; enum sctp_scope scope; - long timeo; - int err = 0; - int addrcnt = 0; - int walk_size = 0; - union sctp_addr *sa_addr = NULL; - void *addr_buf; - unsigned short port; + int err; - sp = sctp_sk(sk); - ep = sp->ep; + if (sctp_endpoint_is_peeled_off(ep, daddr)) + return -EADDRNOTAVAIL; - /* connect() cannot be done on a socket that is already in ESTABLISHED - * state - UDP-style peeled off socket or a TCP-style socket that - * is already connected. - * It cannot be done even on a TCP-style listening socket. 
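The guard that opens __sctp_connect() survives the refactor unchanged: connect() is refused on a socket that already carries an established or closing association, and on a TCP-style listener. Restated as a predicate (illustrative, not in the patch):

/* Illustrative restatement of the state check kept by __sctp_connect() */
static bool sctp_connect_allowed(const struct sock *sk)
{
	return !(sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
		 (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)));
}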
- */ - if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) || - (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { - err = -EISCONN; - goto out_free; + if (!ep->base.bind_addr.port) { + if (sctp_autobind(sk)) + return -EAGAIN; + } else { + if (ep->base.bind_addr.port < inet_prot_sock(net) && + !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) + return -EACCES; } - /* Walk through the addrs buffer and count the number of addresses. */ - addr_buf = kaddrs; - while (walk_size < addrs_size) { - struct sctp_af *af; - - if (walk_size + sizeof(sa_family_t) > addrs_size) { - err = -EINVAL; - goto out_free; - } + scope = sctp_scope(daddr); + asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); + if (!asoc) + return -ENOMEM; - sa_addr = addr_buf; - af = sctp_get_af_specific(sa_addr->sa.sa_family); + err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); + if (err < 0) + goto free; - /* If the address family is not supported or if this address - * causes the address buffer to overflow return EINVAL. - */ - if (!af || (walk_size + af->sockaddr_len) > addrs_size) { - err = -EINVAL; - goto out_free; - } + *tp = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN); + if (!*tp) { + err = -ENOMEM; + goto free; + } - port = ntohs(sa_addr->v4.sin_port); + if (!init) + return 0; - /* Save current address so we can work with it */ - memcpy(&to, sa_addr, af->sockaddr_len); + if (init->sinit_num_ostreams) { + __u16 outcnt = init->sinit_num_ostreams; - err = sctp_verify_addr(sk, &to, af->sockaddr_len); + asoc->c.sinit_num_ostreams = outcnt; + /* outcnt has been changed, need to re-init stream */ + err = sctp_stream_init(&asoc->stream, outcnt, 0, GFP_KERNEL); if (err) - goto out_free; + goto free; + } - /* Make sure the destination port is correctly set - * in all addresses. - */ - if (asoc && asoc->peer.port && asoc->peer.port != port) { - err = -EINVAL; - goto out_free; - } + if (init->sinit_max_instreams) + asoc->c.sinit_max_instreams = init->sinit_max_instreams; - /* Check if there already is a matching association on the - * endpoint (other than the one created here). - */ - asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); - if (asoc2 && asoc2 != asoc) { - if (asoc2->state >= SCTP_STATE_ESTABLISHED) - err = -EISCONN; - else - err = -EALREADY; - goto out_free; - } + if (init->sinit_max_attempts) + asoc->max_init_attempts = init->sinit_max_attempts; - /* If we could not find a matching association on the endpoint, - * make sure that there is no peeled-off association matching - * the peer address even on another socket. - */ - if (sctp_endpoint_is_peeled_off(ep, &to)) { - err = -EADDRNOTAVAIL; - goto out_free; - } + if (init->sinit_max_init_timeo) + asoc->max_init_timeo = + msecs_to_jiffies(init->sinit_max_init_timeo); - if (!asoc) { - /* If a bind() or sctp_bindx() is not called prior to - * an sctp_connectx() call, the system picks an - * ephemeral port and will choose an address set - * equivalent to binding with a wildcard address. - */ - if (!ep->base.bind_addr.port) { - if (sctp_autobind(sk)) { - err = -EAGAIN; - goto out_free; - } - } else { - /* - * If an unprivileged user inherits a 1-many - * style socket with open associations on a - * privileged port, it MAY be permitted to - * accept new associations, but it SHOULD NOT - * be permitted to open new associations. 
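That policy moves verbatim into sctp_connect_new_asoc(): an unbound endpoint is autobound to an ephemeral port, while an endpoint bound to a privileged port may only open new associations when it holds CAP_NET_BIND_SERVICE. Condensed (the helper name is hypothetical):

/* Hypothetical condensation of the port policy in sctp_connect_new_asoc() */
static int sctp_check_bind_port(struct sctp_endpoint *ep, struct sock *sk,
				struct net *net)
{
	if (!ep->base.bind_addr.port)
		return sctp_autobind(sk) ? -EAGAIN : 0;
	if (ep->base.bind_addr.port < inet_prot_sock(net) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;
	return 0;
}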
- */ - if (ep->base.bind_addr.port < - inet_prot_sock(net) && - !ns_capable(net->user_ns, - CAP_NET_BIND_SERVICE)) { - err = -EACCES; - goto out_free; - } - } + return 0; +free: + sctp_association_free(asoc); + return err; +} - scope = sctp_scope(&to); - asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); - if (!asoc) { - err = -ENOMEM; - goto out_free; - } +static int sctp_connect_add_peer(struct sctp_association *asoc, + union sctp_addr *daddr, int addr_len) +{ + struct sctp_endpoint *ep = asoc->ep; + struct sctp_association *old; + struct sctp_transport *t; + int err; - err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, - GFP_KERNEL); - if (err < 0) { - goto out_free; - } + err = sctp_verify_addr(ep->base.sk, daddr, addr_len); + if (err) + return err; - } + old = sctp_endpoint_lookup_assoc(ep, daddr, &t); + if (old && old != asoc) + return old->state >= SCTP_STATE_ESTABLISHED ? -EISCONN + : -EALREADY; - /* Prime the peer's transport structures. */ - transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, - SCTP_UNKNOWN); - if (!transport) { - err = -ENOMEM; + if (sctp_endpoint_is_peeled_off(ep, daddr)) + return -EADDRNOTAVAIL; + + t = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN); + if (!t) + return -ENOMEM; + + return 0; +} + +/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) + * + * Common routine for handling connect() and sctp_connectx(). + * Connect will come in with just a single address. + */ +static int __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, + int addrs_size, int flags, sctp_assoc_t *assoc_id) +{ + struct sctp_sock *sp = sctp_sk(sk); + struct sctp_endpoint *ep = sp->ep; + struct sctp_transport *transport; + struct sctp_association *asoc; + void *addr_buf = kaddrs; + union sctp_addr *daddr; + struct sctp_af *af; + int walk_size, err; + long timeo; + + if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) || + (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) + return -EISCONN; + + daddr = addr_buf; + af = sctp_get_af_specific(daddr->sa.sa_family); + if (!af || af->sockaddr_len > addrs_size) + return -EINVAL; + + err = sctp_verify_addr(sk, daddr, af->sockaddr_len); + if (err) + return err; + + asoc = sctp_endpoint_lookup_assoc(ep, daddr, &transport); + if (asoc) + return asoc->state >= SCTP_STATE_ESTABLISHED ? 
-EISCONN + : -EALREADY; + + err = sctp_connect_new_asoc(ep, daddr, NULL, &transport); + if (err) + return err; + asoc = transport->asoc; + + addr_buf += af->sockaddr_len; + walk_size = af->sockaddr_len; + while (walk_size < addrs_size) { + err = -EINVAL; + if (walk_size + sizeof(sa_family_t) > addrs_size) goto out_free; - } - addrcnt++; - addr_buf += af->sockaddr_len; + daddr = addr_buf; + af = sctp_get_af_specific(daddr->sa.sa_family); + if (!af || af->sockaddr_len + walk_size > addrs_size) + goto out_free; + + if (asoc->peer.port != ntohs(daddr->v4.sin_port)) + goto out_free; + + err = sctp_connect_add_peer(asoc, daddr, af->sockaddr_len); + if (err) + goto out_free; + + addr_buf += af->sockaddr_len; walk_size += af->sockaddr_len; } @@ -1208,40 +1211,25 @@ static int __sctp_connect(struct sock *sk, goto out_free; } - err = sctp_primitive_ASSOCIATE(net, asoc, NULL); - if (err < 0) { + err = sctp_primitive_ASSOCIATE(sock_net(sk), asoc, NULL); + if (err < 0) goto out_free; - } /* Initialize sk's dport and daddr for getpeername() */ inet_sk(sk)->inet_dport = htons(asoc->peer.port); - sp->pf->to_sk_daddr(sa_addr, sk); + sp->pf->to_sk_daddr(daddr, sk); sk->sk_err = 0; - timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); - if (assoc_id) *assoc_id = asoc->assoc_id; - err = sctp_wait_for_connect(asoc, &timeo); - /* Note: the asoc may be freed after the return of - * sctp_wait_for_connect. - */ - - /* Don't free association on exit. */ - asoc = NULL; + timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); + return sctp_wait_for_connect(asoc, &timeo); out_free: pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n", __func__, asoc, kaddrs, err); - - if (asoc) { - /* sctp_primitive_ASSOCIATE may have added this association - * To the hash table, try to unhash it, just in case, its a noop - * if it wasn't hashed so we're safe - */ - sctp_association_free(asoc); - } + sctp_association_free(asoc); return err; } @@ -1311,7 +1299,8 @@ static int __sctp_setsockopt_connectx(struct sock *sk, pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", __func__, sk, addrs, addrs_size); - if (unlikely(addrs_size <= 0)) + /* make sure the 1st addr's sa_family is accessible later */ + if (unlikely(addrs_size < sizeof(sa_family_t))) return -EINVAL; kaddrs = memdup_user(addrs, addrs_size); @@ -1659,9 +1648,7 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags, struct sctp_transport **tp) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; - struct net *net = sock_net(sk); struct sctp_association *asoc; - enum sctp_scope scope; struct cmsghdr *cmsg; __be32 flowinfo = 0; struct sctp_af *af; @@ -1676,20 +1663,6 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags, sctp_sstate(sk, CLOSING))) return -EADDRNOTAVAIL; - if (sctp_endpoint_is_peeled_off(ep, daddr)) - return -EADDRNOTAVAIL; - - if (!ep->base.bind_addr.port) { - if (sctp_autobind(sk)) - return -EAGAIN; - } else { - if (ep->base.bind_addr.port < inet_prot_sock(net) && - !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) - return -EACCES; - } - - scope = sctp_scope(daddr); - /* Label connection socket for first association 1-to-many * style for client sequence socket()->sendmsg(). 
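With the duplication gone, both the connect() path and this sendmsg() path create the association through sctp_connect_new_asoc() and attach any further addresses through sctp_connect_add_peer(). The shared calling pattern, sketched with error handling trimmed (the wrapper itself is hypothetical):

/* Hypothetical wrapper showing the shared call pattern after the refactor */
static int sctp_connect_one_then_add(struct sctp_endpoint *ep,
				     union sctp_addr *first,
				     union sctp_addr *extra, int extra_len,
				     struct sctp_transport **tp)
{
	int err = sctp_connect_new_asoc(ep, first, NULL, tp);

	if (err)
		return err;
	return sctp_connect_add_peer((*tp)->asoc, extra, extra_len);
}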
This * needs to be done before sctp_assoc_add_peer() as that will @@ -1705,45 +1678,10 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags, if (err < 0) return err; - asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); - if (!asoc) - return -ENOMEM; - - if (sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL) < 0) { - err = -ENOMEM; - goto free; - } - - if (cmsgs->init) { - struct sctp_initmsg *init = cmsgs->init; - - if (init->sinit_num_ostreams) { - __u16 outcnt = init->sinit_num_ostreams; - - asoc->c.sinit_num_ostreams = outcnt; - /* outcnt has been changed, need to re-init stream */ - err = sctp_stream_init(&asoc->stream, outcnt, 0, - GFP_KERNEL); - if (err) - goto free; - } - - if (init->sinit_max_instreams) - asoc->c.sinit_max_instreams = init->sinit_max_instreams; - - if (init->sinit_max_attempts) - asoc->max_init_attempts = init->sinit_max_attempts; - - if (init->sinit_max_init_timeo) - asoc->max_init_timeo = - msecs_to_jiffies(init->sinit_max_init_timeo); - } - - *tp = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN); - if (!*tp) { - err = -ENOMEM; - goto free; - } + err = sctp_connect_new_asoc(ep, daddr, cmsgs->init, tp); + if (err) + return err; + asoc = (*tp)->asoc; if (!cmsgs->addrs_msg) return 0; @@ -1753,8 +1691,6 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags, /* sendv addr list parse */ for_each_cmsghdr(cmsg, cmsgs->addrs_msg) { - struct sctp_transport *transport; - struct sctp_association *old; union sctp_addr _daddr; int dlen; @@ -1788,30 +1724,10 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags, daddr->v6.sin6_port = htons(asoc->peer.port); memcpy(&daddr->v6.sin6_addr, CMSG_DATA(cmsg), dlen); } - err = sctp_verify_addr(sk, daddr, sizeof(*daddr)); - if (err) - goto free; - - old = sctp_endpoint_lookup_assoc(ep, daddr, &transport); - if (old && old != asoc) { - if (old->state >= SCTP_STATE_ESTABLISHED) - err = -EISCONN; - else - err = -EALREADY; - goto free; - } - - if (sctp_endpoint_is_peeled_off(ep, daddr)) { - err = -EADDRNOTAVAIL; - goto free; - } - transport = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, - SCTP_UNKNOWN); - if (!transport) { - err = -ENOMEM; + err = sctp_connect_add_peer(asoc, daddr, sizeof(*daddr)); + if (err) goto free; - } } return 0; diff --git a/net/sctp/transport.c b/net/sctp/transport.c index e2f8e369cd08..7235a6032671 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -43,8 +43,8 @@ static struct sctp_transport *sctp_transport_init(struct net *net, gfp_t gfp) { /* Copy in the address. 
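The transport.c hunk below is a subtle hardening: copying the whole union sctp_addr can read uninitialized padding beyond a struct sockaddr_in, so the copy is bounded by the family's real sockaddr length. In isolation (helper name hypothetical; the real transport is already zero-initialized):

/* Hypothetical helper mirroring the bounded copy done below */
static void sctp_addr_copy(union sctp_addr *dst, const union sctp_addr *src,
			   const struct sctp_af *af)
{
	memset(dst, 0, sizeof(*dst));
	memcpy(dst, src, af->sockaddr_len);
}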
*/ - peer->ipaddr = *addr; peer->af_specific = sctp_get_af_specific(addr->sa.sa_family); + memcpy(&peer->ipaddr, addr, peer->af_specific->sockaddr_len); memset(&peer->saddr, 0, sizeof(union sctp_addr)); peer->sack_generation = 0; diff --git a/net/tipc/link.c b/net/tipc/link.c index 66d3a07bc571..dd3155b14654 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -180,6 +180,7 @@ struct tipc_link { /* Fragmentation/reassembly */ struct sk_buff *reasm_buf; + struct sk_buff *reasm_tnlmsg; /* Broadcast */ u16 ackers; @@ -853,18 +854,31 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr) */ static void link_prepare_wakeup(struct tipc_link *l) { + struct sk_buff_head *wakeupq = &l->wakeupq; + struct sk_buff_head *inputq = l->inputq; struct sk_buff *skb, *tmp; - int imp, i = 0; + struct sk_buff_head tmpq; + int avail[5] = {0,}; + int imp = 0; + + __skb_queue_head_init(&tmpq); + + for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) + avail[imp] = l->backlog[imp].limit - l->backlog[imp].len; - skb_queue_walk_safe(&l->wakeupq, skb, tmp) { + skb_queue_walk_safe(wakeupq, skb, tmp) { imp = TIPC_SKB_CB(skb)->chain_imp; - if (l->backlog[imp].len < l->backlog[imp].limit) { - skb_unlink(skb, &l->wakeupq); - skb_queue_tail(l->inputq, skb); - } else if (i++ > 10) { - break; - } + if (avail[imp] <= 0) + continue; + avail[imp]--; + __skb_unlink(skb, wakeupq); + __skb_queue_tail(&tmpq, skb); } + + spin_lock_bh(&inputq->lock); + skb_queue_splice_tail(&tmpq, inputq); + spin_unlock_bh(&inputq->lock); + } void tipc_link_reset(struct tipc_link *l) @@ -897,8 +911,10 @@ void tipc_link_reset(struct tipc_link *l) l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0; l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0; kfree_skb(l->reasm_buf); + kfree_skb(l->reasm_tnlmsg); kfree_skb(l->failover_reasm_skb); l->reasm_buf = NULL; + l->reasm_tnlmsg = NULL; l->failover_reasm_skb = NULL; l->rcv_unacked = 0; l->snd_nxt = 1; @@ -940,6 +956,9 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, int rc = 0; if (unlikely(msg_size(hdr) > mtu)) { + pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n", + skb_queue_len(list), msg_user(hdr), + msg_type(hdr), msg_size(hdr), mtu); skb_queue_purge(list); return -EMSGSIZE; } @@ -1233,6 +1252,7 @@ static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *inputq) { struct sk_buff **reasm_skb = &l->failover_reasm_skb; + struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg; struct sk_buff_head *fdefq = &l->failover_deferdq; struct tipc_msg *hdr = buf_msg(skb); struct sk_buff *iskb; @@ -1240,40 +1260,56 @@ static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb, int rc = 0; u16 seqno; - /* SYNCH_MSG */ - if (msg_type(hdr) == SYNCH_MSG) - goto drop; + if (msg_type(hdr) == SYNCH_MSG) { + kfree_skb(skb); + return 0; + } - /* FAILOVER_MSG */ - if (!tipc_msg_extract(skb, &iskb, &ipos)) { - pr_warn_ratelimited("Cannot extract FAILOVER_MSG, defq: %d\n", - skb_queue_len(fdefq)); - return rc; + /* Not a fragment? 
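When a tunnel message does carry fragments, the receive path below rewrites the message type so the generic reassembler can consume it: fragment 1 of N becomes FIRST_FRAGMENT, fragment N becomes LAST_FRAGMENT, and everything in between a plain FRAGMENT. The mapping on its own (illustrative):

/* Illustrative form of the type mapping fed to tipc_buf_append() below */
static u32 tnl_fragment_type(u32 fragm_no, u32 nof_fragms)
{
	if (fragm_no == 1)
		return FIRST_FRAGMENT;
	return fragm_no < nof_fragms ? FRAGMENT : LAST_FRAGMENT;
}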
*/ + if (likely(!msg_nof_fragms(hdr))) { + if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) { + pr_warn_ratelimited("Unable to extract msg, defq: %d\n", + skb_queue_len(fdefq)); + return 0; + } + kfree_skb(skb); + } else { + /* Set fragment type for buf_append */ + if (msg_fragm_no(hdr) == 1) + msg_set_type(hdr, FIRST_FRAGMENT); + else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr)) + msg_set_type(hdr, FRAGMENT); + else + msg_set_type(hdr, LAST_FRAGMENT); + + if (!tipc_buf_append(reasm_tnlmsg, &skb)) { + /* Successful but non-complete reassembly? */ + if (*reasm_tnlmsg || link_is_bc_rcvlink(l)) + return 0; + pr_warn_ratelimited("Unable to reassemble tunnel msg\n"); + return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); + } + iskb = skb; } do { seqno = buf_seqno(iskb); - if (unlikely(less(seqno, l->drop_point))) { kfree_skb(iskb); continue; } - if (unlikely(seqno != l->drop_point)) { __tipc_skb_queue_sorted(fdefq, seqno, iskb); continue; } l->drop_point++; - if (!tipc_data_input(l, iskb, inputq)) rc |= tipc_link_input(l, iskb, inputq, reasm_skb); if (unlikely(rc)) break; } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point))); -drop: - kfree_skb(skb); return rc; } @@ -1663,14 +1699,18 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, struct sk_buff *skb, *tnlskb; struct tipc_msg *hdr, tnlhdr; struct sk_buff_head *queue = &l->transmq; - struct sk_buff_head tmpxq, tnlq; + struct sk_buff_head tmpxq, tnlq, frags; u16 pktlen, pktcnt, seqno = l->snd_nxt; + bool pktcnt_need_update = false; + u16 syncpt; + int rc; if (!tnl) return; skb_queue_head_init(&tnlq); skb_queue_head_init(&tmpxq); + skb_queue_head_init(&frags); /* At least one packet required for safe algorithm => add dummy */ skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG, @@ -1684,6 +1724,31 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, tipc_link_xmit(l, &tnlq, &tmpxq); __skb_queue_purge(&tmpxq); + /* Link Synching: + * From now on, send only one single ("dummy") SYNCH message + * to peer. The SYNCH message does not contain any data, just + * a header conveying the synch point to the peer. + */ + if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) { + tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG, + INT_H_SIZE, 0, l->addr, + tipc_own_addr(l->net), + 0, 0, 0); + if (!tnlskb) { + pr_warn("%sunable to create dummy SYNCH_MSG\n", + link_co_err); + return; + } + + hdr = buf_msg(tnlskb); + syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1; + msg_set_syncpt(hdr, syncpt); + msg_set_bearer_id(hdr, l->peer_bearer_id); + __skb_queue_tail(&tnlq, tnlskb); + tipc_link_xmit(tnl, &tnlq, xmitq); + return; + } + /* Initialize reusable tunnel packet header */ tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL, mtyp, INT_H_SIZE, l->addr); @@ -1701,6 +1766,39 @@ tnl: if (queue == &l->backlogq) msg_set_seqno(hdr, seqno++); pktlen = msg_size(hdr); + + /* Tunnel link MTU is not large enough? 
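For the enhanced-tunnel SYNCH case above, the dummy message carries only a synchronization point: the sequence number of the last packet the old link still owes its peer, i.e. snd_nxt plus the backlog depth. As a standalone helper (illustrative, not in the patch):

/* Illustrative: the synch point advertised in the dummy SYNCH message */
static u16 tnl_synch_point(struct tipc_link *l)
{
	return l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
}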
This could be + * due to: + * 1) Link MTU has just changed or set differently; + * 2) Or FAILOVER on the top of a SYNCH message + * + * The 2nd case should not happen if peer supports + * TIPC_TUNNEL_ENHANCED + */ + if (pktlen > tnl->mtu - INT_H_SIZE) { + if (mtyp == FAILOVER_MSG && + (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) { + rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu, + &frags); + if (rc) { + pr_warn("%sunable to frag msg: rc %d\n", + link_co_err, rc); + return; + } + pktcnt += skb_queue_len(&frags) - 1; + pktcnt_need_update = true; + skb_queue_splice_tail_init(&frags, &tnlq); + continue; + } + /* Unluckily, peer doesn't have TIPC_TUNNEL_ENHANCED + * => Just warn it and return! + */ + pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n", + link_co_err, msg_user(hdr), + msg_type(hdr), msg_size(hdr)); + return; + } + msg_set_size(&tnlhdr, pktlen + INT_H_SIZE); tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC); if (!tnlskb) { @@ -1716,6 +1814,12 @@ tnl: goto tnl; } + if (pktcnt_need_update) + skb_queue_walk(&tnlq, skb) { + hdr = buf_msg(skb); + msg_set_msgcnt(hdr, pktcnt); + } + tipc_link_xmit(tnl, &tnlq, xmitq); if (mtyp == FAILOVER_MSG) { diff --git a/net/tipc/msg.c b/net/tipc/msg.c index f48e5857210f..e6d49cdc61b4 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -244,6 +244,65 @@ bool tipc_msg_validate(struct sk_buff **_skb) } /** + * tipc_msg_fragment - build a fragment skb list for TIPC message + * + * @skb: TIPC message skb + * @hdr: internal msg header to be put on the top of the fragments + * @pktmax: max size of a fragment incl. the header + * @frags: returned fragment skb list + * + * Returns 0 if the fragmentation is successful, otherwise: -EINVAL + * or -ENOMEM + */ +int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr, + int pktmax, struct sk_buff_head *frags) +{ + int pktno, nof_fragms, dsz, dmax, eat; + struct tipc_msg *_hdr; + struct sk_buff *_skb; + u8 *data; + + /* Non-linear buffer? 
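tipc_msg_fragment() below splits a dsz-byte payload into nof_fragms = dsz / dmax + 1 pieces: the first nof_fragms - 1 carry dmax bytes each and the last carries the remainder. Note that this arithmetic assumes dsz is not an exact multiple of dmax; in that corner case the final fragment would carry zero payload bytes. The split, restated:

/* Restatement of the per-fragment payload size computed below;
 * assumes dsz % dmax != 0.
 */
static int fragm_payload_len(int pktno, int nof_fragms, int dsz, int dmax)
{
	return pktno < nof_fragms ? dmax : dsz % dmax;
}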
*/ + if (skb_linearize(skb)) + return -ENOMEM; + + data = (u8 *)skb->data; + dsz = msg_size(buf_msg(skb)); + dmax = pktmax - INT_H_SIZE; + if (dsz <= dmax || !dmax) + return -EINVAL; + + nof_fragms = dsz / dmax + 1; + for (pktno = 1; pktno <= nof_fragms; pktno++) { + if (pktno < nof_fragms) + eat = dmax; + else + eat = dsz % dmax; + /* Allocate a new fragment */ + _skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC); + if (!_skb) + goto error; + skb_orphan(_skb); + __skb_queue_tail(frags, _skb); + /* Copy header & data to the fragment */ + skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE); + skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat); + data += eat; + /* Update the fragment's header */ + _hdr = buf_msg(_skb); + msg_set_fragm_no(_hdr, pktno); + msg_set_nof_fragms(_hdr, nof_fragms); + msg_set_size(_hdr, INT_H_SIZE + eat); + } + return 0; + +error: + __skb_queue_purge(frags); + __skb_queue_head_init(frags); + return -ENOMEM; +} + +/** * tipc_msg_build - create buffer chain containing specified header and data * @mhdr: Message header, to be prepended to data * @m: User message diff --git a/net/tipc/msg.h b/net/tipc/msg.h index da509f0eb9ca..1c8c8dd32a4e 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -721,12 +721,26 @@ static inline void msg_set_last_bcast(struct tipc_msg *m, u32 n) msg_set_bits(m, 4, 16, 0xffff, n); } +static inline u32 msg_nof_fragms(struct tipc_msg *m) +{ + return msg_bits(m, 4, 0, 0xffff); +} + +static inline void msg_set_nof_fragms(struct tipc_msg *m, u32 n) +{ + msg_set_bits(m, 4, 0, 0xffff, n); +} + +static inline u32 msg_fragm_no(struct tipc_msg *m) +{ + return msg_bits(m, 4, 16, 0xffff); +} + static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n) { msg_set_bits(m, 4, 16, 0xffff, n); } - static inline u16 msg_next_sent(struct tipc_msg *m) { return msg_bits(m, 4, 0, 0xffff); @@ -877,6 +891,16 @@ static inline void msg_set_msgcnt(struct tipc_msg *m, u16 n) msg_set_bits(m, 9, 16, 0xffff, n); } +static inline u16 msg_syncpt(struct tipc_msg *m) +{ + return msg_bits(m, 9, 16, 0xffff); +} + +static inline void msg_set_syncpt(struct tipc_msg *m, u16 n) +{ + msg_set_bits(m, 9, 16, 0xffff, n); +} + static inline u32 msg_conn_ack(struct tipc_msg *m) { return msg_bits(m, 9, 16, 0xffff); @@ -1035,6 +1059,8 @@ bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu); bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg, u32 mtu, u32 dnode); bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos); +int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr, + int pktmax, struct sk_buff_head *frags); int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, int dsz, int mtu, struct sk_buff_head *list); bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err); diff --git a/net/tipc/node.c b/net/tipc/node.c index 3a5be1d7e572..7ca019001f7c 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1649,7 +1649,6 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, int usr = msg_user(hdr); int mtyp = msg_type(hdr); u16 oseqno = msg_seqno(hdr); - u16 iseqno = msg_seqno(msg_inner_hdr(hdr)); u16 exp_pkts = msg_msgcnt(hdr); u16 rcv_nxt, syncpt, dlv_nxt, inputq_len; int state = n->state; @@ -1748,7 +1747,10 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, /* Initiate synch mode if applicable */ if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) { - syncpt = iseqno + exp_pkts - 1; + if (n->capabilities & 
TIPC_TUNNEL_ENHANCED) + syncpt = msg_syncpt(hdr); + else + syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1; if (!tipc_link_is_up(l)) __tipc_node_link_up(n, bearer_id, xmitq); if (n->state == SELF_UP_PEER_UP) { diff --git a/net/tipc/node.h b/net/tipc/node.h index c0bf49ea3de4..291d0ecd4101 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -53,7 +53,8 @@ enum { TIPC_NODE_ID128 = (1 << 5), TIPC_LINK_PROTO_SEQNO = (1 << 6), TIPC_MCAST_RBCTL = (1 << 7), - TIPC_GAP_ACK_BLOCK = (1 << 8) + TIPC_GAP_ACK_BLOCK = (1 << 8), + TIPC_TUNNEL_ENHANCED = (1 << 9) }; #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ @@ -64,7 +65,8 @@ enum { TIPC_NODE_ID128 | \ TIPC_LINK_PROTO_SEQNO | \ TIPC_MCAST_RBCTL | \ - TIPC_GAP_ACK_BLOCK) + TIPC_GAP_ACK_BLOCK | \ + TIPC_TUNNEL_ENHANCED) #define INVALID_BEARER_ID -1 void tipc_node_stop(struct net *net); diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 7c0b2b778703..d184230665eb 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -243,14 +243,14 @@ static void tls_append_frag(struct tls_record_info *record, skb_frag_t *frag; frag = &record->frags[record->num_frags - 1]; - if (frag->page.p == pfrag->page && - frag->page_offset + frag->size == pfrag->offset) { - frag->size += size; + if (skb_frag_page(frag) == pfrag->page && + skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) { + skb_frag_size_add(frag, size); } else { ++frag; - frag->page.p = pfrag->page; - frag->page_offset = pfrag->offset; - frag->size = size; + __skb_frag_set_page(frag, pfrag->page); + skb_frag_off_set(frag, pfrag->offset); + skb_frag_size_set(frag, size); ++record->num_frags; get_page(pfrag->page); } @@ -301,8 +301,8 @@ static int tls_push_record(struct sock *sk, frag = &record->frags[i]; sg_unmark_end(&offload_ctx->sg_tx_data[i]); sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag), - frag->size, frag->page_offset); - sk_mem_charge(sk, frag->size); + skb_frag_size(frag), skb_frag_off(frag)); + sk_mem_charge(sk, skb_frag_size(frag)); get_page(skb_frag_page(frag)); } sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]); @@ -324,7 +324,7 @@ static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx, frag = &record->frags[0]; __skb_frag_set_page(frag, pfrag->page); - frag->page_offset = pfrag->offset; + skb_frag_off_set(frag, pfrag->offset); skb_frag_size_set(frag, prepend_size); get_page(pfrag->page); diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index 9070d68a92a4..28895333701e 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c @@ -273,7 +273,7 @@ static int fill_sg_in(struct scatterlist *sg_in, __skb_frag_ref(frag); sg_set_page(sg_in + i, skb_frag_page(frag), - skb_frag_size(frag), frag->page_offset); + skb_frag_size(frag), skb_frag_off(frag)); remaining -= skb_frag_size(frag); diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c index 9d864ebeb7b3..261521d286d6 100644 --- a/net/vmw_vsock/hyperv_transport.c +++ b/net/vmw_vsock/hyperv_transport.c @@ -77,11 +77,11 @@ struct hvs_send_buf { VMBUS_PKT_TRAILER_SIZE) union hvs_service_id { - uuid_le srv_id; + guid_t srv_id; struct { unsigned int svm_port; - unsigned char b[sizeof(uuid_le) - sizeof(unsigned int)]; + unsigned char b[sizeof(guid_t) - sizeof(unsigned int)]; }; }; @@ -89,8 +89,8 @@ union hvs_service_id { struct hvsock { struct vsock_sock *vsk; - uuid_le vm_srv_id; - uuid_le host_srv_id; + guid_t vm_srv_id; + guid_t host_srv_id; struct vmbus_channel *chan; struct vmpacket_descriptor 
*recv_desc; @@ -159,21 +159,21 @@ struct hvsock { #define MIN_HOST_EPHEMERAL_PORT (MAX_HOST_LISTEN_PORT + 1) /* 00000000-facb-11e6-bd58-64006a7986d3 */ -static const uuid_le srv_id_template = - UUID_LE(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58, - 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3); +static const guid_t srv_id_template = + GUID_INIT(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58, + 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3); -static bool is_valid_srv_id(const uuid_le *id) +static bool is_valid_srv_id(const guid_t *id) { - return !memcmp(&id->b[4], &srv_id_template.b[4], sizeof(uuid_le) - 4); + return !memcmp(&id->b[4], &srv_id_template.b[4], sizeof(guid_t) - 4); } -static unsigned int get_port_by_srv_id(const uuid_le *svr_id) +static unsigned int get_port_by_srv_id(const guid_t *svr_id) { return *((unsigned int *)svr_id); } -static void hvs_addr_init(struct sockaddr_vm *addr, const uuid_le *svr_id) +static void hvs_addr_init(struct sockaddr_vm *addr, const guid_t *svr_id) { unsigned int port = get_port_by_srv_id(svr_id); @@ -321,7 +321,7 @@ static void hvs_close_connection(struct vmbus_channel *chan) static void hvs_open_connection(struct vmbus_channel *chan) { - uuid_le *if_instance, *if_type; + guid_t *if_instance, *if_type; unsigned char conn_from_host; struct sockaddr_vm addr; diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 0815d1357861..082a30936690 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -307,6 +307,7 @@ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) break; } + pkt->buf_len = buf_len; pkt->len = buf_len; sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr)); diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 6f1a8aff65c5..94cc0fa3e848 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -26,6 +26,9 @@ /* How long to wait for graceful shutdown of a connection */ #define VSOCK_CLOSE_TIMEOUT (8 * HZ) +/* Threshold for detecting small packets to copy */ +#define GOOD_COPY_LEN 128 + static const struct virtio_transport *virtio_transport_get_ops(void) { const struct vsock_transport *t = vsock_core_get_transport(); @@ -64,6 +67,9 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info, pkt->buf = kmalloc(len, GFP_KERNEL); if (!pkt->buf) goto out_pkt; + + pkt->buf_len = len; + err = memcpy_from_msg(pkt->buf, info->msg, len); if (err) goto out; @@ -91,8 +97,17 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque) struct virtio_vsock_pkt *pkt = opaque; struct af_vsockmon_hdr *hdr; struct sk_buff *skb; + size_t payload_len; + void *payload_buf; + + /* A packet could be split to fit the RX buffer, so we can retrieve + * the payload length from the header and the buffer pointer taking + * care of the offset in the original packet. 
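Because of that, pkt->buf and pkt->len no longer describe what a consumer of a received packet should read; the authoritative payload window is (buf + off, hdr.len). As a hedged accessor sketch (not in the patch):

/* Hedged sketch: the valid payload window of a possibly-split pkt */
static void virtio_vsock_pkt_payload(const struct virtio_vsock_pkt *pkt,
				     void **buf, size_t *len)
{
	*buf = pkt->buf + pkt->off;
	*len = le32_to_cpu(pkt->hdr.len);
}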
+ */ + payload_len = le32_to_cpu(pkt->hdr.len); + payload_buf = pkt->buf + pkt->off; - skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + pkt->len, + skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len, GFP_ATOMIC); if (!skb) return NULL; @@ -132,8 +147,8 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque) skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr)); - if (pkt->len) { - skb_put_data(skb, pkt->buf, pkt->len); + if (payload_len) { + skb_put_data(skb, payload_buf, payload_len); } return skb; @@ -166,8 +181,8 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk, vvs = vsk->trans; /* we can send less than pkt_len bytes */ - if (pkt_len > VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE) - pkt_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE; + if (pkt_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) + pkt_len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE; /* virtio_transport_get_credit might return less than pkt_len credit */ pkt_len = virtio_transport_get_credit(vvs, pkt_len); @@ -204,10 +219,11 @@ static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs, void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt) { - spin_lock_bh(&vvs->tx_lock); + spin_lock_bh(&vvs->rx_lock); + vvs->last_fwd_cnt = vvs->fwd_cnt; pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt); pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc); - spin_unlock_bh(&vvs->tx_lock); + spin_unlock_bh(&vvs->rx_lock); } EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt); @@ -255,6 +271,7 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk, struct virtio_vsock_sock *vvs = vsk->trans; struct virtio_vsock_pkt *pkt; size_t bytes, total = 0; + u32 free_space; int err = -EFAULT; spin_lock_bh(&vvs->rx_lock); @@ -285,11 +302,19 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk, virtio_transport_free_pkt(pkt); } } + + free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt); + spin_unlock_bh(&vvs->rx_lock); - /* Send a credit pkt to peer */ - virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM, - NULL); + /* We send a credit update only when the space available seen + * by the transmitter is less than VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + */ + if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) { + virtio_transport_send_credit_update(vsk, + VIRTIO_VSOCK_TYPE_STREAM, + NULL); + } return total; @@ -841,24 +866,60 @@ destroy: return err; } +static void +virtio_transport_recv_enqueue(struct vsock_sock *vsk, + struct virtio_vsock_pkt *pkt) +{ + struct virtio_vsock_sock *vvs = vsk->trans; + bool free_pkt = false; + + pkt->len = le32_to_cpu(pkt->hdr.len); + pkt->off = 0; + + spin_lock_bh(&vvs->rx_lock); + + virtio_transport_inc_rx_pkt(vvs, pkt); + + /* Try to copy small packets into the buffer of last packet queued, + * to avoid wasting memory queueing the entire buffer with a small + * payload. + */ + if (pkt->len <= GOOD_COPY_LEN && !list_empty(&vvs->rx_queue)) { + struct virtio_vsock_pkt *last_pkt; + + last_pkt = list_last_entry(&vvs->rx_queue, + struct virtio_vsock_pkt, list); + + /* If there is space in the last packet queued, we copy the + * new packet in its buffer. 
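Two space-management details above: a credit update is now sent only once the transmitter's view of free receive space could drop below one maximum-size packet, and a small incoming payload is folded into the tail of the previously queued packet instead of pinning a whole RX buffer. The coalescing test, stated on its own (illustrative):

/* Illustrative standalone form of the coalescing test used just below */
static bool virtio_vsock_can_coalesce(const struct virtio_vsock_pkt *last,
				      const struct virtio_vsock_pkt *pkt)
{
	return pkt->len <= GOOD_COPY_LEN &&
	       pkt->len <= last->buf_len - last->len;
}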
+ */ + if (pkt->len <= last_pkt->buf_len - last_pkt->len) { + memcpy(last_pkt->buf + last_pkt->len, pkt->buf, + pkt->len); + last_pkt->len += pkt->len; + free_pkt = true; + goto out; + } + } + + list_add_tail(&pkt->list, &vvs->rx_queue); + +out: + spin_unlock_bh(&vvs->rx_lock); + if (free_pkt) + virtio_transport_free_pkt(pkt); +} + static int virtio_transport_recv_connected(struct sock *sk, struct virtio_vsock_pkt *pkt) { struct vsock_sock *vsk = vsock_sk(sk); - struct virtio_vsock_sock *vvs = vsk->trans; int err = 0; switch (le16_to_cpu(pkt->hdr.op)) { case VIRTIO_VSOCK_OP_RW: - pkt->len = le32_to_cpu(pkt->hdr.len); - pkt->off = 0; - - spin_lock_bh(&vvs->rx_lock); - virtio_transport_inc_rx_pkt(vvs, pkt); - list_add_tail(&pkt->list, &vvs->rx_queue); - spin_unlock_bh(&vvs->rx_lock); - + virtio_transport_recv_enqueue(vsk, pkt); sk->sk_data_ready(sk); return err; case VIRTIO_VSOCK_OP_CREDIT_UPDATE: diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 67f8360dfcee..63cf7131f601 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig @@ -217,6 +217,8 @@ config LIB80211_CRYPT_WEP config LIB80211_CRYPT_CCMP tristate + select CRYPTO_AES + select CRYPTO_CCM config LIB80211_CRYPT_TKIP tristate diff --git a/net/wireless/core.c b/net/wireless/core.c index 32b3c719fdfc..a599469b8157 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -142,12 +142,10 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, if (result) return result; - if (rdev->wiphy.debugfsdir && - !debugfs_rename(rdev->wiphy.debugfsdir->d_parent, - rdev->wiphy.debugfsdir, - rdev->wiphy.debugfsdir->d_parent, - newname)) - pr_err("failed to rename debugfs dir to %s!\n", newname); + if (rdev->wiphy.debugfsdir) + debugfs_rename(rdev->wiphy.debugfsdir->d_parent, + rdev->wiphy.debugfsdir, + rdev->wiphy.debugfsdir->d_parent, newname); nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); @@ -899,11 +897,8 @@ int wiphy_register(struct wiphy *wiphy) cfg80211_rdev_list_generation++; /* add to debugfs */ - rdev->wiphy.debugfsdir = - debugfs_create_dir(wiphy_name(&rdev->wiphy), - ieee80211_debugfs_dir); - if (IS_ERR(rdev->wiphy.debugfsdir)) - rdev->wiphy.debugfsdir = NULL; + rdev->wiphy.debugfsdir = debugfs_create_dir(wiphy_name(&rdev->wiphy), + ieee80211_debugfs_dir); cfg80211_debugfs_rdev_add(rdev); nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); diff --git a/net/wireless/core.h b/net/wireless/core.h index ee8388fe4a92..77556c58d9ac 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -306,6 +306,8 @@ void ieee80211_set_bitrate_flags(struct wiphy *wiphy); void cfg80211_bss_expire(struct cfg80211_registered_device *rdev); void cfg80211_bss_age(struct cfg80211_registered_device *rdev, unsigned long age_secs); +void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev, + struct ieee80211_channel *channel); /* IBSS */ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c index 7e8ff9d7dcfa..6a5f08f7491e 100644 --- a/net/wireless/lib80211_crypt_ccmp.c +++ b/net/wireless/lib80211_crypt_ccmp.c @@ -22,6 +22,7 @@ #include <linux/ieee80211.h> #include <linux/crypto.h> +#include <crypto/aead.h> #include <net/lib80211.h> @@ -48,20 +49,13 @@ struct lib80211_ccmp_data { int key_idx; - struct crypto_cipher *tfm; + struct crypto_aead *tfm; /* scratch buffers for virt_to_page() (crypto API) */ - u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN], - tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN]; - u8 rx_b0[AES_BLOCK_LEN], 
rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN]; + u8 tx_aad[2 * AES_BLOCK_LEN]; + u8 rx_aad[2 * AES_BLOCK_LEN]; }; -static inline void lib80211_ccmp_aes_encrypt(struct crypto_cipher *tfm, - const u8 pt[16], u8 ct[16]) -{ - crypto_cipher_encrypt_one(tfm, ct, pt); -} - static void *lib80211_ccmp_init(int key_idx) { struct lib80211_ccmp_data *priv; @@ -71,7 +65,7 @@ static void *lib80211_ccmp_init(int key_idx) goto fail; priv->key_idx = key_idx; - priv->tfm = crypto_alloc_cipher("aes", 0, 0); + priv->tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tfm)) { priv->tfm = NULL; goto fail; @@ -82,7 +76,7 @@ static void *lib80211_ccmp_init(int key_idx) fail: if (priv) { if (priv->tfm) - crypto_free_cipher(priv->tfm); + crypto_free_aead(priv->tfm); kfree(priv); } @@ -93,25 +87,16 @@ static void lib80211_ccmp_deinit(void *priv) { struct lib80211_ccmp_data *_priv = priv; if (_priv && _priv->tfm) - crypto_free_cipher(_priv->tfm); + crypto_free_aead(_priv->tfm); kfree(priv); } -static inline void xor_block(u8 * b, u8 * a, size_t len) -{ - int i; - for (i = 0; i < len; i++) - b[i] ^= a[i]; -} - -static void ccmp_init_blocks(struct crypto_cipher *tfm, - struct ieee80211_hdr *hdr, - u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0) +static int ccmp_init_iv_and_aad(const struct ieee80211_hdr *hdr, + const u8 *pn, u8 *iv, u8 *aad) { u8 *pos, qc = 0; size_t aad_len; int a4_included, qc_included; - u8 aad[2 * AES_BLOCK_LEN]; a4_included = ieee80211_has_a4(hdr->frame_control); qc_included = ieee80211_is_data_qos(hdr->frame_control); @@ -127,17 +112,19 @@ static void ccmp_init_blocks(struct crypto_cipher *tfm, aad_len += 2; } - /* CCM Initial Block: - * Flag (Include authentication header, M=3 (8-octet MIC), - * L=1 (2-octet Dlen)) - * Nonce: 0x00 | A2 | PN - * Dlen */ - b0[0] = 0x59; - b0[1] = qc; - memcpy(b0 + 2, hdr->addr2, ETH_ALEN); - memcpy(b0 + 8, pn, CCMP_PN_LEN); - b0[14] = (dlen >> 8) & 0xff; - b0[15] = dlen & 0xff; + /* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC + * mode authentication are not allowed to collide, yet both are derived + * from the same vector. We only set L := 1 here to indicate that the + * data size can be represented in (L+1) bytes. The CCM layer will take + * care of storing the data length in the top (L+1) bytes and setting + * and clearing the other bits as is required to derive the two IVs. + */ + iv[0] = 0x1; + + /* Nonce: QC | A2 | PN */ + iv[1] = qc; + memcpy(iv + 2, hdr->addr2, ETH_ALEN); + memcpy(iv + 8, pn, CCMP_PN_LEN); /* AAD: * FC with bits 4..6 and 11..13 masked to zero; 14 is always one @@ -147,31 +134,20 @@ static void ccmp_init_blocks(struct crypto_cipher *tfm, * QC (if present) */ pos = (u8 *) hdr; - aad[0] = 0; /* aad_len >> 8 */ - aad[1] = aad_len & 0xff; - aad[2] = pos[0] & 0x8f; - aad[3] = pos[1] & 0xc7; - memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN); + aad[0] = pos[0] & 0x8f; + aad[1] = pos[1] & 0xc7; + memcpy(aad + 2, hdr->addr1, 3 * ETH_ALEN); pos = (u8 *) & hdr->seq_ctrl; - aad[22] = pos[0] & 0x0f; - aad[23] = 0; /* all bits masked */ - memset(aad + 24, 0, 8); + aad[20] = pos[0] & 0x0f; + aad[21] = 0; /* all bits masked */ + memset(aad + 22, 0, 8); if (a4_included) - memcpy(aad + 24, hdr->addr4, ETH_ALEN); + memcpy(aad + 22, hdr->addr4, ETH_ALEN); if (qc_included) { - aad[a4_included ? 30 : 24] = qc; + aad[a4_included ? 
28 : 22] = qc; /* rest of QC masked */ } - - /* Start with the first block and AAD */ - lib80211_ccmp_aes_encrypt(tfm, b0, auth); - xor_block(auth, aad, AES_BLOCK_LEN); - lib80211_ccmp_aes_encrypt(tfm, auth, auth); - xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN); - lib80211_ccmp_aes_encrypt(tfm, auth, auth); - b0[0] &= 0x07; - b0[14] = b0[15] = 0; - lib80211_ccmp_aes_encrypt(tfm, b0, s0); + return aad_len; } static int lib80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, @@ -214,13 +190,13 @@ static int lib80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_ccmp_data *key = priv; - int data_len, i, blocks, last, len; - u8 *pos, *mic; struct ieee80211_hdr *hdr; - u8 *b0 = key->tx_b0; - u8 *b = key->tx_b; - u8 *e = key->tx_e; - u8 *s0 = key->tx_s0; + struct aead_request *req; + struct scatterlist sg[2]; + u8 *aad = key->tx_aad; + u8 iv[AES_BLOCK_LEN]; + int len, data_len, aad_len; + int ret; if (skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len) return -1; @@ -230,31 +206,28 @@ static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) if (len < 0) return -1; - pos = skb->data + hdr_len + CCMP_HDR_LEN; + req = aead_request_alloc(key->tfm, GFP_ATOMIC); + if (!req) + return -ENOMEM; + hdr = (struct ieee80211_hdr *)skb->data; - ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); - - blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); - last = data_len % AES_BLOCK_LEN; - - for (i = 1; i <= blocks; i++) { - len = (i == blocks && last) ? last : AES_BLOCK_LEN; - /* Authentication */ - xor_block(b, pos, len); - lib80211_ccmp_aes_encrypt(key->tfm, b, b); - /* Encryption, with counter */ - b0[14] = (i >> 8) & 0xff; - b0[15] = i & 0xff; - lib80211_ccmp_aes_encrypt(key->tfm, b0, e); - xor_block(pos, e, len); - pos += len; - } + aad_len = ccmp_init_iv_and_aad(hdr, key->tx_pn, iv, aad); - mic = skb_put(skb, CCMP_MIC_LEN); - for (i = 0; i < CCMP_MIC_LEN; i++) - mic[i] = b[i] ^ s0[i]; + skb_put(skb, CCMP_MIC_LEN); - return 0; + sg_init_table(sg, 2); + sg_set_buf(&sg[0], aad, aad_len); + sg_set_buf(&sg[1], skb->data + hdr_len + CCMP_HDR_LEN, + data_len + CCMP_MIC_LEN); + + aead_request_set_callback(req, 0, NULL, NULL); + aead_request_set_ad(req, aad_len); + aead_request_set_crypt(req, sg, sg, data_len, iv); + + ret = crypto_aead_encrypt(req); + aead_request_free(req); + + return ret; } /* @@ -283,13 +256,13 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) struct lib80211_ccmp_data *key = priv; u8 keyidx, *pos; struct ieee80211_hdr *hdr; - u8 *b0 = key->rx_b0; - u8 *b = key->rx_b; - u8 *a = key->rx_a; + struct aead_request *req; + struct scatterlist sg[2]; + u8 *aad = key->rx_aad; + u8 iv[AES_BLOCK_LEN]; u8 pn[6]; - int i, blocks, last, len; - size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN; - u8 *mic = skb->data + skb->len - CCMP_MIC_LEN; + int aad_len, ret; + size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN; if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) { key->dot11RSNAStatsCCMPFormatErrors++; @@ -337,28 +310,26 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) return -4; } - ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b); - xor_block(mic, b, CCMP_MIC_LEN); - - blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); - last = data_len % AES_BLOCK_LEN; - - for (i = 1; i <= blocks; i++) { - len = (i == blocks && last) ? 
last : AES_BLOCK_LEN; - /* Decrypt, with counter */ - b0[14] = (i >> 8) & 0xff; - b0[15] = i & 0xff; - lib80211_ccmp_aes_encrypt(key->tfm, b0, b); - xor_block(pos, b, len); - /* Authentication */ - xor_block(a, pos, len); - lib80211_ccmp_aes_encrypt(key->tfm, a, a); - pos += len; - } + req = aead_request_alloc(key->tfm, GFP_ATOMIC); + if (!req) + return -ENOMEM; - if (memcmp(mic, a, CCMP_MIC_LEN) != 0) { - net_dbg_ratelimited("CCMP: decrypt failed: STA=%pM\n", - hdr->addr2); + aad_len = ccmp_init_iv_and_aad(hdr, pn, iv, aad); + + sg_init_table(sg, 2); + sg_set_buf(&sg[0], aad, aad_len); + sg_set_buf(&sg[1], pos, data_len); + + aead_request_set_callback(req, 0, NULL, NULL); + aead_request_set_ad(req, aad_len); + aead_request_set_crypt(req, sg, sg, data_len, iv); + + ret = crypto_aead_decrypt(req); + aead_request_free(req); + + if (ret) { + net_dbg_ratelimited("CCMP: decrypt failed: STA=%pM (%d)\n", + hdr->addr2, ret); key->dot11RSNAStatsCCMPDecryptErrors++; return -5; } @@ -377,7 +348,7 @@ static int lib80211_ccmp_set_key(void *key, int len, u8 * seq, void *priv) { struct lib80211_ccmp_data *data = priv; int keyidx; - struct crypto_cipher *tfm = data->tfm; + struct crypto_aead *tfm = data->tfm; keyidx = data->key_idx; memset(data, 0, sizeof(*data)); @@ -394,7 +365,9 @@ static int lib80211_ccmp_set_key(void *key, int len, u8 * seq, void *priv) data->rx_pn[4] = seq[1]; data->rx_pn[5] = seq[0]; } - crypto_cipher_setkey(data->tfm, data->key, CCMP_TK_LEN); + if (crypto_aead_setauthsize(data->tfm, CCMP_MIC_LEN) || + crypto_aead_setkey(data->tfm, data->key, CCMP_TK_LEN)) + return -1; } else if (len == 0) data->key_set = 0; else diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index fd05ae1437a9..92e06482563c 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -281,7 +281,16 @@ nl80211_pmsr_attr_policy[NL80211_PMSR_ATTR_MAX + 1] = { NLA_POLICY_NESTED_ARRAY(nl80211_psmr_peer_attr_policy), }; +static const struct nla_policy +he_obss_pd_policy[NL80211_HE_OBSS_PD_ATTR_MAX + 1] = { + [NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] = + NLA_POLICY_RANGE(NLA_U8, 1, 20), + [NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET] = + NLA_POLICY_RANGE(NLA_U8, 1, 20), +}; + const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { + [0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD }, [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, .len = 20-1 }, @@ -574,6 +583,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_SAE_PASSWORD] = { .type = NLA_BINARY, .len = SAE_PASSWORD_MAX_LEN }, [NL80211_ATTR_TWT_RESPONDER] = { .type = NLA_FLAG }, + [NL80211_ATTR_HE_OBSS_PD] = NLA_POLICY_NESTED(he_obss_pd_policy), }; /* policy for the key attributes */ @@ -749,17 +759,25 @@ int nl80211_prepare_wdev_dump(struct netlink_callback *cb, int err; if (!cb->args[0]) { + struct nlattr **attrbuf; + + attrbuf = kcalloc(NUM_NL80211_ATTR, sizeof(*attrbuf), + GFP_KERNEL); + if (!attrbuf) + return -ENOMEM; + err = nlmsg_parse_deprecated(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, - genl_family_attrbuf(&nl80211_fam), - nl80211_fam.maxattr, + attrbuf, nl80211_fam.maxattr, nl80211_policy, NULL); - if (err) + if (err) { + kfree(attrbuf); return err; + } - *wdev = __cfg80211_wdev_from_attrs( - sock_net(cb->skb->sk), - genl_family_attrbuf(&nl80211_fam)); + *wdev = __cfg80211_wdev_from_attrs(sock_net(cb->skb->sk), + attrbuf); + kfree(attrbuf); if (IS_ERR(*wdev)) return PTR_ERR(*wdev); *rdev = wiphy_to_rdev((*wdev)->wiphy); @@ -2172,6 +2190,30 @@ static int 
nl80211_send_wiphy(struct cfg80211_registered_device *rdev, rdev->wiphy.vht_capa_mod_mask)) goto nla_put_failure; + if (nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, + rdev->wiphy.perm_addr)) + goto nla_put_failure; + + if (!is_zero_ether_addr(rdev->wiphy.addr_mask) && + nla_put(msg, NL80211_ATTR_MAC_MASK, ETH_ALEN, + rdev->wiphy.addr_mask)) + goto nla_put_failure; + + if (rdev->wiphy.n_addresses > 1) { + void *attr; + + attr = nla_nest_start(msg, NL80211_ATTR_MAC_ADDRS); + if (!attr) + goto nla_put_failure; + + for (i = 0; i < rdev->wiphy.n_addresses; i++) + if (nla_put(msg, i + 1, ETH_ALEN, + rdev->wiphy.addresses[i].addr)) + goto nla_put_failure; + + nla_nest_end(msg, attr); + } + state->split_start++; break; case 10: @@ -2366,14 +2408,21 @@ static int nl80211_dump_wiphy_parse(struct sk_buff *skb, struct netlink_callback *cb, struct nl80211_dump_wiphy_state *state) { - struct nlattr **tb = genl_family_attrbuf(&nl80211_fam); - int ret = nlmsg_parse_deprecated(cb->nlh, - GENL_HDRLEN + nl80211_fam.hdrsize, - tb, nl80211_fam.maxattr, - nl80211_policy, NULL); + struct nlattr **tb = kcalloc(NUM_NL80211_ATTR, sizeof(*tb), GFP_KERNEL); + int ret; + + if (!tb) + return -ENOMEM; + + ret = nlmsg_parse_deprecated(cb->nlh, + GENL_HDRLEN + nl80211_fam.hdrsize, + tb, nl80211_fam.maxattr, + nl80211_policy, NULL); /* ignore parse errors for backward compatibility */ - if (ret) - return 0; + if (ret) { + ret = 0; + goto out; + } state->split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP]; if (tb[NL80211_ATTR_WIPHY]) @@ -2386,8 +2435,10 @@ static int nl80211_dump_wiphy_parse(struct sk_buff *skb, int ifidx = nla_get_u32(tb[NL80211_ATTR_IFINDEX]); netdev = __dev_get_by_index(sock_net(skb->sk), ifidx); - if (!netdev) - return -ENODEV; + if (!netdev) { + ret = -ENODEV; + goto out; + } if (netdev->ieee80211_ptr) { rdev = wiphy_to_rdev( netdev->ieee80211_ptr->wiphy); @@ -2395,7 +2446,10 @@ static int nl80211_dump_wiphy_parse(struct sk_buff *skb, } } - return 0; + ret = 0; +out: + kfree(tb); + return ret; } static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) @@ -4359,6 +4413,34 @@ static int nl80211_parse_beacon(struct cfg80211_registered_device *rdev, return 0; } +static int nl80211_parse_he_obss_pd(struct nlattr *attrs, + struct ieee80211_he_obss_pd *he_obss_pd) +{ + struct nlattr *tb[NL80211_HE_OBSS_PD_ATTR_MAX + 1]; + int err; + + err = nla_parse_nested(tb, NL80211_HE_OBSS_PD_ATTR_MAX, attrs, + he_obss_pd_policy, NULL); + if (err) + return err; + + if (!tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] || + !tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]) + return -EINVAL; + + he_obss_pd->min_offset = + nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]); + he_obss_pd->max_offset = + nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]); + + if (he_obss_pd->min_offset >= he_obss_pd->max_offset) + return -EINVAL; + + he_obss_pd->enable = true; + + return 0; +} + static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params, const u8 *rates) { @@ -4643,6 +4725,14 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) params.twt_responder = nla_get_flag(info->attrs[NL80211_ATTR_TWT_RESPONDER]); + if (info->attrs[NL80211_ATTR_HE_OBSS_PD]) { + err = nl80211_parse_he_obss_pd( + info->attrs[NL80211_ATTR_HE_OBSS_PD], + ¶ms.he_obss_pd); + if (err) + return err; + } + nl80211_calculate_ap_params(¶ms); if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT]) @@ -8698,7 +8788,7 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq, static int nl80211_dump_survey(struct 
sk_buff *skb, struct netlink_callback *cb) { - struct nlattr **attrbuf = genl_family_attrbuf(&nl80211_fam); + struct nlattr **attrbuf; struct survey_info survey; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; @@ -8706,6 +8796,10 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb) int res; bool radio_stats; + attrbuf = kcalloc(NUM_NL80211_ATTR, sizeof(*attrbuf), GFP_KERNEL); + if (!attrbuf) + return -ENOMEM; + rtnl_lock(); res = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); if (res) @@ -8750,6 +8844,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb) cb->args[2] = survey_idx; res = skb->len; out_err: + kfree(attrbuf); rtnl_unlock(); return res; } @@ -9609,6 +9704,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct cfg80211_registered_device *rdev; + struct nlattr **attrbuf = NULL; int err; long phy_idx; void *data = NULL; @@ -9629,7 +9725,12 @@ static int nl80211_testmode_dump(struct sk_buff *skb, goto out_err; } } else { - struct nlattr **attrbuf = genl_family_attrbuf(&nl80211_fam); + attrbuf = kcalloc(NUM_NL80211_ATTR, sizeof(*attrbuf), + GFP_KERNEL); + if (!attrbuf) { + err = -ENOMEM; + goto out_err; + } err = nlmsg_parse_deprecated(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, @@ -9696,6 +9797,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb, /* see above */ cb->args[0] = phy_idx + 1; out_err: + kfree(attrbuf); rtnl_unlock(); return err; } @@ -12789,7 +12891,7 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, struct cfg80211_registered_device **rdev, struct wireless_dev **wdev) { - struct nlattr **attrbuf = genl_family_attrbuf(&nl80211_fam); + struct nlattr **attrbuf; u32 vid, subcmd; unsigned int i; int vcmd_idx = -1; @@ -12820,24 +12922,32 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, return 0; } + attrbuf = kcalloc(NUM_NL80211_ATTR, sizeof(*attrbuf), GFP_KERNEL); + if (!attrbuf) + return -ENOMEM; + err = nlmsg_parse_deprecated(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, attrbuf, nl80211_fam.maxattr, nl80211_policy, NULL); if (err) - return err; + goto out; if (!attrbuf[NL80211_ATTR_VENDOR_ID] || - !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) - return -EINVAL; + !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) { + err = -EINVAL; + goto out; + } *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf); if (IS_ERR(*wdev)) *wdev = NULL; *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf); - if (IS_ERR(*rdev)) - return PTR_ERR(*rdev); + if (IS_ERR(*rdev)) { + err = PTR_ERR(*rdev); + goto out; + } vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]); subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]); @@ -12850,15 +12960,19 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd) continue; - if (!vcmd->dumpit) - return -EOPNOTSUPP; + if (!vcmd->dumpit) { + err = -EOPNOTSUPP; + goto out; + } vcmd_idx = i; break; } - if (vcmd_idx < 0) - return -EOPNOTSUPP; + if (vcmd_idx < 0) { + err = -EOPNOTSUPP; + goto out; + } if (attrbuf[NL80211_ATTR_VENDOR_DATA]) { data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]); @@ -12869,7 +12983,7 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, attrbuf[NL80211_ATTR_VENDOR_DATA], cb->extack); if (err) - return err; + goto out; } /* 0 is the first index - add 1 to parse only once */ @@ -12881,7 +12995,10 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, cb->args[4] = data_len; /* keep rtnl locked in 
successful case */ - return 0; + err = 0; +out: + kfree(attrbuf); + return err; } static int nl80211_vendor_cmd_dump(struct sk_buff *skb, @@ -14559,6 +14676,7 @@ static struct genl_family nl80211_fam __ro_after_init = { .n_ops = ARRAY_SIZE(nl80211_ops), .mcgrps = nl80211_mcgrps, .n_mcgrps = ARRAY_SIZE(nl80211_mcgrps), + .parallel_ops = true, }; /* notification functions */ @@ -16090,7 +16208,9 @@ void cfg80211_ch_switch_notify(struct net_device *dev, if (wdev->iftype == NL80211_IFTYPE_STATION && !WARN_ON(!wdev->current_bss)) - wdev->current_bss->pub.channel = chandef->chan; + cfg80211_update_assoc_bss_entry(wdev, chandef->chan); + + cfg80211_sched_dfs_chan_update(rdev); nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL, NL80211_CMD_CH_SWITCH_NOTIFY, 0); diff --git a/net/wireless/scan.c b/net/wireless/scan.c index d66e6d4b7555..d313c9befa23 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1091,6 +1091,93 @@ struct cfg80211_non_tx_bss { u8 bssid_index; }; +static bool +cfg80211_update_known_bss(struct cfg80211_registered_device *rdev, + struct cfg80211_internal_bss *known, + struct cfg80211_internal_bss *new, + bool signal_valid) +{ + lockdep_assert_held(&rdev->bss_lock); + + /* Update IEs */ + if (rcu_access_pointer(new->pub.proberesp_ies)) { + const struct cfg80211_bss_ies *old; + + old = rcu_access_pointer(known->pub.proberesp_ies); + + rcu_assign_pointer(known->pub.proberesp_ies, + new->pub.proberesp_ies); + /* Override possible earlier Beacon frame IEs */ + rcu_assign_pointer(known->pub.ies, + new->pub.proberesp_ies); + if (old) + kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); + } else if (rcu_access_pointer(new->pub.beacon_ies)) { + const struct cfg80211_bss_ies *old; + struct cfg80211_internal_bss *bss; + + if (known->pub.hidden_beacon_bss && + !list_empty(&known->hidden_list)) { + const struct cfg80211_bss_ies *f; + + /* The known BSS struct is one of the probe + * response members of a group, but we're + * receiving a beacon (beacon_ies in the new + * bss is used). This can only mean that the + * AP changed its beacon from not having an + * SSID to showing it, which is confusing so + * drop this information. + */ + + f = rcu_access_pointer(new->pub.beacon_ies); + kfree_rcu((struct cfg80211_bss_ies *)f, rcu_head); + return false; + } + + old = rcu_access_pointer(known->pub.beacon_ies); + + rcu_assign_pointer(known->pub.beacon_ies, new->pub.beacon_ies); + + /* Override IEs if they were from a beacon before */ + if (old == rcu_access_pointer(known->pub.ies)) + rcu_assign_pointer(known->pub.ies, new->pub.beacon_ies); + + /* Assign beacon IEs to all sub entries */ + list_for_each_entry(bss, &known->hidden_list, hidden_list) { + const struct cfg80211_bss_ies *ies; + + ies = rcu_access_pointer(bss->pub.beacon_ies); + WARN_ON(ies != old); + + rcu_assign_pointer(bss->pub.beacon_ies, + new->pub.beacon_ies); + } + + if (old) + kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); + } + + known->pub.beacon_interval = new->pub.beacon_interval; + + /* don't update the signal if beacon was heard on + * adjacent channel. 
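+	 * (For instance, a beacon from a BSS on channel 6 may be decoded
+	 * while the scan dwells on channel 5; the attenuated RSSI of such
+	 * an off-channel reception would skew the stored signal, so the
+	 * update is only applied when signal_valid indicates the receive
+	 * frequency was close enough to the BSS channel.)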
+ */ + if (signal_valid) + known->pub.signal = new->pub.signal; + known->pub.capability = new->pub.capability; + known->ts = new->ts; + known->ts_boottime = new->ts_boottime; + known->parent_tsf = new->parent_tsf; + known->pub.chains = new->pub.chains; + memcpy(known->pub.chain_signal, new->pub.chain_signal, + IEEE80211_MAX_CHAINS); + ether_addr_copy(known->parent_bssid, new->parent_bssid); + known->pub.max_bssid_indicator = new->pub.max_bssid_indicator; + known->pub.bssid_index = new->pub.bssid_index; + + return true; +} + /* Returned bss is reference counted and must be cleaned up appropriately. */ struct cfg80211_internal_bss * cfg80211_bss_update(struct cfg80211_registered_device *rdev, @@ -1114,88 +1201,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, found = rb_find_bss(rdev, tmp, BSS_CMP_REGULAR); if (found) { - /* Update IEs */ - if (rcu_access_pointer(tmp->pub.proberesp_ies)) { - const struct cfg80211_bss_ies *old; - - old = rcu_access_pointer(found->pub.proberesp_ies); - - rcu_assign_pointer(found->pub.proberesp_ies, - tmp->pub.proberesp_ies); - /* Override possible earlier Beacon frame IEs */ - rcu_assign_pointer(found->pub.ies, - tmp->pub.proberesp_ies); - if (old) - kfree_rcu((struct cfg80211_bss_ies *)old, - rcu_head); - } else if (rcu_access_pointer(tmp->pub.beacon_ies)) { - const struct cfg80211_bss_ies *old; - struct cfg80211_internal_bss *bss; - - if (found->pub.hidden_beacon_bss && - !list_empty(&found->hidden_list)) { - const struct cfg80211_bss_ies *f; - - /* - * The found BSS struct is one of the probe - * response members of a group, but we're - * receiving a beacon (beacon_ies in the tmp - * bss is used). This can only mean that the - * AP changed its beacon from not having an - * SSID to showing it, which is confusing so - * drop this information. - */ - - f = rcu_access_pointer(tmp->pub.beacon_ies); - kfree_rcu((struct cfg80211_bss_ies *)f, - rcu_head); - goto drop; - } - - old = rcu_access_pointer(found->pub.beacon_ies); - - rcu_assign_pointer(found->pub.beacon_ies, - tmp->pub.beacon_ies); - - /* Override IEs if they were from a beacon before */ - if (old == rcu_access_pointer(found->pub.ies)) - rcu_assign_pointer(found->pub.ies, - tmp->pub.beacon_ies); - - /* Assign beacon IEs to all sub entries */ - list_for_each_entry(bss, &found->hidden_list, - hidden_list) { - const struct cfg80211_bss_ies *ies; - - ies = rcu_access_pointer(bss->pub.beacon_ies); - WARN_ON(ies != old); - - rcu_assign_pointer(bss->pub.beacon_ies, - tmp->pub.beacon_ies); - } - - if (old) - kfree_rcu((struct cfg80211_bss_ies *)old, - rcu_head); - } - - found->pub.beacon_interval = tmp->pub.beacon_interval; - /* - * don't update the signal if beacon was heard on - * adjacent channel. 
- */ - if (signal_valid) - found->pub.signal = tmp->pub.signal; - found->pub.capability = tmp->pub.capability; - found->ts = tmp->ts; - found->ts_boottime = tmp->ts_boottime; - found->parent_tsf = tmp->parent_tsf; - found->pub.chains = tmp->pub.chains; - memcpy(found->pub.chain_signal, tmp->pub.chain_signal, - IEEE80211_MAX_CHAINS); - ether_addr_copy(found->parent_bssid, tmp->parent_bssid); - found->pub.max_bssid_indicator = tmp->pub.max_bssid_indicator; - found->pub.bssid_index = tmp->pub.bssid_index; + if (!cfg80211_update_known_bss(rdev, found, tmp, signal_valid)) + goto drop; } else { struct cfg80211_internal_bss *new; struct cfg80211_internal_bss *hidden; @@ -1368,6 +1375,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, struct cfg80211_internal_bss tmp = {}, *res; int bss_type; bool signal_valid; + unsigned long ts; if (WARN_ON(!wiphy)) return NULL; @@ -1390,8 +1398,11 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, tmp.ts_boottime = data->boottime_ns; if (non_tx_data) { tmp.pub.transmitted_bss = non_tx_data->tx_bss; + ts = bss_from_pub(non_tx_data->tx_bss)->ts; tmp.pub.bssid_index = non_tx_data->bssid_index; tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator; + } else { + ts = jiffies; } /* @@ -1425,8 +1436,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, signal_valid = abs(data->chan->center_freq - channel->center_freq) <= wiphy->max_adj_channel_rssi_comp; - res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid, - jiffies); + res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid, ts); if (!res) return NULL; @@ -1440,7 +1450,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, regulatory_hint_found_beacon(wiphy, channel, gfp); } - if (non_tx_data && non_tx_data->tx_bss) { + if (non_tx_data) { /* this is a nontransmitting bss, we need to add it to * transmitting bss' list if it is not there */ @@ -1659,6 +1669,8 @@ cfg80211_inform_bss_data(struct wiphy *wiphy, res = cfg80211_inform_single_bss_data(wiphy, data, ftype, bssid, tsf, capability, beacon_interval, ie, ielen, NULL, gfp); + if (!res) + return NULL; non_tx_data.tx_bss = res; cfg80211_parse_mbssid_data(wiphy, data, ftype, bssid, tsf, beacon_interval, ie, ielen, &non_tx_data, @@ -1776,7 +1788,6 @@ static struct cfg80211_bss * cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy, struct cfg80211_inform_bss *data, struct ieee80211_mgmt *mgmt, size_t len, - struct cfg80211_non_tx_bss *non_tx_data, gfp_t gfp) { struct cfg80211_internal_bss tmp = {}, *res; @@ -1835,11 +1846,6 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy, tmp.pub.chains = data->chains; memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS); ether_addr_copy(tmp.parent_bssid, data->parent_bssid); - if (non_tx_data) { - tmp.pub.transmitted_bss = non_tx_data->tx_bss; - tmp.pub.bssid_index = non_tx_data->bssid_index; - tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator; - } signal_valid = abs(data->chan->center_freq - channel->center_freq) <= wiphy->max_adj_channel_rssi_comp; @@ -1877,7 +1883,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, struct cfg80211_non_tx_bss non_tx_data; res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt, - len, NULL, gfp); + len, gfp); if (!res || !wiphy->support_mbssid || !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) return res; @@ -1995,6 +2001,85 @@ void cfg80211_bss_iter(struct wiphy *wiphy, } EXPORT_SYMBOL(cfg80211_bss_iter); +void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev, + struct 
ieee80211_channel *chan) +{ + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + struct cfg80211_internal_bss *cbss = wdev->current_bss; + struct cfg80211_internal_bss *new = NULL; + struct cfg80211_internal_bss *bss; + struct cfg80211_bss *nontrans_bss; + struct cfg80211_bss *tmp; + + spin_lock_bh(&rdev->bss_lock); + + if (WARN_ON(cbss->pub.channel == chan)) + goto done; + + /* use transmitting bss */ + if (cbss->pub.transmitted_bss) + cbss = container_of(cbss->pub.transmitted_bss, + struct cfg80211_internal_bss, + pub); + + cbss->pub.channel = chan; + + list_for_each_entry(bss, &rdev->bss_list, list) { + if (!cfg80211_bss_type_match(bss->pub.capability, + bss->pub.channel->band, + wdev->conn_bss_type)) + continue; + + if (bss == cbss) + continue; + + if (!cmp_bss(&bss->pub, &cbss->pub, BSS_CMP_REGULAR)) { + new = bss; + break; + } + } + + if (new) { + /* to save time, update IEs for transmitting bss only */ + if (cfg80211_update_known_bss(rdev, cbss, new, false)) { + new->pub.proberesp_ies = NULL; + new->pub.beacon_ies = NULL; + } + + list_for_each_entry_safe(nontrans_bss, tmp, + &new->pub.nontrans_list, + nontrans_list) { + bss = container_of(nontrans_bss, + struct cfg80211_internal_bss, pub); + if (__cfg80211_unlink_bss(rdev, bss)) + rdev->bss_generation++; + } + + WARN_ON(atomic_read(&new->hold)); + if (!WARN_ON(!__cfg80211_unlink_bss(rdev, new))) + rdev->bss_generation++; + } + + rb_erase(&cbss->rbn, &rdev->bss_tree); + rb_insert_bss(rdev, cbss); + rdev->bss_generation++; + + list_for_each_entry_safe(nontrans_bss, tmp, + &cbss->pub.nontrans_list, + nontrans_list) { + bss = container_of(nontrans_bss, + struct cfg80211_internal_bss, pub); + bss->pub.channel = chan; + rb_erase(&bss->rbn, &rdev->bss_tree); + rb_insert_bss(rdev, bss); + rdev->bss_generation++; + } + +done: + spin_unlock_bh(&rdev->bss_lock); +} + #ifdef CONFIG_CFG80211_WEXT static struct cfg80211_registered_device * cfg80211_get_dev_from_ifindex(struct net *net, int ifindex) diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index 32c364d3bfb3..4d422447aadc 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c @@ -85,7 +85,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) if (dlen < len) len = dlen; - frag->page_offset = 0; + skb_frag_off_set(frag, 0); skb_frag_size_set(frag, len); memcpy(skb_frag_address(frag), scratch, len); diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh index 40f16f2a3afd..5cbff8038f84 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh @@ -36,8 +36,6 @@ source $lib_dir/lib.sh h1_create() { - local dscp; - simple_if_init $h1 192.0.2.1/28 tc qdisc add dev $h1 clsact dscp_capture_install $h1 10 @@ -67,6 +65,7 @@ h2_destroy() dscp_map() { local base=$1; shift + local prio for prio in {0..7}; do echo app=$prio,5,$((base + prio)) @@ -138,6 +137,7 @@ dscp_ping_test() local prio=$1; shift local dev_10=$1; shift local dev_20=$1; shift + local key local dscp_10=$(((prio + 10) << 2)) local dscp_20=$(((prio + 20) << 2)) @@ -175,6 +175,8 @@ dscp_ping_test() test_dscp() { + local prio + for prio in {0..7}; do dscp_ping_test v$h1 192.0.2.1 192.0.2.2 $prio $h1 $h2 done diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh index 9faf02e32627..c745ce3befee 100755 
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh @@ -31,6 +31,7 @@ ALL_TESTS=" ping_ipv4 test_update test_no_update + test_dscp_leftover " lib_dir=$(dirname $0)/../../../net/forwarding @@ -50,10 +51,13 @@ reprioritize() echo ${reprio[$in]} } -h1_create() +zero() { - local dscp; + echo 0 +} +h1_create() +{ simple_if_init $h1 192.0.2.1/28 tc qdisc add dev $h1 clsact dscp_capture_install $h1 0 @@ -87,6 +91,7 @@ h2_destroy() dscp_map() { local base=$1; shift + local prio for prio in {0..7}; do echo app=$prio,5,$((base + prio)) @@ -156,6 +161,7 @@ dscp_ping_test() local reprio=$1; shift local dev1=$1; shift local dev2=$1; shift + local i local prio2=$($reprio $prio) # ICMP Request egress prio local prio3=$($reprio $prio2) # ICMP Response egress prio @@ -205,6 +211,7 @@ __test_update() { local update=$1; shift local reprio=$1; shift + local prio sysctl_restore net.ipv4.ip_forward_update_priority sysctl_set net.ipv4.ip_forward_update_priority $update @@ -224,6 +231,19 @@ test_no_update() __test_update 0 echo } +# Test that when the last APP rule is removed, the prio->DSCP map is properly +# set to zeroes, and that the last APP rule does not stay active in the ASIC. +test_dscp_leftover() +{ + lldptool -T -i $swp2 -V APP -d $(dscp_map 0) >/dev/null + lldpad_app_wait_del + + __test_update 0 zero + + lldptool -T -i $swp2 -V APP $(dscp_map 0) >/dev/null + lldpad_app_wait_set $swp2 +} + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 1b24e36b4047..70f2d6656170 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -10,9 +10,9 @@ TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh -TEST_PROGS += tcp_fastopen_backup_key.sh +TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh TEST_PROGS_EXTENDED := in_netns.sh -TEST_GEN_FILES = socket +TEST_GEN_FILES = socket nettest TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx ip_defrag diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh new file mode 100755 index 000000000000..bd6b564382ec --- /dev/null +++ b/tools/testing/selftests/net/fcnal-test.sh @@ -0,0 +1,3458 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (c) 2019 David Ahern <dsahern@gmail.com>. All rights reserved. +# +# IPv4 and IPv6 functional tests focusing on VRF and routing lookups +# for various permutations: +# 1. icmp, tcp, udp and netfilter +# 2. client, server, no-server +# 3. global address on interface +# 4. global address on 'lo' +# 5. remote and local traffic +# 6. 
VRF and non-VRF permutations +# +# Setup: +# ns-A | ns-B +# No VRF case: +# [ lo ] [ eth1 ]---|---[ eth1 ] [ lo ] +# remote address +# VRF case: +# [ red ]---[ eth1 ]---|---[ eth1 ] [ lo ] +# +# ns-A: +# eth1: 172.16.1.1/24, 2001:db8:1::1/64 +# lo: 127.0.0.1/8, ::1/128 +# 172.16.2.1/32, 2001:db8:2::1/128 +# red: 127.0.0.1/8, ::1/128 +# 172.16.3.1/32, 2001:db8:3::1/128 +# +# ns-B: +# eth1: 172.16.1.2/24, 2001:db8:1::2/64 +# lo2: 127.0.0.1/8, ::1/128 +# 172.16.2.2/32, 2001:db8:2::2/128 +# +# server / client nomenclature relative to ns-A + +VERBOSE=0 + +NSA_DEV=eth1 +NSB_DEV=eth1 +VRF=red +VRF_TABLE=1101 + +# IPv4 config +NSA_IP=172.16.1.1 +NSB_IP=172.16.1.2 +VRF_IP=172.16.3.1 + +# IPv6 config +NSA_IP6=2001:db8:1::1 +NSB_IP6=2001:db8:1::2 +VRF_IP6=2001:db8:3::1 + +NSA_LO_IP=172.16.2.1 +NSB_LO_IP=172.16.2.2 +NSA_LO_IP6=2001:db8:2::1 +NSB_LO_IP6=2001:db8:2::2 + +MCAST=ff02::1 +# set after namespace create +NSA_LINKIP6= +NSB_LINKIP6= + +NSA=ns-A +NSB=ns-B + +NSA_CMD="ip netns exec ${NSA}" +NSB_CMD="ip netns exec ${NSB}" + +which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping) + +################################################################################ +# utilities + +log_test() +{ + local rc=$1 + local expected=$2 + local msg="$3" + + [ "${VERBOSE}" = "1" ] && echo + + if [ ${rc} -eq ${expected} ]; then + nsuccess=$((nsuccess+1)) + printf "TEST: %-70s [ OK ]\n" "${msg}" + else + nfail=$((nfail+1)) + printf "TEST: %-70s [FAIL]\n" "${msg}" + if [ "${PAUSE_ON_FAIL}" = "yes" ]; then + echo + echo "hit enter to continue, 'q' to quit" + read a + [ "$a" = "q" ] && exit 1 + fi + fi + + if [ "${PAUSE}" = "yes" ]; then + echo + echo "hit enter to continue, 'q' to quit" + read a + [ "$a" = "q" ] && exit 1 + fi + + kill_procs +} + +log_test_addr() +{ + local addr=$1 + local rc=$2 + local expected=$3 + local msg="$4" + local astr + + astr=$(addr2str ${addr}) + log_test $rc $expected "$msg - ${astr}" +} + +log_section() +{ + echo + echo "###########################################################################" + echo "$*" + echo "###########################################################################" + echo +} + +log_subsection() +{ + echo + echo "#################################################################" + echo "$*" + echo +} + +log_start() +{ + # make sure we have no test instances running + kill_procs + + if [ "${VERBOSE}" = "1" ]; then + echo + echo "#######################################################" + fi +} + +log_debug() +{ + if [ "${VERBOSE}" = "1" ]; then + echo + echo "$*" + echo + fi +} + +show_hint() +{ + if [ "${VERBOSE}" = "1" ]; then + echo "HINT: $*" + echo + fi +} + +kill_procs() +{ + killall nettest ping ping6 >/dev/null 2>&1 + sleep 1 +} + +do_run_cmd() +{ + local cmd="$*" + local out + + if [ "$VERBOSE" = "1" ]; then + echo "COMMAND: ${cmd}" + fi + + out=$($cmd 2>&1) + rc=$? + if [ "$VERBOSE" = "1" -a -n "$out" ]; then + echo "$out" + fi + + return $rc +} + +run_cmd() +{ + do_run_cmd ${NSA_CMD} $* +} + +run_cmd_nsb() +{ + do_run_cmd ${NSB_CMD} $* +} + +setup_cmd() +{ + local cmd="$*" + local rc + + run_cmd ${cmd} + rc=$? + if [ $rc -ne 0 ]; then + # show user the command if not done so already + if [ "$VERBOSE" = "0" ]; then + echo "setup command: $cmd" + fi + echo "failed. stopping tests" + if [ "${PAUSE_ON_FAIL}" = "yes" ]; then + echo + echo "hit enter to continue" + read a + fi + exit $rc + fi +} + +setup_cmd_nsb() +{ + local cmd="$*" + local rc + + run_cmd_nsb ${cmd} + rc=$? 
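+	# same policy as setup_cmd above: a failed setup command is fatal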
+ if [ $rc -ne 0 ]; then + # show user the command if not done so already + if [ "$VERBOSE" = "0" ]; then + echo "setup command: $cmd" + fi + echo "failed. stopping tests" + if [ "${PAUSE_ON_FAIL}" = "yes" ]; then + echo + echo "hit enter to continue" + read a + fi + exit $rc + fi +} + +# set sysctl values in NS-A +set_sysctl() +{ + echo "SYSCTL: $*" + echo + run_cmd sysctl -q -w $* +} + +################################################################################ +# Setup for tests + +addr2str() +{ + case "$1" in + 127.0.0.1) echo "loopback";; + ::1) echo "IPv6 loopback";; + + ${NSA_IP}) echo "ns-A IP";; + ${NSA_IP6}) echo "ns-A IPv6";; + ${NSA_LO_IP}) echo "ns-A loopback IP";; + ${NSA_LO_IP6}) echo "ns-A loopback IPv6";; + ${NSA_LINKIP6}|${NSA_LINKIP6}%*) echo "ns-A IPv6 LLA";; + + ${NSB_IP}) echo "ns-B IP";; + ${NSB_IP6}) echo "ns-B IPv6";; + ${NSB_LO_IP}) echo "ns-B loopback IP";; + ${NSB_LO_IP6}) echo "ns-B loopback IPv6";; + ${NSB_LINKIP6}|${NSB_LINKIP6}%*) echo "ns-B IPv6 LLA";; + + ${VRF_IP}) echo "VRF IP";; + ${VRF_IP6}) echo "VRF IPv6";; + + ${MCAST}%*) echo "multicast IP";; + + *) echo "unknown";; + esac +} + +get_linklocal() +{ + local ns=$1 + local dev=$2 + local addr + + addr=$(ip -netns ${ns} -6 -br addr show dev ${dev} | \ + awk '{ + for (i = 3; i <= NF; ++i) { + if ($i ~ /^fe80/) + print $i + } + }' + ) + addr=${addr/\/*} + + [ -z "$addr" ] && return 1 + + echo $addr + + return 0 +} + +################################################################################ +# create namespaces and vrf + +create_vrf() +{ + local ns=$1 + local vrf=$2 + local table=$3 + local addr=$4 + local addr6=$5 + + ip -netns ${ns} link add ${vrf} type vrf table ${table} + ip -netns ${ns} link set ${vrf} up + ip -netns ${ns} route add vrf ${vrf} unreachable default metric 8192 + ip -netns ${ns} -6 route add vrf ${vrf} unreachable default metric 8192 + + ip -netns ${ns} addr add 127.0.0.1/8 dev ${vrf} + ip -netns ${ns} -6 addr add ::1 dev ${vrf} nodad + if [ "${addr}" != "-" ]; then + ip -netns ${ns} addr add dev ${vrf} ${addr} + fi + if [ "${addr6}" != "-" ]; then + ip -netns ${ns} -6 addr add dev ${vrf} ${addr6} + fi + + ip -netns ${ns} ru del pref 0 + ip -netns ${ns} ru add pref 32765 from all lookup local + ip -netns ${ns} -6 ru del pref 0 + ip -netns ${ns} -6 ru add pref 32765 from all lookup local +} + +create_ns() +{ + local ns=$1 + local addr=$2 + local addr6=$3 + + ip netns add ${ns} + + ip -netns ${ns} link set lo up + if [ "${addr}" != "-" ]; then + ip -netns ${ns} addr add dev lo ${addr} + fi + if [ "${addr6}" != "-" ]; then + ip -netns ${ns} -6 addr add dev lo ${addr6} + fi + + ip -netns ${ns} ro add unreachable default metric 8192 + ip -netns ${ns} -6 ro add unreachable default metric 8192 + + ip netns exec ${ns} sysctl -qw net.ipv4.ip_forward=1 + ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1 + ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.forwarding=1 + ip netns exec ${ns} sysctl -qw net.ipv6.conf.default.forwarding=1 +} + +# create veth pair to connect namespaces and apply addresses. 
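+# Example invocation (matching the call in setup() below):
+#   connect_ns ns-A eth1 172.16.1.1/24 2001:db8:1::1/64 \
+#              ns-B eth1 172.16.1.2/24 2001:db8:1::2/64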
+connect_ns() +{ + local ns1=$1 + local ns1_dev=$2 + local ns1_addr=$3 + local ns1_addr6=$4 + local ns2=$5 + local ns2_dev=$6 + local ns2_addr=$7 + local ns2_addr6=$8 + + ip -netns ${ns1} li add ${ns1_dev} type veth peer name tmp + ip -netns ${ns1} li set ${ns1_dev} up + ip -netns ${ns1} li set tmp netns ${ns2} name ${ns2_dev} + ip -netns ${ns2} li set ${ns2_dev} up + + if [ "${ns1_addr}" != "-" ]; then + ip -netns ${ns1} addr add dev ${ns1_dev} ${ns1_addr} + ip -netns ${ns2} addr add dev ${ns2_dev} ${ns2_addr} + fi + + if [ "${ns1_addr6}" != "-" ]; then + ip -netns ${ns1} addr add dev ${ns1_dev} ${ns1_addr6} + ip -netns ${ns2} addr add dev ${ns2_dev} ${ns2_addr6} + fi +} + +cleanup() +{ + # explicit cleanups to check those code paths + ip netns | grep -q ${NSA} + if [ $? -eq 0 ]; then + ip -netns ${NSA} link delete ${VRF} + ip -netns ${NSA} ro flush table ${VRF_TABLE} + + ip -netns ${NSA} addr flush dev ${NSA_DEV} + ip -netns ${NSA} -6 addr flush dev ${NSA_DEV} + ip -netns ${NSA} link set dev ${NSA_DEV} down + ip -netns ${NSA} link del dev ${NSA_DEV} + + ip netns del ${NSA} + fi + + ip netns del ${NSB} +} + +setup() +{ + local with_vrf=${1} + + # make sure we are starting with a clean slate + kill_procs + cleanup 2>/dev/null + + log_debug "Configuring network namespaces" + set -e + + create_ns ${NSA} ${NSA_LO_IP}/32 ${NSA_LO_IP6}/128 + create_ns ${NSB} ${NSB_LO_IP}/32 ${NSB_LO_IP6}/128 + connect_ns ${NSA} ${NSA_DEV} ${NSA_IP}/24 ${NSA_IP6}/64 \ + ${NSB} ${NSB_DEV} ${NSB_IP}/24 ${NSB_IP6}/64 + + NSA_LINKIP6=$(get_linklocal ${NSA} ${NSA_DEV}) + NSB_LINKIP6=$(get_linklocal ${NSB} ${NSB_DEV}) + + # tell ns-A how to get to remote addresses of ns-B + if [ "${with_vrf}" = "yes" ]; then + create_vrf ${NSA} ${VRF} ${VRF_TABLE} ${VRF_IP} ${VRF_IP6} + + ip -netns ${NSA} link set dev ${NSA_DEV} vrf ${VRF} + ip -netns ${NSA} ro add vrf ${VRF} ${NSB_LO_IP}/32 via ${NSB_IP} dev ${NSA_DEV} + ip -netns ${NSA} -6 ro add vrf ${VRF} ${NSB_LO_IP6}/128 via ${NSB_IP6} dev ${NSA_DEV} + + ip -netns ${NSB} ro add ${VRF_IP}/32 via ${NSA_IP} dev ${NSB_DEV} + ip -netns ${NSB} -6 ro add ${VRF_IP6}/128 via ${NSA_IP6} dev ${NSB_DEV} + else + ip -netns ${NSA} ro add ${NSB_LO_IP}/32 via ${NSB_IP} dev ${NSA_DEV} + ip -netns ${NSA} ro add ${NSB_LO_IP6}/128 via ${NSB_IP6} dev ${NSA_DEV} + fi + + + # tell ns-B how to get to remote addresses of ns-A + ip -netns ${NSB} ro add ${NSA_LO_IP}/32 via ${NSA_IP} dev ${NSB_DEV} + ip -netns ${NSB} ro add ${NSA_LO_IP6}/128 via ${NSA_IP6} dev ${NSB_DEV} + + set +e + + sleep 1 +} + +################################################################################ +# IPv4 + +ipv4_ping_novrf() +{ + local a + + # + # out + # + for a in ${NSB_IP} ${NSB_LO_IP} + do + log_start + run_cmd ping -c1 -w1 ${a} + log_test_addr ${a} $? 0 "ping out" + + log_start + run_cmd ping -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 0 "ping out, device bind" + + log_start + run_cmd ping -c1 -w1 -I ${NSA_LO_IP} ${a} + log_test_addr ${a} $? 0 "ping out, address bind" + done + + # + # in + # + for a in ${NSA_IP} ${NSA_LO_IP} + do + log_start + run_cmd_nsb ping -c1 -w1 ${a} + log_test_addr ${a} $? 0 "ping in" + done + + # + # local traffic + # + for a in ${NSA_IP} ${NSA_LO_IP} 127.0.0.1 + do + log_start + run_cmd ping -c1 -w1 ${a} + log_test_addr ${a} $? 0 "ping local" + done + + # + # local traffic, socket bound to device + # + # address on device + a=${NSA_IP} + log_start + run_cmd ping -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 
0 "ping local, device bind" + + # loopback addresses not reachable from device bind + # fails in a really weird way though because ipv4 special cases + # route lookups with oif set. + for a in ${NSA_LO_IP} 127.0.0.1 + do + log_start + show_hint "Fails since address on loopback device is out of device scope" + run_cmd ping -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 1 "ping local, device bind" + done + + # + # ip rule blocks reachability to remote address + # + log_start + setup_cmd ip rule add pref 32765 from all lookup local + setup_cmd ip rule del pref 0 from all lookup local + setup_cmd ip rule add pref 50 to ${NSB_LO_IP} prohibit + setup_cmd ip rule add pref 51 from ${NSB_IP} prohibit + + a=${NSB_LO_IP} + run_cmd ping -c1 -w1 ${a} + log_test_addr ${a} $? 2 "ping out, blocked by rule" + + # NOTE: ipv4 actually allows the lookup to fail and yet still create + # a viable rtable if the oif (e.g., bind to device) is set, so this + # case succeeds despite the rule + # run_cmd ping -c1 -w1 -I ${NSA_DEV} ${a} + + a=${NSA_LO_IP} + log_start + show_hint "Response generates ICMP (or arp request is ignored) due to ip rule" + run_cmd_nsb ping -c1 -w1 ${a} + log_test_addr ${a} $? 1 "ping in, blocked by rule" + + [ "$VERBOSE" = "1" ] && echo + setup_cmd ip rule del pref 32765 from all lookup local + setup_cmd ip rule add pref 0 from all lookup local + setup_cmd ip rule del pref 50 to ${NSB_LO_IP} prohibit + setup_cmd ip rule del pref 51 from ${NSB_IP} prohibit + + # + # route blocks reachability to remote address + # + log_start + setup_cmd ip route replace unreachable ${NSB_LO_IP} + setup_cmd ip route replace unreachable ${NSB_IP} + + a=${NSB_LO_IP} + run_cmd ping -c1 -w1 ${a} + log_test_addr ${a} $? 2 "ping out, blocked by route" + + # NOTE: ipv4 actually allows the lookup to fail and yet still create + # a viable rtable if the oif (e.g., bind to device) is set, so this + # case succeeds despite not having a route for the address + # run_cmd ping -c1 -w1 -I ${NSA_DEV} ${a} + + a=${NSA_LO_IP} + log_start + show_hint "Response is dropped (or arp request is ignored) due to ip route" + run_cmd_nsb ping -c1 -w1 ${a} + log_test_addr ${a} $? 1 "ping in, blocked by route" + + # + # remove 'remote' routes; fallback to default + # + log_start + setup_cmd ip ro del ${NSB_LO_IP} + + a=${NSB_LO_IP} + run_cmd ping -c1 -w1 ${a} + log_test_addr ${a} $? 2 "ping out, unreachable default route" + + # NOTE: ipv4 actually allows the lookup to fail and yet still create + # a viable rtable if the oif (e.g., bind to device) is set, so this + # case succeeds despite not having a route for the address + # run_cmd ping -c1 -w1 -I ${NSA_DEV} ${a} +} + +ipv4_ping_vrf() +{ + local a + + # should default on; does not exist on older kernels + set_sysctl net.ipv4.raw_l3mdev_accept=1 2>/dev/null + + # + # out + # + for a in ${NSB_IP} ${NSB_LO_IP} + do + log_start + run_cmd ping -c1 -w1 -I ${VRF} ${a} + log_test_addr ${a} $? 0 "ping out, VRF bind" + + log_start + run_cmd ping -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 0 "ping out, device bind" + + log_start + run_cmd ip vrf exec ${VRF} ping -c1 -w1 -I ${NSA_IP} ${a} + log_test_addr ${a} $? 0 "ping out, vrf device + dev address bind" + + log_start + run_cmd ip vrf exec ${VRF} ping -c1 -w1 -I ${VRF_IP} ${a} + log_test_addr ${a} $? 0 "ping out, vrf device + vrf address bind" + done + + # + # in + # + for a in ${NSA_IP} ${VRF_IP} + do + log_start + run_cmd_nsb ping -c1 -w1 ${a} + log_test_addr ${a} $? 
0 "ping in" + done + + # + # local traffic, local address + # + for a in ${NSA_IP} ${VRF_IP} 127.0.0.1 + do + log_start + show_hint "Source address should be ${a}" + run_cmd ping -c1 -w1 -I ${VRF} ${a} + log_test_addr ${a} $? 0 "ping local, VRF bind" + done + + # + # local traffic, socket bound to device + # + # address on device + a=${NSA_IP} + log_start + run_cmd ping -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 0 "ping local, device bind" + + # vrf device is out of scope + for a in ${VRF_IP} 127.0.0.1 + do + log_start + show_hint "Fails since address on vrf device is out of device scope" + run_cmd ping -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 1 "ping local, device bind" + done + + # + # ip rule blocks address + # + log_start + setup_cmd ip rule add pref 50 to ${NSB_LO_IP} prohibit + setup_cmd ip rule add pref 51 from ${NSB_IP} prohibit + + a=${NSB_LO_IP} + run_cmd ping -c1 -w1 -I ${VRF} ${a} + log_test_addr ${a} $? 2 "ping out, vrf bind, blocked by rule" + + log_start + run_cmd ping -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 2 "ping out, device bind, blocked by rule" + + a=${NSA_LO_IP} + log_start + show_hint "Response lost due to ip rule" + run_cmd_nsb ping -c1 -w1 ${a} + log_test_addr ${a} $? 1 "ping in, blocked by rule" + + [ "$VERBOSE" = "1" ] && echo + setup_cmd ip rule del pref 50 to ${NSB_LO_IP} prohibit + setup_cmd ip rule del pref 51 from ${NSB_IP} prohibit + + # + # remove 'remote' routes; fallback to default + # + log_start + setup_cmd ip ro del vrf ${VRF} ${NSB_LO_IP} + + a=${NSB_LO_IP} + run_cmd ping -c1 -w1 -I ${VRF} ${a} + log_test_addr ${a} $? 2 "ping out, vrf bind, unreachable route" + + log_start + run_cmd ping -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 2 "ping out, device bind, unreachable route" + + a=${NSA_LO_IP} + log_start + show_hint "Response lost by unreachable route" + run_cmd_nsb ping -c1 -w1 ${a} + log_test_addr ${a} $? 1 "ping in, unreachable route" +} + +ipv4_ping() +{ + log_section "IPv4 ping" + + log_subsection "No VRF" + setup + set_sysctl net.ipv4.raw_l3mdev_accept=0 2>/dev/null + ipv4_ping_novrf + setup + set_sysctl net.ipv4.raw_l3mdev_accept=1 2>/dev/null + ipv4_ping_novrf + + log_subsection "With VRF" + setup "yes" + ipv4_ping_vrf +} + +################################################################################ +# IPv4 TCP + +ipv4_tcp_novrf() +{ + local a + + # + # server tests + # + for a in ${NSA_IP} ${NSA_LO_IP} + do + log_start + run_cmd nettest -s & + sleep 1 + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 0 "Global server" + done + + a=${NSA_IP} + log_start + run_cmd nettest -s -d ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 0 "Device server" + + # verify TCP reset sent and received + for a in ${NSA_IP} ${NSA_LO_IP} + do + log_start + show_hint "Should fail 'Connection refused' since there is no server" + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 1 "No server" + done + + # + # client + # + for a in ${NSB_IP} ${NSB_LO_IP} + do + log_start + run_cmd_nsb nettest -s & + sleep 1 + run_cmd nettest -r ${a} -0 ${NSA_IP} + log_test_addr ${a} $? 0 "Client" + + log_start + run_cmd_nsb nettest -s & + sleep 1 + run_cmd nettest -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 0 "Client, device bind" + + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -r ${a} + log_test_addr ${a} $? 1 "No server, unbound client" + + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 
1 "No server, device client" + done + + # + # local address tests + # + for a in ${NSA_IP} ${NSA_LO_IP} 127.0.0.1 + do + log_start + run_cmd nettest -s & + sleep 1 + run_cmd nettest -r ${a} -0 ${a} -1 ${a} + log_test_addr ${a} $? 0 "Global server, local connection" + done + + a=${NSA_IP} + log_start + run_cmd nettest -s -d ${NSA_DEV} & + sleep 1 + run_cmd nettest -r ${a} -0 ${a} + log_test_addr ${a} $? 0 "Device server, unbound client, local connection" + + for a in ${NSA_LO_IP} 127.0.0.1 + do + log_start + show_hint "Should fail 'Connection refused' since addresses on loopback are out of device scope" + run_cmd nettest -s -d ${NSA_DEV} & + sleep 1 + run_cmd nettest -r ${a} + log_test_addr ${a} $? 1 "Device server, unbound client, local connection" + done + + a=${NSA_IP} + log_start + run_cmd nettest -s & + sleep 1 + run_cmd nettest -r ${a} -0 ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 0 "Global server, device client, local connection" + + for a in ${NSA_LO_IP} 127.0.0.1 + do + log_start + show_hint "Should fail 'No route to host' since addresses on loopback are out of device scope" + run_cmd nettest -s & + sleep 1 + run_cmd nettest -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 1 "Global server, device client, local connection" + done + + a=${NSA_IP} + log_start + run_cmd nettest -s -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -d ${NSA_DEV} -r ${a} -0 ${a} + log_test_addr ${a} $? 0 "Device server, device client, local connection" + + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 1 "No server, device client, local conn" +} + +ipv4_tcp_vrf() +{ + local a + + # disable global server + log_subsection "Global server disabled" + + set_sysctl net.ipv4.tcp_l3mdev_accept=0 + + # + # server tests + # + for a in ${NSA_IP} ${VRF_IP} + do + log_start + show_hint "Should fail 'Connection refused' since global server with VRF is disabled" + run_cmd nettest -s & + sleep 1 + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 1 "Global server" + + log_start + run_cmd nettest -s -d ${VRF} -2 ${VRF} & + sleep 1 + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 0 "VRF server" + + log_start + run_cmd nettest -s -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 0 "Device server" + + # verify TCP reset received + log_start + show_hint "Should fail 'Connection refused' since there is no server" + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 1 "No server" + done + + # local address tests + # (${VRF_IP} and 127.0.0.1 both timeout) + a=${NSA_IP} + log_start + show_hint "Should fail 'Connection refused' since global server with VRF is disabled" + run_cmd nettest -s & + sleep 1 + run_cmd nettest -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 1 "Global server, local connection" + + # + # enable VRF global server + # + log_subsection "VRF Global server enabled" + set_sysctl net.ipv4.tcp_l3mdev_accept=1 + + for a in ${NSA_IP} ${VRF_IP} + do + log_start + show_hint "client socket should be bound to VRF" + run_cmd nettest -s -2 ${VRF} & + sleep 1 + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 0 "Global server" + + log_start + show_hint "client socket should be bound to VRF" + run_cmd nettest -s -d ${VRF} -2 ${VRF} & + sleep 1 + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 0 "VRF server" + + # verify TCP reset received + log_start + show_hint "Should fail 'Connection refused'" + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 
1 "No server" + done + + a=${NSA_IP} + log_start + show_hint "client socket should be bound to device" + run_cmd nettest -s -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 0 "Device server" + + # local address tests + for a in ${NSA_IP} ${VRF_IP} + do + log_start + show_hint "Should fail 'No route to host' since client is not bound to VRF" + run_cmd nettest -s -2 ${VRF} & + sleep 1 + run_cmd nettest -r ${a} + log_test_addr ${a} $? 1 "Global server, local connection" + done + + # + # client + # + for a in ${NSB_IP} ${NSB_LO_IP} + do + log_start + run_cmd_nsb nettest -s & + sleep 1 + run_cmd nettest -r ${a} -d ${VRF} + log_test_addr ${a} $? 0 "Client, VRF bind" + + log_start + run_cmd_nsb nettest -s & + sleep 1 + run_cmd nettest -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 0 "Client, device bind" + + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -r ${a} -d ${VRF} + log_test_addr ${a} $? 1 "No server, VRF client" + + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 1 "No server, device client" + done + + for a in ${NSA_IP} ${VRF_IP} 127.0.0.1 + do + log_start + run_cmd nettest -s -d ${VRF} -2 ${VRF} & + sleep 1 + run_cmd nettest -r ${a} -d ${VRF} -0 ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local connection" + done + + a=${NSA_IP} + log_start + run_cmd nettest -s -d ${VRF} -2 ${VRF} & + sleep 1 + run_cmd nettest -r ${a} -d ${NSA_DEV} -0 ${a} + log_test_addr ${a} $? 0 "VRF server, device client, local connection" + + log_start + show_hint "Should fail 'No route to host' since client is out of VRF scope" + run_cmd nettest -s -d ${VRF} & + sleep 1 + run_cmd nettest -r ${a} + log_test_addr ${a} $? 1 "VRF server, unbound client, local connection" + + log_start + run_cmd nettest -s -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -r ${a} -d ${VRF} -0 ${a} + log_test_addr ${a} $? 0 "Device server, VRF client, local connection" + + log_start + run_cmd nettest -s -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -r ${a} -d ${NSA_DEV} -0 ${a} + log_test_addr ${a} $? 0 "Device server, device client, local connection" +} + +ipv4_tcp() +{ + log_section "IPv4/TCP" + + which nettest >/dev/null + if [ $? -ne 0 ]; then + log_error "nettest not found; skipping tests" + return + fi + + log_subsection "No VRF" + setup + + # tcp_l3mdev_accept should have no affect without VRF; + # run tests with it enabled and disabled to verify + log_subsection "tcp_l3mdev_accept disabled" + set_sysctl net.ipv4.tcp_l3mdev_accept=0 + ipv4_tcp_novrf + log_subsection "tcp_l3mdev_accept enabled" + set_sysctl net.ipv4.tcp_l3mdev_accept=1 + ipv4_tcp_novrf + + log_subsection "With VRF" + setup "yes" + ipv4_tcp_vrf +} + +################################################################################ +# IPv4 UDP + +ipv4_udp_novrf() +{ + local a + + # + # server tests + # + for a in ${NSA_IP} ${NSA_LO_IP} + do + log_start + run_cmd nettest -D -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 0 "Global server" + + log_start + show_hint "Should fail 'Connection refused' since there is no server" + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 1 "No server" + done + + a=${NSA_IP} + log_start + run_cmd nettest -D -d ${NSA_DEV} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 
0 "Device server" + + # + # client + # + for a in ${NSB_IP} ${NSB_LO_IP} + do + log_start + run_cmd_nsb nettest -D -s & + sleep 1 + run_cmd nettest -D -r ${a} -0 ${NSA_IP} + log_test_addr ${a} $? 0 "Client" + + log_start + run_cmd_nsb nettest -D -s & + sleep 1 + run_cmd nettest -D -r ${a} -d ${NSA_DEV} -0 ${NSA_IP} + log_test_addr ${a} $? 0 "Client, device bind" + + log_start + run_cmd_nsb nettest -D -s & + sleep 1 + run_cmd nettest -D -r ${a} -d ${NSA_DEV} -C -0 ${NSA_IP} + log_test_addr ${a} $? 0 "Client, device send via cmsg" + + log_start + run_cmd_nsb nettest -D -s & + sleep 1 + run_cmd nettest -D -r ${a} -d ${NSA_DEV} -S -0 ${NSA_IP} + log_test_addr ${a} $? 0 "Client, device bind via IP_UNICAST_IF" + + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -D -r ${a} + log_test_addr ${a} $? 1 "No server, unbound client" + + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -D -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 1 "No server, device client" + done + + # + # local address tests + # + for a in ${NSA_IP} ${NSA_LO_IP} 127.0.0.1 + do + log_start + run_cmd nettest -D -s & + sleep 1 + run_cmd nettest -D -r ${a} -0 ${a} -1 ${a} + log_test_addr ${a} $? 0 "Global server, local connection" + done + + a=${NSA_IP} + log_start + run_cmd nettest -s -D -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -D -r ${a} + log_test_addr ${a} $? 0 "Device server, unbound client, local connection" + + for a in ${NSA_LO_IP} 127.0.0.1 + do + log_start + show_hint "Should fail 'Connection refused' since address is out of device scope" + run_cmd nettest -s -D -d ${NSA_DEV} & + sleep 1 + run_cmd nettest -D -r ${a} + log_test_addr ${a} $? 1 "Device server, unbound client, local connection" + done + + a=${NSA_IP} + log_start + run_cmd nettest -s -D & + sleep 1 + run_cmd nettest -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Global server, device client, local connection" + + log_start + run_cmd nettest -s -D & + sleep 1 + run_cmd nettest -D -d ${NSA_DEV} -C -r ${a} + log_test_addr ${a} $? 0 "Global server, device send via cmsg, local connection" + + log_start + run_cmd nettest -s -D & + sleep 1 + run_cmd nettest -D -d ${NSA_DEV} -S -r ${a} + log_test_addr ${a} $? 0 "Global server, device client via IP_UNICAST_IF, local connection" + + # IPv4 with device bind has really weird behavior - it overrides the + # fib lookup, generates an rtable and tries to send the packet. This + # causes failures for local traffic at different places + for a in ${NSA_LO_IP} 127.0.0.1 + do + log_start + show_hint "Should fail since addresses on loopback are out of device scope" + run_cmd nettest -D -s & + sleep 1 + run_cmd nettest -D -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 2 "Global server, device client, local connection" + + log_start + show_hint "Should fail since addresses on loopback are out of device scope" + run_cmd nettest -D -s & + sleep 1 + run_cmd nettest -D -r ${a} -d ${NSA_DEV} -C + log_test_addr ${a} $? 1 "Global server, device send via cmsg, local connection" + + log_start + show_hint "Should fail since addresses on loopback are out of device scope" + run_cmd nettest -D -s & + sleep 1 + run_cmd nettest -D -r ${a} -d ${NSA_DEV} -S + log_test_addr ${a} $? 1 "Global server, device client via IP_UNICAST_IF, local connection" + done + + a=${NSA_IP} + log_start + run_cmd nettest -D -s -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -D -d ${NSA_DEV} -r ${a} -0 ${a} + log_test_addr ${a} $? 
0 "Device server, device client, local conn" + + log_start + run_cmd nettest -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 2 "No server, device client, local conn" +} + +ipv4_udp_vrf() +{ + local a + + # disable global server + log_subsection "Global server disabled" + set_sysctl net.ipv4.udp_l3mdev_accept=0 + + # + # server tests + # + for a in ${NSA_IP} ${VRF_IP} + do + log_start + show_hint "Fails because ingress is in a VRF and global server is disabled" + run_cmd nettest -D -s & + sleep 1 + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 1 "Global server" + + log_start + run_cmd nettest -D -d ${VRF} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 0 "VRF server" + + log_start + run_cmd nettest -D -d ${NSA_DEV} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server" + + log_start + show_hint "Should fail 'Connection refused' since there is no server" + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 1 "No server" + + log_start + show_hint "Should fail 'Connection refused' since global server is out of scope" + run_cmd nettest -D -s & + sleep 1 + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 1 "Global server, VRF client, local connection" + done + + a=${NSA_IP} + log_start + run_cmd nettest -s -D -d ${VRF} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local conn" + + log_start + run_cmd nettest -s -D -d ${VRF} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "VRF server, enslaved device client, local connection" + + a=${NSA_IP} + log_start + run_cmd nettest -s -D -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server, VRF client, local conn" + + log_start + run_cmd nettest -s -D -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server, device client, local conn" + + # enable global server + log_subsection "Global server enabled" + set_sysctl net.ipv4.udp_l3mdev_accept=1 + + # + # server tests + # + for a in ${NSA_IP} ${VRF_IP} + do + log_start + run_cmd nettest -D -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 0 "Global server" + + log_start + run_cmd nettest -D -d ${VRF} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 0 "VRF server" + + log_start + run_cmd nettest -D -d ${NSA_DEV} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server" + + log_start + show_hint "Should fail 'Connection refused'" + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 1 "No server" + done + + # + # client tests + # + log_start + run_cmd_nsb nettest -D -s & + sleep 1 + run_cmd nettest -d ${VRF} -D -r ${NSB_IP} -1 ${NSA_IP} + log_test $? 0 "VRF client" + + log_start + run_cmd_nsb nettest -D -s & + sleep 1 + run_cmd nettest -d ${NSA_DEV} -D -r ${NSB_IP} -1 ${NSA_IP} + log_test $? 0 "Enslaved device client" + + # negative test - should fail + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -D -d ${VRF} -r ${NSB_IP} + log_test $? 1 "No server, VRF client" + + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -D -d ${NSA_DEV} -r ${NSB_IP} + log_test $? 
1 "No server, enslaved device client" + + # + # local address tests + # + a=${NSA_IP} + log_start + run_cmd nettest -D -s -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Global server, VRF client, local conn" + + log_start + run_cmd nettest -s -D -d ${VRF} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local conn" + + log_start + run_cmd nettest -s -D -d ${VRF} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "VRF server, device client, local conn" + + log_start + run_cmd nettest -s -D -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server, VRF client, local conn" + + log_start + run_cmd nettest -s -D -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server, device client, local conn" + + for a in ${VRF_IP} 127.0.0.1 + do + log_start + run_cmd nettest -D -s -2 ${VRF} & + sleep 1 + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Global server, VRF client, local conn" + done + + for a in ${VRF_IP} 127.0.0.1 + do + log_start + run_cmd nettest -s -D -d ${VRF} -2 ${VRF} & + sleep 1 + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local conn" + done + + # negative test - should fail + # verifies ECONNREFUSED + for a in ${NSA_IP} ${VRF_IP} 127.0.0.1 + do + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 1 "No server, VRF client, local conn" + done +} + +ipv4_udp() +{ + which nettest >/dev/null + if [ $? -ne 0 ]; then + log_error "nettest not found; skipping tests" + return + fi + + log_section "IPv4/UDP" + log_subsection "No VRF" + + setup + + # udp_l3mdev_accept should have no affect without VRF; + # run tests with it enabled and disabled to verify + log_subsection "udp_l3mdev_accept disabled" + set_sysctl net.ipv4.udp_l3mdev_accept=0 + ipv4_udp_novrf + log_subsection "udp_l3mdev_accept enabled" + set_sysctl net.ipv4.udp_l3mdev_accept=1 + ipv4_udp_novrf + + log_subsection "With VRF" + setup "yes" + ipv4_udp_vrf +} + +################################################################################ +# IPv4 address bind +# +# verifies ability or inability to bind to an address / device + +ipv4_addr_bind_novrf() +{ + # + # raw socket + # + for a in ${NSA_IP} ${NSA_LO_IP} + do + log_start + run_cmd nettest -s -R -P icmp -l ${a} -b + log_test_addr ${a} $? 0 "Raw socket bind to local address" + + log_start + run_cmd nettest -s -R -P icmp -l ${a} -d ${NSA_DEV} -b + log_test_addr ${a} $? 0 "Raw socket bind to local address after device bind" + done + + # + # tcp sockets + # + a=${NSA_IP} + log_start + run_cmd nettest -l ${a} -r ${NSB_IP} -t1 -b + log_test_addr ${a} $? 0 "TCP socket bind to local address" + + log_start + run_cmd nettest -l ${a} -r ${NSB_IP} -d ${NSA_DEV} -t1 -b + log_test_addr ${a} $? 0 "TCP socket bind to local address after device bind" + + # Sadly, the kernel allows binding a socket to a device and then + # binding to an address not on the device. The only restriction + # is that the address is valid in the L3 domain. So this test + # passes when it really should not + #a=${NSA_LO_IP} + #log_start + #show_hint "Should fail with 'Cannot assign requested address'" + #run_cmd nettest -s -l ${a} -d ${NSA_DEV} -t1 -b + #log_test_addr ${a} $? 
1 "TCP socket bind to out of scope local address" +} + +ipv4_addr_bind_vrf() +{ + # + # raw socket + # + for a in ${NSA_IP} ${VRF_IP} + do + log_start + run_cmd nettest -s -R -P icmp -l ${a} -b + log_test_addr ${a} $? 0 "Raw socket bind to local address" + + log_start + run_cmd nettest -s -R -P icmp -l ${a} -d ${NSA_DEV} -b + log_test_addr ${a} $? 0 "Raw socket bind to local address after device bind" + log_start + run_cmd nettest -s -R -P icmp -l ${a} -d ${VRF} -b + log_test_addr ${a} $? 0 "Raw socket bind to local address after VRF bind" + done + + a=${NSA_LO_IP} + log_start + show_hint "Address on loopback is out of VRF scope" + run_cmd nettest -s -R -P icmp -l ${a} -d ${VRF} -b + log_test_addr ${a} $? 1 "Raw socket bind to out of scope address after VRF bind" + + # + # tcp sockets + # + for a in ${NSA_IP} ${VRF_IP} + do + log_start + run_cmd nettest -s -l ${a} -d ${VRF} -t1 -b + log_test_addr ${a} $? 0 "TCP socket bind to local address" + + log_start + run_cmd nettest -s -l ${a} -d ${NSA_DEV} -t1 -b + log_test_addr ${a} $? 0 "TCP socket bind to local address after device bind" + done + + a=${NSA_LO_IP} + log_start + show_hint "Address on loopback out of scope for VRF" + run_cmd nettest -s -l ${a} -d ${VRF} -t1 -b + log_test_addr ${a} $? 1 "TCP socket bind to invalid local address for VRF" + + log_start + show_hint "Address on loopback out of scope for device in VRF" + run_cmd nettest -s -l ${a} -d ${NSA_DEV} -t1 -b + log_test_addr ${a} $? 1 "TCP socket bind to invalid local address for device bind" +} + +ipv4_addr_bind() +{ + log_section "IPv4 address binds" + + log_subsection "No VRF" + setup + ipv4_addr_bind_novrf + + log_subsection "With VRF" + setup "yes" + ipv4_addr_bind_vrf +} + +################################################################################ +# IPv4 runtime tests + +ipv4_rt() +{ + local desc="$1" + local varg="$2" + local with_vrf="yes" + local a + + # + # server tests + # + for a in ${NSA_IP} ${VRF_IP} + do + log_start + run_cmd nettest ${varg} -s & + sleep 1 + run_cmd_nsb nettest ${varg} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, global server" + + setup ${with_vrf} + done + + for a in ${NSA_IP} ${VRF_IP} + do + log_start + run_cmd nettest ${varg} -s -d ${VRF} & + sleep 1 + run_cmd_nsb nettest ${varg} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, VRF server" + + setup ${with_vrf} + done + + a=${NSA_IP} + log_start + run_cmd nettest ${varg} -s -d ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest ${varg} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, enslaved device server" + + setup ${with_vrf} + + # + # client test + # + log_start + run_cmd_nsb nettest ${varg} -s & + sleep 1 + run_cmd nettest ${varg} -d ${VRF} -r ${NSB_IP} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, VRF client" + + setup ${with_vrf} + + log_start + run_cmd_nsb nettest ${varg} -s & + sleep 1 + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${NSB_IP} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, enslaved device client" + + setup ${with_vrf} + + # + # local address tests + # + for a in ${NSA_IP} ${VRF_IP} + do + log_start + run_cmd nettest ${varg} -s & + sleep 1 + run_cmd nettest ${varg} -d ${VRF} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, global server, VRF client, local" + + setup ${with_vrf} + done + + for a in ${NSA_IP} ${VRF_IP} 
+ do + log_start + run_cmd nettest ${varg} -d ${VRF} -s & + sleep 1 + run_cmd nettest ${varg} -d ${VRF} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, VRF server and client, local" + + setup ${with_vrf} + done + + a=${NSA_IP} + log_start + run_cmd nettest ${varg} -s & + sleep 1 + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, global server, enslaved device client, local" + + setup ${with_vrf} + + log_start + run_cmd nettest ${varg} -d ${VRF} -s & + sleep 1 + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, VRF server, enslaved device client, local" + + setup ${with_vrf} + + log_start + run_cmd nettest ${varg} -d ${NSA_DEV} -s & + sleep 1 + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, enslaved device server and client, local" +} + +ipv4_ping_rt() +{ + local with_vrf="yes" + local a + + for a in ${NSA_IP} ${VRF_IP} + do + log_start + run_cmd_nsb ping -f ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "Device delete with active traffic - ping in" + + setup ${with_vrf} + done + + a=${NSB_IP} + log_start + run_cmd ping -f -I ${VRF} ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "Device delete with active traffic - ping out" +} + +ipv4_runtime() +{ + log_section "Run time tests - ipv4" + + setup "yes" + ipv4_ping_rt + + setup "yes" + ipv4_rt "TCP active socket" "-n -1" + + setup "yes" + ipv4_rt "TCP passive socket" "-i" +} + +################################################################################ +# IPv6 + +ipv6_ping_novrf() +{ + local a + + # should not have an impact, but make a known state + set_sysctl net.ipv4.raw_l3mdev_accept=0 2>/dev/null + + # + # out + # + for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6}%${NSA_DEV} ${MCAST}%${NSA_DEV} + do + log_start + run_cmd ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 0 "ping out" + done + + for a in ${NSB_IP6} ${NSB_LO_IP6} + do + log_start + run_cmd ${ping6} -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 0 "ping out, device bind" + + log_start + run_cmd ${ping6} -c1 -w1 -I ${NSA_LO_IP6} ${a} + log_test_addr ${a} $? 0 "ping out, loopback address bind" + done + + # + # in + # + for a in ${NSA_IP6} ${NSA_LO_IP6} ${NSA_LINKIP6}%${NSB_DEV} ${MCAST}%${NSB_DEV} + do + log_start + run_cmd_nsb ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 0 "ping in" + done + + # + # local traffic, local address + # + for a in ${NSA_IP6} ${NSA_LO_IP6} ::1 ${NSA_LINKIP6}%${NSA_DEV} ${MCAST}%${NSA_DEV} + do + log_start + run_cmd ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 0 "ping local, no bind" + done + + for a in ${NSA_IP6} ${NSA_LINKIP6}%${NSA_DEV} ${MCAST}%${NSA_DEV} + do + log_start + run_cmd ${ping6} -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 0 "ping local, device bind" + done + + for a in ${NSA_LO_IP6} ::1 + do + log_start + show_hint "Fails since address on loopback is out of device scope" + run_cmd ${ping6} -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 
2 "ping local, device bind" + done + + # + # ip rule blocks address + # + log_start + setup_cmd ip -6 rule add pref 32765 from all lookup local + setup_cmd ip -6 rule del pref 0 from all lookup local + setup_cmd ip -6 rule add pref 50 to ${NSB_LO_IP6} prohibit + setup_cmd ip -6 rule add pref 51 from ${NSB_IP6} prohibit + + a=${NSB_LO_IP6} + run_cmd ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 2 "ping out, blocked by rule" + + log_start + run_cmd ${ping6} -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 2 "ping out, device bind, blocked by rule" + + a=${NSA_LO_IP6} + log_start + show_hint "Response lost due to ip rule" + run_cmd_nsb ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 1 "ping in, blocked by rule" + + setup_cmd ip -6 rule add pref 0 from all lookup local + setup_cmd ip -6 rule del pref 32765 from all lookup local + setup_cmd ip -6 rule del pref 50 to ${NSB_LO_IP6} prohibit + setup_cmd ip -6 rule del pref 51 from ${NSB_IP6} prohibit + + # + # route blocks reachability to remote address + # + log_start + setup_cmd ip -6 route del ${NSB_LO_IP6} + setup_cmd ip -6 route add unreachable ${NSB_LO_IP6} metric 10 + setup_cmd ip -6 route add unreachable ${NSB_IP6} metric 10 + + a=${NSB_LO_IP6} + run_cmd ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 2 "ping out, blocked by route" + + log_start + run_cmd ${ping6} -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 2 "ping out, device bind, blocked by route" + + a=${NSA_LO_IP6} + log_start + show_hint "Response lost due to ip route" + run_cmd_nsb ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 1 "ping in, blocked by route" + + + # + # remove 'remote' routes; fallback to default + # + log_start + setup_cmd ip -6 ro del unreachable ${NSB_LO_IP6} + setup_cmd ip -6 ro del unreachable ${NSB_IP6} + + a=${NSB_LO_IP6} + run_cmd ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 2 "ping out, unreachable route" + + log_start + run_cmd ${ping6} -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 2 "ping out, device bind, unreachable route" +} + +ipv6_ping_vrf() +{ + local a + + # should default on; does not exist on older kernels + set_sysctl net.ipv4.raw_l3mdev_accept=1 2>/dev/null + + # + # out + # + for a in ${NSB_IP6} ${NSB_LO_IP6} + do + log_start + run_cmd ${ping6} -c1 -w1 -I ${VRF} ${a} + log_test_addr ${a} $? 0 "ping out, VRF bind" + done + + for a in ${NSB_LINKIP6}%${VRF} ${MCAST}%${VRF} + do + log_start + show_hint "Fails since VRF device does not support linklocal or multicast" + run_cmd ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 2 "ping out, VRF bind" + done + + for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6}%${NSA_DEV} ${MCAST}%${NSA_DEV} + do + log_start + run_cmd ${ping6} -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 0 "ping out, device bind" + done + + for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6}%${NSA_DEV} + do + log_start + run_cmd ip vrf exec ${VRF} ${ping6} -c1 -w1 -I ${VRF_IP6} ${a} + log_test_addr ${a} $? 0 "ping out, vrf device+address bind" + done + + # + # in + # + for a in ${NSA_IP6} ${VRF_IP6} ${NSA_LINKIP6}%${NSB_DEV} ${MCAST}%${NSB_DEV} + do + log_start + run_cmd_nsb ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 0 "ping in" + done + + a=${NSA_LO_IP6} + log_start + show_hint "Fails since loopback address is out of VRF scope" + run_cmd_nsb ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 1 "ping in" + + # + # local traffic, local address + # + for a in ${NSA_IP6} ${VRF_IP6} ::1 + do + log_start + show_hint "Source address should be ${a}" + run_cmd ${ping6} -c1 -w1 -I ${VRF} ${a} + log_test_addr ${a} $? 
0 "ping local, VRF bind" + done + + for a in ${NSA_IP6} ${NSA_LINKIP6}%${NSA_DEV} ${MCAST}%${NSA_DEV} + do + log_start + run_cmd ${ping6} -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 0 "ping local, device bind" + done + + # LLA to GUA - remove ipv6 global addresses from ns-B + setup_cmd_nsb ip -6 addr del ${NSB_IP6}/64 dev ${NSB_DEV} + setup_cmd_nsb ip -6 addr del ${NSB_LO_IP6}/128 dev lo + setup_cmd_nsb ip -6 ro add ${NSA_IP6}/128 via ${NSA_LINKIP6} dev ${NSB_DEV} + + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd_nsb ${ping6} -c1 -w1 ${NSA_IP6} + log_test_addr ${a} $? 0 "ping in, LLA to GUA" + done + + setup_cmd_nsb ip -6 ro del ${NSA_IP6}/128 via ${NSA_LINKIP6} dev ${NSB_DEV} + setup_cmd_nsb ip -6 addr add ${NSB_IP6}/64 dev ${NSB_DEV} + setup_cmd_nsb ip -6 addr add ${NSB_LO_IP6}/128 dev lo + + # + # ip rule blocks address + # + log_start + setup_cmd ip -6 rule add pref 50 to ${NSB_LO_IP6} prohibit + setup_cmd ip -6 rule add pref 51 from ${NSB_IP6} prohibit + + a=${NSB_LO_IP6} + run_cmd ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 2 "ping out, blocked by rule" + + log_start + run_cmd ${ping6} -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 2 "ping out, device bind, blocked by rule" + + a=${NSA_LO_IP6} + log_start + show_hint "Response lost due to ip rule" + run_cmd_nsb ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 1 "ping in, blocked by rule" + + log_start + setup_cmd ip -6 rule del pref 50 to ${NSB_LO_IP6} prohibit + setup_cmd ip -6 rule del pref 51 from ${NSB_IP6} prohibit + + # + # remove 'remote' routes; fallback to default + # + log_start + setup_cmd ip -6 ro del ${NSB_LO_IP6} vrf ${VRF} + + a=${NSB_LO_IP6} + run_cmd ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 2 "ping out, unreachable route" + + log_start + run_cmd ${ping6} -c1 -w1 -I ${NSA_DEV} ${a} + log_test_addr ${a} $? 2 "ping out, device bind, unreachable route" + + ip -netns ${NSB} -6 ro del ${NSA_LO_IP6} + a=${NSA_LO_IP6} + log_start + run_cmd_nsb ${ping6} -c1 -w1 ${a} + log_test_addr ${a} $? 2 "ping in, unreachable route" +} + +ipv6_ping() +{ + log_section "IPv6 ping" + + log_subsection "No VRF" + setup + ipv6_ping_novrf + + log_subsection "With VRF" + setup "yes" + ipv6_ping_vrf +} + +################################################################################ +# IPv6 TCP + +ipv6_tcp_novrf() +{ + local a + + # + # server tests + # + for a in ${NSA_IP6} ${NSA_LO_IP6} ${NSA_LINKIP6}%${NSB_DEV} + do + log_start + run_cmd nettest -6 -s & + sleep 1 + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "Global server" + done + + # verify TCP reset received + for a in ${NSA_IP6} ${NSA_LO_IP6} ${NSA_LINKIP6}%${NSB_DEV} + do + log_start + show_hint "Should fail 'Connection refused'" + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 1 "No server" + done + + # + # client + # + for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6}%${NSA_DEV} + do + log_start + run_cmd_nsb nettest -6 -s & + sleep 1 + run_cmd nettest -6 -r ${a} + log_test_addr ${a} $? 0 "Client" + done + + for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6}%${NSA_DEV} + do + log_start + run_cmd_nsb nettest -6 -s & + sleep 1 + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 0 "Client, device bind" + done + + for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6}%${NSA_DEV} + do + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 
1 "No server, device client" + done + + # + # local address tests + # + for a in ${NSA_IP6} ${NSA_LO_IP6} ::1 + do + log_start + run_cmd nettest -6 -s & + sleep 1 + run_cmd nettest -6 -r ${a} + log_test_addr ${a} $? 0 "Global server, local connection" + done + + a=${NSA_IP6} + log_start + run_cmd nettest -6 -s -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -r ${a} -0 ${a} + log_test_addr ${a} $? 0 "Device server, unbound client, local connection" + + for a in ${NSA_LO_IP6} ::1 + do + log_start + show_hint "Should fail 'Connection refused' since addresses on loopback are out of device scope" + run_cmd nettest -6 -s -d ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -r ${a} + log_test_addr ${a} $? 1 "Device server, unbound client, local connection" + done + + a=${NSA_IP6} + log_start + run_cmd nettest -6 -s & + sleep 1 + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} -0 ${a} + log_test_addr ${a} $? 0 "Global server, device client, local connection" + + for a in ${NSA_LO_IP6} ::1 + do + log_start + show_hint "Should fail 'Connection refused' since addresses on loopback are out of device scope" + run_cmd nettest -6 -s & + sleep 1 + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 1 "Global server, device client, local connection" + done + + for a in ${NSA_IP6} ${NSA_LINKIP6} + do + log_start + run_cmd nettest -6 -s -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Device server, device client, local conn" + done + + for a in ${NSA_IP6} ${NSA_LINKIP6} + do + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -6 -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 1 "No server, device client, local conn" + done +} + +ipv6_tcp_vrf() +{ + local a + + # disable global server + log_subsection "Global server disabled" + + set_sysctl net.ipv4.tcp_l3mdev_accept=0 + + # + # server tests + # + for a in ${NSA_IP6} ${VRF_IP6} ${NSA_LINKIP6}%${NSB_DEV} + do + log_start + show_hint "Should fail 'Connection refused' since global server with VRF is disabled" + run_cmd nettest -6 -s & + sleep 1 + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 1 "Global server" + done + + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest -6 -s -d ${VRF} -2 ${VRF} & + sleep 1 + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "VRF server" + done + + # link local is always bound to ingress device + a=${NSA_LINKIP6}%${NSB_DEV} + log_start + run_cmd nettest -6 -s -d ${VRF} -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "VRF server" + + for a in ${NSA_IP6} ${VRF_IP6} ${NSA_LINKIP6}%${NSB_DEV} + do + log_start + run_cmd nettest -6 -s -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "Device server" + done + + # verify TCP reset received + for a in ${NSA_IP6} ${VRF_IP6} ${NSA_LINKIP6}%${NSB_DEV} + do + log_start + show_hint "Should fail 'Connection refused'" + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 1 "No server" + done + + # local address tests + a=${NSA_IP6} + log_start + show_hint "Should fail 'Connection refused' since global server with VRF is disabled" + run_cmd nettest -6 -s & + sleep 1 + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 
1 "Global server, local connection" + + # + # enable VRF global server + # + log_subsection "VRF Global server enabled" + set_sysctl net.ipv4.tcp_l3mdev_accept=1 + + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest -6 -s -2 ${VRF} & + sleep 1 + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "Global server" + done + + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest -6 -s -d ${VRF} -2 ${VRF} & + sleep 1 + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "VRF server" + done + + # For LLA, child socket is bound to device + a=${NSA_LINKIP6}%${NSB_DEV} + log_start + run_cmd nettest -6 -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "Global server" + + log_start + run_cmd nettest -6 -s -d ${VRF} -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "VRF server" + + for a in ${NSA_IP6} ${NSA_LINKIP6}%${NSB_DEV} + do + log_start + run_cmd nettest -6 -s -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "Device server" + done + + # verify TCP reset received + for a in ${NSA_IP6} ${VRF_IP6} ${NSA_LINKIP6}%${NSB_DEV} + do + log_start + show_hint "Should fail 'Connection refused'" + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 1 "No server" + done + + # local address tests + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + show_hint "Fails 'No route to host' since client is not in VRF" + run_cmd nettest -6 -s -2 ${VRF} & + sleep 1 + run_cmd nettest -6 -r ${a} + log_test_addr ${a} $? 1 "Global server, local connection" + done + + + # + # client + # + for a in ${NSB_IP6} ${NSB_LO_IP6} + do + log_start + run_cmd_nsb nettest -6 -s & + sleep 1 + run_cmd nettest -6 -r ${a} -d ${VRF} + log_test_addr ${a} $? 0 "Client, VRF bind" + done + + a=${NSB_LINKIP6} + log_start + show_hint "Fails since VRF device does not allow linklocal addresses" + run_cmd_nsb nettest -6 -s & + sleep 1 + run_cmd nettest -6 -r ${a} -d ${VRF} + log_test_addr ${a} $? 1 "Client, VRF bind" + + for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6} + do + log_start + run_cmd_nsb nettest -6 -s & + sleep 1 + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 0 "Client, device bind" + done + + for a in ${NSB_IP6} ${NSB_LO_IP6} + do + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -6 -r ${a} -d ${VRF} + log_test_addr ${a} $? 1 "No server, VRF client" + done + + for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6} + do + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 1 "No server, device client" + done + + for a in ${NSA_IP6} ${VRF_IP6} ::1 + do + log_start + run_cmd nettest -6 -s -d ${VRF} -2 ${VRF} & + sleep 1 + run_cmd nettest -6 -r ${a} -d ${VRF} -0 ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local connection" + done + + a=${NSA_IP6} + log_start + run_cmd nettest -6 -s -d ${VRF} -2 ${VRF} & + sleep 1 + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} -0 ${a} + log_test_addr ${a} $? 0 "VRF server, device client, local connection" + + a=${NSA_IP6} + log_start + show_hint "Should fail since unbound client is out of VRF scope" + run_cmd nettest -6 -s -d ${VRF} & + sleep 1 + run_cmd nettest -6 -r ${a} + log_test_addr ${a} $? 1 "VRF server, unbound client, local connection" + + log_start + run_cmd nettest -6 -s -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -r ${a} -d ${VRF} -0 ${a} + log_test_addr ${a} $? 
0 "Device server, VRF client, local connection" + + for a in ${NSA_IP6} ${NSA_LINKIP6} + do + log_start + run_cmd nettest -6 -s -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} -0 ${a} + log_test_addr ${a} $? 0 "Device server, device client, local connection" + done +} + +ipv6_tcp() +{ + log_section "IPv6/TCP" + + which nettest >/dev/null + if [ $? -ne 0 ]; then + log_error "nettest not found; skipping tests" + return + fi + + log_subsection "No VRF" + setup + + # tcp_l3mdev_accept should have no affect without VRF; + # run tests with it enabled and disabled to verify + log_subsection "tcp_l3mdev_accept disabled" + set_sysctl net.ipv4.tcp_l3mdev_accept=0 + ipv6_tcp_novrf + log_subsection "tcp_l3mdev_accept enabled" + set_sysctl net.ipv4.tcp_l3mdev_accept=1 + ipv6_tcp_novrf + + log_subsection "With VRF" + setup "yes" + ipv6_tcp_vrf +} + +################################################################################ +# IPv6 UDP + +ipv6_udp_novrf() +{ + local a + + # + # server tests + # + for a in ${NSA_IP6} ${NSA_LINKIP6}%${NSB_DEV} + do + log_start + run_cmd nettest -6 -D -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "Global server" + + log_start + run_cmd nettest -6 -D -d ${NSA_DEV} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "Device server" + done + + a=${NSA_LO_IP6} + log_start + run_cmd nettest -6 -D -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "Global server" + + # should fail since loopback address is out of scope for a device + # bound server, but it does not - hence this is more documenting + # behavior. + #log_start + #show_hint "Should fail since loopback address is out of scope" + #run_cmd nettest -6 -D -d ${NSA_DEV} -s -2 ${NSA_DEV} & + #sleep 1 + #run_cmd_nsb nettest -6 -D -r ${a} + #log_test_addr ${a} $? 1 "Device server" + + # negative test - should fail + for a in ${NSA_IP6} ${NSA_LO_IP6} ${NSA_LINKIP6}%${NSB_DEV} + do + log_start + show_hint "Should fail 'Connection refused' since there is no server" + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 1 "No server" + done + + # + # client + # + for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6}%${NSA_DEV} + do + log_start + run_cmd_nsb nettest -6 -D -s & + sleep 1 + run_cmd nettest -6 -D -r ${a} -0 ${NSA_IP6} + log_test_addr ${a} $? 0 "Client" + + log_start + run_cmd_nsb nettest -6 -D -s & + sleep 1 + run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -0 ${NSA_IP6} + log_test_addr ${a} $? 0 "Client, device bind" + + log_start + run_cmd_nsb nettest -6 -D -s & + sleep 1 + run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -C -0 ${NSA_IP6} + log_test_addr ${a} $? 0 "Client, device send via cmsg" + + log_start + run_cmd_nsb nettest -6 -D -s & + sleep 1 + run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -S -0 ${NSA_IP6} + log_test_addr ${a} $? 0 "Client, device bind via IPV6_UNICAST_IF" + + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -6 -D -r ${a} + log_test_addr ${a} $? 1 "No server, unbound client" + + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 1 "No server, device client" + done + + # + # local address tests + # + for a in ${NSA_IP6} ${NSA_LO_IP6} ::1 + do + log_start + run_cmd nettest -6 -D -s & + sleep 1 + run_cmd nettest -6 -D -r ${a} -0 ${a} -1 ${a} + log_test_addr ${a} $? 
0 "Global server, local connection" + done + + a=${NSA_IP6} + log_start + run_cmd nettest -6 -s -D -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "Device server, unbound client, local connection" + + for a in ${NSA_LO_IP6} ::1 + do + log_start + show_hint "Should fail 'Connection refused' since address is out of device scope" + run_cmd nettest -6 -s -D -d ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -D -r ${a} + log_test_addr ${a} $? 1 "Device server, local connection" + done + + a=${NSA_IP6} + log_start + run_cmd nettest -6 -s -D & + sleep 1 + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Global server, device client, local connection" + + log_start + run_cmd nettest -6 -s -D & + sleep 1 + run_cmd nettest -6 -D -d ${NSA_DEV} -C -r ${a} + log_test_addr ${a} $? 0 "Global server, device send via cmsg, local connection" + + log_start + run_cmd nettest -6 -s -D & + sleep 1 + run_cmd nettest -6 -D -d ${NSA_DEV} -S -r ${a} + log_test_addr ${a} $? 0 "Global server, device client via IPV6_UNICAST_IF, local connection" + + for a in ${NSA_LO_IP6} ::1 + do + log_start + show_hint "Should fail 'No route to host' since addresses on loopback are out of device scope" + run_cmd nettest -6 -D -s & + sleep 1 + run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 1 "Global server, device client, local connection" + + log_start + show_hint "Should fail 'No route to host' since addresses on loopback are out of device scope" + run_cmd nettest -6 -D -s & + sleep 1 + run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -C + log_test_addr ${a} $? 1 "Global server, device send via cmsg, local connection" + + log_start + show_hint "Should fail 'No route to host' since addresses on loopback are out of device scope" + run_cmd nettest -6 -D -s & + sleep 1 + run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -S + log_test_addr ${a} $? 1 "Global server, device client via IP_UNICAST_IF, local connection" + done + + a=${NSA_IP6} + log_start + run_cmd nettest -6 -D -s -d ${NSA_DEV} -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} -0 ${a} + log_test_addr ${a} $? 0 "Device server, device client, local conn" + + log_start + show_hint "Should fail 'Connection refused'" + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 1 "No server, device client, local conn" + + # LLA to GUA + run_cmd_nsb ip -6 addr del ${NSB_IP6}/64 dev ${NSB_DEV} + run_cmd_nsb ip -6 ro add ${NSA_IP6}/128 dev ${NSB_DEV} + log_start + run_cmd nettest -6 -s -D & + sleep 1 + run_cmd_nsb nettest -6 -D -r ${NSA_IP6} + log_test $? 0 "UDP in - LLA to GUA" + + run_cmd_nsb ip -6 ro del ${NSA_IP6}/128 dev ${NSB_DEV} + run_cmd_nsb ip -6 addr add ${NSB_IP6}/64 dev ${NSB_DEV} nodad +} + +ipv6_udp_vrf() +{ + local a + + # disable global server + log_subsection "Global server disabled" + set_sysctl net.ipv4.udp_l3mdev_accept=0 + + # + # server tests + # + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + show_hint "Should fail 'Connection refused' since global server is disabled" + run_cmd nettest -6 -D -s & + sleep 1 + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 1 "Global server" + done + + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest -6 -D -d ${VRF} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 
0 "VRF server" + done + + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest -6 -D -d ${NSA_DEV} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server" + done + + # negative test - should fail + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + show_hint "Should fail 'Connection refused' since there is no server" + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 1 "No server" + done + + # + # local address tests + # + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + show_hint "Should fail 'Connection refused' since global server is disabled" + run_cmd nettest -6 -D -s & + sleep 1 + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 1 "Global server, VRF client, local conn" + done + + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest -6 -D -d ${VRF} -s & + sleep 1 + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local conn" + done + + a=${NSA_IP6} + log_start + show_hint "Should fail 'Connection refused' since global server is disabled" + run_cmd nettest -6 -D -s & + sleep 1 + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 1 "Global server, device client, local conn" + + log_start + run_cmd nettest -6 -D -d ${VRF} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "VRF server, device client, local conn" + + log_start + run_cmd nettest -6 -D -d ${NSA_DEV} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server, VRF client, local conn" + + log_start + run_cmd nettest -6 -D -d ${NSA_DEV} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server, device client, local conn" + + # disable global server + log_subsection "Global server enabled" + set_sysctl net.ipv4.udp_l3mdev_accept=1 + + # + # server tests + # + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest -6 -D -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "Global server" + done + + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest -6 -D -d ${VRF} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "VRF server" + done + + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest -6 -D -d ${NSA_DEV} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server" + done + + # negative test - should fail + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 1 "No server" + done + + # + # client tests + # + log_start + run_cmd_nsb nettest -6 -D -s & + sleep 1 + run_cmd nettest -6 -D -d ${VRF} -r ${NSB_IP6} + log_test $? 0 "VRF client" + + # negative test - should fail + log_start + run_cmd nettest -6 -D -d ${VRF} -r ${NSB_IP6} + log_test $? 1 "No server, VRF client" + + log_start + run_cmd_nsb nettest -6 -D -s & + sleep 1 + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${NSB_IP6} + log_test $? 0 "Enslaved device client" + + # negative test - should fail + log_start + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${NSB_IP6} + log_test $? 1 "No server, enslaved device client" + + # + # local address tests + # + a=${NSA_IP6} + log_start + run_cmd nettest -6 -D -s -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 
0 "Global server, VRF client, local conn" + + #log_start + run_cmd nettest -6 -D -d ${VRF} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local conn" + + + a=${VRF_IP6} + log_start + run_cmd nettest -6 -D -s -2 ${VRF} & + sleep 1 + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Global server, VRF client, local conn" + + log_start + run_cmd nettest -6 -D -d ${VRF} -s -2 ${VRF} & + sleep 1 + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local conn" + + # negative test - should fail + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 1 "No server, VRF client, local conn" + done + + # device to global IP + a=${NSA_IP6} + log_start + run_cmd nettest -6 -D -s -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Global server, device client, local conn" + + log_start + run_cmd nettest -6 -D -d ${VRF} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "VRF server, device client, local conn" + + log_start + run_cmd nettest -6 -D -d ${NSA_DEV} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Device server, VRF client, local conn" + + log_start + run_cmd nettest -6 -D -d ${NSA_DEV} -s -2 ${NSA_DEV} & + sleep 1 + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Device server, device client, local conn" + + log_start + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 1 "No server, device client, local conn" + + + # link local addresses + log_start + run_cmd nettest -6 -D -s & + sleep 1 + run_cmd_nsb nettest -6 -D -d ${NSB_DEV} -r ${NSA_LINKIP6} + log_test $? 0 "Global server, linklocal IP" + + log_start + run_cmd_nsb nettest -6 -D -d ${NSB_DEV} -r ${NSA_LINKIP6} + log_test $? 1 "No server, linklocal IP" + + + log_start + run_cmd_nsb nettest -6 -D -s & + sleep 1 + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${NSB_LINKIP6} + log_test $? 0 "Enslaved device client, linklocal IP" + + log_start + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${NSB_LINKIP6} + log_test $? 1 "No server, device client, peer linklocal IP" + + + log_start + run_cmd nettest -6 -D -s & + sleep 1 + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${NSA_LINKIP6} + log_test $? 0 "Enslaved device client, local conn - linklocal IP" + + log_start + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${NSA_LINKIP6} + log_test $? 1 "No server, device client, local conn - linklocal IP" + + # LLA to GUA + run_cmd_nsb ip -6 addr del ${NSB_IP6}/64 dev ${NSB_DEV} + run_cmd_nsb ip -6 ro add ${NSA_IP6}/128 dev ${NSB_DEV} + log_start + run_cmd nettest -6 -s -D & + sleep 1 + run_cmd_nsb nettest -6 -D -r ${NSA_IP6} + log_test $? 
0 "UDP in - LLA to GUA" + + run_cmd_nsb ip -6 ro del ${NSA_IP6}/128 dev ${NSB_DEV} + run_cmd_nsb ip -6 addr add ${NSB_IP6}/64 dev ${NSB_DEV} nodad +} + +ipv6_udp() +{ + # should not matter, but set to known state + set_sysctl net.ipv4.udp_early_demux=1 + + log_section "IPv6/UDP" + log_subsection "No VRF" + setup + + # udp_l3mdev_accept should have no affect without VRF; + # run tests with it enabled and disabled to verify + log_subsection "udp_l3mdev_accept disabled" + set_sysctl net.ipv4.udp_l3mdev_accept=0 + ipv6_udp_novrf + log_subsection "udp_l3mdev_accept enabled" + set_sysctl net.ipv4.udp_l3mdev_accept=1 + ipv6_udp_novrf + + log_subsection "With VRF" + setup "yes" + ipv6_udp_vrf +} + +################################################################################ +# IPv6 address bind + +ipv6_addr_bind_novrf() +{ + # + # raw socket + # + for a in ${NSA_IP6} ${NSA_LO_IP6} + do + log_start + run_cmd nettest -6 -s -R -P ipv6-icmp -l ${a} -b + log_test_addr ${a} $? 0 "Raw socket bind to local address" + + log_start + run_cmd nettest -6 -s -R -P ipv6-icmp -l ${a} -d ${NSA_DEV} -b + log_test_addr ${a} $? 0 "Raw socket bind to local address after device bind" + done + + # + # tcp sockets + # + a=${NSA_IP6} + log_start + run_cmd nettest -6 -s -l ${a} -t1 -b + log_test_addr ${a} $? 0 "TCP socket bind to local address" + + log_start + run_cmd nettest -6 -s -l ${a} -d ${NSA_DEV} -t1 -b + log_test_addr ${a} $? 0 "TCP socket bind to local address after device bind" + + a=${NSA_LO_IP6} + log_start + show_hint "Should fail with 'Cannot assign requested address'" + run_cmd nettest -6 -s -l ${a} -d ${NSA_DEV} -t1 -b + log_test_addr ${a} $? 1 "TCP socket bind to out of scope local address" +} + +ipv6_addr_bind_vrf() +{ + # + # raw socket + # + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest -6 -s -R -P ipv6-icmp -l ${a} -d ${VRF} -b + log_test_addr ${a} $? 0 "Raw socket bind to local address after vrf bind" + + log_start + run_cmd nettest -6 -s -R -P ipv6-icmp -l ${a} -d ${NSA_DEV} -b + log_test_addr ${a} $? 0 "Raw socket bind to local address after device bind" + done + + a=${NSA_LO_IP6} + log_start + show_hint "Address on loopback is out of VRF scope" + run_cmd nettest -6 -s -R -P ipv6-icmp -l ${a} -d ${VRF} -b + log_test_addr ${a} $? 1 "Raw socket bind to invalid local address after vrf bind" + + # + # tcp sockets + # + # address on enslaved device is valid for the VRF or device in a VRF + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest -6 -s -l ${a} -d ${VRF} -t1 -b + log_test_addr ${a} $? 0 "TCP socket bind to local address with VRF bind" + done + + a=${NSA_IP6} + log_start + run_cmd nettest -6 -s -l ${a} -d ${NSA_DEV} -t1 -b + log_test_addr ${a} $? 0 "TCP socket bind to local address with device bind" + + a=${VRF_IP6} + log_start + run_cmd nettest -6 -s -l ${a} -d ${NSA_DEV} -t1 -b + log_test_addr ${a} $? 1 "TCP socket bind to VRF address with device bind" + + a=${NSA_LO_IP6} + log_start + show_hint "Address on loopback out of scope for VRF" + run_cmd nettest -6 -s -l ${a} -d ${VRF} -t1 -b + log_test_addr ${a} $? 1 "TCP socket bind to invalid local address for VRF" + + log_start + show_hint "Address on loopback out of scope for device in VRF" + run_cmd nettest -6 -s -l ${a} -d ${NSA_DEV} -t1 -b + log_test_addr ${a} $? 
1 "TCP socket bind to invalid local address for device bind" + +} + +ipv6_addr_bind() +{ + log_section "IPv6 address binds" + + log_subsection "No VRF" + setup + ipv6_addr_bind_novrf + + log_subsection "With VRF" + setup "yes" + ipv6_addr_bind_vrf +} + +################################################################################ +# IPv6 runtime tests + +ipv6_rt() +{ + local desc="$1" + local varg="-6 $2" + local with_vrf="yes" + local a + + # + # server tests + # + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest ${varg} -s & + sleep 1 + run_cmd_nsb nettest ${varg} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, global server" + + setup ${with_vrf} + done + + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest ${varg} -d ${VRF} -s & + sleep 1 + run_cmd_nsb nettest ${varg} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, VRF server" + + setup ${with_vrf} + done + + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest ${varg} -d ${NSA_DEV} -s & + sleep 1 + run_cmd_nsb nettest ${varg} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, enslaved device server" + + setup ${with_vrf} + done + + # + # client test + # + log_start + run_cmd_nsb nettest ${varg} -s & + sleep 1 + run_cmd nettest ${varg} -d ${VRF} -r ${NSB_IP6} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test 0 0 "${desc}, VRF client" + + setup ${with_vrf} + + log_start + run_cmd_nsb nettest ${varg} -s & + sleep 1 + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${NSB_IP6} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test 0 0 "${desc}, enslaved device client" + + setup ${with_vrf} + + + # + # local address tests + # + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest ${varg} -s & + sleep 1 + run_cmd nettest ${varg} -d ${VRF} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, global server, VRF client" + + setup ${with_vrf} + done + + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest ${varg} -d ${VRF} -s & + sleep 1 + run_cmd nettest ${varg} -d ${VRF} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, VRF server and client" + + setup ${with_vrf} + done + + a=${NSA_IP6} + log_start + run_cmd nettest ${varg} -s & + sleep 1 + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, global server, device client" + + setup ${with_vrf} + + log_start + run_cmd nettest ${varg} -d ${VRF} -s & + sleep 1 + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, VRF server, device client" + + setup ${with_vrf} + + log_start + run_cmd nettest ${varg} -d ${NSA_DEV} -s & + sleep 1 + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "${desc}, device server, device client" +} + +ipv6_ping_rt() +{ + local with_vrf="yes" + local a + + a=${NSA_IP6} + log_start + run_cmd_nsb ${ping6} -f ${a} & + sleep 3 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "Device delete with active traffic - ping in" + + setup ${with_vrf} + + log_start + run_cmd ${ping6} -f ${NSB_IP6} -I ${VRF} & + sleep 1 + run_cmd ip link del ${VRF} + sleep 1 + log_test_addr ${a} 0 0 "Device delete with active traffic - ping out" +} + 
+ipv6_runtime() +{ + log_section "Run time tests - ipv6" + + setup "yes" + ipv6_ping_rt + + setup "yes" + ipv6_rt "TCP active socket" "-n -1" + + setup "yes" + ipv6_rt "TCP passive socket" "-i" + + setup "yes" + ipv6_rt "UDP active socket" "-D -n -1" +} + +################################################################################ +# netfilter blocking connections + +netfilter_tcp_reset() +{ + local a + + for a in ${NSA_IP} ${VRF_IP} + do + log_start + run_cmd nettest -s & + sleep 1 + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 1 "Global server, reject with TCP-reset on Rx" + done +} + +netfilter_icmp() +{ + local stype="$1" + local arg + local a + + [ "${stype}" = "UDP" ] && arg="-D" + + for a in ${NSA_IP} ${VRF_IP} + do + log_start + run_cmd nettest ${arg} -s & + sleep 1 + run_cmd_nsb nettest ${arg} -r ${a} + log_test_addr ${a} $? 1 "Global ${stype} server, Rx reject icmp-port-unreach" + done +} + +ipv4_netfilter() +{ + which nettest >/dev/null + if [ $? -ne 0 ]; then + log_error "nettest not found; skipping tests" + return + fi + + log_section "IPv4 Netfilter" + log_subsection "TCP reset" + + setup "yes" + run_cmd iptables -A INPUT -p tcp --dport 12345 -j REJECT --reject-with tcp-reset + + netfilter_tcp_reset + + log_start + log_subsection "ICMP unreachable" + + log_start + run_cmd iptables -F + run_cmd iptables -A INPUT -p tcp --dport 12345 -j REJECT --reject-with icmp-port-unreachable + run_cmd iptables -A INPUT -p udp --dport 12345 -j REJECT --reject-with icmp-port-unreachable + + netfilter_icmp "TCP" + netfilter_icmp "UDP" + + log_start + iptables -F +} + +netfilter_tcp6_reset() +{ + local a + + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest -6 -s & + sleep 1 + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 1 "Global server, reject with TCP-reset on Rx" + done +} + +netfilter_icmp6() +{ + local stype="$1" + local arg + local a + + [ "${stype}" = "UDP" ] && arg="$arg -D" + + for a in ${NSA_IP6} ${VRF_IP6} + do + log_start + run_cmd nettest -6 -s ${arg} & + sleep 1 + run_cmd_nsb nettest -6 ${arg} -r ${a} + log_test_addr ${a} $? 1 "Global ${stype} server, Rx reject icmp-port-unreach" + done +} + +ipv6_netfilter() +{ + which nettest >/dev/null + if [ $? -ne 0 ]; then + log_error "nettest not found; skipping tests" + return + fi + + log_section "IPv6 Netfilter" + log_subsection "TCP reset" + + setup "yes" + run_cmd ip6tables -A INPUT -p tcp --dport 12345 -j REJECT --reject-with tcp-reset + + netfilter_tcp6_reset + + log_subsection "ICMP unreachable" + + log_start + run_cmd ip6tables -F + run_cmd ip6tables -A INPUT -p tcp --dport 12345 -j REJECT --reject-with icmp6-port-unreachable + run_cmd ip6tables -A INPUT -p udp --dport 12345 -j REJECT --reject-with icmp6-port-unreachable + + netfilter_icmp6 "TCP" + netfilter_icmp6 "UDP" + + log_start + ip6tables -F +} + +################################################################################ +# specific use cases + +# VRF only. +# ns-A device enslaved to bridge. Verify traffic with and without +# br_netfilter module loaded. Repeat with SVI on bridge. 
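+#
+# A rough sketch of the first topology (names as used in this script;
+# ${NSA_DEV} loses its addresses to br0, and br0 is enslaved to the VRF):
+#
+#      ns-A                              ns-B
+#   +----------------+               +------------+
+#   | ${VRF}         |               |            |
+#   |    \           |               |            |
+#   |    br0 -- ${NSA_DEV} ------- ${NSB_DEV}     |
+#   +----------------+               +------------+
+#
+# The second half repeats the pings with an SVI - the br0.100 vlan device -
+# enslaved to the VRF instead of br0 itself.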
+use_case_br() +{ + setup "yes" + + setup_cmd ip link set ${NSA_DEV} down + setup_cmd ip addr del dev ${NSA_DEV} ${NSA_IP}/24 + setup_cmd ip -6 addr del dev ${NSA_DEV} ${NSA_IP6}/64 + + setup_cmd ip link add br0 type bridge + setup_cmd ip addr add dev br0 ${NSA_IP}/24 + setup_cmd ip -6 addr add dev br0 ${NSA_IP6}/64 nodad + + setup_cmd ip li set ${NSA_DEV} master br0 + setup_cmd ip li set ${NSA_DEV} up + setup_cmd ip li set br0 up + setup_cmd ip li set br0 vrf ${VRF} + + rmmod br_netfilter 2>/dev/null + sleep 5 # DAD + + run_cmd ip neigh flush all + run_cmd ping -c1 -w1 -I br0 ${NSB_IP} + log_test $? 0 "Bridge into VRF - IPv4 ping out" + + run_cmd ip neigh flush all + run_cmd ${ping6} -c1 -w1 -I br0 ${NSB_IP6} + log_test $? 0 "Bridge into VRF - IPv6 ping out" + + run_cmd ip neigh flush all + run_cmd_nsb ping -c1 -w1 ${NSA_IP} + log_test $? 0 "Bridge into VRF - IPv4 ping in" + + run_cmd ip neigh flush all + run_cmd_nsb ${ping6} -c1 -w1 ${NSA_IP6} + log_test $? 0 "Bridge into VRF - IPv6 ping in" + + modprobe br_netfilter + if [ $? -eq 0 ]; then + run_cmd ip neigh flush all + run_cmd ping -c1 -w1 -I br0 ${NSB_IP} + log_test $? 0 "Bridge into VRF with br_netfilter - IPv4 ping out" + + run_cmd ip neigh flush all + run_cmd ${ping6} -c1 -w1 -I br0 ${NSB_IP6} + log_test $? 0 "Bridge into VRF with br_netfilter - IPv6 ping out" + + run_cmd ip neigh flush all + run_cmd_nsb ping -c1 -w1 ${NSA_IP} + log_test $? 0 "Bridge into VRF with br_netfilter - IPv4 ping in" + + run_cmd ip neigh flush all + run_cmd_nsb ${ping6} -c1 -w1 ${NSA_IP6} + log_test $? 0 "Bridge into VRF with br_netfilter - IPv6 ping in" + fi + + setup_cmd ip li set br0 nomaster + setup_cmd ip li add br0.100 link br0 type vlan id 100 + setup_cmd ip li set br0.100 vrf ${VRF} up + setup_cmd ip addr add dev br0.100 172.16.101.1/24 + setup_cmd ip -6 addr add dev br0.100 2001:db8:101::1/64 nodad + + setup_cmd_nsb ip li add vlan100 link ${NSB_DEV} type vlan id 100 + setup_cmd_nsb ip addr add dev vlan100 172.16.101.2/24 + setup_cmd_nsb ip -6 addr add dev vlan100 2001:db8:101::2/64 nodad + setup_cmd_nsb ip li set vlan100 up + sleep 1 + + rmmod br_netfilter 2>/dev/null + + run_cmd ip neigh flush all + run_cmd ping -c1 -w1 -I br0.100 172.16.101.2 + log_test $? 0 "Bridge vlan into VRF - IPv4 ping out" + + run_cmd ip neigh flush all + run_cmd ${ping6} -c1 -w1 -I br0.100 2001:db8:101::2 + log_test $? 0 "Bridge vlan into VRF - IPv6 ping out" + + run_cmd ip neigh flush all + run_cmd_nsb ping -c1 -w1 172.16.101.1 + log_test $? 0 "Bridge vlan into VRF - IPv4 ping in" + + run_cmd ip neigh flush all + run_cmd_nsb ${ping6} -c1 -w1 2001:db8:101::1 + log_test $? 0 "Bridge vlan into VRF - IPv6 ping in" + + modprobe br_netfilter + if [ $? -eq 0 ]; then + run_cmd ip neigh flush all + run_cmd ping -c1 -w1 -I br0.100 172.16.101.2 + log_test $? 0 "Bridge vlan into VRF with br_netfilter - IPv4 ping out" + + run_cmd ip neigh flush all + run_cmd ${ping6} -c1 -w1 -I br0.100 2001:db8:101::2 + log_test $? 0 "Bridge vlan into VRF with br_netfilter - IPv6 ping out" + + run_cmd ip neigh flush all + run_cmd_nsb ping -c1 -w1 172.16.101.1 + log_test $? 0 "Bridge vlan into VRF with br_netfilter - IPv4 ping in" + + run_cmd ip neigh flush all + run_cmd_nsb ${ping6} -c1 -w1 2001:db8:101::1 + log_test $? 
0 "Bridge vlan into VRF - IPv6 ping in" + fi + + setup_cmd ip li del br0 2>/dev/null + setup_cmd_nsb ip li del vlan100 2>/dev/null +} + +use_cases() +{ + log_section "Use cases" + use_case_br +} + +################################################################################ +# usage + +usage() +{ + cat <<EOF +usage: ${0##*/} OPTS + + -4 IPv4 tests only + -6 IPv6 tests only + -t <test> Test name/set to run + -p Pause on fail + -P Pause after each test + -v Be verbose +EOF +} + +################################################################################ +# main + +TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_addr_bind ipv4_runtime ipv4_netfilter" +TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_addr_bind ipv6_runtime ipv6_netfilter" +TESTS_OTHER="use_cases" + +PAUSE_ON_FAIL=no +PAUSE=no + +while getopts :46t:pPvh o +do + case $o in + 4) TESTS=ipv4;; + 6) TESTS=ipv6;; + t) TESTS=$OPTARG;; + p) PAUSE_ON_FAIL=yes;; + P) PAUSE=yes;; + v) VERBOSE=1;; + h) usage; exit 0;; + *) usage; exit 1;; + esac +done + +# make sure we don't pause twice +[ "${PAUSE}" = "yes" ] && PAUSE_ON_FAIL=no + +# +# show user test config +# +if [ -z "$TESTS" ]; then + TESTS="$TESTS_IPV4 $TESTS_IPV6 $TESTS_OTHER" +elif [ "$TESTS" = "ipv4" ]; then + TESTS="$TESTS_IPV4" +elif [ "$TESTS" = "ipv6" ]; then + TESTS="$TESTS_IPV6" +fi + +declare -i nfail=0 +declare -i nsuccess=0 + +for t in $TESTS +do + case $t in + ipv4_ping|ping) ipv4_ping;; + ipv4_tcp|tcp) ipv4_tcp;; + ipv4_udp|udp) ipv4_udp;; + ipv4_bind|bind) ipv4_addr_bind;; + ipv4_runtime) ipv4_runtime;; + ipv4_netfilter) ipv4_netfilter;; + + ipv6_ping|ping6) ipv6_ping;; + ipv6_tcp|tcp6) ipv6_tcp;; + ipv6_udp|udp6) ipv6_udp;; + ipv6_bind|bind6) ipv6_addr_bind;; + ipv6_runtime) ipv6_runtime;; + ipv6_netfilter) ipv6_netfilter;; + + use_cases) use_cases;; + + # setup namespaces and config, but do not run any tests + setup) setup; exit 0;; + vrf_setup) setup "yes"; exit 0;; + + help) echo "Test names: $TESTS"; exit 0;; + esac +done + +cleanup 2>/dev/null + +printf "\nTests passed: %3d\n" ${nsuccess} +printf "Tests failed: %3d\n" ${nfail} diff --git a/tools/testing/selftests/net/nettest.c b/tools/testing/selftests/net/nettest.c new file mode 100644 index 000000000000..83515e5ea4dc --- /dev/null +++ b/tools/testing/selftests/net/nettest.c @@ -0,0 +1,1756 @@ +// SPDX-License-Identifier: GPL-2.0 +/* nettest - used for functional tests of networking APIs + * + * Copyright (c) 2013-2019 David Ahern <dsahern@gmail.com>. All rights reserved. + */ + +#define _GNU_SOURCE +#include <features.h> +#include <sys/types.h> +#include <sys/ioctl.h> +#include <sys/socket.h> +#include <linux/tcp.h> +#include <arpa/inet.h> +#include <net/if.h> +#include <netinet/in.h> +#include <netdb.h> +#include <fcntl.h> +#include <libgen.h> +#include <limits.h> +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <time.h> +#include <errno.h> + +#ifndef IPV6_UNICAST_IF +#define IPV6_UNICAST_IF 76 +#endif +#ifndef IPV6_MULTICAST_IF +#define IPV6_MULTICAST_IF 17 +#endif + +#define DEFAULT_PORT 12345 + +#ifndef MAX +#define MAX(a, b) ((a) > (b) ? (a) : (b)) +#endif +#ifndef MIN +#define MIN(a, b) ((a) < (b) ? 
(a) : (b)) +#endif + +struct sock_args { + /* local address */ + union { + struct in_addr in; + struct in6_addr in6; + } local_addr; + + /* remote address */ + union { + struct in_addr in; + struct in6_addr in6; + } remote_addr; + int scope_id; /* remote scope; v6 send only */ + + struct in_addr grp; /* multicast group */ + + unsigned int has_local_ip:1, + has_remote_ip:1, + has_grp:1, + has_expected_laddr:1, + has_expected_raddr:1, + bind_test_only:1; + + unsigned short port; + + int type; /* DGRAM, STREAM, RAW */ + int protocol; + int version; /* AF_INET/AF_INET6 */ + + int use_setsockopt; + int use_cmsg; + const char *dev; + int ifindex; + const char *password; + + /* expected addresses and device index for connection */ + int expected_ifindex; + + /* local address */ + union { + struct in_addr in; + struct in6_addr in6; + } expected_laddr; + + /* remote address */ + union { + struct in_addr in; + struct in6_addr in6; + } expected_raddr; +}; + +static int server_mode; +static unsigned int prog_timeout = 5; +static unsigned int interactive; +static int iter = 1; +static char *msg = "Hello world!"; +static int msglen; +static int quiet; +static int try_broadcast = 1; + +static char *timestamp(char *timebuf, int buflen) +{ + time_t now; + + now = time(NULL); + if (strftime(timebuf, buflen, "%T", localtime(&now)) == 0) { + memset(timebuf, 0, buflen); + strncpy(timebuf, "00:00:00", buflen-1); + } + + return timebuf; +} + +static void log_msg(const char *format, ...) +{ + char timebuf[64]; + va_list args; + + if (quiet) + return; + + fprintf(stdout, "%s %s:", + timestamp(timebuf, sizeof(timebuf)), + server_mode ? "server" : "client"); + va_start(args, format); + vfprintf(stdout, format, args); + va_end(args); + + fflush(stdout); +} + +static void log_error(const char *format, ...) +{ + char timebuf[64]; + va_list args; + + if (quiet) + return; + + fprintf(stderr, "%s %s:", + timestamp(timebuf, sizeof(timebuf)), + server_mode ? "server" : "client"); + va_start(args, format); + vfprintf(stderr, format, args); + va_end(args); + + fflush(stderr); +} + +static void log_err_errno(const char *fmt, ...) +{ + char timebuf[64]; + va_list args; + + if (quiet) + return; + + fprintf(stderr, "%s %s: ", + timestamp(timebuf, sizeof(timebuf)), + server_mode ? "server" : "client"); + va_start(args, fmt); + vfprintf(stderr, fmt, args); + va_end(args); + + fprintf(stderr, ": %d: %s\n", errno, strerror(errno)); + fflush(stderr); +} + +static void log_address(const char *desc, struct sockaddr *sa) +{ + char addrstr[64]; + + if (quiet) + return; + + if (sa->sa_family == AF_INET) { + struct sockaddr_in *s = (struct sockaddr_in *) sa; + + log_msg("%s %s:%d", + desc, + inet_ntop(AF_INET, &s->sin_addr, addrstr, + sizeof(addrstr)), + ntohs(s->sin_port)); + + } else if (sa->sa_family == AF_INET6) { + struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) sa; + + log_msg("%s [%s]:%d", + desc, + inet_ntop(AF_INET6, &s6->sin6_addr, addrstr, + sizeof(addrstr)), + ntohs(s6->sin6_port)); + } + + printf("\n"); + + fflush(stdout); +} + +static int tcp_md5sig(int sd, void *addr, socklen_t alen, const char *password) +{ + struct tcp_md5sig md5sig; + int keylen = password ? strlen(password) : 0; + int rc; + + memset(&md5sig, 0, sizeof(md5sig)); + memcpy(&md5sig.tcpm_addr, addr, alen); + md5sig.tcpm_keylen = keylen; + + if (keylen) + memcpy(md5sig.tcpm_key, password, keylen); + + rc = setsockopt(sd, IPPROTO_TCP, TCP_MD5SIG, &md5sig, sizeof(md5sig)); + if (rc < 0) { + /* ENOENT is harmless. 
Returned when a password is cleared */ + if (errno == ENOENT) + rc = 0; + else + log_err_errno("setsockopt(TCP_MD5SIG)"); + } + + return rc; +} + +static int tcp_md5_remote(int sd, struct sock_args *args) +{ + struct sockaddr_in sin = { + .sin_family = AF_INET, + }; + struct sockaddr_in6 sin6 = { + .sin6_family = AF_INET6, + }; + void *addr; + int alen; + + switch (args->version) { + case AF_INET: + sin.sin_port = htons(args->port); + sin.sin_addr = args->remote_addr.in; + addr = &sin; + alen = sizeof(sin); + break; + case AF_INET6: + sin6.sin6_port = htons(args->port); + sin6.sin6_addr = args->remote_addr.in6; + addr = &sin6; + alen = sizeof(sin6); + break; + default: + log_error("unknown address family\n"); + exit(1); + } + + if (tcp_md5sig(sd, addr, alen, args->password)) + return -1; + + return 0; +} + +static int get_ifidx(const char *ifname) +{ + struct ifreq ifdata; + int sd, rc; + + if (!ifname || *ifname == '\0') + return 0; + + memset(&ifdata, 0, sizeof(ifdata)); + + strcpy(ifdata.ifr_name, ifname); + + sd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); + if (sd < 0) { + log_err_errno("socket failed"); + return 0; + } + + rc = ioctl(sd, SIOCGIFINDEX, (char *)&ifdata); + close(sd); + if (rc != 0) { + log_err_errno("ioctl(SIOCGIFINDEX) failed"); + return 0; + } + + return ifdata.ifr_ifindex; +} + +static int bind_to_device(int sd, const char *name) +{ + int rc; + + rc = setsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, name, strlen(name)+1); + if (rc < 0) + log_err_errno("setsockopt(SO_BINDTODEVICE)"); + + return rc; +} + +static int get_bind_to_device(int sd, char *name, size_t len) +{ + int rc; + socklen_t optlen = len; + + name[0] = '\0'; + rc = getsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, name, &optlen); + if (rc < 0) + log_err_errno("getsockopt(SO_BINDTODEVICE)"); + + return rc; +} + +static int check_device(int sd, struct sock_args *args) +{ + int ifindex = 0; + char name[32]; + + if (get_bind_to_device(sd, name, sizeof(name))) + *name = '\0'; + else + ifindex = get_ifidx(name); + + log_msg(" bound to device %s/%d\n", + *name ? 
name : "<none>", ifindex); + + if (!args->expected_ifindex) + return 0; + + if (args->expected_ifindex != ifindex) { + log_error("Device index mismatch: expected %d have %d\n", + args->expected_ifindex, ifindex); + return 1; + } + + log_msg("Device index matches: expected %d have %d\n", + args->expected_ifindex, ifindex); + + return 0; +} + +static int set_pktinfo_v4(int sd) +{ + int one = 1; + int rc; + + rc = setsockopt(sd, SOL_IP, IP_PKTINFO, &one, sizeof(one)); + if (rc < 0 && rc != -ENOTSUP) + log_err_errno("setsockopt(IP_PKTINFO)"); + + return rc; +} + +static int set_recvpktinfo_v6(int sd) +{ + int one = 1; + int rc; + + rc = setsockopt(sd, SOL_IPV6, IPV6_RECVPKTINFO, &one, sizeof(one)); + if (rc < 0 && rc != -ENOTSUP) + log_err_errno("setsockopt(IPV6_RECVPKTINFO)"); + + return rc; +} + +static int set_recverr_v4(int sd) +{ + int one = 1; + int rc; + + rc = setsockopt(sd, SOL_IP, IP_RECVERR, &one, sizeof(one)); + if (rc < 0 && rc != -ENOTSUP) + log_err_errno("setsockopt(IP_RECVERR)"); + + return rc; +} + +static int set_recverr_v6(int sd) +{ + int one = 1; + int rc; + + rc = setsockopt(sd, SOL_IPV6, IPV6_RECVERR, &one, sizeof(one)); + if (rc < 0 && rc != -ENOTSUP) + log_err_errno("setsockopt(IPV6_RECVERR)"); + + return rc; +} + +static int set_unicast_if(int sd, int ifindex, int version) +{ + int opt = IP_UNICAST_IF; + int level = SOL_IP; + int rc; + + ifindex = htonl(ifindex); + + if (version == AF_INET6) { + opt = IPV6_UNICAST_IF; + level = SOL_IPV6; + } + rc = setsockopt(sd, level, opt, &ifindex, sizeof(ifindex)); + if (rc < 0) + log_err_errno("setsockopt(IP_UNICAST_IF)"); + + return rc; +} + +static int set_multicast_if(int sd, int ifindex) +{ + struct ip_mreqn mreq = { .imr_ifindex = ifindex }; + int rc; + + rc = setsockopt(sd, SOL_IP, IP_MULTICAST_IF, &mreq, sizeof(mreq)); + if (rc < 0) + log_err_errno("setsockopt(IP_MULTICAST_IF)"); + + return rc; +} + +static int set_membership(int sd, uint32_t grp, uint32_t addr, const char *dev) +{ + uint32_t if_addr = addr; + struct ip_mreqn mreq; + int rc; + + if (addr == htonl(INADDR_ANY) && !dev) { + log_error("Either local address or device needs to be given for multicast membership\n"); + return -1; + } + + mreq.imr_multiaddr.s_addr = grp; + mreq.imr_address.s_addr = if_addr; + mreq.imr_ifindex = dev ? 
get_ifidx(dev) : 0; + + rc = setsockopt(sd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)); + if (rc < 0) { + log_err_errno("setsockopt(IP_ADD_MEMBERSHIP)"); + return -1; + } + + return 0; +} + +static int set_broadcast(int sd) +{ + unsigned int one = 1; + int rc = 0; + + if (setsockopt(sd, SOL_SOCKET, SO_BROADCAST, &one, sizeof(one)) != 0) { + log_err_errno("setsockopt(SO_BROADCAST)"); + rc = -1; + } + + return rc; +} + +static int set_reuseport(int sd) +{ + unsigned int one = 1; + int rc = 0; + + if (setsockopt(sd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) != 0) { + log_err_errno("setsockopt(SO_REUSEPORT)"); + rc = -1; + } + + return rc; +} + +static int set_reuseaddr(int sd) +{ + unsigned int one = 1; + int rc = 0; + + if (setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) != 0) { + log_err_errno("setsockopt(SO_REUSEADDR)"); + rc = -1; + } + + return rc; +} + +static int str_to_uint(const char *str, int min, int max, unsigned int *value) +{ + int number; + char *end; + + errno = 0; + number = (unsigned int) strtoul(str, &end, 0); + + /* entire string should be consumed by conversion + * and value should be between min and max + */ + if (((*end == '\0') || (*end == '\n')) && (end != str) && + (errno != ERANGE) && (min <= number) && (number <= max)) { + *value = number; + return 0; + } + + return -1; +} + +static int expected_addr_match(struct sockaddr *sa, void *expected, + const char *desc) +{ + char addrstr[64]; + int rc = 0; + + if (sa->sa_family == AF_INET) { + struct sockaddr_in *s = (struct sockaddr_in *) sa; + struct in_addr *exp_in = (struct in_addr *) expected; + + if (s->sin_addr.s_addr != exp_in->s_addr) { + log_error("%s address does not match expected %s", + desc, + inet_ntop(AF_INET, exp_in, + addrstr, sizeof(addrstr))); + rc = 1; + } + } else if (sa->sa_family == AF_INET6) { + struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) sa; + struct in6_addr *exp_in = (struct in6_addr *) expected; + + if (memcmp(&s6->sin6_addr, exp_in, sizeof(*exp_in))) { + log_error("%s address does not match expected %s", + desc, + inet_ntop(AF_INET6, exp_in, + addrstr, sizeof(addrstr))); + rc = 1; + } + } else { + log_error("%s address does not match expected - unknown family", + desc); + rc = 1; + } + + if (!rc) + log_msg("%s address matches expected\n", desc); + + return rc; +} + +static int show_sockstat(int sd, struct sock_args *args) +{ + struct sockaddr_in6 local_addr, remote_addr; + socklen_t alen = sizeof(local_addr); + struct sockaddr *sa; + const char *desc; + int rc = 0; + + desc = server_mode ? "server local:" : "client local:"; + sa = (struct sockaddr *) &local_addr; + if (getsockname(sd, sa, &alen) == 0) { + log_address(desc, sa); + + if (args->has_expected_laddr) { + rc = expected_addr_match(sa, &args->expected_laddr, + "local"); + } + } else { + log_err_errno("getsockname failed"); + } + + sa = (struct sockaddr *) &remote_addr; + desc = server_mode ? 
"server peer:" : "client peer:"; + if (getpeername(sd, sa, &alen) == 0) { + log_address(desc, sa); + + if (args->has_expected_raddr) { + rc |= expected_addr_match(sa, &args->expected_raddr, + "remote"); + } + } else { + log_err_errno("getpeername failed"); + } + + return rc; +} + +static int get_index_from_cmsg(struct msghdr *m) +{ + struct cmsghdr *cm; + int ifindex = 0; + char buf[64]; + + for (cm = (struct cmsghdr *)CMSG_FIRSTHDR(m); + m->msg_controllen != 0 && cm; + cm = (struct cmsghdr *)CMSG_NXTHDR(m, cm)) { + + if (cm->cmsg_level == SOL_IP && + cm->cmsg_type == IP_PKTINFO) { + struct in_pktinfo *pi; + + pi = (struct in_pktinfo *)(CMSG_DATA(cm)); + inet_ntop(AF_INET, &pi->ipi_addr, buf, sizeof(buf)); + ifindex = pi->ipi_ifindex; + } else if (cm->cmsg_level == SOL_IPV6 && + cm->cmsg_type == IPV6_PKTINFO) { + struct in6_pktinfo *pi6; + + pi6 = (struct in6_pktinfo *)(CMSG_DATA(cm)); + inet_ntop(AF_INET6, &pi6->ipi6_addr, buf, sizeof(buf)); + ifindex = pi6->ipi6_ifindex; + } + } + + if (ifindex) { + log_msg(" pktinfo: ifindex %d dest addr %s\n", + ifindex, buf); + } + return ifindex; +} + +static int send_msg_no_cmsg(int sd, void *addr, socklen_t alen) +{ + int err; + +again: + err = sendto(sd, msg, msglen, 0, addr, alen); + if (err < 0) { + if (errno == EACCES && try_broadcast) { + try_broadcast = 0; + if (!set_broadcast(sd)) + goto again; + errno = EACCES; + } + + log_err_errno("sendto failed"); + return 1; + } + + return 0; +} + +static int send_msg_cmsg(int sd, void *addr, socklen_t alen, + int ifindex, int version) +{ + unsigned char cmsgbuf[64]; + struct iovec iov[2]; + struct cmsghdr *cm; + struct msghdr m; + int err; + + iov[0].iov_base = msg; + iov[0].iov_len = msglen; + m.msg_iov = iov; + m.msg_iovlen = 1; + m.msg_name = (caddr_t)addr; + m.msg_namelen = alen; + + memset(cmsgbuf, 0, sizeof(cmsgbuf)); + cm = (struct cmsghdr *)cmsgbuf; + m.msg_control = (caddr_t)cm; + + if (version == AF_INET) { + struct in_pktinfo *pi; + + cm->cmsg_level = SOL_IP; + cm->cmsg_type = IP_PKTINFO; + cm->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo)); + pi = (struct in_pktinfo *)(CMSG_DATA(cm)); + pi->ipi_ifindex = ifindex; + + m.msg_controllen = cm->cmsg_len; + + } else if (version == AF_INET6) { + struct in6_pktinfo *pi6; + + cm->cmsg_level = SOL_IPV6; + cm->cmsg_type = IPV6_PKTINFO; + cm->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo)); + + pi6 = (struct in6_pktinfo *)(CMSG_DATA(cm)); + pi6->ipi6_ifindex = ifindex; + + m.msg_controllen = cm->cmsg_len; + } + +again: + err = sendmsg(sd, &m, 0); + if (err < 0) { + if (errno == EACCES && try_broadcast) { + try_broadcast = 0; + if (!set_broadcast(sd)) + goto again; + errno = EACCES; + } + + log_err_errno("sendmsg failed"); + return 1; + } + + return 0; +} + + +static int send_msg(int sd, void *addr, socklen_t alen, struct sock_args *args) +{ + if (args->type == SOCK_STREAM) { + if (write(sd, msg, msglen) < 0) { + log_err_errno("write failed sending msg to peer"); + return 1; + } + } else if (args->ifindex && args->use_cmsg) { + if (send_msg_cmsg(sd, addr, alen, args->ifindex, args->version)) + return 1; + } else { + if (send_msg_no_cmsg(sd, addr, alen)) + return 1; + } + + log_msg("Sent message:\n"); + log_msg(" %.24s%s\n", msg, msglen > 24 ? " ..." 
: ""); + + return 0; +} + +static int socket_read_dgram(int sd, struct sock_args *args) +{ + unsigned char addr[sizeof(struct sockaddr_in6)]; + struct sockaddr *sa = (struct sockaddr *) addr; + socklen_t alen = sizeof(addr); + struct iovec iov[2]; + struct msghdr m = { + .msg_name = (caddr_t)addr, + .msg_namelen = alen, + .msg_iov = iov, + .msg_iovlen = 1, + }; + unsigned char cmsgbuf[256]; + struct cmsghdr *cm = (struct cmsghdr *)cmsgbuf; + char buf[16*1024]; + int ifindex; + int len; + + iov[0].iov_base = (caddr_t)buf; + iov[0].iov_len = sizeof(buf); + + memset(cmsgbuf, 0, sizeof(cmsgbuf)); + m.msg_control = (caddr_t)cm; + m.msg_controllen = sizeof(cmsgbuf); + + len = recvmsg(sd, &m, 0); + if (len == 0) { + log_msg("peer closed connection.\n"); + return 0; + } else if (len < 0) { + log_msg("failed to read message: %d: %s\n", + errno, strerror(errno)); + return -1; + } + + buf[len] = '\0'; + + log_address("Message from:", sa); + log_msg(" %.24s%s\n", buf, len > 24 ? " ..." : ""); + + ifindex = get_index_from_cmsg(&m); + if (args->expected_ifindex) { + if (args->expected_ifindex != ifindex) { + log_error("Device index mismatch: expected %d have %d\n", + args->expected_ifindex, ifindex); + return -1; + } + log_msg("Device index matches: expected %d have %d\n", + args->expected_ifindex, ifindex); + } + + if (!interactive && server_mode) { + if (sa->sa_family == AF_INET6) { + struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) sa; + struct in6_addr *in6 = &s6->sin6_addr; + + if (IN6_IS_ADDR_V4MAPPED(in6)) { + const uint32_t *pa = (uint32_t *) &in6->s6_addr; + struct in_addr in4; + struct sockaddr_in *sin; + + sin = (struct sockaddr_in *) addr; + pa += 3; + in4.s_addr = *pa; + sin->sin_addr = in4; + sin->sin_family = AF_INET; + if (send_msg_cmsg(sd, addr, alen, + ifindex, AF_INET) < 0) + goto out_err; + } + } +again: + iov[0].iov_len = len; + + if (args->version == AF_INET6) { + struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) sa; + + if (args->dev) { + /* avoid PKTINFO conflicts with bindtodev */ + if (sendto(sd, buf, len, 0, + (void *) addr, alen) < 0) + goto out_err; + } else { + /* kernel is allowing scope_id to be set to VRF + * index for LLA. for sends to global address + * reset scope id + */ + s6->sin6_scope_id = ifindex; + if (sendmsg(sd, &m, 0) < 0) + goto out_err; + } + } else { + int err; + + err = sendmsg(sd, &m, 0); + if (err < 0) { + if (errno == EACCES && try_broadcast) { + try_broadcast = 0; + if (!set_broadcast(sd)) + goto again; + errno = EACCES; + } + goto out_err; + } + } + log_msg("Sent message:\n"); + log_msg(" %.24s%s\n", buf, len > 24 ? " ..." : ""); + } + + return 1; +out_err: + log_err_errno("failed to send msg to peer"); + return -1; +} + +static int socket_read_stream(int sd) +{ + char buf[1024]; + int len; + + len = read(sd, buf, sizeof(buf)-1); + if (len == 0) { + log_msg("client closed connection.\n"); + return 0; + } else if (len < 0) { + log_msg("failed to read message\n"); + return -1; + } + + buf[len] = '\0'; + log_msg("Incoming message:\n"); + log_msg(" %.24s%s\n", buf, len > 24 ? " ..." : ""); + + if (!interactive && server_mode) { + if (write(sd, buf, len) < 0) { + log_err_errno("failed to send buf"); + return -1; + } + log_msg("Sent message:\n"); + log_msg(" %.24s%s\n", buf, len > 24 ? " ..." 
: ""); + } + + return 1; +} + +static int socket_read(int sd, struct sock_args *args) +{ + if (args->type == SOCK_STREAM) + return socket_read_stream(sd); + + return socket_read_dgram(sd, args); +} + +static int stdin_to_socket(int sd, int type, void *addr, socklen_t alen) +{ + char buf[1024]; + int len; + + if (fgets(buf, sizeof(buf), stdin) == NULL) + return 0; + + len = strlen(buf); + if (type == SOCK_STREAM) { + if (write(sd, buf, len) < 0) { + log_err_errno("failed to send buf"); + return -1; + } + } else { + int err; + +again: + err = sendto(sd, buf, len, 0, addr, alen); + if (err < 0) { + if (errno == EACCES && try_broadcast) { + try_broadcast = 0; + if (!set_broadcast(sd)) + goto again; + errno = EACCES; + } + log_err_errno("failed to send msg to peer"); + return -1; + } + } + log_msg("Sent message:\n"); + log_msg(" %.24s%s\n", buf, len > 24 ? " ..." : ""); + + return 1; +} + +static void set_recv_attr(int sd, int version) +{ + if (version == AF_INET6) { + set_recvpktinfo_v6(sd); + set_recverr_v6(sd); + } else { + set_pktinfo_v4(sd); + set_recverr_v4(sd); + } +} + +static int msg_loop(int client, int sd, void *addr, socklen_t alen, + struct sock_args *args) +{ + struct timeval timeout = { .tv_sec = prog_timeout }, *ptval = NULL; + fd_set rfds; + int nfds; + int rc; + + if (args->type != SOCK_STREAM) + set_recv_attr(sd, args->version); + + if (msg) { + msglen = strlen(msg); + + /* client sends first message */ + if (client) { + if (send_msg(sd, addr, alen, args)) + return 1; + } + if (!interactive) { + ptval = &timeout; + if (!prog_timeout) + timeout.tv_sec = 5; + } + } + + nfds = interactive ? MAX(fileno(stdin), sd) + 1 : sd + 1; + while (1) { + FD_ZERO(&rfds); + FD_SET(sd, &rfds); + if (interactive) + FD_SET(fileno(stdin), &rfds); + + rc = select(nfds, &rfds, NULL, NULL, ptval); + if (rc < 0) { + if (errno == EINTR) + continue; + + rc = 1; + log_err_errno("select failed"); + break; + } else if (rc == 0) { + log_error("Timed out waiting for response\n"); + rc = 2; + break; + } + + if (FD_ISSET(sd, &rfds)) { + rc = socket_read(sd, args); + if (rc < 0) { + rc = 1; + break; + } + if (rc == 0) + break; + } + + rc = 0; + + if (FD_ISSET(fileno(stdin), &rfds)) { + if (stdin_to_socket(sd, args->type, addr, alen) <= 0) + break; + } + + if (interactive) + continue; + + if (iter != -1) { + --iter; + if (iter == 0) + break; + } + + log_msg("Going into quiet mode\n"); + quiet = 1; + + if (client) { + if (send_msg(sd, addr, alen, args)) { + rc = 1; + break; + } + } + } + + return rc; +} + +static int msock_init(struct sock_args *args, int server) +{ + uint32_t if_addr = htonl(INADDR_ANY); + struct sockaddr_in laddr = { + .sin_family = AF_INET, + .sin_port = htons(args->port), + }; + int one = 1; + int sd; + + if (!server && args->has_local_ip) + if_addr = args->local_addr.in.s_addr; + + sd = socket(PF_INET, SOCK_DGRAM, 0); + if (sd < 0) { + log_err_errno("socket"); + return -1; + } + + if (setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, + (char *)&one, sizeof(one)) < 0) { + log_err_errno("Setting SO_REUSEADDR error"); + goto out_err; + } + + if (setsockopt(sd, SOL_SOCKET, SO_BROADCAST, + (char *)&one, sizeof(one)) < 0) + log_err_errno("Setting SO_BROADCAST error"); + + if (args->dev && bind_to_device(sd, args->dev) != 0) + goto out_err; + else if (args->use_setsockopt && + set_multicast_if(sd, args->ifindex)) + goto out_err; + + laddr.sin_addr.s_addr = if_addr; + + if (bind(sd, (struct sockaddr *) &laddr, sizeof(laddr)) < 0) { + log_err_errno("bind failed"); + goto out_err; + } + + if (server && + 
set_membership(sd, args->grp.s_addr, + args->local_addr.in.s_addr, args->dev)) + goto out_err; + + return sd; +out_err: + close(sd); + return -1; +} + +static int msock_server(struct sock_args *args) +{ + return msock_init(args, 1); +} + +static int msock_client(struct sock_args *args) +{ + return msock_init(args, 0); +} + +static int bind_socket(int sd, struct sock_args *args) +{ + struct sockaddr_in serv_addr = { + .sin_family = AF_INET, + }; + struct sockaddr_in6 serv6_addr = { + .sin6_family = AF_INET6, + }; + void *addr; + socklen_t alen; + + if (!args->has_local_ip && args->type == SOCK_RAW) + return 0; + + switch (args->version) { + case AF_INET: + serv_addr.sin_port = htons(args->port); + serv_addr.sin_addr = args->local_addr.in; + addr = &serv_addr; + alen = sizeof(serv_addr); + break; + + case AF_INET6: + serv6_addr.sin6_port = htons(args->port); + serv6_addr.sin6_addr = args->local_addr.in6; + addr = &serv6_addr; + alen = sizeof(serv6_addr); + break; + + default: + log_error("Invalid address family\n"); + return -1; + } + + if (bind(sd, addr, alen) < 0) { + log_err_errno("error binding socket"); + return -1; + } + + return 0; +} + +static int lsock_init(struct sock_args *args) +{ + long flags; + int sd; + + sd = socket(args->version, args->type, args->protocol); + if (sd < 0) { + log_err_errno("Error opening socket"); + return -1; + } + + if (set_reuseaddr(sd) != 0) + goto err; + + if (set_reuseport(sd) != 0) + goto err; + + if (args->dev && bind_to_device(sd, args->dev) != 0) + goto err; + else if (args->use_setsockopt && + set_unicast_if(sd, args->ifindex, args->version)) + goto err; + + if (bind_socket(sd, args)) + goto err; + + if (args->bind_test_only) + goto out; + + if (args->type == SOCK_STREAM && listen(sd, 1) < 0) { + log_err_errno("listen failed"); + goto err; + } + + flags = fcntl(sd, F_GETFL); + if ((flags < 0) || (fcntl(sd, F_SETFL, flags|O_NONBLOCK) < 0)) { + log_err_errno("Failed to set non-blocking option"); + goto err; + } + + if (fcntl(sd, F_SETFD, FD_CLOEXEC) < 0) + log_err_errno("Failed to set close-on-exec flag"); + +out: + return sd; + +err: + close(sd); + return -1; +} + +static int do_server(struct sock_args *args) +{ + struct timeval timeout = { .tv_sec = prog_timeout }, *ptval = NULL; + unsigned char addr[sizeof(struct sockaddr_in6)] = {}; + socklen_t alen = sizeof(addr); + int lsd, csd = -1; + + fd_set rfds; + int rc; + + if (prog_timeout) + ptval = &timeout; + + if (args->has_grp) + lsd = msock_server(args); + else + lsd = lsock_init(args); + + if (lsd < 0) + return 1; + + if (args->bind_test_only) { + close(lsd); + return 0; + } + + if (args->type != SOCK_STREAM) { + rc = msg_loop(0, lsd, (void *) addr, alen, args); + close(lsd); + return rc; + } + + if (args->password && tcp_md5_remote(lsd, args)) { + close(lsd); + return -1; + } + + while (1) { + log_msg("\n"); + log_msg("waiting for client connection.\n"); + FD_ZERO(&rfds); + FD_SET(lsd, &rfds); + + rc = select(lsd+1, &rfds, NULL, NULL, ptval); + if (rc == 0) { + rc = 2; + break; + } + + if (rc < 0) { + if (errno == EINTR) + continue; + + log_err_errno("select failed"); + break; + } + + if (FD_ISSET(lsd, &rfds)) { + + csd = accept(lsd, (void *) addr, &alen); + if (csd < 0) { + log_err_errno("accept failed"); + break; + } + + rc = show_sockstat(csd, args); + if (rc) + break; + + rc = check_device(csd, args); + if (rc) + break; + } + + rc = msg_loop(0, csd, (void *) addr, alen, args); + close(csd); + + if (!interactive) + break; + } + + close(lsd); + + return rc; +} + +static int 
wait_for_connect(int sd) +{ + struct timeval _tv = { .tv_sec = prog_timeout }, *tv = NULL; + fd_set wfd; + int val = 0, sz = sizeof(val); + int rc; + + FD_ZERO(&wfd); + FD_SET(sd, &wfd); + + if (prog_timeout) + tv = &_tv; + + rc = select(FD_SETSIZE, NULL, &wfd, NULL, tv); + if (rc == 0) { + log_error("connect timed out\n"); + return -2; + } else if (rc < 0) { + log_err_errno("select failed"); + return -3; + } + + if (getsockopt(sd, SOL_SOCKET, SO_ERROR, &val, (socklen_t *)&sz) < 0) { + log_err_errno("getsockopt(SO_ERROR) failed"); + return -4; + } + + if (val != 0) { + log_error("connect failed: %d: %s\n", val, strerror(val)); + return -1; + } + + return 0; +} + +static int connectsock(void *addr, socklen_t alen, struct sock_args *args) +{ + int sd, rc = -1; + long flags; + + sd = socket(args->version, args->type, args->protocol); + if (sd < 0) { + log_err_errno("Failed to create socket"); + return -1; + } + + flags = fcntl(sd, F_GETFL); + if ((flags < 0) || (fcntl(sd, F_SETFL, flags|O_NONBLOCK) < 0)) { + log_err_errno("Failed to set non-blocking option"); + goto err; + } + + if (set_reuseport(sd) != 0) + goto err; + + if (args->dev && bind_to_device(sd, args->dev) != 0) + goto err; + else if (args->use_setsockopt && + set_unicast_if(sd, args->ifindex, args->version)) + goto err; + + if (args->has_local_ip && bind_socket(sd, args)) + goto err; + + if (args->type != SOCK_STREAM) + goto out; + + if (args->password && tcp_md5sig(sd, addr, alen, args->password)) + goto err; + + if (args->bind_test_only) + goto out; + + if (connect(sd, addr, alen) < 0) { + if (errno != EINPROGRESS) { + log_err_errno("Failed to connect to remote host"); + rc = -1; + goto err; + } + rc = wait_for_connect(sd); + if (rc < 0) + goto err; + } +out: + return sd; + +err: + close(sd); + return rc; +} + +static int do_client(struct sock_args *args) +{ + struct sockaddr_in sin = { + .sin_family = AF_INET, + }; + struct sockaddr_in6 sin6 = { + .sin6_family = AF_INET6, + }; + void *addr; + int alen; + int rc = 0; + int sd; + + if (!args->has_remote_ip && !args->has_grp) { + fprintf(stderr, "remote IP or multicast group not given\n"); + return 1; + } + + switch (args->version) { + case AF_INET: + sin.sin_port = htons(args->port); + if (args->has_grp) + sin.sin_addr = args->grp; + else + sin.sin_addr = args->remote_addr.in; + addr = &sin; + alen = sizeof(sin); + break; + case AF_INET6: + sin6.sin6_port = htons(args->port); + sin6.sin6_addr = args->remote_addr.in6; + sin6.sin6_scope_id = args->scope_id; + addr = &sin6; + alen = sizeof(sin6); + break; + } + + if (args->has_grp) + sd = msock_client(args); + else + sd = connectsock(addr, alen, args); + + if (sd < 0) + return -sd; + + if (args->bind_test_only) + goto out; + + if (args->type == SOCK_STREAM) { + rc = show_sockstat(sd, args); + if (rc != 0) + goto out; + } + + rc = msg_loop(1, sd, addr, alen, args); + +out: + close(sd); + + return rc; +} + +enum addr_type { + ADDR_TYPE_LOCAL, + ADDR_TYPE_REMOTE, + ADDR_TYPE_MCAST, + ADDR_TYPE_EXPECTED_LOCAL, + ADDR_TYPE_EXPECTED_REMOTE, +}; + +static int convert_addr(struct sock_args *args, const char *_str, + enum addr_type atype) +{ + int family = args->version; + struct in6_addr *in6; + struct in_addr *in; + const char *desc; + char *str, *dev; + void *addr; + int rc = 0; + + str = strdup(_str); + if (!str) + return -ENOMEM; + + switch (atype) { + case ADDR_TYPE_LOCAL: + desc = "local"; + addr = &args->local_addr; + break; + case ADDR_TYPE_REMOTE: + desc = "remote"; + addr = &args->remote_addr; + break; + case ADDR_TYPE_MCAST: + 
desc = "mcast grp"; + addr = &args->grp; + break; + case ADDR_TYPE_EXPECTED_LOCAL: + desc = "expected local"; + addr = &args->expected_laddr; + break; + case ADDR_TYPE_EXPECTED_REMOTE: + desc = "expected remote"; + addr = &args->expected_raddr; + break; + default: + log_error("unknown address type"); + exit(1); + } + + switch (family) { + case AF_INET: + in = (struct in_addr *) addr; + if (str) { + if (inet_pton(AF_INET, str, in) == 0) { + log_error("Invalid %s IP address\n", desc); + rc = -1; + goto out; + } + } else { + in->s_addr = htonl(INADDR_ANY); + } + break; + + case AF_INET6: + dev = strchr(str, '%'); + if (dev) { + *dev = '\0'; + dev++; + } + + in6 = (struct in6_addr *) addr; + if (str) { + if (inet_pton(AF_INET6, str, in6) == 0) { + log_error("Invalid %s IPv6 address\n", desc); + rc = -1; + goto out; + } + } else { + *in6 = in6addr_any; + } + if (dev) { + args->scope_id = get_ifidx(dev); + if (args->scope_id < 0) { + log_error("Invalid scope on %s IPv6 address\n", + desc); + rc = -1; + goto out; + } + } + break; + + default: + log_error("Invalid address family\n"); + } + +out: + free(str); + return rc; +} + +static char *random_msg(int len) +{ + int i, n = 0, olen = len + 1; + char *m; + + if (len <= 0) + return NULL; + + m = malloc(olen); + if (!m) + return NULL; + + while (len > 26) { + i = snprintf(m + n, olen - n, "%.26s", + "abcdefghijklmnopqrstuvwxyz"); + n += i; + len -= i; + } + i = snprintf(m + n, olen - n, "%.*s", len, + "abcdefghijklmnopqrstuvwxyz"); + return m; +} + +#define GETOPT_STR "sr:l:p:t:g:P:DRn:M:d:SCi6L:0:1:2:Fbq" + +static void print_usage(char *prog) +{ + printf( + "usage: %s OPTS\n" + "Required:\n" + " -r addr remote address to connect to (client mode only)\n" + " -p port port to connect to (client mode)/listen on (server mode)\n" + " (default: %d)\n" + " -s server mode (default: client mode)\n" + " -t timeout seconds (default: none)\n" + "\n" + "Optional:\n" + " -F Restart server loop\n" + " -6 IPv6 (default is IPv4)\n" + " -P proto protocol for socket: icmp, ospf (default: none)\n" + " -D|R datagram (D) / raw (R) socket (default stream)\n" + " -l addr local address to bind to\n" + "\n" + " -d dev bind socket to given device name\n" + " -S use setsockopt (IP_UNICAST_IF or IP_MULTICAST_IF)\n" + " to set device binding\n" + " -C use cmsg and IP_PKTINFO to specify device binding\n" + "\n" + " -L len send random message of given length\n" + " -n num number of times to send message\n" + "\n" + " -M password use MD5 sum protection\n" + " -g grp multicast group (e.g., 239.1.1.1)\n" + " -i interactive mode (default is echo and terminate)\n" + "\n" + " -0 addr Expected local address\n" + " -1 addr Expected remote address\n" + " -2 dev Expected device name (or index) to receive packet\n" + "\n" + " -b Bind test only.\n" + " -q Be quiet. 
Run test without printing anything.\n" + , prog, DEFAULT_PORT); +} + +int main(int argc, char *argv[]) +{ + struct sock_args args = { + .version = AF_INET, + .type = SOCK_STREAM, + .port = DEFAULT_PORT, + }; + struct protoent *pe; + unsigned int tmp; + int forever = 0; + + /* process inputs */ + extern char *optarg; + int rc = 0; + + /* + * process input args + */ + + while ((rc = getopt(argc, argv, GETOPT_STR)) != -1) { + switch (rc) { + case 's': + server_mode = 1; + break; + case 'F': + forever = 1; + break; + case 'l': + args.has_local_ip = 1; + if (convert_addr(&args, optarg, ADDR_TYPE_LOCAL) < 0) + return 1; + break; + case 'r': + args.has_remote_ip = 1; + if (convert_addr(&args, optarg, ADDR_TYPE_REMOTE) < 0) + return 1; + break; + case 'p': + if (str_to_uint(optarg, 1, 65535, &tmp) != 0) { + fprintf(stderr, "Invalid port\n"); + return 1; + } + args.port = (unsigned short) tmp; + break; + case 't': + if (str_to_uint(optarg, 0, INT_MAX, + &prog_timeout) != 0) { + fprintf(stderr, "Invalid timeout\n"); + return 1; + } + break; + case 'D': + args.type = SOCK_DGRAM; + break; + case 'R': + args.type = SOCK_RAW; + args.port = 0; + break; + case 'P': + pe = getprotobyname(optarg); + if (pe) { + args.protocol = pe->p_proto; + } else { + if (str_to_uint(optarg, 0, 0xffff, &tmp) != 0) { + fprintf(stderr, "Invalid protocol\n"); + return 1; + } + args.protocol = tmp; + } + break; + case 'n': + iter = atoi(optarg); + break; + case 'L': + msg = random_msg(atoi(optarg)); + break; + case 'M': + args.password = optarg; + break; + case 'S': + args.use_setsockopt = 1; + break; + case 'C': + args.use_cmsg = 1; + break; + case 'd': + args.dev = optarg; + args.ifindex = get_ifidx(optarg); + if (args.ifindex < 0) { + fprintf(stderr, "Invalid device name\n"); + return 1; + } + break; + case 'i': + interactive = 1; + break; + case 'g': + args.has_grp = 1; + if (convert_addr(&args, optarg, ADDR_TYPE_MCAST) < 0) + return 1; + args.type = SOCK_DGRAM; + break; + case '6': + args.version = AF_INET6; + break; + case 'b': + args.bind_test_only = 1; + break; + case '0': + args.has_expected_laddr = 1; + if (convert_addr(&args, optarg, + ADDR_TYPE_EXPECTED_LOCAL)) + return 1; + break; + case '1': + args.has_expected_raddr = 1; + if (convert_addr(&args, optarg, + ADDR_TYPE_EXPECTED_REMOTE)) + return 1; + + break; + case '2': + if (str_to_uint(optarg, 0, 0x7ffffff, &tmp) != 0) { + tmp = get_ifidx(optarg); + if (tmp < 0) { + fprintf(stderr, + "Invalid device index\n"); + return 1; + } + } + args.expected_ifindex = (int)tmp; + break; + case 'q': + quiet = 1; + break; + default: + print_usage(argv[0]); + return 1; + } + } + + if (args.password && + (!args.has_remote_ip || args.type != SOCK_STREAM)) { + log_error("MD5 passwords apply to TCP only and require a remote ip for the password\n"); + return 1; + } + + if ((args.use_setsockopt || args.use_cmsg) && !args.ifindex) { + fprintf(stderr, "Device binding not specified\n"); + return 1; + } + if (args.use_setsockopt || args.use_cmsg) + args.dev = NULL; + + if (iter == 0) { + fprintf(stderr, "Invalid number of messages to send\n"); + return 1; + } + + if (args.type == SOCK_STREAM && !args.protocol) + args.protocol = IPPROTO_TCP; + if (args.type == SOCK_DGRAM && !args.protocol) + args.protocol = IPPROTO_UDP; + + if ((args.type == SOCK_STREAM || args.type == SOCK_DGRAM) && + args.port == 0) { + fprintf(stderr, "Invalid port number\n"); + return 1; + } + + if (!server_mode && !args.has_grp && + !args.has_remote_ip && !args.has_local_ip) { + fprintf(stderr, + "Local (server mode) 
or remote IP (client IP) required\n"); + return 1; + } + + if (interactive) { + prog_timeout = 0; + msg = NULL; + } + + if (server_mode) { + do { + rc = do_server(&args); + } while (forever); + + return rc; + } + return do_client(&args); +} diff --git a/tools/testing/selftests/tc-testing/README b/tools/testing/selftests/tc-testing/README index 22e5da9008fd..b0954c873e2f 100644 --- a/tools/testing/selftests/tc-testing/README +++ b/tools/testing/selftests/tc-testing/README @@ -128,7 +128,9 @@ optional arguments: -v, --verbose Show the commands that are being run -N, --notap Suppress tap results for command under test -d DEVICE, --device DEVICE - Execute the test case in flower category + Execute test cases that use a physical device, where + DEVICE is its name. (If not defined, tests that require + a physical device will be skipped) -P, --pause Pause execution just before post-suite stage selection: diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fifo.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fifo.json new file mode 100644 index 000000000000..9de61fa10878 --- /dev/null +++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fifo.json @@ -0,0 +1,304 @@ +[ + { + "id": "a519", + "name": "Add bfifo qdisc with system default parameters on egress", + "__comment": "When omitted, queue size in bfifo is calculated as: txqueuelen * (MTU + LinkLayerHdrSize), where LinkLayerHdrSize=14 for Ethernet", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link add dev $DEV1 type dummy || /bin/true" + ], + "cmdUnderTest": "$TC qdisc add dev $DEV1 handle 1: root bfifo", + "expExitCode": "0", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc bfifo 1: root.*limit [0-9]+b", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 handle 1: root bfifo", + "$IP link del dev $DEV1 type dummy" + ] + }, + { + "id": "585c", + "name": "Add pfifo qdisc with system default parameters on egress", + "__comment": "When omitted, queue size in pfifo is defaulted to the interface's txqueuelen value.", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link add dev $DEV1 type dummy || /bin/true" + ], + "cmdUnderTest": "$TC qdisc add dev $DEV1 handle 1: root pfifo", + "expExitCode": "0", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc pfifo 1: root.*limit [0-9]+p", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 handle 1: root pfifo", + "$IP link del dev $DEV1 type dummy" + ] + }, + { + "id": "a86e", + "name": "Add bfifo qdisc with system default parameters on egress with handle of maximum value", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link add dev $DEV1 type dummy || /bin/true" + ], + "cmdUnderTest": "$TC qdisc add dev $DEV1 root handle ffff: bfifo", + "expExitCode": "0", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc bfifo ffff: root.*limit [0-9]+b", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 handle ffff: root bfifo", + "$IP link del dev $DEV1 type dummy" + ] + }, + { + "id": "9ac8", + "name": "Add bfifo qdisc on egress with queue size of 3000 bytes", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link add dev $DEV1 type dummy || /bin/true" + ], + "cmdUnderTest": "$TC qdisc add dev $DEV1 handle 1: root bfifo limit 3000b", + "expExitCode": "0", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc bfifo 1: root.*limit 3000b", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 handle 1: root bfifo", + "$IP link del 
dev $DEV1 type dummy" + ] + }, + { + "id": "f4e6", + "name": "Add pfifo qdisc on egress with queue size of 3000 packets", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link add dev $DEV1 txqueuelen 3000 type dummy || /bin/true" + ], + "cmdUnderTest": "$TC qdisc add dev $DEV1 handle 1: root pfifo limit 3000", + "expExitCode": "0", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc pfifo 1: root.*limit 3000p", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 handle 1: root pfifo", + "$IP link del dev $DEV1 type dummy" + ] + }, + { + "id": "b1b1", + "name": "Add bfifo qdisc with system default parameters on egress with invalid handle exceeding maximum value", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link add dev $DEV1 type dummy || /bin/true" + ], + "cmdUnderTest": "$TC qdisc add dev $DEV1 root handle 10000: bfifo", + "expExitCode": "255", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc bfifo 10000: root.*limit [0-9]+b", + "matchCount": "0", + "teardown": [ + "$IP link del dev $DEV1 type dummy" + ] + }, + { + "id": "8d5e", + "name": "Add bfifo qdisc on egress with unsupported argument", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link add dev $DEV1 type dummy || /bin/true" + ], + "cmdUnderTest": "$TC qdisc add dev $DEV1 handle 1: root bfifo foorbar", + "expExitCode": "1", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc bfifo 1: root", + "matchCount": "0", + "teardown": [ + "$IP link del dev $DEV1 type dummy" + ] + }, + { + "id": "7787", + "name": "Add pfifo qdisc on egress with unsupported argument", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link add dev $DEV1 type dummy || /bin/true" + ], + "cmdUnderTest": "$TC qdisc add dev $DEV1 handle 1: root pfifo foorbar", + "expExitCode": "1", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc pfifo 1: root", + "matchCount": "0", + "teardown": [ + "$IP link del dev $DEV1 type dummy" + ] + }, + { + "id": "c4b6", + "name": "Replace bfifo qdisc on egress with new queue size", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link del dev $DEV1 type dummy || /bin/true", + "$IP link add dev $DEV1 txqueuelen 1000 type dummy", + "$TC qdisc add dev $DEV1 handle 1: root bfifo" + ], + "cmdUnderTest": "$TC qdisc replace dev $DEV1 handle 1: root bfifo limit 3000b", + "expExitCode": "0", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc bfifo 1: root.*limit 3000b", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 handle 1: root bfifo", + "$IP link del dev $DEV1 type dummy" + ] + }, + { + "id": "3df6", + "name": "Replace pfifo qdisc on egress with new queue size", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link del dev $DEV1 type dummy || /bin/true", + "$IP link add dev $DEV1 txqueuelen 1000 type dummy", + "$TC qdisc add dev $DEV1 handle 1: root pfifo" + ], + "cmdUnderTest": "$TC qdisc replace dev $DEV1 handle 1: root pfifo limit 30", + "expExitCode": "0", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc pfifo 1: root.*limit 30p", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 handle 1: root pfifo", + "$IP link del dev $DEV1 type dummy" + ] + }, + { + "id": "7a67", + "name": "Add bfifo qdisc on egress with queue size in invalid format", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link add dev $DEV1 type dummy || /bin/true" + ], + "cmdUnderTest": "$TC qdisc add dev $DEV1 handle 1: root bfifo limit 
foo-bar", + "expExitCode": "1", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc bfifo 1: root.*limit foo-bar", + "matchCount": "0", + "teardown": [ + "$IP link del dev $DEV1 type dummy" + ] + }, + { + "id": "1298", + "name": "Add duplicate bfifo qdisc on egress", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link add dev $DEV1 type dummy || /bin/true", + "$TC qdisc add dev $DEV1 handle 1: root bfifo" + ], + "cmdUnderTest": "$TC qdisc add dev $DEV1 handle 1: root bfifo", + "expExitCode": "2", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc bfifo 1: root", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 handle 1: root bfifo", + "$IP link del dev $DEV1 type dummy" + ] + }, + { + "id": "45a0", + "name": "Delete nonexistent bfifo qdisc", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link add dev $DEV1 type dummy || /bin/true" + ], + "cmdUnderTest": "$TC qdisc del dev $DEV1 root handle 1: bfifo", + "expExitCode": "2", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc bfifo 1: root", + "matchCount": "0", + "teardown": [ + "$IP link del dev $DEV1 type dummy" + ] + }, + { + "id": "972b", + "name": "Add prio qdisc on egress with invalid format for handles", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link add dev $DEV1 type dummy || /bin/true" + ], + "cmdUnderTest": "$TC qdisc add dev $DEV1 root handle 123^ bfifo limit 100b", + "expExitCode": "255", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc bfifo 123 root", + "matchCount": "0", + "teardown": [ + "$IP link del dev $DEV1 type dummy" + ] + }, + { + "id": "4d39", + "name": "Delete bfifo qdisc twice", + "category": [ + "qdisc", + "fifo" + ], + "setup": [ + "$IP link add dev $DEV1 type dummy || /bin/true", + "$TC qdisc add dev $DEV1 root handle 1: bfifo", + "$TC qdisc del dev $DEV1 root handle 1: bfifo" + ], + "cmdUnderTest": "$TC qdisc del dev $DEV1 handle 1: root bfifo", + "expExitCode": "2", + "verifyCmd": "$TC qdisc show dev $DEV1", + "matchPattern": "qdisc bfifo 1: root", + "matchCount": "0", + "teardown": [ + "$IP link del dev $DEV1 type dummy" + ] + } +] diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py index f04321ace9fb..e566c70e64a1 100755 --- a/tools/testing/selftests/tc-testing/tdc.py +++ b/tools/testing/selftests/tc-testing/tdc.py @@ -356,12 +356,14 @@ def test_runner(pm, args, filtered_tests): time.sleep(2) for tidx in testlist: if "flower" in tidx["category"] and args.device == None: + errmsg = "Tests using the DEV2 variable must define the name of a " + errmsg += "physical NIC with the -d option when running tdc.\n" + errmsg += "Test has been skipped." if args.verbose > 1: - print('Not executing test {} {} because DEV2 not defined'. - format(tidx['id'], tidx['name'])) + print(errmsg) res = TestResult(tidx['id'], tidx['name']) res.set_result(ResultState.skip) - res.set_errormsg('Not executed because DEV2 is not defined') + res.set_errormsg(errmsg) tsr.add_resultdata(res) continue try: @@ -499,7 +501,9 @@ def set_args(parser): choices=['none', 'xunit', 'tap'], help='Specify the format for test results. (Default: TAP)') parser.add_argument('-d', '--device', - help='Execute the test case in flower category') + help='Execute test cases that use a physical device, ' + + 'where DEVICE is its name. 
(If not defined, tests ' + + 'that require a physical device will be skipped)') parser.add_argument( '-P', '--pause', action='store_true', help='Pause execution just before post-suite stage') |
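As a closing usage note for the tdc.py changes above (the NIC name is illustrative, and -c is tdc's existing category selector, not a new flag): tests that need a physical device are now run by naming it with -d and are skipped with the clearer message otherwise, while the new fifo cases need only the dummy device their own setup steps create:

    # run tests that use a physical device, telling tdc which NIC to use
    ./tdc.py -d eth2
    # run just the new fifo qdisc tests by category; no physical NIC needed
    ./tdc.py -c fifo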