Diffstat (limited to 'drivers/net')
353 files changed, 26594 insertions, 11750 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 4706386b7d34..d6607ee9c855 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -135,6 +135,7 @@ config MACVLAN config MACVTAP tristate "MAC-VLAN based tap driver" depends on MACVLAN + depends on INET help This adds a specialized tap character device driver that is based on the MAC-VLAN network interface, called macvtap. A macvtap device @@ -144,6 +145,26 @@ config MACVTAP To compile this driver as a module, choose M here: the module will be called macvtap. + +config IPVLAN + tristate "IP-VLAN support" + depends on INET + depends on IPV6 + ---help--- + This allows one to create virtual devices off of a main interface + and packets will be delivered based on the dest L3 (IPv6/IPv4 addr) + on packets. All interfaces (including the main interface) share L2 + making it transparent to the connected L2 switch. + + Ipvlan devices can be added using the "ip" command from the + iproute2 package starting with the iproute2-X.Y.ZZ release: + + "ip link add link <main-dev> [ NAME ] type ipvlan" + + To compile this driver as a module, choose M here: the module + will be called ipvlan. + + config VXLAN tristate "Virtual eXtensible Local Area Network (VXLAN)" depends on INET @@ -200,6 +221,7 @@ config RIONET_RX_SIZE config TUN tristate "Universal TUN/TAP device driver support" + depends on INET select CRC32 ---help--- TUN/TAP provides packet reception and transmission for user space diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 61aefdd1e173..e25fdd7d905e 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -6,6 +6,7 @@ # Networking Core Drivers # obj-$(CONFIG_BONDING) += bonding/ +obj-$(CONFIG_IPVLAN) += ipvlan/ obj-$(CONFIG_DUMMY) += dummy.o obj-$(CONFIG_EQUALIZER) += eql.o obj-$(CONFIG_IFB) += ifb.o diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 2110215f3528..8baa87df1738 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -29,8 +29,8 @@ #include <linux/if_bonding.h> #include <linux/pkt_sched.h> #include <net/net_namespace.h> -#include "bonding.h" -#include "bond_3ad.h" +#include <net/bonding.h> +#include <net/bond_3ad.h> /* General definitions */ #define AD_SHORT_TIMEOUT 1 @@ -79,15 +79,21 @@ * -------------------------------------------------------------- * 16 6 1 0 */ -#define AD_DUPLEX_KEY_BITS 0x1 -#define AD_SPEED_KEY_BITS 0x3E -#define AD_USER_KEY_BITS 0xFFC0 - -#define AD_LINK_SPEED_BITMASK_1MBPS 0x1 -#define AD_LINK_SPEED_BITMASK_10MBPS 0x2 -#define AD_LINK_SPEED_BITMASK_100MBPS 0x4 -#define AD_LINK_SPEED_BITMASK_1000MBPS 0x8 -#define AD_LINK_SPEED_BITMASK_10000MBPS 0x10 +#define AD_DUPLEX_KEY_MASKS 0x1 +#define AD_SPEED_KEY_MASKS 0x3E +#define AD_USER_KEY_MASKS 0xFFC0 + +enum ad_link_speed_type { + AD_LINK_SPEED_1MBPS = 1, + AD_LINK_SPEED_10MBPS, + AD_LINK_SPEED_100MBPS, + AD_LINK_SPEED_1000MBPS, + AD_LINK_SPEED_2500MBPS, + AD_LINK_SPEED_10000MBPS, + AD_LINK_SPEED_20000MBPS, + AD_LINK_SPEED_40000MBPS, + AD_LINK_SPEED_56000MBPS +}; /* compare MAC addresses */ #define MAC_ADDRESS_EQUAL(A, B) \ @@ -240,12 +246,16 @@ static inline int __check_agg_selection_timer(struct port *port) * __get_link_speed - get a port's speed * @port: the port we're looking at * - * Return @port's speed in 802.3ad bitmask format. i.e. one of: + * Return @port's speed in 802.3ad enum format. i.e. 
one of: * 0, - * %AD_LINK_SPEED_BITMASK_10MBPS, - * %AD_LINK_SPEED_BITMASK_100MBPS, - * %AD_LINK_SPEED_BITMASK_1000MBPS, - * %AD_LINK_SPEED_BITMASK_10000MBPS + * %AD_LINK_SPEED_10MBPS, + * %AD_LINK_SPEED_100MBPS, + * %AD_LINK_SPEED_1000MBPS, + * %AD_LINK_SPEED_2500MBPS, + * %AD_LINK_SPEED_10000MBPS + * %AD_LINK_SPEED_20000MBPS + * %AD_LINK_SPEED_40000MBPS + * %AD_LINK_SPEED_56000MBPS */ static u16 __get_link_speed(struct port *port) { @@ -262,19 +272,35 @@ static u16 __get_link_speed(struct port *port) else { switch (slave->speed) { case SPEED_10: - speed = AD_LINK_SPEED_BITMASK_10MBPS; + speed = AD_LINK_SPEED_10MBPS; break; case SPEED_100: - speed = AD_LINK_SPEED_BITMASK_100MBPS; + speed = AD_LINK_SPEED_100MBPS; break; case SPEED_1000: - speed = AD_LINK_SPEED_BITMASK_1000MBPS; + speed = AD_LINK_SPEED_1000MBPS; + break; + + case SPEED_2500: + speed = AD_LINK_SPEED_2500MBPS; break; case SPEED_10000: - speed = AD_LINK_SPEED_BITMASK_10000MBPS; + speed = AD_LINK_SPEED_10000MBPS; + break; + + case SPEED_20000: + speed = AD_LINK_SPEED_20000MBPS; + break; + + case SPEED_40000: + speed = AD_LINK_SPEED_40000MBPS; + break; + + case SPEED_56000: + speed = AD_LINK_SPEED_56000MBPS; break; default: @@ -625,21 +651,33 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator) if (aggregator->num_of_ports) { switch (__get_link_speed(aggregator->lag_ports)) { - case AD_LINK_SPEED_BITMASK_1MBPS: + case AD_LINK_SPEED_1MBPS: bandwidth = aggregator->num_of_ports; break; - case AD_LINK_SPEED_BITMASK_10MBPS: + case AD_LINK_SPEED_10MBPS: bandwidth = aggregator->num_of_ports * 10; break; - case AD_LINK_SPEED_BITMASK_100MBPS: + case AD_LINK_SPEED_100MBPS: bandwidth = aggregator->num_of_ports * 100; break; - case AD_LINK_SPEED_BITMASK_1000MBPS: + case AD_LINK_SPEED_1000MBPS: bandwidth = aggregator->num_of_ports * 1000; break; - case AD_LINK_SPEED_BITMASK_10000MBPS: + case AD_LINK_SPEED_2500MBPS: + bandwidth = aggregator->num_of_ports * 2500; + break; + case AD_LINK_SPEED_10000MBPS: bandwidth = aggregator->num_of_ports * 10000; break; + case AD_LINK_SPEED_20000MBPS: + bandwidth = aggregator->num_of_ports * 20000; + break; + case AD_LINK_SPEED_40000MBPS: + bandwidth = aggregator->num_of_ports * 40000; + break; + case AD_LINK_SPEED_56000MBPS: + bandwidth = aggregator->num_of_ports * 56000; + break; default: bandwidth = 0; /* to silence the compiler */ } @@ -1011,7 +1049,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) port->sm_rx_state); switch (port->sm_rx_state) { case AD_RX_INITIALIZE: - if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) + if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)) port->sm_vars &= ~AD_PORT_LACP_ENABLED; else port->sm_vars |= AD_PORT_LACP_ENABLED; @@ -1318,7 +1356,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr) /* update the new aggregator's parameters * if port was responsed from the end-user */ - if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS) + if (port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS) /* if port is full duplex */ port->aggregator->is_individual = false; else @@ -1846,7 +1884,7 @@ void bond_3ad_bind_slave(struct slave *slave) /* if the port is not full duplex, then the port should be not * lacp Enabled */ - if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) + if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)) port->sm_vars &= ~AD_PORT_LACP_ENABLED; /* actor system is the bond's system */ port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr; @@ -2214,7 +2252,7 @@ void 
bond_3ad_adapter_speed_changed(struct slave *slave) spin_lock_bh(&slave->bond->mode_lock); - port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; + port->actor_admin_port_key &= ~AD_SPEED_KEY_MASKS; port->actor_oper_port_key = port->actor_admin_port_key |= (__get_link_speed(port) << 1); netdev_dbg(slave->bond->dev, "Port %d changed speed\n", port->actor_port_number); @@ -2247,7 +2285,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave) spin_lock_bh(&slave->bond->mode_lock); - port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; + port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS; port->actor_oper_port_key = port->actor_admin_port_key |= __get_duplex(port); netdev_dbg(slave->bond->dev, "Port %d changed duplex\n", port->actor_port_number); @@ -2289,18 +2327,18 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) */ if (link == BOND_LINK_UP) { port->is_enabled = true; - port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; + port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS; port->actor_oper_port_key = port->actor_admin_port_key |= __get_duplex(port); - port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; + port->actor_admin_port_key &= ~AD_SPEED_KEY_MASKS; port->actor_oper_port_key = port->actor_admin_port_key |= (__get_link_speed(port) << 1); } else { /* link has failed */ port->is_enabled = false; - port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; + port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS; port->actor_oper_port_key = (port->actor_admin_port_key &= - ~AD_SPEED_KEY_BITS); + ~AD_SPEED_KEY_MASKS); } netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n", port->actor_port_number, diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h deleted file mode 100644 index c5f14ac63f3e..000000000000 --- a/drivers/net/bonding/bond_3ad.h +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. 
- * - */ - -#ifndef __BOND_3AD_H__ -#define __BOND_3AD_H__ - -#include <asm/byteorder.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <linux/if_ether.h> - -/* General definitions */ -#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW) -#define AD_TIMER_INTERVAL 100 /*msec*/ - -#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02} - -#define AD_LACP_SLOW 0 -#define AD_LACP_FAST 1 - -typedef struct mac_addr { - u8 mac_addr_value[ETH_ALEN]; -} __packed mac_addr_t; - -enum { - BOND_AD_STABLE = 0, - BOND_AD_BANDWIDTH = 1, - BOND_AD_COUNT = 2, -}; - -/* rx machine states(43.4.11 in the 802.3ad standard) */ -typedef enum { - AD_RX_DUMMY, - AD_RX_INITIALIZE, /* rx Machine */ - AD_RX_PORT_DISABLED, /* rx Machine */ - AD_RX_LACP_DISABLED, /* rx Machine */ - AD_RX_EXPIRED, /* rx Machine */ - AD_RX_DEFAULTED, /* rx Machine */ - AD_RX_CURRENT /* rx Machine */ -} rx_states_t; - -/* periodic machine states(43.4.12 in the 802.3ad standard) */ -typedef enum { - AD_PERIODIC_DUMMY, - AD_NO_PERIODIC, /* periodic machine */ - AD_FAST_PERIODIC, /* periodic machine */ - AD_SLOW_PERIODIC, /* periodic machine */ - AD_PERIODIC_TX /* periodic machine */ -} periodic_states_t; - -/* mux machine states(43.4.13 in the 802.3ad standard) */ -typedef enum { - AD_MUX_DUMMY, - AD_MUX_DETACHED, /* mux machine */ - AD_MUX_WAITING, /* mux machine */ - AD_MUX_ATTACHED, /* mux machine */ - AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */ -} mux_states_t; - -/* tx machine states(43.4.15 in the 802.3ad standard) */ -typedef enum { - AD_TX_DUMMY, - AD_TRANSMIT /* tx Machine */ -} tx_states_t; - -/* rx indication types */ -typedef enum { - AD_TYPE_LACPDU = 1, /* type lacpdu */ - AD_TYPE_MARKER /* type marker */ -} pdu_type_t; - -/* rx marker indication types */ -typedef enum { - AD_MARKER_INFORMATION_SUBTYPE = 1, /* marker imformation subtype */ - AD_MARKER_RESPONSE_SUBTYPE /* marker response subtype */ -} bond_marker_subtype_t; - -/* timers types(43.4.9 in the 802.3ad standard) */ -typedef enum { - AD_CURRENT_WHILE_TIMER, - AD_ACTOR_CHURN_TIMER, - AD_PERIODIC_TIMER, - AD_PARTNER_CHURN_TIMER, - AD_WAIT_WHILE_TIMER -} ad_timers_t; - -#pragma pack(1) - -/* Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) */ -typedef struct lacpdu { - u8 subtype; /* = LACP(= 0x01) */ - u8 version_number; - u8 tlv_type_actor_info; /* = actor information(type/length/value) */ - u8 actor_information_length; /* = 20 */ - __be16 actor_system_priority; - struct mac_addr actor_system; - __be16 actor_key; - __be16 actor_port_priority; - __be16 actor_port; - u8 actor_state; - u8 reserved_3_1[3]; /* = 0 */ - u8 tlv_type_partner_info; /* = partner information */ - u8 partner_information_length; /* = 20 */ - __be16 partner_system_priority; - struct mac_addr partner_system; - __be16 partner_key; - __be16 partner_port_priority; - __be16 partner_port; - u8 partner_state; - u8 reserved_3_2[3]; /* = 0 */ - u8 tlv_type_collector_info; /* = collector information */ - u8 collector_information_length;/* = 16 */ - __be16 collector_max_delay; - u8 reserved_12[12]; - u8 tlv_type_terminator; /* = terminator */ - u8 terminator_length; /* = 0 */ - u8 reserved_50[50]; /* = 0 */ -} __packed lacpdu_t; - -typedef struct lacpdu_header { - struct ethhdr hdr; - struct lacpdu lacpdu; -} __packed lacpdu_header_t; - -/* Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) */ -typedef struct bond_marker { - u8 subtype; /* = 0x02 (marker PDU) */ - u8 version_number; /* = 0x01 */ - u8 tlv_type; 
/* = 0x01 (marker information) */ - /* = 0x02 (marker response information) */ - u8 marker_length; /* = 0x16 */ - u16 requester_port; /* The number assigned to the port by the requester */ - struct mac_addr requester_system; /* The requester's system id */ - u32 requester_transaction_id; /* The transaction id allocated by the requester, */ - u16 pad; /* = 0 */ - u8 tlv_type_terminator; /* = 0x00 */ - u8 terminator_length; /* = 0x00 */ - u8 reserved_90[90]; /* = 0 */ -} __packed bond_marker_t; - -typedef struct bond_marker_header { - struct ethhdr hdr; - struct bond_marker marker; -} __packed bond_marker_header_t; - -#pragma pack() - -struct slave; -struct bonding; -struct ad_info; -struct port; - -#ifdef __ia64__ -#pragma pack(8) -#endif - -/* aggregator structure(43.4.5 in the 802.3ad standard) */ -typedef struct aggregator { - struct mac_addr aggregator_mac_address; - u16 aggregator_identifier; - bool is_individual; - u16 actor_admin_aggregator_key; - u16 actor_oper_aggregator_key; - struct mac_addr partner_system; - u16 partner_system_priority; - u16 partner_oper_aggregator_key; - u16 receive_state; /* BOOLEAN */ - u16 transmit_state; /* BOOLEAN */ - struct port *lag_ports; - /* ****** PRIVATE PARAMETERS ****** */ - struct slave *slave; /* pointer to the bond slave that this aggregator belongs to */ - u16 is_active; /* BOOLEAN. Indicates if this aggregator is active */ - u16 num_of_ports; -} aggregator_t; - -struct port_params { - struct mac_addr system; - u16 system_priority; - u16 key; - u16 port_number; - u16 port_priority; - u16 port_state; -}; - -/* port structure(43.4.6 in the 802.3ad standard) */ -typedef struct port { - u16 actor_port_number; - u16 actor_port_priority; - struct mac_addr actor_system; /* This parameter is added here although it is not specified in the standard, just for simplification */ - u16 actor_system_priority; /* This parameter is added here although it is not specified in the standard, just for simplification */ - u16 actor_port_aggregator_identifier; - bool ntt; - u16 actor_admin_port_key; - u16 actor_oper_port_key; - u8 actor_admin_port_state; - u8 actor_oper_port_state; - - struct port_params partner_admin; - struct port_params partner_oper; - - bool is_enabled; - - /* ****** PRIVATE PARAMETERS ****** */ - u16 sm_vars; /* all state machines variables for this port */ - rx_states_t sm_rx_state; /* state machine rx state */ - u16 sm_rx_timer_counter; /* state machine rx timer counter */ - periodic_states_t sm_periodic_state; /* state machine periodic state */ - u16 sm_periodic_timer_counter; /* state machine periodic timer counter */ - mux_states_t sm_mux_state; /* state machine mux state */ - u16 sm_mux_timer_counter; /* state machine mux timer counter */ - tx_states_t sm_tx_state; /* state machine tx state */ - u16 sm_tx_timer_counter; /* state machine tx timer counter(allways on - enter to transmit state 3 time per second) */ - struct slave *slave; /* pointer to the bond slave that this port belongs to */ - struct aggregator *aggregator; /* pointer to an aggregator that this port related to */ - struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */ - u32 transaction_id; /* continuous number for identification of Marker PDU's; */ - struct lacpdu lacpdu; /* the lacpdu that will be sent for this port */ -} port_t; - -/* system structure */ -struct ad_system { - u16 sys_priority; - struct mac_addr sys_mac_addr; -}; - -#ifdef __ia64__ -#pragma pack() -#endif - -/* ========== AD Exported structures to the main 
bonding code ========== */ -#define BOND_AD_INFO(bond) ((bond)->ad_info) -#define SLAVE_AD_INFO(slave) ((slave)->ad_info) - -struct ad_bond_info { - struct ad_system system; /* 802.3ad system structure */ - u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */ - u16 aggregator_identifier; -}; - -struct ad_slave_info { - struct aggregator aggregator; /* 802.3ad aggregator structure */ - struct port port; /* 802.3ad port structure */ - u16 id; -}; - -/* ========== AD Exported functions to the main bonding code ========== */ -void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution); -void bond_3ad_bind_slave(struct slave *slave); -void bond_3ad_unbind_slave(struct slave *slave); -void bond_3ad_state_machine_handler(struct work_struct *); -void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout); -void bond_3ad_adapter_speed_changed(struct slave *slave); -void bond_3ad_adapter_duplex_changed(struct slave *slave); -void bond_3ad_handle_link_change(struct slave *slave, char link); -int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); -int __bond_3ad_get_active_agg_info(struct bonding *bond, - struct ad_info *ad_info); -int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); -int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, - struct slave *slave); -int bond_3ad_set_carrier(struct bonding *bond); -void bond_3ad_update_lacp_rate(struct bonding *bond); -#endif /* __BOND_3AD_H__ */ - diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index d2eadab787c5..bb9e9fc45e1b 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -37,8 +37,8 @@ #include <net/arp.h> #include <net/ipv6.h> #include <asm/byteorder.h> -#include "bonding.h" -#include "bond_alb.h" +#include <net/bonding.h> +#include <net/bond_alb.h> @@ -475,12 +475,8 @@ static void rlb_update_client(struct rlb_client_info *client_info) skb->dev = client_info->slave->dev; if (client_info->vlan_id) { - skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id); - if (!skb) { - netdev_err(client_info->slave->bond->dev, - "failed to insert VLAN tag\n"); - continue; - } + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + client_info->vlan_id); } arp_xmit(skb); @@ -951,13 +947,8 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], skb->priority = TC_PRIO_CONTROL; skb->dev = slave->dev; - if (vid) { - skb = vlan_put_tag(skb, vlan_proto, vid); - if (!skb) { - netdev_err(slave->bond->dev, "failed to insert VLAN tag\n"); - return; - } - } + if (vid) + __vlan_hwaccel_put_tag(skb, vlan_proto, vid); dev_queue_xmit(skb); } @@ -1326,7 +1317,7 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, } /* no suitable interface, frame not sent */ - dev_kfree_skb_any(skb); + bond_tx_drop(bond->dev, skb); out: return NETDEV_TX_OK; } diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h deleted file mode 100644 index 1ad473b4ade5..000000000000 --- a/drivers/net/bonding/bond_alb.h +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - */ - -#ifndef __BOND_ALB_H__ -#define __BOND_ALB_H__ - -#include <linux/if_ether.h> - -struct bonding; -struct slave; - -#define BOND_ALB_INFO(bond) ((bond)->alb_info) -#define SLAVE_TLB_INFO(slave) ((slave)->tlb_info) - -#define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */ -#define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing. - * Used for division - never set - * to zero !!! - */ -#define BOND_ALB_DEFAULT_LP_INTERVAL 1 -#define BOND_ALB_LP_INTERVAL(bond) (bond->params.lp_interval) /* In seconds, periodic send of - * learning packets to the switch - */ - -#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \ - * ALB_TIMER_TICKS_PER_SEC) - -#define BOND_ALB_LP_TICKS(bond) (BOND_ALB_LP_INTERVAL(bond) \ - * ALB_TIMER_TICKS_PER_SEC) - -#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table. - * Note that this value MUST NOT be smaller - * because the key hash table is BYTE wide ! - */ - - -#define TLB_NULL_INDEX 0xffffffff - -/* rlb defs */ -#define RLB_HASH_TABLE_SIZE 256 -#define RLB_NULL_INDEX 0xffffffff -#define RLB_UPDATE_DELAY (2*ALB_TIMER_TICKS_PER_SEC) /* 2 seconds */ -#define RLB_ARP_BURST_SIZE 2 -#define RLB_UPDATE_RETRY 3 /* 3-ticks - must be smaller than the rlb - * rebalance interval (5 min). - */ -/* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is - * promiscuous after failover - */ -#define RLB_PROMISC_TIMEOUT (10*ALB_TIMER_TICKS_PER_SEC) - - -struct tlb_client_info { - struct slave *tx_slave; /* A pointer to slave used for transmiting - * packets to a Client that the Hash function - * gave this entry index. - */ - u32 tx_bytes; /* Each Client accumulates the BytesTx that - * were transmitted to it, and after each - * CallBack the LoadHistory is divided - * by the balance interval - */ - u32 load_history; /* This field contains the amount of Bytes - * that were transmitted to this client by - * the server on the previous balance - * interval in Bps. - */ - u32 next; /* The next Hash table entry index, assigned - * to use the same adapter for transmit. - */ - u32 prev; /* The previous Hash table entry index, - * assigned to use the same - */ -}; - -/* ------------------------------------------------------------------------- - * struct rlb_client_info contains all info related to a specific rx client - * connection. This is the Clients Hash Table entry struct. - * Note that this is not a proper hash table; if a new client's IP address - * hash collides with an existing client entry, the old entry is replaced. - * - * There is a linked list (linked by the used_next and used_prev members) - * linking all the used entries of the hash table. This allows updating - * all the clients without walking over all the unused elements of the table. - * - * There are also linked lists of entries with identical hash(ip_src). These - * allow cleaning up the table from ip_src<->mac_src associations that have - * become outdated and would cause sending out invalid ARP updates to the - * network. 
These are linked by the (src_next and src_prev members). - * ------------------------------------------------------------------------- - */ -struct rlb_client_info { - __be32 ip_src; /* the server IP address */ - __be32 ip_dst; /* the client IP address */ - u8 mac_src[ETH_ALEN]; /* the server MAC address */ - u8 mac_dst[ETH_ALEN]; /* the client MAC address */ - - /* list of used hash table entries, starting at rx_hashtbl_used_head */ - u32 used_next; - u32 used_prev; - - /* ip_src based hashing */ - u32 src_next; /* next entry with same hash(ip_src) */ - u32 src_prev; /* prev entry with same hash(ip_src) */ - u32 src_first; /* first entry with hash(ip_src) == this entry's index */ - - u8 assigned; /* checking whether this entry is assigned */ - u8 ntt; /* flag - need to transmit client info */ - struct slave *slave; /* the slave assigned to this client */ - unsigned short vlan_id; /* VLAN tag associated with IP address */ -}; - -struct tlb_slave_info { - u32 head; /* Index to the head of the bi-directional clients - * hash table entries list. The entries in the list - * are the entries that were assigned to use this - * slave for transmit. - */ - u32 load; /* Each slave sums the loadHistory of all clients - * assigned to it - */ -}; - -struct alb_bond_info { - struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ - u32 unbalanced_load; - int tx_rebalance_counter; - int lp_counter; - /* -------- rlb parameters -------- */ - int rlb_enabled; - struct rlb_client_info *rx_hashtbl; /* Receive hash table */ - u32 rx_hashtbl_used_head; - u8 rx_ntt; /* flag - need to transmit - * to all rx clients - */ - struct slave *rx_slave;/* last slave to xmit from */ - u8 primary_is_promisc; /* boolean */ - u32 rlb_promisc_timeout_counter;/* counts primary - * promiscuity time - */ - u32 rlb_update_delay_counter; - u32 rlb_update_retry_counter;/* counter of retries - * of client update - */ - u8 rlb_rebalance; /* flag - indicates that the - * rx traffic should be - * rebalanced - */ -}; - -int bond_alb_initialize(struct bonding *bond, int rlb_enabled); -void bond_alb_deinitialize(struct bonding *bond); -int bond_alb_init_slave(struct bonding *bond, struct slave *slave); -void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave); -void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link); -void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave); -int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev); -int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev); -void bond_alb_monitor(struct work_struct *); -int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr); -void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id); -#endif /* __BOND_ALB_H__ */ - diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c index 8f99082f90eb..e52e25a977fa 100644 --- a/drivers/net/bonding/bond_debugfs.c +++ b/drivers/net/bonding/bond_debugfs.c @@ -3,8 +3,8 @@ #include <linux/device.h> #include <linux/netdevice.h> -#include "bonding.h" -#include "bond_alb.h" +#include <net/bonding.h> +#include <net/bond_alb.h> #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index c9ac06cfe6b7..184c434ae305 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -77,9 +77,9 @@ #include <net/pkt_sched.h> #include <linux/rculist.h> #include <net/flow_keys.h> -#include "bonding.h" 
-#include "bond_3ad.h" -#include "bond_alb.h" +#include <net/bonding.h> +#include <net/bond_3ad.h> +#include <net/bond_alb.h> /*---------------------------- Module parameters ----------------------------*/ @@ -1526,6 +1526,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) } #endif + if (!(bond_dev->features & NETIF_F_LRO)) + dev_disable_lro(slave_dev); + res = netdev_rx_handler_register(slave_dev, bond_handle_frame, new_slave); if (res) { @@ -2143,8 +2146,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n", ntohs(outer_tag->vlan_proto), tags->vlan_id); - skb = __vlan_put_tag(skb, tags->vlan_proto, - tags->vlan_id); + skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto, + tags->vlan_id); if (!skb) { net_err_ratelimited("failed to insert inner VLAN tag\n"); return; @@ -2156,12 +2159,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, if (outer_tag->vlan_id) { netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n", ntohs(outer_tag->vlan_proto), outer_tag->vlan_id); - skb = vlan_put_tag(skb, outer_tag->vlan_proto, - outer_tag->vlan_id); - if (!skb) { - net_err_ratelimited("failed to insert outer VLAN tag\n"); - return; - } + __vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto, + outer_tag->vlan_id); } xmit: @@ -2471,7 +2470,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) bond_slave_state_change(bond); if (BOND_MODE(bond) == BOND_MODE_XOR) bond_update_slave_arr(bond, NULL); - } else if (do_failover) { + } + if (do_failover) { block_netpoll_tx(); bond_select_active_slave(bond); unblock_netpoll_tx(); @@ -3522,7 +3522,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl } } /* no slave that can tx has been found */ - dev_kfree_skb_any(skb); + bond_tx_drop(bond->dev, skb); } /** @@ -3584,7 +3584,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev slave_id = bond_rr_gen_slave_id(bond); bond_xmit_slave_id(bond, skb, slave_id % slave_cnt); } else { - dev_kfree_skb_any(skb); + bond_tx_drop(bond_dev, skb); } } @@ -3603,7 +3603,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d if (slave) bond_dev_queue_xmit(bond, skb, slave->dev); else - dev_kfree_skb_any(skb); + bond_tx_drop(bond_dev, skb); return NETDEV_TX_OK; } @@ -3747,8 +3747,7 @@ int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev) slave = slaves->arr[bond_xmit_hash(bond, skb) % count]; bond_dev_queue_xmit(bond, skb, slave->dev); } else { - dev_kfree_skb_any(skb); - atomic_long_inc(&dev->tx_dropped); + bond_tx_drop(dev, skb); } return NETDEV_TX_OK; @@ -3778,7 +3777,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) bond_dev_queue_xmit(bond, skb, slave->dev); else - dev_kfree_skb_any(skb); + bond_tx_drop(bond_dev, skb); return NETDEV_TX_OK; } @@ -3858,7 +3857,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev /* Should never happen, mode already checked */ netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond)); WARN_ON_ONCE(1); - dev_kfree_skb_any(skb); + bond_tx_drop(dev, skb); return NETDEV_TX_OK; } } @@ -3878,7 +3877,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) if (bond_has_slaves(bond)) ret = __bond_start_xmit(skb, dev); else - dev_kfree_skb_any(skb); + bond_tx_drop(dev, skb); rcu_read_unlock(); return ret; 
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index c13d83e15ace..3e6eebd5be50 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -17,7 +17,7 @@ #include <linux/if_ether.h> #include <net/netlink.h> #include <net/rtnetlink.h> -#include "bonding.h" +#include <net/bonding.h> static size_t bond_get_slave_size(const struct net_device *bond_dev, const struct net_device *slave_dev) diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index b62697f4a3de..1a61cc9b3402 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -16,7 +16,7 @@ #include <linux/rcupdate.h> #include <linux/ctype.h> #include <linux/inet.h> -#include "bonding.h" +#include <net/bonding.h> static int bond_option_active_slave_set(struct bonding *bond, const struct bond_opt_value *newval); diff --git a/drivers/net/bonding/bond_options.h b/drivers/net/bonding/bond_options.h deleted file mode 100644 index 17ded5b29176..000000000000 --- a/drivers/net/bonding/bond_options.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * drivers/net/bond/bond_options.h - bonding options - * Copyright (c) 2013 Nikolay Aleksandrov <nikolay@redhat.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef _BOND_OPTIONS_H -#define _BOND_OPTIONS_H - -#define BOND_OPT_MAX_NAMELEN 32 -#define BOND_OPT_VALID(opt) ((opt) < BOND_OPT_LAST) -#define BOND_MODE_ALL_EX(x) (~(x)) - -/* Option flags: - * BOND_OPTFLAG_NOSLAVES - check if the bond device is empty before setting - * BOND_OPTFLAG_IFDOWN - check if the bond device is down before setting - * BOND_OPTFLAG_RAWVAL - the option parses the value itself - */ -enum { - BOND_OPTFLAG_NOSLAVES = BIT(0), - BOND_OPTFLAG_IFDOWN = BIT(1), - BOND_OPTFLAG_RAWVAL = BIT(2) -}; - -/* Value type flags: - * BOND_VALFLAG_DEFAULT - mark the value as default - * BOND_VALFLAG_(MIN|MAX) - mark the value as min/max - */ -enum { - BOND_VALFLAG_DEFAULT = BIT(0), - BOND_VALFLAG_MIN = BIT(1), - BOND_VALFLAG_MAX = BIT(2) -}; - -/* Option IDs, their bit positions correspond to their IDs */ -enum { - BOND_OPT_MODE, - BOND_OPT_PACKETS_PER_SLAVE, - BOND_OPT_XMIT_HASH, - BOND_OPT_ARP_VALIDATE, - BOND_OPT_ARP_ALL_TARGETS, - BOND_OPT_FAIL_OVER_MAC, - BOND_OPT_ARP_INTERVAL, - BOND_OPT_ARP_TARGETS, - BOND_OPT_DOWNDELAY, - BOND_OPT_UPDELAY, - BOND_OPT_LACP_RATE, - BOND_OPT_MINLINKS, - BOND_OPT_AD_SELECT, - BOND_OPT_NUM_PEER_NOTIF, - BOND_OPT_MIIMON, - BOND_OPT_PRIMARY, - BOND_OPT_PRIMARY_RESELECT, - BOND_OPT_USE_CARRIER, - BOND_OPT_ACTIVE_SLAVE, - BOND_OPT_QUEUE_ID, - BOND_OPT_ALL_SLAVES_ACTIVE, - BOND_OPT_RESEND_IGMP, - BOND_OPT_LP_INTERVAL, - BOND_OPT_SLAVES, - BOND_OPT_TLB_DYNAMIC_LB, - BOND_OPT_LAST -}; - -/* This structure is used for storing option values and for passing option - * values when changing an option. The logic when used as an arg is as follows: - * - if string != NULL -> parse it, if the opt is RAW type then return it, else - * return the parse result - * - if string == NULL -> parse value - */ -struct bond_opt_value { - char *string; - u64 value; - u32 flags; -}; - -struct bonding; - -struct bond_option { - int id; - const char *name; - const char *desc; - u32 flags; - - /* unsuppmodes is used to denote modes in which the option isn't - * supported. 
- */ - unsigned long unsuppmodes; - /* supported values which this option can have, can be a subset of - * BOND_OPTVAL_RANGE's value range - */ - const struct bond_opt_value *values; - - int (*set)(struct bonding *bond, const struct bond_opt_value *val); -}; - -int __bond_opt_set(struct bonding *bond, unsigned int option, - struct bond_opt_value *val); -int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf); - -const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt, - struct bond_opt_value *val); -const struct bond_option *bond_opt_get(unsigned int option); -const struct bond_option *bond_opt_get_by_name(const char *name); -const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val); - -/* This helper is used to initialize a bond_opt_value structure for parameter - * passing. There should be either a valid string or value, but not both. - * When value is ULLONG_MAX then string will be used. - */ -static inline void __bond_opt_init(struct bond_opt_value *optval, - char *string, u64 value) -{ - memset(optval, 0, sizeof(*optval)); - optval->value = ULLONG_MAX; - if (value == ULLONG_MAX) - optval->string = string; - else - optval->value = value; -} -#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value) -#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX) - -void bond_option_arp_ip_targets_clear(struct bonding *bond); - -#endif /* _BOND_OPTIONS_H */ diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index a3948f8d1e53..976f5ad2a0f2 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -2,7 +2,7 @@ #include <linux/export.h> #include <net/net_namespace.h> #include <net/netns/generic.h> -#include "bonding.h" +#include <net/bonding.h> static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 8ffbafd500fd..7e9e151d4d61 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -40,7 +40,7 @@ #include <net/netns/generic.h> #include <linux/nsproxy.h> -#include "bonding.h" +#include <net/bonding.h> #define to_dev(obj) container_of(obj, struct device, kobj) #define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd)))) diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index b01b0ce4d1be..23618a831612 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c @@ -12,7 +12,7 @@ #include <linux/kernel.h> #include <linux/netdevice.h> -#include "bonding.h" +#include <net/bonding.h> struct slave_attribute { struct attribute attr; diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h deleted file mode 100644 index 10920f0686e2..000000000000 --- a/drivers/net/bonding/bonding.h +++ /dev/null @@ -1,648 +0,0 @@ -/* - * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'. - * - * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes - * NCM: Network and Communications Management, Inc. - * - * BUT, I'm the one who modified it for ethernet, so: - * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov - * - * This software may be used and distributed according to the terms - * of the GNU Public License, incorporated herein by reference. 
- * - */ - -#ifndef _LINUX_BONDING_H -#define _LINUX_BONDING_H - -#include <linux/timer.h> -#include <linux/proc_fs.h> -#include <linux/if_bonding.h> -#include <linux/cpumask.h> -#include <linux/in6.h> -#include <linux/netpoll.h> -#include <linux/inetdevice.h> -#include <linux/etherdevice.h> -#include <linux/reciprocal_div.h> -#include <linux/if_link.h> - -#include "bond_3ad.h" -#include "bond_alb.h" -#include "bond_options.h" - -#define DRV_VERSION "3.7.1" -#define DRV_RELDATE "April 27, 2011" -#define DRV_NAME "bonding" -#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" - -#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" - -#define BOND_MAX_ARP_TARGETS 16 - -#define BOND_DEFAULT_MIIMON 100 - -/* - * Less bad way to call ioctl from within the kernel; this needs to be - * done some other way to get the call out of interrupt context. - * Needs "ioctl" variable to be supplied by calling context. - */ -#define IOCTL(dev, arg, cmd) ({ \ - int res = 0; \ - mm_segment_t fs = get_fs(); \ - set_fs(get_ds()); \ - res = ioctl(dev, arg, cmd); \ - set_fs(fs); \ - res; }) - -#define BOND_MODE(bond) ((bond)->params.mode) - -/* slave list primitives */ -#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower) - -#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond)) - -/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */ -#define bond_first_slave(bond) \ - (bond_has_slaves(bond) ? \ - netdev_adjacent_get_private(bond_slave_list(bond)->next) : \ - NULL) -#define bond_last_slave(bond) \ - (bond_has_slaves(bond) ? \ - netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \ - NULL) - -/* Caller must have rcu_read_lock */ -#define bond_first_slave_rcu(bond) \ - netdev_lower_get_first_private_rcu(bond->dev) - -#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond)) -#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond)) - -/** - * bond_for_each_slave - iterate over all slaves - * @bond: the bond holding this list - * @pos: current slave - * @iter: list_head * iterator - * - * Caller must hold RTNL - */ -#define bond_for_each_slave(bond, pos, iter) \ - netdev_for_each_lower_private((bond)->dev, pos, iter) - -/* Caller must have rcu_read_lock */ -#define bond_for_each_slave_rcu(bond, pos, iter) \ - netdev_for_each_lower_private_rcu((bond)->dev, pos, iter) - -#ifdef CONFIG_NET_POLL_CONTROLLER -extern atomic_t netpoll_block_tx; - -static inline void block_netpoll_tx(void) -{ - atomic_inc(&netpoll_block_tx); -} - -static inline void unblock_netpoll_tx(void) -{ - atomic_dec(&netpoll_block_tx); -} - -static inline int is_netpoll_tx_blocked(struct net_device *dev) -{ - if (unlikely(netpoll_tx_running(dev))) - return atomic_read(&netpoll_block_tx); - return 0; -} -#else -#define block_netpoll_tx() -#define unblock_netpoll_tx() -#define is_netpoll_tx_blocked(dev) (0) -#endif - -struct bond_params { - int mode; - int xmit_policy; - int miimon; - u8 num_peer_notif; - int arp_interval; - int arp_validate; - int arp_all_targets; - int use_carrier; - int fail_over_mac; - int updelay; - int downdelay; - int lacp_fast; - unsigned int min_links; - int ad_select; - char primary[IFNAMSIZ]; - int primary_reselect; - __be32 arp_targets[BOND_MAX_ARP_TARGETS]; - int tx_queues; - int all_slaves_active; - int resend_igmp; - int lp_interval; - int packets_per_slave; - int tlb_dynamic_lb; - struct reciprocal_value reciprocal_packets_per_slave; -}; - -struct bond_parm_tbl { - char *modename; - int mode; -}; - -struct slave { - 
struct net_device *dev; /* first - useful for panic debug */ - struct bonding *bond; /* our master */ - int delay; - /* all three in jiffies */ - unsigned long last_link_up; - unsigned long last_rx; - unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS]; - s8 link; /* one of BOND_LINK_XXXX */ - s8 new_link; - u8 backup:1, /* indicates backup slave. Value corresponds with - BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ - inactive:1, /* indicates inactive slave */ - should_notify:1; /* indicateds whether the state changed */ - u8 duplex; - u32 original_mtu; - u32 link_failure_count; - u32 speed; - u16 queue_id; - u8 perm_hwaddr[ETH_ALEN]; - struct ad_slave_info *ad_info; - struct tlb_slave_info tlb_info; -#ifdef CONFIG_NET_POLL_CONTROLLER - struct netpoll *np; -#endif - struct kobject kobj; - struct rtnl_link_stats64 slave_stats; -}; - -struct bond_up_slave { - unsigned int count; - struct rcu_head rcu; - struct slave *arr[0]; -}; - -/* - * Link pseudo-state only used internally by monitors - */ -#define BOND_LINK_NOCHANGE -1 - -/* - * Here are the locking policies for the two bonding locks: - * Get rcu_read_lock when reading or RTNL when writing slave list. - */ -struct bonding { - struct net_device *dev; /* first - useful for panic debug */ - struct slave __rcu *curr_active_slave; - struct slave __rcu *current_arp_slave; - struct slave __rcu *primary_slave; - struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */ - bool force_primary; - s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ - int (*recv_probe)(const struct sk_buff *, struct bonding *, - struct slave *); - /* mode_lock is used for mode-specific locking needs, currently used by: - * 3ad mode (4) - protect against running bond_3ad_unbind_slave() and - * bond_3ad_state_machine_handler() concurrently and also - * the access to the state machine shared variables. 
- * TLB mode (5) - to sync the use and modifications of its hash table - * ALB mode (6) - to sync the use and modifications of its hash table - */ - spinlock_t mode_lock; - u8 send_peer_notif; - u8 igmp_retrans; -#ifdef CONFIG_PROC_FS - struct proc_dir_entry *proc_entry; - char proc_file_name[IFNAMSIZ]; -#endif /* CONFIG_PROC_FS */ - struct list_head bond_list; - u32 rr_tx_counter; - struct ad_bond_info ad_info; - struct alb_bond_info alb_info; - struct bond_params params; - struct workqueue_struct *wq; - struct delayed_work mii_work; - struct delayed_work arp_work; - struct delayed_work alb_work; - struct delayed_work ad_work; - struct delayed_work mcast_work; - struct delayed_work slave_arr_work; -#ifdef CONFIG_DEBUG_FS - /* debugging support via debugfs */ - struct dentry *debug_dir; -#endif /* CONFIG_DEBUG_FS */ - struct rtnl_link_stats64 bond_stats; -}; - -#define bond_slave_get_rcu(dev) \ - ((struct slave *) rcu_dereference(dev->rx_handler_data)) - -#define bond_slave_get_rtnl(dev) \ - ((struct slave *) rtnl_dereference(dev->rx_handler_data)) - -struct bond_vlan_tag { - __be16 vlan_proto; - unsigned short vlan_id; -}; - -/** - * Returns NULL if the net_device does not belong to any of the bond's slaves - * - * Caller must hold bond lock for read - */ -static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, - struct net_device *slave_dev) -{ - return netdev_lower_dev_get_private(bond->dev, slave_dev); -} - -static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) -{ - return slave->bond; -} - -static inline bool bond_should_override_tx_queue(struct bonding *bond) -{ - return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || - BOND_MODE(bond) == BOND_MODE_ROUNDROBIN; -} - -static inline bool bond_is_lb(const struct bonding *bond) -{ - return BOND_MODE(bond) == BOND_MODE_TLB || - BOND_MODE(bond) == BOND_MODE_ALB; -} - -static inline bool bond_is_nondyn_tlb(const struct bonding *bond) -{ - return (BOND_MODE(bond) == BOND_MODE_TLB) && - (bond->params.tlb_dynamic_lb == 0); -} - -static inline bool bond_mode_uses_xmit_hash(const struct bonding *bond) -{ - return (BOND_MODE(bond) == BOND_MODE_8023AD || - BOND_MODE(bond) == BOND_MODE_XOR || - bond_is_nondyn_tlb(bond)); -} - -static inline bool bond_mode_uses_arp(int mode) -{ - return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB && - mode != BOND_MODE_ALB; -} - -static inline bool bond_mode_uses_primary(int mode) -{ - return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB || - mode == BOND_MODE_ALB; -} - -static inline bool bond_uses_primary(struct bonding *bond) -{ - return bond_mode_uses_primary(BOND_MODE(bond)); -} - -static inline bool bond_slave_is_up(struct slave *slave) -{ - return netif_running(slave->dev) && netif_carrier_ok(slave->dev); -} - -static inline void bond_set_active_slave(struct slave *slave) -{ - if (slave->backup) { - slave->backup = 0; - rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); - } -} - -static inline void bond_set_backup_slave(struct slave *slave) -{ - if (!slave->backup) { - slave->backup = 1; - rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); - } -} - -static inline void bond_set_slave_state(struct slave *slave, - int slave_state, bool notify) -{ - if (slave->backup == slave_state) - return; - - slave->backup = slave_state; - if (notify) { - rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); - slave->should_notify = 0; - } else { - if (slave->should_notify) - slave->should_notify = 0; - else - slave->should_notify = 1; - } -} - -static inline void 
bond_slave_state_change(struct bonding *bond) -{ - struct list_head *iter; - struct slave *tmp; - - bond_for_each_slave(bond, tmp, iter) { - if (tmp->link == BOND_LINK_UP) - bond_set_active_slave(tmp); - else if (tmp->link == BOND_LINK_DOWN) - bond_set_backup_slave(tmp); - } -} - -static inline void bond_slave_state_notify(struct bonding *bond) -{ - struct list_head *iter; - struct slave *tmp; - - bond_for_each_slave(bond, tmp, iter) { - if (tmp->should_notify) { - rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_ATOMIC); - tmp->should_notify = 0; - } - } -} - -static inline int bond_slave_state(struct slave *slave) -{ - return slave->backup; -} - -static inline bool bond_is_active_slave(struct slave *slave) -{ - return !bond_slave_state(slave); -} - -static inline bool bond_slave_can_tx(struct slave *slave) -{ - return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP && - bond_is_active_slave(slave); -} - -#define BOND_PRI_RESELECT_ALWAYS 0 -#define BOND_PRI_RESELECT_BETTER 1 -#define BOND_PRI_RESELECT_FAILURE 2 - -#define BOND_FOM_NONE 0 -#define BOND_FOM_ACTIVE 1 -#define BOND_FOM_FOLLOW 2 - -#define BOND_ARP_TARGETS_ANY 0 -#define BOND_ARP_TARGETS_ALL 1 - -#define BOND_ARP_VALIDATE_NONE 0 -#define BOND_ARP_VALIDATE_ACTIVE (1 << BOND_STATE_ACTIVE) -#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP) -#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \ - BOND_ARP_VALIDATE_BACKUP) -#define BOND_ARP_FILTER (BOND_ARP_VALIDATE_ALL + 1) -#define BOND_ARP_FILTER_ACTIVE (BOND_ARP_VALIDATE_ACTIVE | \ - BOND_ARP_FILTER) -#define BOND_ARP_FILTER_BACKUP (BOND_ARP_VALIDATE_BACKUP | \ - BOND_ARP_FILTER) - -#define BOND_SLAVE_NOTIFY_NOW true -#define BOND_SLAVE_NOTIFY_LATER false - -static inline int slave_do_arp_validate(struct bonding *bond, - struct slave *slave) -{ - return bond->params.arp_validate & (1 << bond_slave_state(slave)); -} - -static inline int slave_do_arp_validate_only(struct bonding *bond) -{ - return bond->params.arp_validate & BOND_ARP_FILTER; -} - -static inline int bond_is_ip_target_ok(__be32 addr) -{ - return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr); -} - -/* Get the oldest arp which we've received on this slave for bond's - * arp_targets. 
- */ -static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond, - struct slave *slave) -{ - int i = 1; - unsigned long ret = slave->target_last_arp_rx[0]; - - for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++) - if (time_before(slave->target_last_arp_rx[i], ret)) - ret = slave->target_last_arp_rx[i]; - - return ret; -} - -static inline unsigned long slave_last_rx(struct bonding *bond, - struct slave *slave) -{ - if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL) - return slave_oldest_target_arp_rx(bond, slave); - - return slave->last_rx; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static inline void bond_netpoll_send_skb(const struct slave *slave, - struct sk_buff *skb) -{ - struct netpoll *np = slave->np; - - if (np) - netpoll_send_skb(np, skb); -} -#else -static inline void bond_netpoll_send_skb(const struct slave *slave, - struct sk_buff *skb) -{ -} -#endif - -static inline void bond_set_slave_inactive_flags(struct slave *slave, - bool notify) -{ - if (!bond_is_lb(slave->bond)) - bond_set_slave_state(slave, BOND_STATE_BACKUP, notify); - if (!slave->bond->params.all_slaves_active) - slave->inactive = 1; -} - -static inline void bond_set_slave_active_flags(struct slave *slave, - bool notify) -{ - bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify); - slave->inactive = 0; -} - -static inline bool bond_is_slave_inactive(struct slave *slave) -{ - return slave->inactive; -} - -static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local) -{ - struct in_device *in_dev; - __be32 addr = 0; - - rcu_read_lock(); - in_dev = __in_dev_get_rcu(dev); - - if (in_dev) - addr = inet_confirm_addr(dev_net(dev), in_dev, dst, local, - RT_SCOPE_HOST); - rcu_read_unlock(); - return addr; -} - -struct bond_net { - struct net *net; /* Associated network namespace */ - struct list_head dev_list; -#ifdef CONFIG_PROC_FS - struct proc_dir_entry *proc_dir; -#endif - struct class_attribute class_attr_bonding_masters; -}; - -int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); -void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); -int bond_create(struct net *net, const char *name); -int bond_create_sysfs(struct bond_net *net); -void bond_destroy_sysfs(struct bond_net *net); -void bond_prepare_sysfs_group(struct bonding *bond); -int bond_sysfs_slave_add(struct slave *slave); -void bond_sysfs_slave_del(struct slave *slave); -int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev); -int bond_release(struct net_device *bond_dev, struct net_device *slave_dev); -u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb); -void bond_select_active_slave(struct bonding *bond); -void bond_change_active_slave(struct bonding *bond, struct slave *new_active); -void bond_create_debugfs(void); -void bond_destroy_debugfs(void); -void bond_debug_register(struct bonding *bond); -void bond_debug_unregister(struct bonding *bond); -void bond_debug_reregister(struct bonding *bond); -const char *bond_mode_name(int mode); -void bond_setup(struct net_device *bond_dev); -unsigned int bond_get_num_tx_queues(void); -int bond_netlink_init(void); -void bond_netlink_fini(void); -struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond); -const char *bond_slave_link_status(s8 link); -struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev, - struct net_device *end_dev, - int level); -int bond_update_slave_arr(struct bonding *bond, struct slave 
*skipslave); -void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay); - -#ifdef CONFIG_PROC_FS -void bond_create_proc_entry(struct bonding *bond); -void bond_remove_proc_entry(struct bonding *bond); -void bond_create_proc_dir(struct bond_net *bn); -void bond_destroy_proc_dir(struct bond_net *bn); -#else -static inline void bond_create_proc_entry(struct bonding *bond) -{ -} - -static inline void bond_remove_proc_entry(struct bonding *bond) -{ -} - -static inline void bond_create_proc_dir(struct bond_net *bn) -{ -} - -static inline void bond_destroy_proc_dir(struct bond_net *bn) -{ -} -#endif - -static inline struct slave *bond_slave_has_mac(struct bonding *bond, - const u8 *mac) -{ - struct list_head *iter; - struct slave *tmp; - - bond_for_each_slave(bond, tmp, iter) - if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) - return tmp; - - return NULL; -} - -/* Caller must hold rcu_read_lock() for read */ -static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond, - const u8 *mac) -{ - struct list_head *iter; - struct slave *tmp; - - bond_for_each_slave_rcu(bond, tmp, iter) - if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) - return tmp; - - return NULL; -} - -/* Caller must hold rcu_read_lock() for read */ -static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac) -{ - struct list_head *iter; - struct slave *tmp; - struct netdev_hw_addr *ha; - - bond_for_each_slave_rcu(bond, tmp, iter) - if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) - return true; - - if (netdev_uc_empty(bond->dev)) - return false; - - netdev_for_each_uc_addr(ha, bond->dev) - if (ether_addr_equal_64bits(mac, ha->addr)) - return true; - - return false; -} - -/* Check if the ip is present in arp ip list, or first free slot if ip == 0 - * Returns -1 if not found, index if found - */ -static inline int bond_get_targets_ip(__be32 *targets, __be32 ip) -{ - int i; - - for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) - if (targets[i] == ip) - return i; - else if (targets[i] == 0) - break; - - return -1; -} - -/* exported from bond_main.c */ -extern int bond_net_id; -extern const struct bond_parm_tbl bond_lacp_tbl[]; -extern const struct bond_parm_tbl xmit_hashtype_tbl[]; -extern const struct bond_parm_tbl arp_validate_tbl[]; -extern const struct bond_parm_tbl arp_all_targets_tbl[]; -extern const struct bond_parm_tbl fail_over_mac_tbl[]; -extern const struct bond_parm_tbl pri_reselect_tbl[]; -extern struct bond_parm_tbl ad_select_tbl[]; - -/* exported from bond_netlink.c */ -extern struct rtnl_link_ops bond_link_ops; - -#endif /* _LINUX_BONDING_H */ diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index fc9304143f44..c533c62b0f5e 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -29,4 +29,5 @@ obj-$(CONFIG_CAN_GRCAN) += grcan.o obj-$(CONFIG_CAN_RCAR) += rcar_can.o obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o -subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG +subdir-ccflags-y += -D__CHECK_ENDIAN__ +subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 8e78bb48f5a4..f94a9fa60488 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -35,6 +35,7 @@ #include <linux/list.h> #include <linux/io.h> #include <linux/pm_runtime.h> +#include <linux/pinctrl/consumer.h> #include <linux/can.h> #include <linux/can/dev.h> @@ -603,6 +604,8 @@ static int c_can_start(struct net_device *dev) priv->can.state = CAN_STATE_ERROR_ACTIVE; + /* 
activate pins */ + pinctrl_pm_select_default_state(dev->dev.parent); return 0; } @@ -611,6 +614,9 @@ static void c_can_stop(struct net_device *dev) struct c_can_priv *priv = netdev_priv(dev); c_can_irq_control(priv, false); + + /* deactivate pins */ + pinctrl_pm_select_sleep_state(dev->dev.parent); priv->can.state = CAN_STATE_STOPPED; } @@ -1244,6 +1250,13 @@ int register_c_can_dev(struct net_device *dev) struct c_can_priv *priv = netdev_priv(dev); int err; + /* Deactivate pins to prevent DRA7 DCAN IP from being + * stuck in transition when module is disabled. + * Pins are activated in c_can_start() and deactivated + * in c_can_stop() + */ + pinctrl_pm_select_sleep_state(dev->dev.parent); + c_can_pm_runtime_enable(priv); dev->flags |= IFF_ECHO; /* we support local echo */ diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index 99ad1aa576b0..8acdc7fa4792 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h @@ -169,6 +169,28 @@ enum c_can_dev_id { BOSCH_D_CAN, }; +struct raminit_bits { + u8 start; + u8 done; +}; + +struct c_can_driver_data { + enum c_can_dev_id id; + + /* RAMINIT register description. Optional. */ + const struct raminit_bits *raminit_bits; /* Array of START/DONE bit positions */ + u8 raminit_num; /* Number of CAN instances on the SoC */ + bool raminit_pulse; /* If set, sets and clears START bit (pulse) */ +}; + +/* Out of band RAMINIT register access via syscon regmap */ +struct c_can_raminit { + struct regmap *syscon; /* for raminit ctrl. reg. access */ + unsigned int reg; /* register index within syscon */ + struct raminit_bits bits; + bool needs_pulse; +}; + /* c_can private data structure */ struct c_can_priv { struct can_priv can; /* must be the first member */ @@ -186,8 +208,7 @@ struct c_can_priv { const u16 *regs; void *priv; /* for board-specific data */ enum c_can_dev_id type; - u32 __iomem *raminit_ctrlreg; - int instance; + struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */ void (*raminit) (const struct c_can_priv *priv, bool enable); u32 comm_rcv_high; u32 rxmasked; diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index fb279d6ae484..a4535d2142a7 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -32,14 +32,13 @@ #include <linux/clk.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> #include <linux/can/dev.h> #include "c_can.h" -#define CAN_RAMINIT_START_MASK(i) (0x001 << (i)) -#define CAN_RAMINIT_DONE_MASK(i) (0x100 << (i)) -#define CAN_RAMINIT_ALL_MASK(i) (0x101 << (i)) #define DCAN_RAM_INIT_BIT (1 << 3) static DEFINE_SPINLOCK(raminit_lock); /* @@ -72,39 +71,63 @@ static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv, writew(val, priv->base + 2 * priv->regs[index]); } -static void c_can_hw_raminit_wait_ti(const struct c_can_priv *priv, u32 mask, - u32 val) +static void c_can_hw_raminit_wait_syscon(const struct c_can_priv *priv, + u32 mask, u32 val) { + const struct c_can_raminit *raminit = &priv->raminit_sys; + int timeout = 0; + u32 ctrl = 0; + /* We look only at the bits of our instance. 
*/ val &= mask; - while ((readl(priv->raminit_ctrlreg) & mask) != val) + do { udelay(1); + timeout++; + + regmap_read(raminit->syscon, raminit->reg, &ctrl); + if (timeout == 1000) { + dev_err(&priv->dev->dev, "%s: time out\n", __func__); + break; + } + } while ((ctrl & mask) != val); } -static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable) +static void c_can_hw_raminit_syscon(const struct c_can_priv *priv, bool enable) { - u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance); - u32 ctrl; + const struct c_can_raminit *raminit = &priv->raminit_sys; + u32 ctrl = 0; + u32 mask; spin_lock(&raminit_lock); - ctrl = readl(priv->raminit_ctrlreg); + mask = 1 << raminit->bits.start | 1 << raminit->bits.done; + regmap_read(raminit->syscon, raminit->reg, &ctrl); + /* We clear the done and start bit first. The start bit is * looking at the 0 -> transition, but is not self clearing; * And we clear the init done bit as well. + * NOTE: DONE must be written with 1 to clear it. */ - ctrl &= ~CAN_RAMINIT_START_MASK(priv->instance); - ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); - writel(ctrl, priv->raminit_ctrlreg); - ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance); - c_can_hw_raminit_wait_ti(priv, mask, ctrl); + ctrl &= ~(1 << raminit->bits.start); + ctrl |= 1 << raminit->bits.done; + regmap_write(raminit->syscon, raminit->reg, ctrl); + + ctrl &= ~(1 << raminit->bits.done); + c_can_hw_raminit_wait_syscon(priv, mask, ctrl); if (enable) { /* Set start bit and wait for the done bit. */ - ctrl |= CAN_RAMINIT_START_MASK(priv->instance); - writel(ctrl, priv->raminit_ctrlreg); - ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); - c_can_hw_raminit_wait_ti(priv, mask, ctrl); + ctrl |= 1 << raminit->bits.start; + regmap_write(raminit->syscon, raminit->reg, ctrl); + + /* clear START bit if start pulse is needed */ + if (raminit->needs_pulse) { + ctrl &= ~(1 << raminit->bits.start); + regmap_write(raminit->syscon, raminit->reg, ctrl); + } + + ctrl |= 1 << raminit->bits.done; + c_can_hw_raminit_wait_syscon(priv, mask, ctrl); } spin_unlock(&raminit_lock); } @@ -159,26 +182,60 @@ static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable) } } +static const struct c_can_driver_data c_can_drvdata = { + .id = BOSCH_C_CAN, +}; + +static const struct c_can_driver_data d_can_drvdata = { + .id = BOSCH_D_CAN, +}; + +static const struct raminit_bits dra7_raminit_bits[] = { + [0] = { .start = 3, .done = 1, }, + [1] = { .start = 5, .done = 2, }, +}; + +static const struct c_can_driver_data dra7_dcan_drvdata = { + .id = BOSCH_D_CAN, + .raminit_num = ARRAY_SIZE(dra7_raminit_bits), + .raminit_bits = dra7_raminit_bits, + .raminit_pulse = true, +}; + +static const struct raminit_bits am3352_raminit_bits[] = { + [0] = { .start = 0, .done = 8, }, + [1] = { .start = 1, .done = 9, }, +}; + +static const struct c_can_driver_data am3352_dcan_drvdata = { + .id = BOSCH_D_CAN, + .raminit_num = ARRAY_SIZE(am3352_raminit_bits), + .raminit_bits = am3352_raminit_bits, +}; + static struct platform_device_id c_can_id_table[] = { - [BOSCH_C_CAN_PLATFORM] = { + { .name = KBUILD_MODNAME, - .driver_data = BOSCH_C_CAN, + .driver_data = (kernel_ulong_t)&c_can_drvdata, }, - [BOSCH_C_CAN] = { + { .name = "c_can", - .driver_data = BOSCH_C_CAN, + .driver_data = (kernel_ulong_t)&c_can_drvdata, }, - [BOSCH_D_CAN] = { + { .name = "d_can", - .driver_data = BOSCH_D_CAN, - }, { - } + .driver_data = (kernel_ulong_t)&d_can_drvdata, + }, + { /* sentinel */ }, }; MODULE_DEVICE_TABLE(platform, c_can_id_table); static const struct of_device_id 
c_can_of_table[] = { - { .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] }, - { .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] }, + { .compatible = "bosch,c_can", .data = &c_can_drvdata }, + { .compatible = "bosch,d_can", .data = &d_can_drvdata }, + { .compatible = "ti,dra7-d_can", .data = &dra7_dcan_drvdata }, + { .compatible = "ti,am3352-d_can", .data = &am3352_dcan_drvdata }, + { .compatible = "ti,am4372-d_can", .data = &am3352_dcan_drvdata }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, c_can_of_table); @@ -190,21 +247,20 @@ static int c_can_plat_probe(struct platform_device *pdev) struct net_device *dev; struct c_can_priv *priv; const struct of_device_id *match; - const struct platform_device_id *id; - struct resource *mem, *res; + struct resource *mem; int irq; struct clk *clk; - - if (pdev->dev.of_node) { - match = of_match_device(c_can_of_table, &pdev->dev); - if (!match) { - dev_err(&pdev->dev, "Failed to find matching dt id\n"); - ret = -EINVAL; - goto exit; - } - id = match->data; + const struct c_can_driver_data *drvdata; + struct device_node *np = pdev->dev.of_node; + + match = of_match_device(c_can_of_table, &pdev->dev); + if (match) { + drvdata = match->data; + } else if (pdev->id_entry->driver_data) { + drvdata = (struct c_can_driver_data *) + platform_get_device_id(pdev)->driver_data; } else { - id = platform_get_device_id(pdev); + return -ENODEV; } /* get the appropriate clk */ @@ -236,7 +292,7 @@ static int c_can_plat_probe(struct platform_device *pdev) } priv = netdev_priv(dev); - switch (id->driver_data) { + switch (drvdata->id) { case BOSCH_C_CAN: priv->regs = reg_map_c_can; switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) { @@ -263,27 +319,50 @@ static int c_can_plat_probe(struct platform_device *pdev) priv->read_reg32 = d_can_plat_read_reg32; priv->write_reg32 = d_can_plat_write_reg32; - if (pdev->dev.of_node) - priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can"); - else - priv->instance = pdev->id; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - /* Not all D_CAN modules have a separate register for the D_CAN - * RAM initialization. Use default RAM init bit in D_CAN module - * if not specified in DT. + /* Check if we need custom RAMINIT via syscon. Mostly for TI + * platforms. Only supported with DT boot. */ - if (!res) { + if (np && of_property_read_bool(np, "syscon-raminit")) { + u32 id; + struct c_can_raminit *raminit = &priv->raminit_sys; + + ret = -EINVAL; + raminit->syscon = syscon_regmap_lookup_by_phandle(np, + "syscon-raminit"); + if (IS_ERR(raminit->syscon)) { + /* can fail with -EPROBE_DEFER */ + ret = PTR_ERR(raminit->syscon); + free_c_can_dev(dev); + return ret; + } + + if (of_property_read_u32_index(np, "syscon-raminit", 1, + &raminit->reg)) { + dev_err(&pdev->dev, + "couldn't get the RAMINIT reg. 
offset!\n"); + goto exit_free_device; + } + + if (of_property_read_u32_index(np, "syscon-raminit", 2, + &id)) { + dev_err(&pdev->dev, + "couldn't get the CAN instance ID\n"); + goto exit_free_device; + } + + if (id >= drvdata->raminit_num) { + dev_err(&pdev->dev, + "Invalid CAN instance ID\n"); + goto exit_free_device; + } + + raminit->bits = drvdata->raminit_bits[id]; + raminit->needs_pulse = drvdata->raminit_pulse; + + priv->raminit = c_can_hw_raminit_syscon; + } else { priv->raminit = c_can_hw_raminit; - break; } - - priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); - if (!priv->raminit_ctrlreg || priv->instance < 0) - dev_info(&pdev->dev, "control memory is not used for raminit\n"); - else - priv->raminit = c_can_hw_raminit_ti; break; default: ret = -EINVAL; @@ -295,7 +374,7 @@ static int c_can_plat_probe(struct platform_device *pdev) priv->device = &pdev->dev; priv->can.clock.freq = clk_get_rate(clk); priv->priv = clk; - priv->type = id->driver_data; + priv->type = drvdata->id; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index d8379278d648..c486fe510f37 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -60,7 +60,7 @@ MODULE_DESCRIPTION(KBUILD_MODNAME "CAN netdevice driver"); * * The message objects 1..14 can be used for TX and RX while the message * objects 15 is optimized for RX. It has a shadow register for reliable - * data receiption under heavy bus load. Therefore it makes sense to use + * data reception under heavy bus load. Therefore it makes sense to use * this message object for the needed use case. The frame type (EFF/SFF) * for the message object 15 can be defined via kernel module parameter * "msgobj15_eff". 
If not equal 0, it will receive 29-bit EFF frames, diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 02492d241e4c..3ec8f6f25e5f 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -110,7 +110,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, long rate; u64 v64; - /* Use CIA recommended sample points */ + /* Use CiA recommended sample points */ if (bt->sample_point) { sampl_pt = bt->sample_point; } else { @@ -273,6 +273,84 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, return err; } +static void can_update_state_error_stats(struct net_device *dev, + enum can_state new_state) +{ + struct can_priv *priv = netdev_priv(dev); + + if (new_state <= priv->state) + return; + + switch (new_state) { + case CAN_STATE_ERROR_WARNING: + priv->can_stats.error_warning++; + break; + case CAN_STATE_ERROR_PASSIVE: + priv->can_stats.error_passive++; + break; + case CAN_STATE_BUS_OFF: + default: + break; + }; +} + +static int can_tx_state_to_frame(struct net_device *dev, enum can_state state) +{ + switch (state) { + case CAN_STATE_ERROR_ACTIVE: + return CAN_ERR_CRTL_ACTIVE; + case CAN_STATE_ERROR_WARNING: + return CAN_ERR_CRTL_TX_WARNING; + case CAN_STATE_ERROR_PASSIVE: + return CAN_ERR_CRTL_TX_PASSIVE; + default: + return 0; + } +} + +static int can_rx_state_to_frame(struct net_device *dev, enum can_state state) +{ + switch (state) { + case CAN_STATE_ERROR_ACTIVE: + return CAN_ERR_CRTL_ACTIVE; + case CAN_STATE_ERROR_WARNING: + return CAN_ERR_CRTL_RX_WARNING; + case CAN_STATE_ERROR_PASSIVE: + return CAN_ERR_CRTL_RX_PASSIVE; + default: + return 0; + } +} + +void can_change_state(struct net_device *dev, struct can_frame *cf, + enum can_state tx_state, enum can_state rx_state) +{ + struct can_priv *priv = netdev_priv(dev); + enum can_state new_state = max(tx_state, rx_state); + + if (unlikely(new_state == priv->state)) { + netdev_warn(dev, "%s: oops, state did not change", __func__); + return; + } + + netdev_dbg(dev, "New error state: %d\n", new_state); + + can_update_state_error_stats(dev, new_state); + priv->state = new_state; + + if (unlikely(new_state == CAN_STATE_BUS_OFF)) { + cf->can_id |= CAN_ERR_BUSOFF; + return; + } + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= tx_state >= rx_state ? + can_tx_state_to_frame(dev, tx_state) : 0; + cf->data[1] |= tx_state <= rx_state ? 
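/* Illustrative aside, not part of the patch: a worked example of the
 * direction attribution done by these two ternaries. With
 * tx_state = CAN_STATE_ERROR_PASSIVE and rx_state = CAN_STATE_ERROR_ACTIVE,
 * only the first ternary fires and data[1] gets CAN_ERR_CRTL_TX_PASSIVE;
 * when both states are equal, both ternaries fire and the TX and RX
 * flags are set together. Converted drivers derive the two states from
 * the raw error counters, e.g. tx_state = txerr >= rxerr ? state : 0,
 * as the sja1000 hunk further down does.
 */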
+ can_rx_state_to_frame(dev, rx_state) : 0; +} +EXPORT_SYMBOL_GPL(can_change_state); + /* * Local echo of CAN messages * @@ -382,7 +460,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx) BUG_ON(idx >= priv->echo_skb_max); if (priv->echo_skb[idx]) { - kfree_skb(priv->echo_skb[idx]); + dev_kfree_skb_any(priv->echo_skb[idx]); priv->echo_skb[idx] = NULL; } } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 60f86bd0434a..dde05486bc99 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -577,98 +577,30 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) return 1; } -static void do_state(struct net_device *dev, - struct can_frame *cf, enum can_state new_state) -{ - struct flexcan_priv *priv = netdev_priv(dev); - struct can_berr_counter bec; - - __flexcan_get_berr_counter(dev, &bec); - - switch (priv->can.state) { - case CAN_STATE_ERROR_ACTIVE: - /* - * from: ERROR_ACTIVE - * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF - * => : there was a warning int - */ - if (new_state >= CAN_STATE_ERROR_WARNING && - new_state <= CAN_STATE_BUS_OFF) { - netdev_dbg(dev, "Error Warning IRQ\n"); - priv->can.can_stats.error_warning++; - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (bec.txerr > bec.rxerr) ? - CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - } - case CAN_STATE_ERROR_WARNING: /* fallthrough */ - /* - * from: ERROR_ACTIVE, ERROR_WARNING - * to : ERROR_PASSIVE, BUS_OFF - * => : error passive int - */ - if (new_state >= CAN_STATE_ERROR_PASSIVE && - new_state <= CAN_STATE_BUS_OFF) { - netdev_dbg(dev, "Error Passive IRQ\n"); - priv->can.can_stats.error_passive++; - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (bec.txerr > bec.rxerr) ? - CAN_ERR_CRTL_TX_PASSIVE : - CAN_ERR_CRTL_RX_PASSIVE; - } - break; - case CAN_STATE_BUS_OFF: - netdev_err(dev, "BUG! " - "hardware recovered automatically from BUS_OFF\n"); - break; - default: - break; - } - - /* process state changes depending on the new state */ - switch (new_state) { - case CAN_STATE_ERROR_WARNING: - netdev_dbg(dev, "Error Warning\n"); - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (bec.txerr > bec.rxerr) ? - CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - break; - case CAN_STATE_ERROR_ACTIVE: - netdev_dbg(dev, "Error Active\n"); - cf->can_id |= CAN_ERR_PROT; - cf->data[2] = CAN_ERR_PROT_ACTIVE; - break; - case CAN_STATE_BUS_OFF: - cf->can_id |= CAN_ERR_BUSOFF; - can_bus_off(dev); - break; - default: - break; - } -} - static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); struct sk_buff *skb; struct can_frame *cf; - enum can_state new_state; + enum can_state new_state = 0, rx_state = 0, tx_state = 0; int flt; + struct can_berr_counter bec; flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { - if (likely(!(reg_esr & (FLEXCAN_ESR_TX_WRN | - FLEXCAN_ESR_RX_WRN)))) - new_state = CAN_STATE_ERROR_ACTIVE; - else - new_state = CAN_STATE_ERROR_WARNING; - } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) + tx_state = unlikely(reg_esr & FLEXCAN_ESR_TX_WRN) ? + CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; + rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ? + CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; + new_state = max(tx_state, rx_state); + } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) { + __flexcan_get_berr_counter(dev, &bec); new_state = CAN_STATE_ERROR_PASSIVE; - else + rx_state = bec.rxerr >= bec.txerr ? 
new_state : 0; + tx_state = bec.rxerr <= bec.txerr ? new_state : 0; + } else { new_state = CAN_STATE_BUS_OFF; + } /* state hasn't changed */ if (likely(new_state == priv->can.state)) @@ -678,8 +610,11 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) if (unlikely(!skb)) return 0; - do_state(dev, cf, new_state); - priv->can.state = new_state; + can_change_state(dev, cf, tx_state, rx_state); + + if (unlikely(new_state == CAN_STATE_BUS_OFF)) + can_bus_off(dev); + netif_receive_skb(skb); dev->stats.rx_packets++; diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig index fca5482c09ac..04f20dd39007 100644 --- a/drivers/net/can/m_can/Kconfig +++ b/drivers/net/can/m_can/Kconfig @@ -1,4 +1,5 @@ config CAN_M_CAN + depends on HAS_IOMEM tristate "Bosch M_CAN devices" ---help--- Say Y here if you want to support for Bosch M_CAN controller. diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 10d571eaed85..d7bc462aafdc 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -105,14 +105,36 @@ enum m_can_mram_cfg { MRAM_CFG_NUM, }; +/* Fast Bit Timing & Prescaler Register (FBTP) */ +#define FBTR_FBRP_MASK 0x1f +#define FBTR_FBRP_SHIFT 16 +#define FBTR_FTSEG1_SHIFT 8 +#define FBTR_FTSEG1_MASK (0xf << FBTR_FTSEG1_SHIFT) +#define FBTR_FTSEG2_SHIFT 4 +#define FBTR_FTSEG2_MASK (0x7 << FBTR_FTSEG2_SHIFT) +#define FBTR_FSJW_SHIFT 0 +#define FBTR_FSJW_MASK 0x3 + /* Test Register (TEST) */ #define TEST_LBCK BIT(4) /* CC Control Register(CCCR) */ -#define CCCR_TEST BIT(7) -#define CCCR_MON BIT(5) -#define CCCR_CCE BIT(1) -#define CCCR_INIT BIT(0) +#define CCCR_TEST BIT(7) +#define CCCR_CMR_MASK 0x3 +#define CCCR_CMR_SHIFT 10 +#define CCCR_CMR_CANFD 0x1 +#define CCCR_CMR_CANFD_BRS 0x2 +#define CCCR_CMR_CAN 0x3 +#define CCCR_CME_MASK 0x3 +#define CCCR_CME_SHIFT 8 +#define CCCR_CME_CAN 0 +#define CCCR_CME_CANFD 0x1 +#define CCCR_CME_CANFD_BRS 0x2 +#define CCCR_TEST BIT(7) +#define CCCR_MON BIT(5) +#define CCCR_CCE BIT(1) +#define CCCR_INIT BIT(0) +#define CCCR_CANFD 0x10 /* Bit Timing & Prescaler Register (BTP) */ #define BTR_BRP_MASK 0x3ff @@ -204,6 +226,7 @@ enum m_can_mram_cfg { /* Rx Buffer / FIFO Element Size Configuration (RXESC) */ #define M_CAN_RXESC_8BYTES 0x0 +#define M_CAN_RXESC_64BYTES 0x777 /* Tx Buffer Configuration(TXBC) */ #define TXBC_NDTB_OFF 16 @@ -211,6 +234,7 @@ enum m_can_mram_cfg { /* Tx Buffer Element Size Configuration(TXESC) */ #define TXESC_TBDS_8BYTES 0x0 +#define TXESC_TBDS_64BYTES 0x7 /* Tx Event FIFO Con.guration (TXEFC) */ #define TXEFC_EFS_OFF 16 @@ -219,11 +243,11 @@ enum m_can_mram_cfg { /* Message RAM Configuration (in bytes) */ #define SIDF_ELEMENT_SIZE 4 #define XIDF_ELEMENT_SIZE 8 -#define RXF0_ELEMENT_SIZE 16 -#define RXF1_ELEMENT_SIZE 16 +#define RXF0_ELEMENT_SIZE 72 +#define RXF1_ELEMENT_SIZE 72 #define RXB_ELEMENT_SIZE 16 #define TXE_ELEMENT_SIZE 8 -#define TXB_ELEMENT_SIZE 16 +#define TXB_ELEMENT_SIZE 72 /* Message RAM Elements */ #define M_CAN_FIFO_ID 0x0 @@ -231,11 +255,17 @@ enum m_can_mram_cfg { #define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2)) /* Rx Buffer Element */ +/* R0 */ #define RX_BUF_ESI BIT(31) #define RX_BUF_XTD BIT(30) #define RX_BUF_RTR BIT(29) +/* R1 */ +#define RX_BUF_ANMF BIT(31) +#define RX_BUF_EDL BIT(21) +#define RX_BUF_BRS BIT(20) /* Tx Buffer Element */ +/* R0 */ #define TX_BUF_XTD BIT(30) #define TX_BUF_RTR BIT(29) @@ -296,6 +326,7 @@ static inline void m_can_config_endisable(const struct m_can_priv *priv, if (enable) { /* enable m_can configuration */ 
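/* Aside (editor's reading of the sequence, not from the patch): the
 * enable path below is a strict two-step write. CCCR.INIT is set first,
 * and the udelay(5) this patch adds gives it time to latch, because the
 * M_CAN core only accepts writes to CCCR.CCE while CCCR.INIT already
 * reads '1'; only then is CCCR.CCE set in a second write.
 */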
m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT); + udelay(5); /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */ m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); } else { @@ -326,41 +357,67 @@ static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv) m_can_write(priv, M_CAN_ILE, 0x0); } -static void m_can_read_fifo(const struct net_device *dev, struct can_frame *cf, - u32 rxfs) +static void m_can_read_fifo(struct net_device *dev, u32 rxfs) { + struct net_device_stats *stats = &dev->stats; struct m_can_priv *priv = netdev_priv(dev); - u32 id, fgi; + struct canfd_frame *cf; + struct sk_buff *skb; + u32 id, fgi, dlc; + int i; /* calculate the fifo get index for where to read data */ fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF; + dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); + if (dlc & RX_BUF_EDL) + skb = alloc_canfd_skb(dev, &cf); + else + skb = alloc_can_skb(dev, (struct can_frame **)&cf); + if (!skb) { + stats->rx_dropped++; + return; + } + + if (dlc & RX_BUF_EDL) + cf->len = can_dlc2len((dlc >> 16) & 0x0F); + else + cf->len = get_can_dlc((dlc >> 16) & 0x0F); + id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID); if (id & RX_BUF_XTD) cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; else cf->can_id = (id >> 18) & CAN_SFF_MASK; - if (id & RX_BUF_RTR) { + if (id & RX_BUF_ESI) { + cf->flags |= CANFD_ESI; + netdev_dbg(dev, "ESI Error\n"); + } + + if (!(dlc & RX_BUF_EDL) && (id & RX_BUF_RTR)) { cf->can_id |= CAN_RTR_FLAG; } else { - id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); - cf->can_dlc = get_can_dlc((id >> 16) & 0x0F); - *(u32 *)(cf->data + 0) = m_can_fifo_read(priv, fgi, - M_CAN_FIFO_DATA(0)); - *(u32 *)(cf->data + 4) = m_can_fifo_read(priv, fgi, - M_CAN_FIFO_DATA(1)); + if (dlc & RX_BUF_BRS) + cf->flags |= CANFD_BRS; + + for (i = 0; i < cf->len; i += 4) + *(u32 *)(cf->data + i) = + m_can_fifo_read(priv, fgi, + M_CAN_FIFO_DATA(i / 4)); } /* acknowledge rx fifo 0 */ m_can_write(priv, M_CAN_RXF0A, fgi); + + stats->rx_packets++; + stats->rx_bytes += cf->len; + + netif_receive_skb(skb); } static int m_can_do_rx_poll(struct net_device *dev, int quota) { struct m_can_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - struct sk_buff *skb; - struct can_frame *frame; u32 pkts = 0; u32 rxfs; @@ -374,18 +431,7 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) if (rxfs & RXFS_RFL) netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); - skb = alloc_can_skb(dev, &frame); - if (!skb) { - stats->rx_dropped++; - return pkts; - } - - m_can_read_fifo(dev, frame, rxfs); - - stats->rx_packets++; - stats->rx_bytes += frame->can_dlc; - - netif_receive_skb(skb); + m_can_read_fifo(dev, rxfs); quota--; pkts++; @@ -481,11 +527,23 @@ static int m_can_handle_lec_err(struct net_device *dev, return 1; } +static int __m_can_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + struct m_can_priv *priv = netdev_priv(dev); + unsigned int ecr; + + ecr = m_can_read(priv, M_CAN_ECR); + bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; + bec->txerr = ecr & ECR_TEC_MASK; + + return 0; +} + static int m_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct m_can_priv *priv = netdev_priv(dev); - unsigned int ecr; int err; err = clk_prepare_enable(priv->hclk); @@ -498,9 +556,7 @@ static int m_can_get_berr_counter(const struct net_device *dev, return err; } - ecr = m_can_read(priv, M_CAN_ECR); - bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; - bec->txerr = ecr & ECR_TEC_MASK; + 
__m_can_get_berr_counter(dev, bec); clk_disable_unprepare(priv->cclk); clk_disable_unprepare(priv->hclk); @@ -544,7 +600,7 @@ static int m_can_handle_state_change(struct net_device *dev, if (unlikely(!skb)) return 0; - m_can_get_berr_counter(dev, &bec); + __m_can_get_berr_counter(dev, &bec); switch (new_state) { case CAN_STATE_ERROR_ACTIVE: @@ -596,14 +652,14 @@ static int m_can_handle_state_errors(struct net_device *dev, u32 psr) if ((psr & PSR_EP) && (priv->can.state != CAN_STATE_ERROR_PASSIVE)) { - netdev_dbg(dev, "entered error warning state\n"); + netdev_dbg(dev, "entered error passive state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_ERROR_PASSIVE); } if ((psr & PSR_BO) && (priv->can.state != CAN_STATE_BUS_OFF)) { - netdev_dbg(dev, "entered error warning state\n"); + netdev_dbg(dev, "entered error bus off state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_BUS_OFF); } @@ -615,7 +671,7 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) { if (irqstatus & IR_WDI) netdev_err(dev, "Message RAM Watchdog event due to missing READY\n"); - if (irqstatus & IR_BEU) + if (irqstatus & IR_ELO) netdev_err(dev, "Error Logging Overflow\n"); if (irqstatus & IR_BEU) netdev_err(dev, "Bit Error Uncorrected\n"); @@ -733,10 +789,23 @@ static const struct can_bittiming_const m_can_bittiming_const = { .brp_inc = 1, }; +static const struct can_bittiming_const m_can_data_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + static int m_can_set_bittiming(struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); const struct can_bittiming *bt = &priv->can.bittiming; + const struct can_bittiming *dbt = &priv->can.data_bittiming; u16 brp, sjw, tseg1, tseg2; u32 reg_btp; @@ -747,7 +816,17 @@ static int m_can_set_bittiming(struct net_device *dev) reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT); m_can_write(priv, M_CAN_BTP, reg_btp); - netdev_dbg(dev, "setting BTP 0x%x\n", reg_btp); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + brp = dbt->brp - 1; + sjw = dbt->sjw - 1; + tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; + tseg2 = dbt->phase_seg2 - 1; + reg_btp = (brp << FBTR_FBRP_SHIFT) | (sjw << FBTR_FSJW_SHIFT) | + (tseg1 << FBTR_FTSEG1_SHIFT) | + (tseg2 << FBTR_FTSEG2_SHIFT); + m_can_write(priv, M_CAN_FBTP, reg_btp); + } return 0; } @@ -767,8 +846,8 @@ static void m_can_chip_config(struct net_device *dev) m_can_config_endisable(priv, true); - /* RX Buffer/FIFO Element Size 8 bytes data field */ - m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_8BYTES); + /* RX Buffer/FIFO Element Size 64 bytes data field */ + m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES); /* Accept Non-matching Frames Into FIFO 0 */ m_can_write(priv, M_CAN_GFC, 0x0); @@ -777,8 +856,8 @@ static void m_can_chip_config(struct net_device *dev) m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) | priv->mcfg[MRAM_TXB].off); - /* only support 8 bytes firstly */ - m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_8BYTES); + /* support 64 bytes payload */ + m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES); m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) | priv->mcfg[MRAM_TXE].off); @@ -793,7 +872,8 @@ static void m_can_chip_config(struct net_device *dev) RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off); cccr = 
m_can_read(priv, M_CAN_CCCR); - cccr &= ~(CCCR_TEST | CCCR_MON); + cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | + (CCCR_CME_MASK << CCCR_CME_SHIFT)); test = m_can_read(priv, M_CAN_TEST); test &= ~TEST_LBCK; @@ -805,6 +885,9 @@ static void m_can_chip_config(struct net_device *dev) test |= TEST_LBCK; } + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT; + m_can_write(priv, M_CAN_CCCR, cccr); m_can_write(priv, M_CAN_TEST, test); @@ -869,11 +952,13 @@ static struct net_device *alloc_m_can_dev(void) priv->dev = dev; priv->can.bittiming_const = &m_can_bittiming_const; + priv->can.data_bittiming_const = &m_can_data_bittiming_const; priv->can.do_set_mode = m_can_set_mode; priv->can.do_get_berr_counter = m_can_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | - CAN_CTRLMODE_BERR_REPORTING; + CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_FD; return dev; } @@ -956,8 +1041,9 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); - struct can_frame *cf = (struct can_frame *)skb->data; - u32 id; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u32 id, cccr; + int i; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; @@ -976,11 +1062,28 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, /* message ram configuration */ m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, cf->can_dlc << 16); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(0), *(u32 *)(cf->data + 0)); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(1), *(u32 *)(cf->data + 4)); + m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16); + + for (i = 0; i < cf->len; i += 4) + m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(i / 4), + *(u32 *)(cf->data + i)); + can_put_echo_skb(skb, dev, 0); + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + cccr = m_can_read(priv, M_CAN_CCCR); + cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT); + if (can_is_canfd_skb(skb)) { + if (cf->flags & CANFD_BRS) + cccr |= CCCR_CMR_CANFD_BRS << CCCR_CMR_SHIFT; + else + cccr |= CCCR_CMR_CANFD << CCCR_CMR_SHIFT; + } else { + cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT; + } + m_can_write(priv, M_CAN_CCCR, cccr); + } + /* enable first TX buffer to start transfer */ m_can_write(priv, M_CAN_TXBTIE, 0x1); m_can_write(priv, M_CAN_TXBAR, 0x1); @@ -992,6 +1095,7 @@ static const struct net_device_ops m_can_netdev_ops = { .ndo_open = m_can_open, .ndo_stop = m_can_close, .ndo_start_xmit = m_can_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static int register_m_can_dev(struct net_device *dev) @@ -1009,7 +1113,7 @@ static int m_can_of_parse_mram(struct platform_device *pdev, struct resource *res; void __iomem *addr; u32 out_val[MRAM_CFG_LEN]; - int ret; + int i, start, end, ret; /* message ram could be shared */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); @@ -1060,6 +1164,15 @@ static int m_can_of_parse_mram(struct platform_device *pdev, priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num, priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num); + /* initialize the entire Message RAM in use to avoid possible + * ECC/parity checksum errors when reading an uninitialized buffer + */ + start = priv->mcfg[MRAM_SIDF].off; + end = priv->mcfg[MRAM_TXB].off + + priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; + for (i = start; i < end; i += 4) + writel(0x0, priv->mram_base + i); + return 0; } diff --git 
a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index e0c9be5e2ab7..e36b7400d5cc 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -289,18 +289,15 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -/* This function returns the old state to see where we came from */ -static enum can_state check_set_state(struct net_device *dev, u8 canrflg) +static enum can_state get_new_state(struct net_device *dev, u8 canrflg) { struct mscan_priv *priv = netdev_priv(dev); - enum can_state state, old_state = priv->can.state; - if (canrflg & MSCAN_CSCIF && old_state <= CAN_STATE_BUS_OFF) { - state = state_map[max(MSCAN_STATE_RX(canrflg), - MSCAN_STATE_TX(canrflg))]; - priv->can.state = state; - } - return old_state; + if (unlikely(canrflg & MSCAN_CSCIF)) + return state_map[max(MSCAN_STATE_RX(canrflg), + MSCAN_STATE_TX(canrflg))]; + + return priv->can.state; } static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame) @@ -349,7 +346,7 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame, struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; - enum can_state old_state; + enum can_state new_state; netdev_dbg(dev, "error interrupt (canrflg=%#x)\n", canrflg); frame->can_id = CAN_ERR_FLAG; @@ -363,27 +360,13 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame, frame->data[1] = 0; } - old_state = check_set_state(dev, canrflg); - /* State changed */ - if (old_state != priv->can.state) { - switch (priv->can.state) { - case CAN_STATE_ERROR_WARNING: - frame->can_id |= CAN_ERR_CRTL; - priv->can.can_stats.error_warning++; - if ((priv->shadow_statflg & MSCAN_RSTAT_MSK) < - (canrflg & MSCAN_RSTAT_MSK)) - frame->data[1] |= CAN_ERR_CRTL_RX_WARNING; - if ((priv->shadow_statflg & MSCAN_TSTAT_MSK) < - (canrflg & MSCAN_TSTAT_MSK)) - frame->data[1] |= CAN_ERR_CRTL_TX_WARNING; - break; - case CAN_STATE_ERROR_PASSIVE: - frame->can_id |= CAN_ERR_CRTL; - priv->can.can_stats.error_passive++; - frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; - break; - case CAN_STATE_BUS_OFF: - frame->can_id |= CAN_ERR_BUSOFF; + new_state = get_new_state(dev, canrflg); + if (new_state != priv->can.state) { + can_change_state(dev, frame, + state_map[MSCAN_STATE_TX(canrflg)], + state_map[MSCAN_STATE_RX(canrflg)]); + + if (priv->can.state == CAN_STATE_BUS_OFF) { /* * The MSCAN on the MPC5200 does recover from bus-off * automatically. 
To avoid that we stop the chip doing @@ -396,9 +379,6 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame, MSCAN_SLPRQ | MSCAN_INITRQ); } can_bus_off(dev); - break; - default: - break; } } priv->shadow_statflg = canrflg & MSCAN_STAT_MSK; diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c index 1abe133d1594..9718248e55f1 100644 --- a/drivers/net/can/rcar_can.c +++ b/drivers/net/can/rcar_can.c @@ -628,6 +628,7 @@ static const struct net_device_ops rcar_can_netdev_ops = { .ndo_open = rcar_can_open, .ndo_stop = rcar_can_close, .ndo_start_xmit = rcar_can_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static void rcar_can_rx_pkt(struct rcar_can_priv *priv) diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c index 8ff3424d5147..15c00faeec61 100644 --- a/drivers/net/can/sja1000/kvaser_pci.c +++ b/drivers/net/can/sja1000/kvaser_pci.c @@ -214,7 +214,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel, struct net_device *dev; struct sja1000_priv *priv; struct kvaser_pci *board; - int err, init_step; + int err; dev = alloc_sja1000dev(sizeof(struct kvaser_pci)); if (dev == NULL) @@ -235,7 +235,6 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel, if (channel == 0) { board->xilinx_ver = ioread8(board->res_addr + XILINX_VERINT) >> 4; - init_step = 2; /* Assert PTADR# - we're in passive mode so the other bits are not important */ @@ -264,8 +263,6 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel, priv->irq_flags = IRQF_SHARED; dev->irq = pdev->irq; - init_step = 4; - dev_info(&pdev->dev, "reg_base=%p conf_addr=%p irq=%d\n", priv->reg_base, board->conf_addr, dev->irq); diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index b27ac6074afb..32bd7f451aa4 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -392,12 +392,20 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) struct can_frame *cf; struct sk_buff *skb; enum can_state state = priv->can.state; + enum can_state rx_state, tx_state; + unsigned int rxerr, txerr; uint8_t ecc, alc; skb = alloc_can_err_skb(dev, &cf); if (skb == NULL) return -ENOMEM; + txerr = priv->read_reg(priv, SJA1000_TXERR); + rxerr = priv->read_reg(priv, SJA1000_RXERR); + + cf->data[6] = txerr; + cf->data[7] = rxerr; + if (isrc & IRQ_DOI) { /* data overrun interrupt */ netdev_dbg(dev, "data overrun interrupt\n"); @@ -412,13 +420,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) /* error warning interrupt */ netdev_dbg(dev, "error warning interrupt\n"); - if (status & SR_BS) { + if (status & SR_BS) state = CAN_STATE_BUS_OFF; - cf->can_id |= CAN_ERR_BUSOFF; - can_bus_off(dev); - } else if (status & SR_ES) { + else if (status & SR_ES) state = CAN_STATE_ERROR_WARNING; - } else + else state = CAN_STATE_ERROR_ACTIVE; } if (isrc & IRQ_BEI) { @@ -452,10 +458,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) if (isrc & IRQ_EPI) { /* error passive interrupt */ netdev_dbg(dev, "error passive interrupt\n"); - if (status & SR_ES) - state = CAN_STATE_ERROR_PASSIVE; + + if (state == CAN_STATE_ERROR_PASSIVE) + state = CAN_STATE_ERROR_WARNING; else - state = CAN_STATE_ERROR_ACTIVE; + state = CAN_STATE_ERROR_PASSIVE; } if (isrc & IRQ_ALI) { /* arbitration lost interrupt */ @@ -467,27 +474,15 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) cf->data[0] = alc & 0x1f; } - 
if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING || - state == CAN_STATE_ERROR_PASSIVE)) { - uint8_t rxerr = priv->read_reg(priv, SJA1000_RXERR); - uint8_t txerr = priv->read_reg(priv, SJA1000_TXERR); - cf->can_id |= CAN_ERR_CRTL; - if (state == CAN_STATE_ERROR_WARNING) { - priv->can.can_stats.error_warning++; - cf->data[1] = (txerr > rxerr) ? - CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - } else { - priv->can.can_stats.error_passive++; - cf->data[1] = (txerr > rxerr) ? - CAN_ERR_CRTL_TX_PASSIVE : - CAN_ERR_CRTL_RX_PASSIVE; - } - cf->data[6] = txerr; - cf->data[7] = rxerr; - } + if (state != priv->can.state) { + tx_state = txerr >= rxerr ? state : 0; + rx_state = txerr <= rxerr ? state : 0; - priv->can.state = state; + can_change_state(dev, cf, tx_state, rx_state); + + if(state == CAN_STATE_BUS_OFF) + can_bus_off(dev); + } netif_rx(skb); diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index acb5b92ace92..c837eb91d43e 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -56,9 +56,6 @@ #include <linux/can.h> #include <linux/can/skb.h> -static __initconst const char banner[] = - KERN_INFO "slcan: serial line CAN interface driver\n"; - MODULE_ALIAS_LDISC(N_SLCAN); MODULE_DESCRIPTION("serial line CAN interface"); MODULE_LICENSE("GPL"); @@ -702,8 +699,8 @@ static int __init slcan_init(void) if (maxdev < 4) maxdev = 4; /* Sanity */ - printk(banner); - printk(KERN_INFO "slcan: %d dynamic interface channels.\n", maxdev); + pr_info("slcan: serial line CAN interface driver\n"); + pr_info("slcan: %d dynamic interface channels.\n", maxdev); slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL); if (!slcan_devs) diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 00f2534dde73..29d3f0938eb8 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -434,10 +434,9 @@ static void ems_usb_read_bulk_callback(struct urb *urb) if (urb->actual_length > CPC_HEADER_SIZE) { struct ems_cpc_msg *msg; u8 *ibuf = urb->transfer_buffer; - u8 msg_count, again, start; + u8 msg_count, start; msg_count = ibuf[0] & ~0x80; - again = ibuf[0] & 0x80; start = CPC_HEADER_SIZE; diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index b7c9e8b11460..c063a54ab8dd 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -464,7 +464,6 @@ static void esd_usb2_write_bulk_callback(struct urb *urb) { struct esd_tx_urb_context *context = urb->context; struct esd_usb2_net_priv *priv; - struct esd_usb2 *dev; struct net_device *netdev; size_t size = sizeof(struct esd_usb2_msg); @@ -472,7 +471,6 @@ static void esd_usb2_write_bulk_callback(struct urb *urb) priv = context->priv; netdev = priv->netdev; - dev = priv->usb2; /* free up our allocated buffer */ usb_free_coherent(urb->dev, size, @@ -1143,6 +1141,7 @@ static void esd_usb2_disconnect(struct usb_interface *intf) } } unlink_all_urbs(dev); + kfree(dev); } } diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 04b0f84612f0..009acc8641fc 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -718,6 +718,7 @@ static const struct net_device_ops gs_usb_netdev_ops = { .ndo_open = gs_can_open, .ndo_stop = gs_can_close, .ndo_start_xmit = gs_can_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf) diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index 4e94057ef5cf..674f367087c5 
100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -50,9 +50,6 @@ #include <linux/slab.h> #include <net/rtnetlink.h> -static __initconst const char banner[] = - KERN_INFO "vcan: Virtual CAN interface driver\n"; - MODULE_DESCRIPTION("virtual CAN interface"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>"); @@ -173,7 +170,7 @@ static struct rtnl_link_ops vcan_link_ops __read_mostly = { static __init int vcan_init_module(void) { - printk(banner); + pr_info("vcan: Virtual CAN interface driver\n"); if (echo) printk(KERN_INFO "vcan: enabled echo on driver level.\n"); diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 5e8b5609c067..8a998e3884ce 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -300,7 +300,8 @@ static int xcan_set_bittiming(struct net_device *ndev) static int xcan_chip_start(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); - u32 err, reg_msr, reg_sr_mask; + u32 reg_msr, reg_sr_mask; + int err; unsigned long timeout; /* Check if it is in reset mode */ @@ -961,6 +962,7 @@ static const struct net_device_ops xcan_netdev_ops = { .ndo_open = xcan_open, .ndo_stop = xcan_close, .ndo_start_xmit = xcan_start_xmit, + .ndo_change_mtu = can_change_mtu, }; /** diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 9234d808cbb3..7cf8f4ac281f 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -37,13 +37,22 @@ config NET_DSA_MV88E6123_61_65 ethernet switch chips. config NET_DSA_MV88E6171 - tristate "Marvell 88E6171 ethernet switch chip support" + tristate "Marvell 88E6171/6172 ethernet switch chip support" select NET_DSA select NET_DSA_MV88E6XXX select NET_DSA_TAG_EDSA ---help--- - This enables support for the Marvell 88E6171 ethernet switch - chip. + This enables support for the Marvell 88E6171/6172 ethernet switch + chips. + +config NET_DSA_MV88E6352 + tristate "Marvell 88E6176/88E6352 ethernet switch chip support" + select NET_DSA + select NET_DSA_MV88E6XXX + select NET_DSA_TAG_EDSA + ---help--- + This enables support for the Marvell 88E6176 and 88E6352 ethernet + switch chips. 
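(Aside, not part of the patch.) The new mv88e6352 driver enabled by this entry tells the supported chips apart at probe time by reading the product ID from port 0, register 0x03: exact values name known steppings first, then a 0xfff0 family mask catches the remaining revisions (see mv88e6352_probe() further down). A minimal, self-contained sketch of that matching order, with a hypothetical hard-coded ID standing in for the real MDIO read:

#include <stdio.h>

/* Same ID constants as mv88e6352_probe(); the switch ID is faked here
 * instead of being read over MDIO.
 */
static const char *mv88e6352_name_from_id(int ret)
{
	if (ret < 0)
		return NULL;
	if ((ret & 0xfff0) == 0x1760)
		return "Marvell 88E6176";
	if (ret == 0x3521)			/* exact match first: A0 stepping */
		return "Marvell 88E6352 (A0)";
	if (ret == 0x3522)			/* A1 stepping */
		return "Marvell 88E6352 (A1)";
	if ((ret & 0xfff0) == 0x3520)		/* family fallback */
		return "Marvell 88E6352";
	return NULL;
}

int main(void)
{
	printf("%s\n", mv88e6352_name_from_id(0x3522));	/* -> Marvell 88E6352 (A1) */
	return 0;
}

Checking exact IDs before the masked family code matters here: 0x3521 and 0x3522 also satisfy the 0x3520 mask, so the more specific stepping strings have to come first.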
config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 23a90de9830e..e2d51c4b9382 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -7,6 +7,9 @@ endif ifdef CONFIG_NET_DSA_MV88E6131 mv88e6xxx_drv-y += mv88e6131.o endif +ifdef CONFIG_NET_DSA_MV88E6352 +mv88e6xxx_drv-y += mv88e6352.o +endif ifdef CONFIG_NET_DSA_MV88E6171 mv88e6xxx_drv-y += mv88e6171.o endif diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index b9625968daac..4f4c2a7888e5 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -377,6 +377,29 @@ static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id) return IRQ_HANDLED; } +static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) +{ + unsigned int timeout = 1000; + u32 reg; + + reg = core_readl(priv, CORE_WATCHDOG_CTRL); + reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET; + core_writel(priv, reg, CORE_WATCHDOG_CTRL); + + do { + reg = core_readl(priv, CORE_WATCHDOG_CTRL); + if (!(reg & SOFTWARE_RESET)) + break; + + usleep_range(1000, 2000); + } while (timeout-- > 0); + + if (timeout == 0) + return -ETIMEDOUT; + + return 0; +} + static int bcm_sf2_sw_setup(struct dsa_switch *ds) { const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; @@ -404,11 +427,18 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) *base = of_iomap(dn, i); if (*base == NULL) { pr_err("unable to find register: %s\n", reg_names[i]); - return -ENODEV; + ret = -ENOMEM; + goto out_unmap; } base++; } + ret = bcm_sf2_sw_rst(priv); + if (ret) { + pr_err("unable to software reset switch: %d\n", ret); + goto out_unmap; + } + /* Disable all interrupts and request them */ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); @@ -484,7 +514,8 @@ out_free_irq0: out_unmap: base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - iounmap(*base); + if (*base) + iounmap(*base); base++; } return ret; @@ -733,29 +764,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds) return 0; } -static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) -{ - unsigned int timeout = 1000; - u32 reg; - - reg = core_readl(priv, CORE_WATCHDOG_CTRL); - reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET; - core_writel(priv, reg, CORE_WATCHDOG_CTRL); - - do { - reg = core_readl(priv, CORE_WATCHDOG_CTRL); - if (!(reg & SOFTWARE_RESET)) - break; - - usleep_range(1000, 2000); - } while (timeout-- > 0); - - if (timeout == 0) - return -ETIMEDOUT; - - return 0; -} - static int bcm_sf2_sw_resume(struct dsa_switch *ds) { struct bcm_sf2_priv *priv = ds_to_priv(ds); diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 05b0ca3bf71d..c29aebe1e62b 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -69,8 +69,11 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr) ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03); if (ret >= 0) { - ret &= 0xfff0; if (ret == 0x0600) + return "Marvell 88E6060 (A0)"; + if (ret == 0x0601 || ret == 0x0602) + return "Marvell 88E6060 (B0)"; + if ((ret & 0xfff0) == 0x0600) return "Marvell 88E6060"; } diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c index a332c53ff955..e9c736e1cef3 100644 --- a/drivers/net/dsa/mv88e6123_61_65.c +++ b/drivers/net/dsa/mv88e6123_61_65.c @@ -299,6 +299,7 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds) mutex_init(&ps->smi_mutex); mutex_init(&ps->stats_mutex); + 
mutex_init(&ps->phy_mutex); ret = mv88e6123_61_65_switch_reset(ds); if (ret < 0) @@ -329,16 +330,28 @@ static int mv88e6123_61_65_port_to_phy_addr(int port) static int mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int addr = mv88e6123_61_65_port_to_phy_addr(port); - return mv88e6xxx_phy_read(ds, addr, regnum); + int ret; + + mutex_lock(&ps->phy_mutex); + ret = mv88e6xxx_phy_read(ds, addr, regnum); + mutex_unlock(&ps->phy_mutex); + return ret; } static int mv88e6123_61_65_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int addr = mv88e6123_61_65_port_to_phy_addr(port); - return mv88e6xxx_phy_write(ds, addr, regnum, val); + int ret; + + mutex_lock(&ps->phy_mutex); + ret = mv88e6xxx_phy_write(ds, addr, regnum, val); + mutex_unlock(&ps->phy_mutex); + return ret; } static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = { @@ -372,6 +385,9 @@ static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = { { "hist_256_511bytes", 4, 0x0b, }, { "hist_512_1023bytes", 4, 0x0c, }, { "hist_1024_max_bytes", 4, 0x0d, }, + { "sw_in_discards", 4, 0x110, }, + { "sw_in_filtered", 2, 0x112, }, + { "sw_out_filtered", 2, 0x113, }, }; static void @@ -406,6 +422,11 @@ struct dsa_switch_driver mv88e6123_61_65_switch_driver = { .get_strings = mv88e6123_61_65_get_strings, .get_ethtool_stats = mv88e6123_61_65_get_ethtool_stats, .get_sset_count = mv88e6123_61_65_get_sset_count, +#ifdef CONFIG_NET_DSA_HWMON + .get_temp = mv88e6xxx_get_temp, +#endif + .get_regs_len = mv88e6xxx_get_regs_len, + .get_regs = mv88e6xxx_get_regs, }; MODULE_ALIAS("platform:mv88e6123"); diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 244c735014fa..1230f52aa70e 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -21,6 +21,7 @@ #define ID_6085 0x04a0 #define ID_6095 0x0950 #define ID_6131 0x1060 +#define ID_6131_B2 0x1066 static char *mv88e6131_probe(struct device *host_dev, int sw_addr) { @@ -32,12 +33,15 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr) ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); if (ret >= 0) { - ret &= 0xfff0; - if (ret == ID_6085) + int ret_masked = ret & 0xfff0; + + if (ret_masked == ID_6085) return "Marvell 88E6085"; - if (ret == ID_6095) + if (ret_masked == ID_6095) return "Marvell 88E6095/88E6095F"; - if (ret == ID_6131) + if (ret == ID_6131_B2) + return "Marvell 88E6131 (B2)"; + if (ret_masked == ID_6131) return "Marvell 88E6131"; } diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 1020a7af67cf..aa33d16f2e22 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -1,4 +1,4 @@ -/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support +/* net/dsa/mv88e6171.c - Marvell 88e6171/8826172 switch chip support * Copyright (c) 2008-2009 Marvell Semiconductor * Copyright (c) 2014 Claudio Leite <leitec@staticky.com> * @@ -29,6 +29,8 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr) if (ret >= 0) { if ((ret & 0xfff0) == 0x1710) return "Marvell 88E6171"; + if ((ret & 0xfff0) == 0x1720) + return "Marvell 88E6172"; } return NULL; @@ -314,6 +316,8 @@ static int mv88e6171_setup(struct dsa_switch *ds) return ret; } + mutex_init(&ps->phy_mutex); + return 0; } @@ -327,18 +331,28 @@ static int mv88e6171_port_to_phy_addr(int port) static int mv88e6171_phy_read(struct dsa_switch *ds, int port, int regnum) { + struct 
mv88e6xxx_priv_state *ps = ds_to_priv(ds); int addr = mv88e6171_port_to_phy_addr(port); + int ret; - return mv88e6xxx_phy_read(ds, addr, regnum); + mutex_lock(&ps->phy_mutex); + ret = mv88e6xxx_phy_read(ds, addr, regnum); + mutex_unlock(&ps->phy_mutex); + return ret; } static int mv88e6171_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int addr = mv88e6171_port_to_phy_addr(port); + int ret; - return mv88e6xxx_phy_write(ds, addr, regnum, val); + mutex_lock(&ps->phy_mutex); + ret = mv88e6xxx_phy_write(ds, addr, regnum, val); + mutex_unlock(&ps->phy_mutex); + return ret; } static struct mv88e6xxx_hw_stat mv88e6171_hw_stats[] = { @@ -395,7 +409,7 @@ static int mv88e6171_get_sset_count(struct dsa_switch *ds) } struct dsa_switch_driver mv88e6171_switch_driver = { - .tag_protocol = DSA_TAG_PROTO_DSA, + .tag_protocol = DSA_TAG_PROTO_EDSA, .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6171_probe, .setup = mv88e6171_setup, @@ -406,6 +420,12 @@ struct dsa_switch_driver mv88e6171_switch_driver = { .get_strings = mv88e6171_get_strings, .get_ethtool_stats = mv88e6171_get_ethtool_stats, .get_sset_count = mv88e6171_get_sset_count, +#ifdef CONFIG_NET_DSA_HWMON + .get_temp = mv88e6xxx_get_temp, +#endif + .get_regs_len = mv88e6xxx_get_regs_len, + .get_regs = mv88e6xxx_get_regs, }; MODULE_ALIAS("platform:mv88e6171"); +MODULE_ALIAS("platform:mv88e6172"); diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c new file mode 100644 index 000000000000..258d9ef5ef25 --- /dev/null +++ b/drivers/net/dsa/mv88e6352.c @@ -0,0 +1,788 @@ +/* + * net/dsa/mv88e6352.c - Marvell 88e6352 switch chip support + * + * Copyright (c) 2014 Guenter Roeck + * + * Derived from mv88e6123_61_65.c + * Copyright (c) 2008-2009 Marvell Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/delay.h> +#include <linux/jiffies.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> +#include <linux/phy.h> +#include <net/dsa.h> +#include "mv88e6xxx.h" + +static int mv88e6352_wait(struct dsa_switch *ds, int reg, u16 mask) +{ + unsigned long timeout = jiffies + HZ / 10; + + while (time_before(jiffies, timeout)) { + int ret; + + ret = REG_READ(REG_GLOBAL2, reg); + if (ret < 0) + return ret; + + if (!(ret & mask)) + return 0; + + usleep_range(1000, 2000); + } + return -ETIMEDOUT; +} + +static inline int mv88e6352_phy_wait(struct dsa_switch *ds) +{ + return mv88e6352_wait(ds, 0x18, 0x8000); +} + +static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds) +{ + return mv88e6352_wait(ds, 0x14, 0x0800); +} + +static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds) +{ + return mv88e6352_wait(ds, 0x14, 0x8000); +} + +static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum) +{ + int ret; + + REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum); + + ret = mv88e6352_phy_wait(ds); + if (ret < 0) + return ret; + + return REG_READ(REG_GLOBAL2, 0x19); +} + +static int __mv88e6352_phy_write(struct dsa_switch *ds, int addr, int regnum, + u16 val) +{ + REG_WRITE(REG_GLOBAL2, 0x19, val); + REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum); + + return mv88e6352_phy_wait(ds); +} + +static char *mv88e6352_probe(struct device *host_dev, int sw_addr) +{ + struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); + int ret; + + if (bus == NULL) + return NULL; + + ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); + if (ret >= 0) { + if ((ret & 0xfff0) == 0x1760) + return "Marvell 88E6176"; + if (ret == 0x3521) + return "Marvell 88E6352 (A0)"; + if (ret == 0x3522) + return "Marvell 88E6352 (A1)"; + if ((ret & 0xfff0) == 0x3520) + return "Marvell 88E6352"; + } + + return NULL; +} + +static int mv88e6352_switch_reset(struct dsa_switch *ds) +{ + unsigned long timeout; + int ret; + int i; + + /* Set all ports to the disabled state. */ + for (i = 0; i < 7; i++) { + ret = REG_READ(REG_PORT(i), 0x04); + REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); + } + + /* Wait for transmit queues to drain. */ + usleep_range(2000, 4000); + + /* Reset the switch. Keep PPU active (bit 14, undocumented). + * The PPU needs to be active to support indirect phy register + * accesses through global registers 0x18 and 0x19. + */ + REG_WRITE(REG_GLOBAL, 0x04, 0xc000); + + /* Wait up to one second for reset to complete. */ + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { + ret = REG_READ(REG_GLOBAL, 0x00); + if ((ret & 0x8800) == 0x8800) + break; + usleep_range(1000, 2000); + } + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + + return 0; +} + +static int mv88e6352_setup_global(struct dsa_switch *ds) +{ + int ret; + int i; + + /* Discard packets with excessive collisions, + * mask all interrupt sources, enable PPU (bit 14, undocumented). + */ + REG_WRITE(REG_GLOBAL, 0x04, 0x6000); + + /* Set the default address aging time to 5 minutes, and + * enable address learn messages to be sent to all message + * ports. + */ + REG_WRITE(REG_GLOBAL, 0x0a, 0x0148); + + /* Configure the priority mapping registers. */ + ret = mv88e6xxx_config_prio(ds); + if (ret < 0) + return ret; + + /* Configure the upstream port, and configure the upstream + * port as the port to which ingress and egress monitor frames + * are to be sent. 
+ */ + REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110)); + + /* Disable remote management for now, and set the switch's + * DSA device number. + */ + REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f); + + /* Send all frames with destination addresses matching + * 01:80:c2:00:00:2x to the CPU port. + */ + REG_WRITE(REG_GLOBAL2, 0x02, 0xffff); + + /* Send all frames with destination addresses matching + * 01:80:c2:00:00:0x to the CPU port. + */ + REG_WRITE(REG_GLOBAL2, 0x03, 0xffff); + + /* Disable the loopback filter, disable flow control + * messages, disable flood broadcast override, disable + * removing of provider tags, disable ATU age violation + * interrupts, disable tag flow control, force flow + * control priority to the highest, and send all special + * multicast frames to the CPU at the highest priority. + */ + REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); + + /* Program the DSA routing table. */ + for (i = 0; i < 32; i++) { + int nexthop = 0x1f; + + if (i != ds->index && i < ds->dst->pd->nr_chips) + nexthop = ds->pd->rtable[i] & 0x1f; + + REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop); + } + + /* Clear all trunk masks. */ + for (i = 0; i < 8; i++) + REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7f); + + /* Clear all trunk mappings. */ + for (i = 0; i < 16; i++) + REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11)); + + /* Disable ingress rate limiting by resetting all ingress + * rate limit registers to their initial state. + */ + for (i = 0; i < 7; i++) + REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8)); + + /* Initialise cross-chip port VLAN table to reset defaults. */ + REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000); + + /* Clear the priority override table. */ + for (i = 0; i < 16; i++) + REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8)); + + /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */ + + return 0; +} + +static int mv88e6352_setup_port(struct dsa_switch *ds, int p) +{ + int addr = REG_PORT(p); + u16 val; + + /* MAC Forcing register: don't force link, speed, duplex + * or flow control state to any particular values on physical + * ports, but force the CPU port and all DSA ports to 1000 Mb/s + * full duplex. + */ + if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) + REG_WRITE(addr, 0x01, 0x003e); + else + REG_WRITE(addr, 0x01, 0x0003); + + /* Do not limit the period of time that this port can be + * paused for by the remote end or the period of time that + * this port can pause the remote end. + */ + REG_WRITE(addr, 0x02, 0x0000); + + /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock, + * disable Header mode, enable IGMP/MLD snooping, disable VLAN + * tunneling, determine priority by looking at 802.1p and IP + * priority fields (IP prio has precedence), and set STP state + * to Forwarding. + * + * If this is the CPU link, use DSA or EDSA tagging depending + * on which tagging mode was configured. + * + * If this is a link to another switch, use DSA tagging mode. + * + * If this is the upstream port for this switch, enable + * forwarding of unknown unicasts and multicasts. + */ + val = 0x0433; + if (dsa_is_cpu_port(ds, p)) { + if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) + val |= 0x3300; + else + val |= 0x0100; + } + if (ds->dsa_port_mask & (1 << p)) + val |= 0x0100; + if (p == dsa_upstream_port(ds)) + val |= 0x000c; + REG_WRITE(addr, 0x04, val); + + /* Port Control 1: disable trunking. Also, if this is the + * CPU port, enable learn messages to be sent to this port. + */ + REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 
0x8000 : 0x0000); + + /* Port based VLAN map: give each port its own address + * database, allow the CPU port to talk to each of the 'real' + * ports, and allow each of the 'real' ports to only talk to + * the upstream port. + */ + val = (p & 0xf) << 12; + if (dsa_is_cpu_port(ds, p)) + val |= ds->phys_port_mask; + else + val |= 1 << dsa_upstream_port(ds); + REG_WRITE(addr, 0x06, val); + + /* Default VLAN ID and priority: don't set a default VLAN + * ID, and set the default packet priority to zero. + */ + REG_WRITE(addr, 0x07, 0x0000); + + /* Port Control 2: don't force a good FCS, set the maximum + * frame size to 10240 bytes, don't let the switch add or + * strip 802.1q tags, don't discard tagged or untagged frames + * on this port, do a destination address lookup on all + * received packets as usual, disable ARP mirroring and don't + * send a copy of all transmitted/received frames on this port + * to the CPU. + */ + REG_WRITE(addr, 0x08, 0x2080); + + /* Egress rate control: disable egress rate control. */ + REG_WRITE(addr, 0x09, 0x0001); + + /* Egress rate control 2: disable egress rate control. */ + REG_WRITE(addr, 0x0a, 0x0000); + + /* Port Association Vector: when learning source addresses + * of packets, add the address to the address database using + * a port bitmap that has only the bit for this port set and + * the other bits clear. + */ + REG_WRITE(addr, 0x0b, 1 << p); + + /* Port ATU control: disable limiting the number of address + * database entries that this port is allowed to use. + */ + REG_WRITE(addr, 0x0c, 0x0000); + + /* Priority Override: disable DA, SA and VTU priority override. */ + REG_WRITE(addr, 0x0d, 0x0000); + + /* Port Ethertype: use the Ethertype DSA Ethertype value. */ + REG_WRITE(addr, 0x0f, ETH_P_EDSA); + + /* Tag Remap: use an identity 802.1p prio -> switch prio + * mapping. + */ + REG_WRITE(addr, 0x18, 0x3210); + + /* Tag Remap 2: use an identity 802.1p prio -> switch prio + * mapping. 
+ */ + REG_WRITE(addr, 0x19, 0x7654); + + return 0; +} + +#ifdef CONFIG_NET_DSA_HWMON + +static int mv88e6352_phy_page_read(struct dsa_switch *ds, + int port, int page, int reg) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + + mutex_lock(&ps->phy_mutex); + ret = __mv88e6352_phy_write(ds, port, 0x16, page); + if (ret < 0) + goto error; + ret = __mv88e6352_phy_read(ds, port, reg); +error: + __mv88e6352_phy_write(ds, port, 0x16, 0x0); + mutex_unlock(&ps->phy_mutex); + return ret; +} + +static int mv88e6352_phy_page_write(struct dsa_switch *ds, + int port, int page, int reg, int val) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + + mutex_lock(&ps->phy_mutex); + ret = __mv88e6352_phy_write(ds, port, 0x16, page); + if (ret < 0) + goto error; + + ret = __mv88e6352_phy_write(ds, port, reg, val); +error: + __mv88e6352_phy_write(ds, port, 0x16, 0x0); + mutex_unlock(&ps->phy_mutex); + return ret; +} + +static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp) +{ + int ret; + + *temp = 0; + + ret = mv88e6352_phy_page_read(ds, 0, 6, 27); + if (ret < 0) + return ret; + + *temp = (ret & 0xff) - 25; + + return 0; +} + +static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp) +{ + int ret; + + *temp = 0; + + ret = mv88e6352_phy_page_read(ds, 0, 6, 26); + if (ret < 0) + return ret; + + *temp = (((ret >> 8) & 0x1f) * 5) - 25; + + return 0; +} + +static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp) +{ + int ret; + + ret = mv88e6352_phy_page_read(ds, 0, 6, 26); + if (ret < 0) + return ret; + temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); + return mv88e6352_phy_page_write(ds, 0, 6, 26, + (ret & 0xe0ff) | (temp << 8)); +} + +static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm) +{ + int ret; + + *alarm = false; + + ret = mv88e6352_phy_page_read(ds, 0, 6, 26); + if (ret < 0) + return ret; + + *alarm = !!(ret & 0x40); + + return 0; +} +#endif /* CONFIG_NET_DSA_HWMON */ + +static int mv88e6352_setup(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + int i; + + mutex_init(&ps->smi_mutex); + mutex_init(&ps->stats_mutex); + mutex_init(&ps->phy_mutex); + mutex_init(&ps->eeprom_mutex); + + ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0; + + ret = mv88e6352_switch_reset(ds); + if (ret < 0) + return ret; + + /* @@@ initialise vtu and atu */ + + ret = mv88e6352_setup_global(ds); + if (ret < 0) + return ret; + + for (i = 0; i < 7; i++) { + ret = mv88e6352_setup_port(ds, i); + if (ret < 0) + return ret; + } + + return 0; +} + +static int mv88e6352_port_to_phy_addr(int port) +{ + if (port >= 0 && port <= 4) + return port; + return -EINVAL; +} + +static int +mv88e6352_phy_read(struct dsa_switch *ds, int port, int regnum) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int addr = mv88e6352_port_to_phy_addr(port); + int ret; + + if (addr < 0) + return addr; + + mutex_lock(&ps->phy_mutex); + ret = __mv88e6352_phy_read(ds, addr, regnum); + mutex_unlock(&ps->phy_mutex); + + return ret; +} + +static int +mv88e6352_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int addr = mv88e6352_port_to_phy_addr(port); + int ret; + + if (addr < 0) + return addr; + + mutex_lock(&ps->phy_mutex); + ret = __mv88e6352_phy_write(ds, addr, regnum, val); + mutex_unlock(&ps->phy_mutex); + + return ret; +} + +static struct mv88e6xxx_hw_stat mv88e6352_hw_stats[] = { + { "in_good_octets", 8, 0x00, }, + { "in_bad_octets", 4, 0x02, }, + 
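/* Editor's note: each entry below is { ethtool string, counter width
 * in bytes, register offset }; offsets of 0x100 and above do not name
 * stats-unit counters but per-port registers (0x110 reads port
 * register 0x10), which the reworked mv88e6xxx_get_ethtool_stats()
 * later in this patch fetches via mv88e6xxx_reg_read().
 */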
{ "in_unicast", 4, 0x04, }, + { "in_broadcasts", 4, 0x06, }, + { "in_multicasts", 4, 0x07, }, + { "in_pause", 4, 0x16, }, + { "in_undersize", 4, 0x18, }, + { "in_fragments", 4, 0x19, }, + { "in_oversize", 4, 0x1a, }, + { "in_jabber", 4, 0x1b, }, + { "in_rx_error", 4, 0x1c, }, + { "in_fcs_error", 4, 0x1d, }, + { "out_octets", 8, 0x0e, }, + { "out_unicast", 4, 0x10, }, + { "out_broadcasts", 4, 0x13, }, + { "out_multicasts", 4, 0x12, }, + { "out_pause", 4, 0x15, }, + { "excessive", 4, 0x11, }, + { "collisions", 4, 0x1e, }, + { "deferred", 4, 0x05, }, + { "single", 4, 0x14, }, + { "multiple", 4, 0x17, }, + { "out_fcs_error", 4, 0x03, }, + { "late", 4, 0x1f, }, + { "hist_64bytes", 4, 0x08, }, + { "hist_65_127bytes", 4, 0x09, }, + { "hist_128_255bytes", 4, 0x0a, }, + { "hist_256_511bytes", 4, 0x0b, }, + { "hist_512_1023bytes", 4, 0x0c, }, + { "hist_1024_max_bytes", 4, 0x0d, }, + { "sw_in_discards", 4, 0x110, }, + { "sw_in_filtered", 2, 0x112, }, + { "sw_out_filtered", 2, 0x113, }, +}; + +static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + + mutex_lock(&ps->eeprom_mutex); + + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14, + 0xc000 | (addr & 0xff)); + if (ret < 0) + goto error; + + ret = mv88e6352_eeprom_busy_wait(ds); + if (ret < 0) + goto error; + + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x15); +error: + mutex_unlock(&ps->eeprom_mutex); + return ret; +} + +static int mv88e6352_get_eeprom(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int offset; + int len; + int ret; + + offset = eeprom->offset; + len = eeprom->len; + eeprom->len = 0; + + eeprom->magic = 0xc3ec4951; + + ret = mv88e6352_eeprom_load_wait(ds); + if (ret < 0) + return ret; + + if (offset & 1) { + int word; + + word = mv88e6352_read_eeprom_word(ds, offset >> 1); + if (word < 0) + return word; + + *data++ = (word >> 8) & 0xff; + + offset++; + len--; + eeprom->len++; + } + + while (len >= 2) { + int word; + + word = mv88e6352_read_eeprom_word(ds, offset >> 1); + if (word < 0) + return word; + + *data++ = word & 0xff; + *data++ = (word >> 8) & 0xff; + + offset += 2; + len -= 2; + eeprom->len += 2; + } + + if (len) { + int word; + + word = mv88e6352_read_eeprom_word(ds, offset >> 1); + if (word < 0) + return word; + + *data++ = word & 0xff; + + offset++; + len--; + eeprom->len++; + } + + return 0; +} + +static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds) +{ + int ret; + + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x14); + if (ret < 0) + return ret; + + if (!(ret & 0x0400)) + return -EROFS; + + return 0; +} + +static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr, + u16 data) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + + mutex_lock(&ps->eeprom_mutex); + + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x15, data); + if (ret < 0) + goto error; + + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14, + 0xb000 | (addr & 0xff)); + if (ret < 0) + goto error; + + ret = mv88e6352_eeprom_busy_wait(ds); +error: + mutex_unlock(&ps->eeprom_mutex); + return ret; +} + +static int mv88e6352_set_eeprom(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int offset; + int ret; + int len; + + if (eeprom->magic != 0xc3ec4951) + return -EINVAL; + + ret = mv88e6352_eeprom_is_readonly(ds); + if (ret) + return ret; + + offset = eeprom->offset; + len = eeprom->len; + eeprom->len = 0; + + ret = mv88e6352_eeprom_load_wait(ds); + if (ret < 0) + return ret; + + if (offset & 1) { + int 
word; + + word = mv88e6352_read_eeprom_word(ds, offset >> 1); + if (word < 0) + return word; + + word = (*data++ << 8) | (word & 0xff); + + ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word); + if (ret < 0) + return ret; + + offset++; + len--; + eeprom->len++; + } + + while (len >= 2) { + int word; + + word = *data++; + word |= *data++ << 8; + + ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word); + if (ret < 0) + return ret; + + offset += 2; + len -= 2; + eeprom->len += 2; + } + + if (len) { + int word; + + word = mv88e6352_read_eeprom_word(ds, offset >> 1); + if (word < 0) + return word; + + word = (word & 0xff00) | *data++; + + ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word); + if (ret < 0) + return ret; + + offset++; + len--; + eeprom->len++; + } + + return 0; +} + +static void +mv88e6352_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +{ + mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6352_hw_stats), + mv88e6352_hw_stats, port, data); +} + +static void +mv88e6352_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) +{ + mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6352_hw_stats), + mv88e6352_hw_stats, port, data); +} + +static int mv88e6352_get_sset_count(struct dsa_switch *ds) +{ + return ARRAY_SIZE(mv88e6352_hw_stats); +} + +struct dsa_switch_driver mv88e6352_switch_driver = { + .tag_protocol = DSA_TAG_PROTO_EDSA, + .priv_size = sizeof(struct mv88e6xxx_priv_state), + .probe = mv88e6352_probe, + .setup = mv88e6352_setup, + .set_addr = mv88e6xxx_set_addr_indirect, + .phy_read = mv88e6352_phy_read, + .phy_write = mv88e6352_phy_write, + .poll_link = mv88e6xxx_poll_link, + .get_strings = mv88e6352_get_strings, + .get_ethtool_stats = mv88e6352_get_ethtool_stats, + .get_sset_count = mv88e6352_get_sset_count, +#ifdef CONFIG_NET_DSA_HWMON + .get_temp = mv88e6352_get_temp, + .get_temp_limit = mv88e6352_get_temp_limit, + .set_temp_limit = mv88e6352_set_temp_limit, + .get_temp_alarm = mv88e6352_get_temp_alarm, +#endif + .get_eeprom = mv88e6352_get_eeprom, + .set_eeprom = mv88e6352_set_eeprom, + .get_regs_len = mv88e6xxx_get_regs_len, + .get_regs = mv88e6xxx_get_regs, +}; + +MODULE_ALIAS("platform:mv88e6352"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index a6c90cf5634d..cd6807c6b4ed 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -485,20 +485,108 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, for (i = 0; i < nr_stats; i++) { struct mv88e6xxx_hw_stat *s = stats + i; u32 low; - u32 high; - + u32 high = 0; + + if (s->reg >= 0x100) { + int ret; + + ret = mv88e6xxx_reg_read(ds, REG_PORT(port), + s->reg - 0x100); + if (ret < 0) + goto error; + low = ret; + if (s->sizeof_stat == 4) { + ret = mv88e6xxx_reg_read(ds, REG_PORT(port), + s->reg - 0x100 + 1); + if (ret < 0) + goto error; + high = ret; + } + data[i] = (((u64)high) << 16) | low; + continue; + } mv88e6xxx_stats_read(ds, s->reg, &low); if (s->sizeof_stat == 8) mv88e6xxx_stats_read(ds, s->reg + 1, &high); - else - high = 0; data[i] = (((u64)high) << 32) | low; } - +error: mutex_unlock(&ps->stats_mutex); } +int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) +{ + return 32 * sizeof(u16); +} + +void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, + struct ethtool_regs *regs, void *_p) +{ + u16 *p = _p; + int i; + + regs->version = 0; + + memset(p, 0xff, 32 * sizeof(u16)); + + for (i = 0; i < 32; i++) { + int ret; + + ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i); + if (ret >= 0) + p[i] = ret; + } +} + +#ifdef 
CONFIG_NET_DSA_HWMON + +int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + int val; + + *temp = 0; + + mutex_lock(&ps->phy_mutex); + + ret = mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6); + if (ret < 0) + goto error; + + /* Enable temperature sensor */ + ret = mv88e6xxx_phy_read(ds, 0x0, 0x1a); + if (ret < 0) + goto error; + + ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5)); + if (ret < 0) + goto error; + + /* Wait for temperature to stabilize */ + usleep_range(10000, 12000); + + val = mv88e6xxx_phy_read(ds, 0x0, 0x1a); + if (val < 0) { + ret = val; + goto error; + } + + /* Disable temperature sensor */ + ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5)); + if (ret < 0) + goto error; + + *temp = ((val & 0x1f) - 5) * 5; + +error: + mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0); + mutex_unlock(&ps->phy_mutex); + return ret; +} +#endif /* CONFIG_NET_DSA_HWMON */ + static int __init mv88e6xxx_init(void) { #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) @@ -507,6 +595,9 @@ static int __init mv88e6xxx_init(void) #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) register_switch_driver(&mv88e6123_61_65_switch_driver); #endif +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352) + register_switch_driver(&mv88e6352_switch_driver); +#endif #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171) register_switch_driver(&mv88e6171_switch_driver); #endif diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 5e5145ad9525..03e397efde36 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -37,6 +37,17 @@ struct mv88e6xxx_priv_state { */ struct mutex stats_mutex; + /* This mutex serializes phy access for chips with + * indirect phy addressing. It is unused for chips + * with direct phy access. + */ + struct mutex phy_mutex; + + /* This mutex serializes eeprom access for chips with + * eeprom support. + */ + struct mutex eeprom_mutex; + int id; /* switch product id */ }; @@ -67,9 +78,14 @@ void mv88e6xxx_get_strings(struct dsa_switch *ds, void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int nr_stats, struct mv88e6xxx_hw_stat *stats, int port, uint64_t *data); +int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port); +void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, + struct ethtool_regs *regs, void *_p); +int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp); extern struct dsa_switch_driver mv88e6131_switch_driver; extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; +extern struct dsa_switch_driver mv88e6352_switch_driver; extern struct dsa_switch_driver mv88e6171_switch_driver; #define REG_READ(addr, reg) \ diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index ff435fbd1ad0..413ca4f73997 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -38,6 +38,9 @@ #include <net/rtnetlink.h> #include <linux/u64_stats_sync.h> +#define DRV_NAME "dummy" +#define DRV_VERSION "1.0" + static int numdummies = 1; /* fake multicast ability */ @@ -120,12 +123,24 @@ static const struct net_device_ops dummy_netdev_ops = { .ndo_change_carrier = dummy_change_carrier, }; +static void dummy_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); +} + +static const struct ethtool_ops dummy_ethtool_ops = { + .get_drvinfo = dummy_get_drvinfo, +}; + static void dummy_setup(struct net_device *dev) { ether_setup(dev); /* Initialize the device structure. 
*/ dev->netdev_ops = &dummy_netdev_ops; + dev->ethtool_ops = &dummy_ethtool_ops; dev->destructor = free_netdev; /* Fill in device structure with ethernet-generic values. */ @@ -150,7 +165,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[]) } static struct rtnl_link_ops dummy_link_ops __read_mostly = { - .kind = "dummy", + .kind = DRV_NAME, .setup = dummy_setup, .validate = dummy_validate, }; @@ -209,4 +224,4 @@ static void __exit dummy_cleanup_module(void) module_init(dummy_init_module); module_exit(dummy_cleanup_module); MODULE_LICENSE("GPL"); -MODULE_ALIAS_RTNL_LINK("dummy"); +MODULE_ALIAS_RTNL_LINK(DRV_NAME); diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 48775b88bac7..dede43f4ce09 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -1285,7 +1285,7 @@ typhoon_request_firmware(struct typhoon *tp) return err; } - image_data = (u8 *) typhoon_fw->data; + image_data = typhoon_fw->data; remaining = typhoon_fw->size; if (remaining < sizeof(struct typhoon_file_header)) goto invalid_fw; @@ -1343,7 +1343,7 @@ typhoon_download_firmware(struct typhoon *tp) int i; int err; - image_data = (u8 *) typhoon_fw->data; + image_data = typhoon_fw->data; fHdr = (struct typhoon_file_header *) image_data; /* Cannot just map the firmware image using pci_map_single() as diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 1ed1fbba5d58..df76050d0a9d 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -155,6 +155,7 @@ source "drivers/net/ethernet/qualcomm/Kconfig" source "drivers/net/ethernet/realtek/Kconfig" source "drivers/net/ethernet/renesas/Kconfig" source "drivers/net/ethernet/rdc/Kconfig" +source "drivers/net/ethernet/rocker/Kconfig" config S6GMAC tristate "S6105 GMAC ethernet support" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 6e0b629e9859..bf56f8b36e90 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -65,6 +65,7 @@ obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ obj-$(CONFIG_SH_ETH) += renesas/ obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ +obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/ obj-$(CONFIG_S6GMAC) += s6gmac.o obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/ obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 8319c99331b0..7a5e4aa5415e 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -179,7 +179,7 @@ config SUNLANCE config AMD_XGBE tristate "AMD 10GbE Ethernet driver" - depends on OF_NET + depends on OF_NET && HAS_IOMEM select PHYLIB select AMD_XGBE_PHY select BITREVERSE diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index caade30820d5..75b08c63d39f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -125,9 +125,6 @@ #define DMA_AXIAWCR 0x3018 #define DMA_DSR0 0x3020 #define DMA_DSR1 0x3024 -#define DMA_DSR2 0x3028 -#define DMA_DSR3 0x302c -#define DMA_DSR4 0x3030 /* DMA register entry bit positions and sizes */ #define DMA_AXIARCR_DRC_INDEX 0 @@ -158,10 +155,6 @@ #define DMA_AXIAWCR_TDC_WIDTH 4 #define DMA_AXIAWCR_TDD_INDEX 28 #define DMA_AXIAWCR_TDD_WIDTH 2 -#define DMA_DSR0_RPS_INDEX 8 -#define DMA_DSR0_RPS_WIDTH 4 -#define DMA_DSR0_TPS_INDEX 12 -#define DMA_DSR0_TPS_WIDTH 4 #define DMA_ISR_MACIS_INDEX 17 #define 
DMA_ISR_MACIS_WIDTH 1 #define DMA_ISR_MTLIS_INDEX 16 @@ -175,6 +168,20 @@ #define DMA_SBMR_UNDEF_INDEX 0 #define DMA_SBMR_UNDEF_WIDTH 1 +/* DMA register values */ +#define DMA_DSR_RPS_WIDTH 4 +#define DMA_DSR_TPS_WIDTH 4 +#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH) +#define DMA_DSR0_RPS_START 8 +#define DMA_DSR0_TPS_START 12 +#define DMA_DSRX_FIRST_QUEUE 3 +#define DMA_DSRX_INC 4 +#define DMA_DSRX_QPR 4 +#define DMA_DSRX_RPS_START 0 +#define DMA_DSRX_TPS_START 4 +#define DMA_TPS_STOPPED 0x00 +#define DMA_TPS_SUSPENDED 0x06 + /* DMA channel register offsets * Multiple channels can be active. The first channel has registers * that begin at 0x3100. Each subsequent channel has registers that @@ -207,6 +214,8 @@ /* DMA channel register entry bit positions and sizes */ #define DMA_CH_CR_PBLX8_INDEX 16 #define DMA_CH_CR_PBLX8_WIDTH 1 +#define DMA_CH_CR_SPH_INDEX 24 +#define DMA_CH_CR_SPH_WIDTH 1 #define DMA_CH_IER_AIE_INDEX 15 #define DMA_CH_IER_AIE_WIDTH 1 #define DMA_CH_IER_FBEE_INDEX 12 @@ -306,6 +315,9 @@ #define MAC_MACA0LR 0x0304 #define MAC_MACA1HR 0x0308 #define MAC_MACA1LR 0x030c +#define MAC_RSSCR 0x0c80 +#define MAC_RSSAR 0x0c88 +#define MAC_RSSDR 0x0c8c #define MAC_TSCR 0x0d00 #define MAC_SSIR 0x0d04 #define MAC_STSR 0x0d08 @@ -429,6 +441,8 @@ #define MAC_RCR_CST_WIDTH 1 #define MAC_RCR_DCRCC_INDEX 3 #define MAC_RCR_DCRCC_WIDTH 1 +#define MAC_RCR_HDSMS_INDEX 12 +#define MAC_RCR_HDSMS_WIDTH 3 #define MAC_RCR_IPC_INDEX 9 #define MAC_RCR_IPC_WIDTH 1 #define MAC_RCR_JE_INDEX 8 @@ -445,6 +459,24 @@ #define MAC_RFCR_UP_WIDTH 1 #define MAC_RQC0R_RXQ0EN_INDEX 0 #define MAC_RQC0R_RXQ0EN_WIDTH 2 +#define MAC_RSSAR_ADDRT_INDEX 2 +#define MAC_RSSAR_ADDRT_WIDTH 1 +#define MAC_RSSAR_CT_INDEX 1 +#define MAC_RSSAR_CT_WIDTH 1 +#define MAC_RSSAR_OB_INDEX 0 +#define MAC_RSSAR_OB_WIDTH 1 +#define MAC_RSSAR_RSSIA_INDEX 8 +#define MAC_RSSAR_RSSIA_WIDTH 8 +#define MAC_RSSCR_IP2TE_INDEX 1 +#define MAC_RSSCR_IP2TE_WIDTH 1 +#define MAC_RSSCR_RSSE_INDEX 0 +#define MAC_RSSCR_RSSE_WIDTH 1 +#define MAC_RSSCR_TCP4TE_INDEX 2 +#define MAC_RSSCR_TCP4TE_WIDTH 1 +#define MAC_RSSCR_UDP4TE_INDEX 3 +#define MAC_RSSCR_UDP4TE_WIDTH 1 +#define MAC_RSSDR_DMCH_INDEX 0 +#define MAC_RSSDR_DMCH_WIDTH 4 #define MAC_SSIR_SNSINC_INDEX 8 #define MAC_SSIR_SNSINC_WIDTH 8 #define MAC_SSIR_SSINC_INDEX 16 @@ -844,9 +876,13 @@ #define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 #define RX_NORMAL_DESC0_OVT_INDEX 0 #define RX_NORMAL_DESC0_OVT_WIDTH 16 +#define RX_NORMAL_DESC2_HL_INDEX 0 +#define RX_NORMAL_DESC2_HL_WIDTH 10 #define RX_NORMAL_DESC3_CDA_INDEX 27 #define RX_NORMAL_DESC3_CDA_WIDTH 1 #define RX_NORMAL_DESC3_CTXT_INDEX 30 @@ -855,14 +891,27 @@ #define RX_NORMAL_DESC3_ES_WIDTH 1 #define RX_NORMAL_DESC3_ETLT_INDEX 16 #define RX_NORMAL_DESC3_ETLT_WIDTH 4 +#define RX_NORMAL_DESC3_FD_INDEX 29 +#define RX_NORMAL_DESC3_FD_WIDTH 1 #define RX_NORMAL_DESC3_INTE_INDEX 30 #define RX_NORMAL_DESC3_INTE_WIDTH 1 +#define RX_NORMAL_DESC3_L34T_INDEX 20 +#define RX_NORMAL_DESC3_L34T_WIDTH 4 #define RX_NORMAL_DESC3_LD_INDEX 28 #define RX_NORMAL_DESC3_LD_WIDTH 1 #define RX_NORMAL_DESC3_OWN_INDEX 31 #define RX_NORMAL_DESC3_OWN_WIDTH 1 #define RX_NORMAL_DESC3_PL_INDEX 0 #define RX_NORMAL_DESC3_PL_WIDTH 14 +#define RX_NORMAL_DESC3_RSV_INDEX 26 +#define RX_NORMAL_DESC3_RSV_WIDTH 1 + +#define RX_DESC3_L34T_IPV4_TCP 1 +#define RX_DESC3_L34T_IPV4_UDP 2 +#define 
RX_DESC3_L34T_IPV4_ICMP 3 +#define RX_DESC3_L34T_IPV6_TCP 9 +#define RX_DESC3_L34T_IPV6_UDP 10 +#define RX_DESC3_L34T_IPV6_ICMP 11 #define RX_CONTEXT_DESC3_TSA_INDEX 4 #define RX_CONTEXT_DESC3_TSA_WIDTH 1 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 6fc5da01437d..51b68d1299fe 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -117,7 +117,7 @@ #include "xgbe.h" #include "xgbe-common.h" -static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *); +static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *); static void xgbe_free_ring(struct xgbe_prv_data *pdata, struct xgbe_ring *ring) @@ -131,13 +131,35 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata, if (ring->rdata) { for (i = 0; i < ring->rdesc_count; i++) { rdata = XGBE_GET_DESC_DATA(ring, i); - xgbe_unmap_skb(pdata, rdata); + xgbe_unmap_rdata(pdata, rdata); } kfree(ring->rdata); ring->rdata = NULL; } + if (ring->rx_hdr_pa.pages) { + dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma, + ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE); + put_page(ring->rx_hdr_pa.pages); + + ring->rx_hdr_pa.pages = NULL; + ring->rx_hdr_pa.pages_len = 0; + ring->rx_hdr_pa.pages_offset = 0; + ring->rx_hdr_pa.pages_dma = 0; + } + + if (ring->rx_buf_pa.pages) { + dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma, + ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE); + put_page(ring->rx_buf_pa.pages); + + ring->rx_buf_pa.pages = NULL; + ring->rx_buf_pa.pages_len = 0; + ring->rx_buf_pa.pages_offset = 0; + ring->rx_buf_pa.pages_dma = 0; + } + if (ring->rdesc) { dma_free_coherent(pdata->dev, (sizeof(struct xgbe_ring_desc) * @@ -233,6 +255,96 @@ err_ring: return ret; } +static int xgbe_alloc_pages(struct xgbe_prv_data *pdata, + struct xgbe_page_alloc *pa, gfp_t gfp, int order) +{ + struct page *pages = NULL; + dma_addr_t pages_dma; + int ret; + + /* Try to obtain pages, decreasing order if necessary */ + gfp |= __GFP_COLD | __GFP_COMP; + while (order >= 0) { + pages = alloc_pages(gfp, order); + if (pages) + break; + + order--; + } + if (!pages) + return -ENOMEM; + + /* Map the pages */ + pages_dma = dma_map_page(pdata->dev, pages, 0, + PAGE_SIZE << order, DMA_FROM_DEVICE); + ret = dma_mapping_error(pdata->dev, pages_dma); + if (ret) { + put_page(pages); + return ret; + } + + pa->pages = pages; + pa->pages_len = PAGE_SIZE << order; + pa->pages_offset = 0; + pa->pages_dma = pages_dma; + + return 0; +} + +static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd, + struct xgbe_page_alloc *pa, + unsigned int len) +{ + get_page(pa->pages); + bd->pa = *pa; + + bd->dma = pa->pages_dma + pa->pages_offset; + bd->dma_len = len; + + pa->pages_offset += len; + if ((pa->pages_offset + len) > pa->pages_len) { + /* This data descriptor is responsible for unmapping page(s) */ + bd->pa_unmap = *pa; + + /* Get a new allocation next time */ + pa->pages = NULL; + pa->pages_len = 0; + pa->pages_offset = 0; + pa->pages_dma = 0; + } +} + +static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, + struct xgbe_ring *ring, + struct xgbe_ring_data *rdata) +{ + int order, ret; + + if (!ring->rx_hdr_pa.pages) { + ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0); + if (ret) + return ret; + } + + if (!ring->rx_buf_pa.pages) { + order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0); + ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC, + order); + if (ret) + return ret; + } + + /* Set up the header page info */ + 
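/* Editor's note: a worked example of the slice scheme implemented by
 * xgbe_set_buffer_data() above, assuming 4 KiB pages and 2 KiB slices:
 * the first two fills hand out offsets 0 and 2048 of the same mapped
 * page group, each taking a page reference; on the second fill the
 * next-slice check overflows pages_len, so the allocation is recorded
 * in pa_unmap (letting xgbe_unmap_rdata() issue the dma_unmap_page()
 * when that descriptor is reclaimed) and pa->pages is cleared to force
 * a fresh xgbe_alloc_pages() call on the next fill.
 */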
xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, + XGBE_SKB_ALLOC_SIZE); + + /* Set up the buffer page info */ + xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa, + pdata->rx_buf_size); + + return 0; +} + static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; @@ -266,7 +378,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata) ring->cur = 0; ring->dirty = 0; - ring->tx.queue_stopped = 0; + memset(&ring->tx, 0, sizeof(ring->tx)); hw_if->tx_desc_init(channel); } @@ -281,8 +393,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) struct xgbe_ring *ring; struct xgbe_ring_desc *rdesc; struct xgbe_ring_data *rdata; - dma_addr_t rdesc_dma, skb_dma; - struct sk_buff *skb = NULL; + dma_addr_t rdesc_dma; unsigned int i, j; DBGPR("-->xgbe_wrapper_rx_descriptor_init\n"); @@ -302,22 +413,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) rdata->rdesc = rdesc; rdata->rdesc_dma = rdesc_dma; - /* Allocate skb & assign to each rdesc */ - skb = dev_alloc_skb(pdata->rx_buf_size); - if (skb == NULL) - break; - skb_dma = dma_map_single(pdata->dev, skb->data, - pdata->rx_buf_size, - DMA_FROM_DEVICE); - if (dma_mapping_error(pdata->dev, skb_dma)) { - netdev_alert(pdata->netdev, - "failed to do the dma map\n"); - dev_kfree_skb_any(skb); + if (xgbe_map_rx_buffer(pdata, ring, rdata)) break; - } - rdata->skb = skb; - rdata->skb_dma = skb_dma; - rdata->skb_dma_len = pdata->rx_buf_size; rdesc++; rdesc_dma += sizeof(struct xgbe_ring_desc); @@ -325,8 +422,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) ring->cur = 0; ring->dirty = 0; - ring->rx.realloc_index = 0; - ring->rx.realloc_threshold = 0; + memset(&ring->rx, 0, sizeof(ring->rx)); hw_if->rx_desc_init(channel); } @@ -334,8 +430,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) DBGPR("<--xgbe_wrapper_rx_descriptor_init\n"); } -static void xgbe_unmap_skb(struct xgbe_prv_data *pdata, - struct xgbe_ring_data *rdata) +static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata, + struct xgbe_ring_data *rdata) { if (rdata->skb_dma) { if (rdata->mapped_as_page) { @@ -354,8 +450,29 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata, rdata->skb = NULL; } - rdata->tso_header = 0; - rdata->len = 0; + if (rdata->rx.hdr.pa.pages) + put_page(rdata->rx.hdr.pa.pages); + + if (rdata->rx.hdr.pa_unmap.pages) { + dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma, + rdata->rx.hdr.pa_unmap.pages_len, + DMA_FROM_DEVICE); + put_page(rdata->rx.hdr.pa_unmap.pages); + } + + if (rdata->rx.buf.pa.pages) + put_page(rdata->rx.buf.pa.pages); + + if (rdata->rx.buf.pa_unmap.pages) { + dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma, + rdata->rx.buf.pa_unmap.pages_len, + DMA_FROM_DEVICE); + put_page(rdata->rx.buf.pa_unmap.pages); + } + + memset(&rdata->tx, 0, sizeof(rdata->tx)); + memset(&rdata->rx, 0, sizeof(rdata->rx)); + rdata->interrupt = 0; rdata->mapped_as_page = 0; @@ -414,7 +531,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) } rdata->skb_dma = skb_dma; rdata->skb_dma_len = packet->header_len; - rdata->tso_header = 1; offset = packet->header_len; @@ -494,7 +610,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) err_out: while (start_index < cur_index) { rdata = XGBE_GET_DESC_DATA(ring, start_index++); - xgbe_unmap_skb(pdata, rdata); + xgbe_unmap_rdata(pdata, rdata); } DBGPR("<--xgbe_map_tx_skb: 
count=0\n"); @@ -502,40 +618,25 @@ err_out: return 0; } -static void xgbe_realloc_skb(struct xgbe_channel *channel) +static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel) { struct xgbe_prv_data *pdata = channel->pdata; struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; - struct sk_buff *skb = NULL; - dma_addr_t skb_dma; int i; - DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n", + DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n", ring->rx.realloc_index); for (i = 0; i < ring->dirty; i++) { rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index); /* Reset rdata values */ - xgbe_unmap_skb(pdata, rdata); + xgbe_unmap_rdata(pdata, rdata); - /* Allocate skb & assign to each rdesc */ - skb = dev_alloc_skb(pdata->rx_buf_size); - if (skb == NULL) + if (xgbe_map_rx_buffer(pdata, ring, rdata)) break; - skb_dma = dma_map_single(pdata->dev, skb->data, - pdata->rx_buf_size, DMA_FROM_DEVICE); - if (dma_mapping_error(pdata->dev, skb_dma)) { - netdev_alert(pdata->netdev, - "failed to do the dma map\n"); - dev_kfree_skb_any(skb); - break; - } - rdata->skb = skb; - rdata->skb_dma = skb_dma; - rdata->skb_dma_len = pdata->rx_buf_size; hw_if->rx_desc_reset(rdata); @@ -543,7 +644,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel) } ring->dirty = 0; - DBGPR("<--xgbe_realloc_skb\n"); + DBGPR("<--xgbe_realloc_rx_buffer\n"); } void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if) @@ -553,8 +654,8 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if) desc_if->alloc_ring_resources = xgbe_alloc_ring_resources; desc_if->free_ring_resources = xgbe_free_ring_resources; desc_if->map_tx_skb = xgbe_map_tx_skb; - desc_if->realloc_skb = xgbe_realloc_skb; - desc_if->unmap_skb = xgbe_unmap_skb; + desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer; + desc_if->unmap_rdata = xgbe_unmap_rdata; desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init; desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 9da3a03e8c07..53f5f66ec2ee 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -335,6 +335,161 @@ static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata) } } +static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1); + } + + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); +} + +static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, + unsigned int index, unsigned int val) +{ + unsigned int wait; + int ret = 0; + + mutex_lock(&pdata->rss_mutex); + + if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) { + ret = -EBUSY; + goto unlock; + } + + XGMAC_IOWRITE(pdata, MAC_RSSDR, val); + + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index); + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type); + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0); + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1); + + wait = 1000; + while (wait--) { + if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) + goto unlock; + + usleep_range(1000, 1500); + } + + ret = -EBUSY; + +unlock: + mutex_unlock(&pdata->rss_mutex); + + return ret; +} + +static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata) 
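/* Editor's note: a sketch of the handshake this helper depends on, as
 * implemented by xgbe_write_rss_reg() above: the value is staged in
 * MAC_RSSDR, the index and type are set in MAC_RSSAR, then the OB
 * (own/busy) bit is set and polled until clear, for up to roughly a
 * second (1000 iterations of usleep_range(1000, 1500)). The key is
 * written one 32-bit word at a time using the post-decremented
 * key_regs count as the register index, so the last word of rss_key
 * lands in index 0.
 */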
+{ + unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); + unsigned int *key = (unsigned int *)&pdata->rss_key; + int ret; + + while (key_regs--) { + ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE, + key_regs, *key++); + if (ret) + return ret; + } + + return 0; +} + +static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata) +{ + unsigned int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { + ret = xgbe_write_rss_reg(pdata, + XGBE_RSS_LOOKUP_TABLE_TYPE, i, + pdata->rss_table[i]); + if (ret) + return ret; + } + + return 0; +} + +static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key) +{ + memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); + + return xgbe_write_rss_hash_key(pdata); +} + +static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata, + const u32 *table) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) + XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]); + + return xgbe_write_rss_lookup_table(pdata); +} + +static int xgbe_enable_rss(struct xgbe_prv_data *pdata) +{ + int ret; + + if (!pdata->hw_feat.rss) + return -EOPNOTSUPP; + + /* Program the hash key */ + ret = xgbe_write_rss_hash_key(pdata); + if (ret) + return ret; + + /* Program the lookup table */ + ret = xgbe_write_rss_lookup_table(pdata); + if (ret) + return ret; + + /* Set the RSS options */ + XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); + + /* Enable RSS */ + XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1); + + return 0; +} + +static int xgbe_disable_rss(struct xgbe_prv_data *pdata) +{ + if (!pdata->hw_feat.rss) + return -EOPNOTSUPP; + + XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0); + + return 0; +} + +static void xgbe_config_rss(struct xgbe_prv_data *pdata) +{ + int ret; + + if (!pdata->hw_feat.rss) + return; + + if (pdata->netdev->features & NETIF_F_RXHASH) + ret = xgbe_enable_rss(pdata); + else + ret = xgbe_disable_rss(pdata); + + if (ret) + netdev_err(pdata->netdev, + "error configuring RSS, RSS disabled\n"); +} + static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) { unsigned int max_q_count, q_count; @@ -465,17 +620,21 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) if (channel->tx_ring) { /* Enable the following Tx interrupts - * TIE - Transmit Interrupt Enable (unless polling) + * TIE - Transmit Interrupt Enable (unless using + * per channel interrupts) */ - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); + if (!pdata->per_channel_irq) + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); } if (channel->rx_ring) { /* Enable the following Rx interrupts * RBUE - Receive Buffer Unavailable Enable - * RIE - Receive Interrupt Enable + * RIE - Receive Interrupt Enable (unless using + * per channel interrupts) */ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1); - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); + if (!pdata->per_channel_irq) + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); } XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); @@ -880,13 +1039,15 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) rdesc->desc1 = 0; rdesc->desc2 = 0; rdesc->desc3 = 0; + + /* Make sure ownership is written to the descriptor */ + wmb(); } static void xgbe_tx_desc_init(struct xgbe_channel *channel) { struct xgbe_ring *ring = channel->tx_ring; struct xgbe_ring_data *rdata; - struct xgbe_ring_desc *rdesc; int i; int start_index = ring->cur; @@ -895,26 +1056,11 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel) /* Initialize all descriptors */ for (i
= 0; i < ring->rdesc_count; i++) { rdata = XGBE_GET_DESC_DATA(ring, i); - rdesc = rdata->rdesc; - /* Initialize Tx descriptor - * Set buffer 1 (lo) address to zero - * Set buffer 1 (hi) address to zero - * Reset all other control bits (IC, TTSE, B2L & B1L) - * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, - * etc) - */ - rdesc->desc0 = 0; - rdesc->desc1 = 0; - rdesc->desc2 = 0; - rdesc->desc3 = 0; + /* Initialize Tx descriptor */ + xgbe_tx_desc_reset(rdata); } - /* Make sure everything is written to the descriptor(s) before - * telling the device about them - */ - wmb(); - /* Update the total number of Tx descriptors */ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1); @@ -933,19 +1079,19 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata) struct xgbe_ring_desc *rdesc = rdata->rdesc; /* Reset the Rx descriptor - * Set buffer 1 (lo) address to dma address (lo) - * Set buffer 1 (hi) address to dma address (hi) - * Set buffer 2 (lo) address to zero - * Set buffer 2 (hi) address to zero and set control bits - * OWN and INTE + * Set buffer 1 (lo) address to header dma address (lo) + * Set buffer 1 (hi) address to header dma address (hi) + * Set buffer 2 (lo) address to buffer dma address (lo) + * Set buffer 2 (hi) address to buffer dma address (hi) and + * set control bits OWN and INTE */ - rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); - rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); - rdesc->desc2 = 0; + rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma)); + rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma)); + rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma)); + rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma)); - rdesc->desc3 = 0; - if (rdata->interrupt) - XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1); + XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, + rdata->interrupt ? 
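/* Editor's note: INTE now simply mirrors the per-descriptor
 * rdata->interrupt flag, which xgbe_rx_desc_init() below derives from
 * the rx-frames coalescing setting, rather than being set and then
 * conditionally cleared as the deleted initialization code did.
 */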
1 : 0); /* Since the Rx DMA engine is likely running, make sure everything * is written to the descriptor(s) before setting the OWN bit @@ -964,7 +1110,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel) struct xgbe_prv_data *pdata = channel->pdata; struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; - struct xgbe_ring_desc *rdesc; unsigned int start_index = ring->cur; unsigned int rx_coalesce, rx_frames; unsigned int i; @@ -977,34 +1122,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel) /* Initialize all descriptors */ for (i = 0; i < ring->rdesc_count; i++) { rdata = XGBE_GET_DESC_DATA(ring, i); - rdesc = rdata->rdesc; - /* Initialize Rx descriptor - * Set buffer 1 (lo) address to dma address (lo) - * Set buffer 1 (hi) address to dma address (hi) - * Set buffer 2 (lo) address to zero - * Set buffer 2 (hi) address to zero and set control - * bits OWN and INTE appropriately - */ - rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); - rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); - rdesc->desc2 = 0; - rdesc->desc3 = 0; - XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1); - XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1); - rdata->interrupt = 1; - if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) { - /* Clear interrupt on completion bit */ - XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, - 0); + /* Set interrupt on completion bit as appropriate */ + if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) rdata->interrupt = 0; - } - } + else + rdata->interrupt = 1; - /* Make sure everything is written to the descriptors before - * telling the device about them - */ - wmb(); + /* Initialize Rx descriptor */ + xgbe_rx_desc_reset(rdata); + } /* Update the total number of Rx descriptors */ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1); @@ -1198,7 +1325,30 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata) xgbe_config_flow_control(pdata); } -static void xgbe_pre_xmit(struct xgbe_channel *channel) +static void xgbe_tx_start_xmit(struct xgbe_channel *channel, + struct xgbe_ring *ring) +{ + struct xgbe_prv_data *pdata = channel->pdata; + struct xgbe_ring_data *rdata; + + /* Issue a poll command to Tx DMA by writing address + * of next immediate free descriptor */ + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); + XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO, + lower_32_bits(rdata->rdesc_dma)); + + /* Start the Tx coalescing timer */ + if (pdata->tx_usecs && !channel->tx_timer_active) { + channel->tx_timer_active = 1; + hrtimer_start(&channel->tx_timer, + ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC), + HRTIMER_MODE_REL); + } + + ring->tx.xmit_more = 0; +} + +static void xgbe_dev_xmit(struct xgbe_channel *channel) { struct xgbe_prv_data *pdata = channel->pdata; struct xgbe_ring *ring = channel->tx_ring; @@ -1207,11 +1357,11 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) struct xgbe_packet_data *packet = &ring->packet_data; unsigned int csum, tso, vlan; unsigned int tso_context, vlan_context; - unsigned int tx_coalesce, tx_frames; + unsigned int tx_set_ic; int start_index = ring->cur; int i; - DBGPR("-->xgbe_pre_xmit\n"); + DBGPR("-->xgbe_dev_xmit\n"); csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, CSUM_ENABLE); @@ -1230,10 +1380,26 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) else vlan_context = 0; - tx_coalesce = (pdata->tx_usecs || pdata->tx_frames) ?
1 : 0; - tx_frames = pdata->tx_frames; - if (tx_coalesce && !channel->tx_timer_active) - ring->coalesce_count = 0; + /* Determine if an interrupt should be generated for this Tx: + * Interrupt: + * - Tx frame count exceeds the frame count setting + * - Addition of Tx frame count to the frame count since the + * last interrupt was set exceeds the frame count setting + * No interrupt: + * - No frame count setting specified (ethtool -C ethX tx-frames 0) + * - Addition of Tx frame count to the frame count since the + * last interrupt was set does not exceed the frame count setting + */ + ring->coalesce_count += packet->tx_packets; + if (!pdata->tx_frames) + tx_set_ic = 0; + else if (packet->tx_packets > pdata->tx_frames) + tx_set_ic = 1; + else if ((ring->coalesce_count % pdata->tx_frames) < + packet->tx_packets) + tx_set_ic = 1; + else + tx_set_ic = 0; rdata = XGBE_GET_DESC_DATA(ring, ring->cur); rdesc = rdata->rdesc; @@ -1300,13 +1466,6 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1); - /* Set IC bit based on Tx coalescing settings */ - XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); - if (tx_coalesce && (!tx_frames || - (++ring->coalesce_count % tx_frames))) - /* Clear IC bit */ - XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0); - /* Mark it as First Descriptor */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1); @@ -1351,13 +1510,6 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, rdata->skb_dma_len); - /* Set IC bit based on Tx coalescing settings */ - XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); - if (tx_coalesce && (!tx_frames || - (++ring->coalesce_count % tx_frames))) - /* Clear IC bit */ - XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0); - /* Set OWN bit */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); @@ -1373,6 +1525,14 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) /* Set LAST bit for the last descriptor */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1); + /* Set IC bit based on Tx coalescing settings */ + if (tx_set_ic) + XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); + + /* Save the Tx info to report back during cleanup */ + rdata->tx.packets = packet->tx_packets; + rdata->tx.bytes = packet->tx_bytes; + /* In case the Tx DMA engine is running, make sure everything * is written to the descriptor(s) before setting the OWN bit * for the first descriptor @@ -1391,26 +1551,19 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) /* Make sure ownership is written to the descriptor */ wmb(); - /* Issue a poll command to Tx DMA by writing address - * of next immediate free descriptor */ ring->cur++; - rdata = XGBE_GET_DESC_DATA(ring, ring->cur); - XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO, - lower_32_bits(rdata->rdesc_dma)); - - /* Start the Tx coalescing timer */ - if (tx_coalesce && !channel->tx_timer_active) { - channel->tx_timer_active = 1; - hrtimer_start(&channel->tx_timer, - ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC), - HRTIMER_MODE_REL); - } + if (!packet->skb->xmit_more || + netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev, + channel->queue_index))) + xgbe_tx_start_xmit(channel, ring); + else + ring->tx.xmit_more = 1; DBGPR(" %s: descriptors %u to %u written\n", channel->name, start_index & (ring->rdesc_count - 1), (ring->cur - 1) & (ring->rdesc_count - 1)); - 
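/* Editor's note: a worked example of the tx_set_ic logic above,
 * assuming an ethtool tx-frames setting of 25 and a TSO skb carrying
 * tx_packets = 10: after ring->coalesce_count += 10, the IC bit is
 * requested either when one skb alone exceeds 25 packets, or when the
 * new count has just crossed a multiple of 25, i.e. when
 * (coalesce_count % 25) < 10.
 */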
DBGPR("<--xgbe_pre_xmit\n"); + DBGPR("<--xgbe_dev_xmit\n"); } static int xgbe_dev_read(struct xgbe_channel *channel) @@ -1420,7 +1573,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel) struct xgbe_ring_desc *rdesc; struct xgbe_packet_data *packet = &ring->packet_data; struct net_device *netdev = channel->pdata->netdev; - unsigned int err, etlt; + unsigned int err, etlt, l34t; DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur); @@ -1431,6 +1584,9 @@ static int xgbe_dev_read(struct xgbe_channel *channel) if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN)) return 1; + /* Make sure descriptor fields are read after reading the OWN bit */ + rmb(); + #ifdef XGMAC_ENABLE_RX_DESC_DUMP xgbe_dump_rx_desc(ring, rdesc, ring->cur); #endif @@ -1454,8 +1610,33 @@ static int xgbe_dev_read(struct xgbe_channel *channel) XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT_NEXT, 1); + /* Get the header length */ + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) + rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, + RX_NORMAL_DESC2, HL); + + /* Get the RSS hash */ + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) { + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + RSS_HASH, 1); + + packet->rss_hash = le32_to_cpu(rdesc->desc1); + + l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); + switch (l34t) { + case RX_DESC3_L34T_IPV4_TCP: + case RX_DESC3_L34T_IPV4_UDP: + case RX_DESC3_L34T_IPV6_TCP: + case RX_DESC3_L34T_IPV6_UDP: + packet->rss_hash_type = PKT_HASH_TYPE_L4; + break; + default: + packet->rss_hash_type = PKT_HASH_TYPE_L3; + } + } + /* Get the packet length */ - rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); + rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) { /* Not all the data has been transferred for this packet */ @@ -1478,7 +1659,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel) etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT); DBGPR(" err=%u, etlt=%#x\n", err, etlt); - if (!err || (err && !etlt)) { + if (!err || !etlt) { + /* No error if err is 0 or etlt is 0 */ if ((etlt == 0x09) && (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, @@ -2298,6 +2480,47 @@ static void xgbe_config_mmc(struct xgbe_prv_data *pdata) XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1); } +static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, + struct xgbe_channel *channel) +{ + unsigned int tx_dsr, tx_pos, tx_qidx; + unsigned int tx_status; + unsigned long tx_timeout; + + /* Calculate the status register to read and the position within */ + if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) { + tx_dsr = DMA_DSR0; + tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) + + DMA_DSR0_TPS_START; + } else { + tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE; + + tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); + tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) + + DMA_DSRX_TPS_START; + } + + /* The Tx engine cannot be stopped if it is actively processing + * descriptors. Wait for the Tx engine to enter the stopped or + * suspended state. Don't wait forever though... 
+ */ + tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); + while (time_before(jiffies, tx_timeout)) { + tx_status = XGMAC_IOREAD(pdata, tx_dsr); + tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH); + if ((tx_status == DMA_TPS_STOPPED) || + (tx_status == DMA_TPS_SUSPENDED)) + break; + + usleep_range(500, 1000); + } + + if (!time_before(jiffies, tx_timeout)) + netdev_info(pdata->netdev, + "timed out waiting for Tx DMA channel %u to stop\n", + channel->queue_index); +} + static void xgbe_enable_tx(struct xgbe_prv_data *pdata) { struct xgbe_channel *channel; @@ -2326,6 +2549,15 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata) struct xgbe_channel *channel; unsigned int i; + /* Prepare for Tx DMA channel stop */ + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + xgbe_prepare_tx_stop(pdata, channel); + } + /* Disable MAC Tx */ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); @@ -2417,6 +2649,15 @@ static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata) struct xgbe_channel *channel; unsigned int i; + /* Prepare for Tx DMA channel stop */ + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + xgbe_prepare_tx_stop(pdata, channel); + } + /* Disable MAC Tx */ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); @@ -2485,6 +2726,8 @@ static int xgbe_init(struct xgbe_prv_data *pdata) xgbe_config_tx_coalesce(pdata); xgbe_config_rx_buffer_size(pdata); xgbe_config_tso_mode(pdata); + xgbe_config_sph_mode(pdata); + xgbe_config_rss(pdata); desc_if->wrapper_tx_desc_init(pdata); desc_if->wrapper_rx_desc_init(pdata); xgbe_enable_dma_interrupts(pdata); @@ -2561,7 +2804,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->powerup_rx = xgbe_powerup_rx; hw_if->powerdown_rx = xgbe_powerdown_rx; - hw_if->pre_xmit = xgbe_pre_xmit; + hw_if->dev_xmit = xgbe_dev_xmit; hw_if->dev_read = xgbe_dev_read; hw_if->enable_int = xgbe_enable_int; hw_if->disable_int = xgbe_disable_int; @@ -2575,6 +2818,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->rx_desc_reset = xgbe_rx_desc_reset; hw_if->is_last_desc = xgbe_is_last_desc; hw_if->is_context_desc = xgbe_is_context_desc; + hw_if->tx_start_xmit = xgbe_tx_start_xmit; /* For FLOW ctrl */ hw_if->config_tx_flow_control = xgbe_config_tx_flow_control; @@ -2620,5 +2864,11 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->config_dcb_tc = xgbe_config_dcb_tc; hw_if->config_dcb_pfc = xgbe_config_dcb_pfc; + /* For Receive Side Scaling */ + hw_if->enable_rss = xgbe_enable_rss; + hw_if->disable_rss = xgbe_disable_rss; + hw_if->set_rss_hash_key = xgbe_set_rss_hash_key; + hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table; + DBGPR("<--xgbe_init_function_ptrs\n"); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 29554992215a..bedfdb1c430d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -114,6 +114,7 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ +#include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/tcp.h> #include <linux/if_vlan.h> @@ -126,14 +127,126 @@ #include "xgbe.h" #include "xgbe-common.h" -static int xgbe_poll(struct napi_struct *, int); +static int xgbe_one_poll(struct napi_struct *, int); +static int xgbe_all_poll(struct napi_struct *, int); static void xgbe_set_rx_mode(struct net_device *); +static int xgbe_alloc_channels(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel_mem, *channel; + struct xgbe_ring *tx_ring, *rx_ring; + unsigned int count, i; + int ret = -ENOMEM; + + count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); + + channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL); + if (!channel_mem) + goto err_channel; + + tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring), + GFP_KERNEL); + if (!tx_ring) + goto err_tx_ring; + + rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring), + GFP_KERNEL); + if (!rx_ring) + goto err_rx_ring; + + for (i = 0, channel = channel_mem; i < count; i++, channel++) { + snprintf(channel->name, sizeof(channel->name), "channel-%d", i); + channel->pdata = pdata; + channel->queue_index = i; + channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE + + (DMA_CH_INC * i); + + if (pdata->per_channel_irq) { + /* Get the DMA interrupt (offset 1) */ + ret = platform_get_irq(pdata->pdev, i + 1); + if (ret < 0) { + netdev_err(pdata->netdev, + "platform_get_irq %u failed\n", + i + 1); + goto err_irq; + } + + channel->dma_irq = ret; + } + + if (i < pdata->tx_ring_count) { + spin_lock_init(&tx_ring->lock); + channel->tx_ring = tx_ring++; + } + + if (i < pdata->rx_ring_count) { + spin_lock_init(&rx_ring->lock); + channel->rx_ring = rx_ring++; + } + + DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n", + channel->name, channel->queue_index, channel->dma_regs, + channel->dma_irq, channel->tx_ring, channel->rx_ring); + } + + pdata->channel = channel_mem; + pdata->channel_count = count; + + return 0; + +err_irq: + kfree(rx_ring); + +err_rx_ring: + kfree(tx_ring); + +err_tx_ring: + kfree(channel_mem); + +err_channel: + return ret; +} + +static void xgbe_free_channels(struct xgbe_prv_data *pdata) +{ + if (!pdata->channel) + return; + + kfree(pdata->channel->rx_ring); + kfree(pdata->channel->tx_ring); + kfree(pdata->channel); + + pdata->channel = NULL; + pdata->channel_count = 0; +} + static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring) { return (ring->rdesc_count - (ring->cur - ring->dirty)); } +static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel, + struct xgbe_ring *ring, unsigned int count) +{ + struct xgbe_prv_data *pdata = channel->pdata; + + if (count > xgbe_tx_avail_desc(ring)) { + DBGPR(" Tx queue stopped, not enough descriptors available\n"); + netif_stop_subqueue(pdata->netdev, channel->queue_index); + ring->tx.queue_stopped = 1; + + /* If we haven't notified the hardware because of xmit_more + * support, tell it now + */ + if (ring->tx.xmit_more) + pdata->hw_if.tx_start_xmit(channel, ring); + + return NETDEV_TX_BUSY; + } + + return 0; +} + static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) { unsigned int rx_buf_size; @@ -144,8 +257,8 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) } rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE) - rx_buf_size = XGBE_RX_MIN_BUF_SIZE; + rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE); + rx_buf_size 
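/* Editor's note: for the standard 1500-byte MTU this works out to
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522,
 * which the clamp above leaves untouched and the mask on this line
 * rounds up to the next XGBE_RX_BUF_ALIGN boundary; the new PAGE_SIZE
 * cap ensures a receive buffer slice always fits within one page.
 */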
= (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) & ~(XGBE_RX_BUF_ALIGN - 1); @@ -213,11 +326,7 @@ static irqreturn_t xgbe_isr(int irq, void *data) if (!dma_isr) goto isr_done; - DBGPR("-->xgbe_isr\n"); - DBGPR(" DMA_ISR = %08x\n", dma_isr); - DBGPR(" DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0)); - DBGPR(" DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1)); for (i = 0; i < pdata->channel_count; i++) { if (!(dma_isr & (1 << i))) @@ -228,6 +337,10 @@ static irqreturn_t xgbe_isr(int irq, void *data) dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr); + /* If we get a TI or RI interrupt that means per channel DMA + * interrupts are not enabled, so we use the private data napi + * structure, not the per channel napi structure + */ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) || XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) { if (napi_schedule_prep(&pdata->napi)) { @@ -270,12 +383,28 @@ static irqreturn_t xgbe_isr(int irq, void *data) DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR)); - DBGPR("<--xgbe_isr\n"); - isr_done: return IRQ_HANDLED; } +static irqreturn_t xgbe_dma_isr(int irq, void *data) +{ + struct xgbe_channel *channel = data; + + /* Per channel DMA interrupts are enabled, so we use the per + * channel napi structure and not the private data napi structure + */ + if (napi_schedule_prep(&channel->napi)) { + /* Disable Tx and Rx interrupts */ + disable_irq(channel->dma_irq); + + /* Turn on polling */ + __napi_schedule(&channel->napi); + } + + return IRQ_HANDLED; +} + static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer) { struct xgbe_channel *channel = container_of(timer, @@ -283,18 +412,24 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer) tx_timer); struct xgbe_ring *ring = channel->tx_ring; struct xgbe_prv_data *pdata = channel->pdata; + struct napi_struct *napi; unsigned long flags; DBGPR("-->xgbe_tx_timer\n"); + napi = (pdata->per_channel_irq) ? 
&channel->napi : &pdata->napi; + spin_lock_irqsave(&ring->lock, flags); - if (napi_schedule_prep(&pdata->napi)) { + if (napi_schedule_prep(napi)) { /* Disable Tx and Rx interrupts */ - xgbe_disable_rx_tx_ints(pdata); + if (pdata->per_channel_irq) + disable_irq(channel->dma_irq); + else + xgbe_disable_rx_tx_ints(pdata); /* Turn on polling */ - __napi_schedule(&pdata->napi); + __napi_schedule(napi); } channel->tx_timer_active = 0; @@ -430,18 +565,46 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add) { - if (add) - netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll, - NAPI_POLL_WEIGHT); - napi_enable(&pdata->napi); + struct xgbe_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (add) + netif_napi_add(pdata->netdev, &channel->napi, + xgbe_one_poll, NAPI_POLL_WEIGHT); + + napi_enable(&channel->napi); + } + } else { + if (add) + netif_napi_add(pdata->netdev, &pdata->napi, + xgbe_all_poll, NAPI_POLL_WEIGHT); + + napi_enable(&pdata->napi); + } } static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del) { - napi_disable(&pdata->napi); + struct xgbe_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + napi_disable(&channel->napi); - if (del) - netif_napi_del(&pdata->napi); + if (del) + netif_napi_del(&channel->napi); + } + } else { + napi_disable(&pdata->napi); + + if (del) + netif_napi_del(&pdata->napi); + } } void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) @@ -472,7 +635,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata) DBGPR("<--xgbe_init_rx_coalesce\n"); } -static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata) +static void xgbe_free_tx_data(struct xgbe_prv_data *pdata) { struct xgbe_desc_if *desc_if = &pdata->desc_if; struct xgbe_channel *channel; @@ -480,7 +643,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata) struct xgbe_ring_data *rdata; unsigned int i, j; - DBGPR("-->xgbe_free_tx_skbuff\n"); + DBGPR("-->xgbe_free_tx_data\n"); channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++, channel++) { @@ -490,14 +653,14 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata) for (j = 0; j < ring->rdesc_count; j++) { rdata = XGBE_GET_DESC_DATA(ring, j); - desc_if->unmap_skb(pdata, rdata); + desc_if->unmap_rdata(pdata, rdata); } } - DBGPR("<--xgbe_free_tx_skbuff\n"); + DBGPR("<--xgbe_free_tx_data\n"); } -static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata) +static void xgbe_free_rx_data(struct xgbe_prv_data *pdata) { struct xgbe_desc_if *desc_if = &pdata->desc_if; struct xgbe_channel *channel; @@ -505,7 +668,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata) struct xgbe_ring_data *rdata; unsigned int i, j; - DBGPR("-->xgbe_free_rx_skbuff\n"); + DBGPR("-->xgbe_free_rx_data\n"); channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++, channel++) { @@ -515,11 +678,11 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata) for (j = 0; j < ring->rdesc_count; j++) { rdata = XGBE_GET_DESC_DATA(ring, j); - desc_if->unmap_skb(pdata, rdata); + desc_if->unmap_rdata(pdata, rdata); } } - DBGPR("<--xgbe_free_rx_skbuff\n"); + DBGPR("<--xgbe_free_rx_data\n"); } static void xgbe_adjust_link(struct net_device *netdev) @@ -735,7 +898,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata) static 
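/* Editor's note on xgbe_stop() below: the added loop calls
 * netdev_tx_reset_queue() on every Tx queue, presumably so that stale
 * byte-queue-limit accounting from the torn-down rings does not
 * survive into the next xgbe_start().
 */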
void xgbe_stop(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_channel *channel; struct net_device *netdev = pdata->netdev; + struct netdev_queue *txq; + unsigned int i; DBGPR("-->xgbe_stop\n"); @@ -749,12 +915,23 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) hw_if->disable_tx(pdata); hw_if->disable_rx(pdata); + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + continue; + + txq = netdev_get_tx_queue(netdev, channel->queue_index); + netdev_tx_reset_queue(txq); + } + DBGPR("<--xgbe_stop\n"); } static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset) { + struct xgbe_channel *channel; struct xgbe_hw_if *hw_if = &pdata->hw_if; + unsigned int i; DBGPR("-->xgbe_restart_dev\n"); @@ -763,10 +940,15 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset) return; xgbe_stop(pdata); - synchronize_irq(pdata->irq_number); + synchronize_irq(pdata->dev_irq); + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) + synchronize_irq(channel->dma_irq); + } - xgbe_free_tx_skbuff(pdata); - xgbe_free_rx_skbuff(pdata); + xgbe_free_tx_data(pdata); + xgbe_free_rx_data(pdata); /* Issue software reset to device if requested */ if (reset) @@ -1008,6 +1190,12 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet) packet->tcp_header_len, packet->tcp_payload_len); DBGPR(" packet->mss=%u\n", packet->mss); + /* Update the number of packets that will ultimately be transmitted + * along with the extra bytes for each extra packet + */ + packet->tx_packets = skb_shinfo(skb)->gso_segs; + packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len; + return 0; } @@ -1033,17 +1221,22 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, unsigned int len; unsigned int i; + packet->skb = skb; + context_desc = 0; packet->rdesc_count = 0; + packet->tx_packets = 1; + packet->tx_bytes = skb->len; + if (xgbe_is_tso(skb)) { - /* TSO requires an extra desriptor if mss is different */ + /* TSO requires an extra descriptor if mss is different */ if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) { context_desc = 1; packet->rdesc_count++; } - /* TSO requires an extra desriptor for TSO header */ + /* TSO requires an extra descriptor for TSO header */ packet->rdesc_count++; XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, @@ -1091,6 +1284,8 @@ static int xgbe_open(struct net_device *netdev) struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_channel *channel = NULL; + unsigned int i = 0; int ret; DBGPR("-->xgbe_open\n"); @@ -1119,24 +1314,48 @@ static int xgbe_open(struct net_device *netdev) goto err_ptpclk; pdata->rx_buf_size = ret; + /* Allocate the channel and ring structures */ + ret = xgbe_alloc_channels(pdata); + if (ret) + goto err_ptpclk; + /* Allocate the ring descriptors and buffers */ ret = desc_if->alloc_ring_resources(pdata); if (ret) - goto err_ptpclk; + goto err_channels; /* Initialize the device restart and Tx timestamp work struct */ INIT_WORK(&pdata->restart_work, xgbe_restart); INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); /* Request interrupts */ - ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0, + ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, netdev->name, pdata); if (ret) { netdev_alert(netdev, "error requesting irq %d\n", - 
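/* Editorial aside: the xgbe_prep_tso() hunk above credits the Tx byte
 * accounting with what will actually reach the wire - skb->len counts the
 * headers once, and every additional GSO segment repeats them. A
 * self-contained restatement of that arithmetic (the demo_ name is
 * illustrative):
 */
static unsigned int demo_tso_wire_bytes(const struct sk_buff *skb,
					unsigned int header_len)
{
	unsigned int segs = skb_shinfo(skb)->gso_segs;

	/* e.g. skb->len = 65226, segs = 45, header_len = 66 -> 68130 bytes */
	return skb->len + (segs - 1) * header_len;
}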
pdata->irq_number); - goto err_irq; + pdata->dev_irq); + goto err_rings; + } + + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + snprintf(channel->dma_irq_name, + sizeof(channel->dma_irq_name) - 1, + "%s-TxRx-%u", netdev_name(netdev), + channel->queue_index); + + ret = devm_request_irq(pdata->dev, channel->dma_irq, + xgbe_dma_isr, 0, + channel->dma_irq_name, channel); + if (ret) { + netdev_alert(netdev, + "error requesting irq %d\n", + channel->dma_irq); + goto err_irq; + } + } } - pdata->irq_number = netdev->irq; ret = xgbe_start(pdata); if (ret) @@ -1149,12 +1368,21 @@ static int xgbe_open(struct net_device *netdev) err_start: hw_if->exit(pdata); - devm_free_irq(pdata->dev, pdata->irq_number, pdata); - pdata->irq_number = 0; - err_irq: + if (pdata->per_channel_irq) { + /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ + for (i--, channel--; i < pdata->channel_count; i--, channel--) + devm_free_irq(pdata->dev, channel->dma_irq, channel); + } + + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + +err_rings: desc_if->free_ring_resources(pdata); +err_channels: + xgbe_free_channels(pdata); + err_ptpclk: clk_disable_unprepare(pdata->ptpclk); @@ -1172,6 +1400,8 @@ static int xgbe_close(struct net_device *netdev) struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_channel *channel; + unsigned int i; DBGPR("-->xgbe_close\n"); @@ -1181,15 +1411,20 @@ static int xgbe_close(struct net_device *netdev) /* Issue software reset to device */ hw_if->exit(pdata); - /* Free all the ring data */ + /* Free the ring descriptors and buffers */ desc_if->free_ring_resources(pdata); - /* Release the interrupt */ - if (pdata->irq_number != 0) { - devm_free_irq(pdata->dev, pdata->irq_number, pdata); - pdata->irq_number = 0; + /* Release the interrupts */ + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) + devm_free_irq(pdata->dev, channel->dma_irq, channel); } + /* Free the channel and ring structures */ + xgbe_free_channels(pdata); + /* Disable the clocks */ clk_disable_unprepare(pdata->ptpclk); clk_disable_unprepare(pdata->sysclk); @@ -1210,12 +1445,14 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) struct xgbe_channel *channel; struct xgbe_ring *ring; struct xgbe_packet_data *packet; + struct netdev_queue *txq; unsigned long flags; int ret; DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len); channel = pdata->channel + skb->queue_mapping; + txq = netdev_get_tx_queue(netdev, channel->queue_index); ring = channel->tx_ring; packet = &ring->packet_data; @@ -1234,13 +1471,9 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) xgbe_packet_info(pdata, ring, skb, packet); /* Check that there are enough descriptors available */ - if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) { - DBGPR(" Tx queue stopped, not enough descriptors available\n"); - netif_stop_subqueue(netdev, channel->queue_index); - ring->tx.queue_stopped = 1; - ret = NETDEV_TX_BUSY; + ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count); + if (ret) goto tx_netdev_return; - } ret = xgbe_prep_tso(skb, packet); if (ret) { @@ -1257,13 +1490,21 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) xgbe_prep_tx_tstamp(pdata, skb, packet); + /* Report on the actual number of bytes (to be) 
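/* Editorial aside: the err_irq unwind above walks backwards with an unsigned
 * index. After freeing the IRQ for channel 0, "i--" wraps to UINT_MAX, so the
 * "i < channel_count" test fails and the loop terminates without a signed
 * sentinel. The same idiom in isolation (demo_undo_one() is a hypothetical
 * cleanup helper):
 */
static void demo_unwind(unsigned int done, unsigned int count)
{
	unsigned int i;

	for (i = done - 1; i < count; i--)	/* wrap past 0 terminates */
		demo_undo_one(i);
}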
sent */ + netdev_tx_sent_queue(txq, packet->tx_bytes); + /* Configure required descriptor fields for transmission */ - hw_if->pre_xmit(channel); + hw_if->dev_xmit(channel); #ifdef XGMAC_ENABLE_TX_PKT_DUMP xgbe_print_pkt(netdev, skb, true); #endif + /* Stop the queue in advance if there may not be enough descriptors */ + xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS); + + ret = NETDEV_TX_OK; + tx_netdev_return: spin_unlock_irqrestore(&ring->lock, flags); @@ -1420,14 +1661,20 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, static void xgbe_poll_controller(struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_channel *channel; + unsigned int i; DBGPR("-->xgbe_poll_controller\n"); - disable_irq(pdata->irq_number); - - xgbe_isr(pdata->irq_number, pdata); - - enable_irq(pdata->irq_number); + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) + xgbe_dma_isr(channel->dma_irq, channel); + } else { + disable_irq(pdata->dev_irq); + xgbe_isr(pdata->dev_irq, pdata); + enable_irq(pdata->dev_irq); + } DBGPR("<--xgbe_poll_controller\n"); } @@ -1465,12 +1712,21 @@ static int xgbe_set_features(struct net_device *netdev, { struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; - unsigned int rxcsum, rxvlan, rxvlan_filter; + netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter; + int ret = 0; + rxhash = pdata->netdev_features & NETIF_F_RXHASH; rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; + if ((features & NETIF_F_RXHASH) && !rxhash) + ret = hw_if->enable_rss(pdata); + else if (!(features & NETIF_F_RXHASH) && rxhash) + ret = hw_if->disable_rss(pdata); + if (ret) + return ret; + if ((features & NETIF_F_RXCSUM) && !rxcsum) hw_if->enable_rx_csum(pdata); else if (!(features & NETIF_F_RXCSUM) && rxcsum) @@ -1524,7 +1780,7 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel) struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; - desc_if->realloc_skb(channel); + desc_if->realloc_rx_buffer(channel); /* Update the Rx Tail Pointer Register with address of * the last cleaned entry */ @@ -1533,6 +1789,31 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel) lower_32_bits(rdata->rdesc_dma)); } +static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, + struct xgbe_ring_data *rdata, + unsigned int *len) +{ + struct net_device *netdev = pdata->netdev; + struct sk_buff *skb; + u8 *packet; + unsigned int copy_len; + + skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len); + if (!skb) + return NULL; + + packet = page_address(rdata->rx.hdr.pa.pages) + + rdata->rx.hdr.pa.pages_offset; + copy_len = (rdata->rx.hdr_len) ? 
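/* Editorial aside: Byte Queue Limits sketch. The three calls added in this
 * diff must stay paired - netdev_tx_sent_queue() when descriptors are posted,
 * netdev_tx_completed_queue() when they are reclaimed, and
 * netdev_tx_reset_queue() when the ring is torn down (see xgbe_stop() above).
 * Worked bound for the advance stop: with 4 KiB pages MAX_SKB_FRAGS is 17 and
 * XGBE_TX_MAX_BUF_SIZE is 16320, so XGBE_TX_MAX_SPLIT = 65536/16320 + 1 = 5
 * and XGBE_TX_MAX_DESCS = 17 + 5 + 2 = 24 descriptors per worst-case skb.
 */
static void demo_bql_cycle(struct netdev_queue *txq, unsigned int bytes)
{
	netdev_tx_sent_queue(txq, bytes);		/* xmit path */
	/* ... hardware transmits, completion interrupt fires ... */
	netdev_tx_completed_queue(txq, 1, bytes);	/* completion path */
}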
rdata->rx.hdr_len : *len; + copy_len = min(rdata->rx.hdr.dma_len, copy_len); + skb_copy_to_linear_data(skb, packet, copy_len); + skb_put(skb, copy_len); + + *len -= copy_len; + + return skb; +} + static int xgbe_tx_poll(struct xgbe_channel *channel) { struct xgbe_prv_data *pdata = channel->pdata; @@ -1542,8 +1823,10 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) struct xgbe_ring_data *rdata; struct xgbe_ring_desc *rdesc; struct net_device *netdev = pdata->netdev; + struct netdev_queue *txq; unsigned long flags; int processed = 0; + unsigned int tx_packets = 0, tx_bytes = 0; DBGPR("-->xgbe_tx_poll\n"); @@ -1551,6 +1834,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) if (!ring) return 0; + txq = netdev_get_tx_queue(netdev, channel->queue_index); + spin_lock_irqsave(&ring->lock, flags); while ((processed < XGBE_TX_DESC_MAX_PROC) && @@ -1561,26 +1846,41 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) if (!hw_if->tx_complete(rdesc)) break; + /* Make sure descriptor fields are read after reading the OWN + * bit */ + rmb(); + #ifdef XGMAC_ENABLE_TX_DESC_DUMP xgbe_dump_tx_desc(ring, ring->dirty, 1, 0); #endif + if (hw_if->is_last_desc(rdesc)) { + tx_packets += rdata->tx.packets; + tx_bytes += rdata->tx.bytes; + } + /* Free the SKB and reset the descriptor for re-use */ - desc_if->unmap_skb(pdata, rdata); + desc_if->unmap_rdata(pdata, rdata); hw_if->tx_desc_reset(rdata); processed++; ring->dirty++; } + if (!processed) + goto unlock; + + netdev_tx_completed_queue(txq, tx_packets, tx_bytes); + if ((ring->tx.queue_stopped == 1) && (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) { ring->tx.queue_stopped = 0; - netif_wake_subqueue(netdev, channel->queue_index); + netif_tx_wake_queue(txq); } DBGPR("<--xgbe_tx_poll: processed=%d\n", processed); +unlock: spin_unlock_irqrestore(&ring->lock, flags); return processed; @@ -1594,11 +1894,13 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) struct xgbe_ring_data *rdata; struct xgbe_packet_data *packet; struct net_device *netdev = pdata->netdev; + struct napi_struct *napi; struct sk_buff *skb; struct skb_shared_hwtstamps *hwtstamps; unsigned int incomplete, error, context_next, context; unsigned int len, put_len, max_len; - int received = 0; + unsigned int received = 0; + int packet_count = 0; DBGPR("-->xgbe_rx_poll: budget=%d\n", budget); @@ -1606,9 +1908,11 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) if (!ring) return 0; + napi = (pdata->per_channel_irq) ? 
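/* Editorial aside: the rmb() added to xgbe_tx_poll() above orders two reads
 * from descriptor memory - the OWN bit must be observed as cleared before the
 * remaining descriptor words are loaded, or the CPU may use stale values.
 * Shape of the check (macro and field names follow this driver's descriptor
 * layout):
 */
static int demo_desc_complete(struct xgbe_ring_desc *rdesc)
{
	if (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN))
		return 0;	/* device still owns the descriptor */

	rmb();			/* OWN read before payload field reads */
	return 1;
}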
&channel->napi : &pdata->napi; + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); packet = &ring->packet_data; - while (received < budget) { + while (packet_count < budget) { DBGPR(" cur = %d\n", ring->cur); /* First time in loop see if we need to restore state */ @@ -1640,10 +1944,6 @@ read_again: ring->cur++; ring->dirty++; - dma_unmap_single(pdata->dev, rdata->skb_dma, - rdata->skb_dma_len, DMA_FROM_DEVICE); - rdata->skb_dma = 0; - incomplete = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, INCOMPLETE); @@ -1662,39 +1962,46 @@ read_again: if (packet->errors) DBGPR("Error in received packet\n"); dev_kfree_skb(skb); - continue; + goto next_packet; } if (!context) { - put_len = rdata->len - len; - if (skb) { - if (pskb_expand_head(skb, 0, put_len, - GFP_ATOMIC)) { - DBGPR("pskb_expand_head error\n"); - if (incomplete) { - error = 1; - goto read_again; - } - - dev_kfree_skb(skb); - continue; + put_len = rdata->rx.len - len; + len += put_len; + + if (!skb) { + dma_sync_single_for_cpu(pdata->dev, + rdata->rx.hdr.dma, + rdata->rx.hdr.dma_len, + DMA_FROM_DEVICE); + + skb = xgbe_create_skb(pdata, rdata, &put_len); + if (!skb) { + error = 1; + goto skip_data; } - memcpy(skb_tail_pointer(skb), rdata->skb->data, - put_len); - } else { - skb = rdata->skb; - rdata->skb = NULL; } - skb_put(skb, put_len); - len += put_len; + + if (put_len) { + dma_sync_single_for_cpu(pdata->dev, + rdata->rx.buf.dma, + rdata->rx.buf.dma_len, + DMA_FROM_DEVICE); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rdata->rx.buf.pa.pages, + rdata->rx.buf.pa.pages_offset, + put_len, rdata->rx.buf.dma_len); + rdata->rx.buf.pa.pages = NULL; + } } +skip_data: if (incomplete || context_next) goto read_again; - /* Stray Context Descriptor? */ if (!skb) - continue; + goto next_packet; /* Be sure we don't exceed the configured MTU */ max_len = netdev->mtu + ETH_HLEN; @@ -1705,7 +2012,7 @@ read_again: if (skb->len > max_len) { DBGPR("packet length exceeds configured MTU\n"); dev_kfree_skb(skb); - continue; + goto next_packet; } #ifdef XGMAC_ENABLE_RX_PKT_DUMP @@ -1732,13 +2039,21 @@ read_again: hwtstamps->hwtstamp = ns_to_ktime(nsec); } + if (XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, RSS_HASH)) + skb_set_hash(skb, packet->rss_hash, + packet->rss_hash_type); + skb->dev = netdev; skb->protocol = eth_type_trans(skb, netdev); skb_record_rx_queue(skb, channel->queue_index); - skb_mark_napi_id(skb, &pdata->napi); + skb_mark_napi_id(skb, napi); netdev->last_rx = jiffies; - napi_gro_receive(&pdata->napi, skb); + napi_gro_receive(napi, skb); + +next_packet: + packet_count++; } /* Check if we need to save state before leaving */ @@ -1752,12 +2067,40 @@ read_again: rdata->state.error = error; } - DBGPR("<--xgbe_rx_poll: received = %d\n", received); + DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count); + + return packet_count; +} + +static int xgbe_one_poll(struct napi_struct *napi, int budget) +{ + struct xgbe_channel *channel = container_of(napi, struct xgbe_channel, + napi); + int processed = 0; + + DBGPR("-->xgbe_one_poll: budget=%d\n", budget); + + /* Cleanup Tx ring first */ + xgbe_tx_poll(channel); + + /* Process Rx ring next */ + processed = xgbe_rx_poll(channel, budget); + + /* If we processed everything, we are done */ + if (processed < budget) { + /* Turn off polling */ + napi_complete(napi); + + /* Enable Tx and Rx interrupts */ + enable_irq(channel->dma_irq); + } + + DBGPR("<--xgbe_one_poll: received = %d\n", processed); - return received; + return processed; } -static int xgbe_poll(struct 
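/* Editorial aside: header-split receive, as wired up above - only the small
 * header is copied into the skb linear area and the payload page is attached
 * as a fragment, so large packets avoid a full copy. A condensed sketch of
 * xgbe_create_skb() plus the skb_add_rx_frag() call, with arguments reduced
 * for clarity:
 */
static struct sk_buff *demo_build_rx_skb(struct net_device *netdev,
					 void *hdr_va, unsigned int hdr_len,
					 struct page *page, unsigned int off,
					 unsigned int payload_len,
					 unsigned int truesize)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, hdr_len);

	if (!skb)
		return NULL;

	skb_copy_to_linear_data(skb, hdr_va, hdr_len);	/* copy header only */
	skb_put(skb, hdr_len);
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off,
			payload_len, truesize);		/* payload by reference */

	return skb;
}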
napi_struct *napi, int budget) +static int xgbe_all_poll(struct napi_struct *napi, int budget) { struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data, napi); @@ -1766,7 +2109,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget) int processed, last_processed; unsigned int i; - DBGPR("-->xgbe_poll: budget=%d\n", budget); + DBGPR("-->xgbe_all_poll: budget=%d\n", budget); processed = 0; ring_budget = budget / pdata->rx_ring_count; @@ -1794,7 +2137,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget) xgbe_enable_rx_tx_ints(pdata); } - DBGPR("<--xgbe_poll: received = %d\n", processed); + DBGPR("<--xgbe_all_poll: received = %d\n", processed); return processed; } @@ -1808,10 +2151,10 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx, while (count--) { rdata = XGBE_GET_DESC_DATA(ring, idx); rdesc = rdata->rdesc; - DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx, - (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE", - le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1), - le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3)); + pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx, + (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE", + le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1), + le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3)); idx++; } } @@ -1819,9 +2162,9 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx, void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc, unsigned int idx) { - DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx, - le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1), - le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3)); + pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx, + le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1), + le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3)); } void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 49508ec98b72..ebf489351555 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -452,9 +452,9 @@ static int xgbe_set_coalesce(struct net_device *netdev, rx_usecs); return -EINVAL; } - if (rx_frames > pdata->channel->rx_ring->rdesc_count) { + if (rx_frames > pdata->rx_desc_count) { netdev_alert(netdev, "rx-frames is limited to %d frames\n", - pdata->channel->rx_ring->rdesc_count); + pdata->rx_desc_count); return -EINVAL; } @@ -462,9 +462,9 @@ static int xgbe_set_coalesce(struct net_device *netdev, tx_frames = ec->tx_max_coalesced_frames; /* Check the bounds of values for Tx */ - if (tx_frames > pdata->channel->tx_ring->rdesc_count) { + if (tx_frames > pdata->tx_desc_count) { netdev_alert(netdev, "tx-frames is limited to %d frames\n", - pdata->channel->tx_ring->rdesc_count); + pdata->tx_desc_count); return -EINVAL; } @@ -481,6 +481,82 @@ static int xgbe_set_coalesce(struct net_device *netdev, return 0; } +static int xgbe_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *rxnfc, u32 *rule_locs) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + switch (rxnfc->cmd) { + case ETHTOOL_GRXRINGS: + rxnfc->data = pdata->rx_ring_count; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static u32 xgbe_get_rxfh_key_size(struct net_device *netdev) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + return sizeof(pdata->rss_key); +} + +static u32 
xgbe_get_rxfh_indir_size(struct net_device *netdev) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + return ARRAY_SIZE(pdata->rss_table); +} + +static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + unsigned int i; + + if (indir) { + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) + indir[i] = XGMAC_GET_BITS(pdata->rss_table[i], + MAC_RSSDR, DMCH); + } + + if (key) + memcpy(key, pdata->rss_key, sizeof(pdata->rss_key)); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + unsigned int ret; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (indir) { + ret = hw_if->set_rss_lookup_table(pdata, indir); + if (ret) + return ret; + } + + if (key) { + ret = hw_if->set_rss_hash_key(pdata, key); + if (ret) + return ret; + } + + return 0; +} + static int xgbe_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *ts_info) { @@ -526,6 +602,11 @@ static const struct ethtool_ops xgbe_ethtool_ops = { .get_strings = xgbe_get_strings, .get_ethtool_stats = xgbe_get_ethtool_stats, .get_sset_count = xgbe_get_sset_count, + .get_rxnfc = xgbe_get_rxnfc, + .get_rxfh_key_size = xgbe_get_rxfh_key_size, + .get_rxfh_indir_size = xgbe_get_rxfh_indir_size, + .get_rxfh = xgbe_get_rxfh, + .set_rxfh = xgbe_set_rxfh, .get_ts_info = xgbe_get_ts_info, }; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index f5a8fa03921a..dbd3850b8b0a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -133,60 +133,6 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(XGBE_DRV_VERSION); MODULE_DESCRIPTION(XGBE_DRV_DESC); -static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata) -{ - struct xgbe_channel *channel_mem, *channel; - struct xgbe_ring *tx_ring, *rx_ring; - unsigned int count, i; - - DBGPR("-->xgbe_alloc_rings\n"); - - count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); - - channel_mem = devm_kcalloc(pdata->dev, count, - sizeof(struct xgbe_channel), GFP_KERNEL); - if (!channel_mem) - return NULL; - - tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count, - sizeof(struct xgbe_ring), GFP_KERNEL); - if (!tx_ring) - return NULL; - - rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count, - sizeof(struct xgbe_ring), GFP_KERNEL); - if (!rx_ring) - return NULL; - - for (i = 0, channel = channel_mem; i < count; i++, channel++) { - snprintf(channel->name, sizeof(channel->name), "channel-%d", i); - channel->pdata = pdata; - channel->queue_index = i; - channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE + - (DMA_CH_INC * i); - - if (i < pdata->tx_ring_count) { - spin_lock_init(&tx_ring->lock); - channel->tx_ring = tx_ring++; - } - - if (i < pdata->rx_ring_count) { - spin_lock_init(&rx_ring->lock); - channel->rx_ring = rx_ring++; - } - - DBGPR(" %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n", - channel->name, channel->queue_index, channel->dma_regs, - channel->tx_ring, channel->rx_ring); - } - - pdata->channel_count = count; - - DBGPR("<--xgbe_alloc_rings\n"); - - return channel_mem; -} - static void xgbe_default_config(struct xgbe_prv_data *pdata) { DBGPR("-->xgbe_default_config\n"); @@ -224,6 +170,7 @@ static int xgbe_probe(struct 
platform_device *pdev) struct device *dev = &pdev->dev; struct resource *res; const u8 *mac_addr; + unsigned int i; int ret; DBGPR("--> xgbe_probe\n"); @@ -244,6 +191,7 @@ static int xgbe_probe(struct platform_device *pdev) spin_lock_init(&pdata->lock); mutex_init(&pdata->xpcs_mutex); + mutex_init(&pdata->rss_mutex); spin_lock_init(&pdata->tstamp_lock); /* Set and validate the number of descriptors for a ring */ @@ -318,12 +266,18 @@ static int xgbe_probe(struct platform_device *pdev) pdata->awcache = XGBE_DMA_SYS_AWCACHE; } + /* Check for per channel interrupt support */ + if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS)) + pdata->per_channel_irq = 1; + ret = platform_get_irq(pdev, 0); if (ret < 0) { - dev_err(dev, "platform_get_irq failed\n"); + dev_err(dev, "platform_get_irq 0 failed\n"); goto err_io; } - netdev->irq = ret; + pdata->dev_irq = ret; + + netdev->irq = pdata->dev_irq; netdev->base_addr = (unsigned long)pdata->xgmac_regs; /* Set all the function pointers */ @@ -383,13 +337,16 @@ static int xgbe_probe(struct platform_device *pdev) goto err_io; } - /* Allocate the rings for the DMA channels */ - pdata->channel = xgbe_alloc_rings(pdata); - if (!pdata->channel) { - dev_err(dev, "ring allocation failed\n"); - ret = -ENOMEM; - goto err_io; - } + /* Initialize RSS hash key and lookup table */ + netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key)); + + for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++) + XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, + i % pdata->rx_ring_count); + + XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); + XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1); + XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); /* Prepare to register with MDIO */ pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name); @@ -421,6 +378,9 @@ static int xgbe_probe(struct platform_device *pdev) NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER; + if (pdata->hw_feat.rss) + netdev->hw_features |= NETIF_F_RXHASH; + netdev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 789957d43a13..f9ec762ac3f0 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -140,11 +140,25 @@ #define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) +/* Descriptors required for maximum contiguous TSO/GSO packet */ +#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1) + +/* Maximum possible descriptors needed for an SKB: + * - Maximum number of SKB frags + * - Maximum descriptors for contiguous TSO/GSO packet + * - Possible context descriptor + * - Possible TSO header descriptor + */ +#define XGBE_TX_MAX_DESCS (MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2) + #define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) #define XGBE_RX_BUF_ALIGN 64 +#define XGBE_SKB_ALLOC_SIZE 256 +#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with SKB_ALLOC_SIZE */ #define XGBE_MAX_DMA_CHANNELS 16 #define XGBE_MAX_QUEUES 16 +#define XGBE_DMA_STOP_TIMEOUT 5 /* DMA cache settings - Outer sharable, write-back, write-allocate */ #define XGBE_DMA_OS_AXDOMAIN 0x2 @@ -171,6 +185,7 @@ /* Device-tree clock names */ #define XGBE_DMA_CLOCK "dma_clk" #define XGBE_PTP_CLOCK "ptp_clk" +#define XGBE_DMA_IRQS "amd,per-channel-interrupt" /* Timestamp support - values based on 50MHz PTP clock * 50MHz => 20 nsec */ @@ -212,9 +227,17 @@ /* Maximum MAC address hash table size (256 bits = 8 bytes) */ #define XGBE_MAC_HASH_TABLE_SIZE 8 +/*
Receive Side Scaling */ +#define XGBE_RSS_HASH_KEY_SIZE 40 +#define XGBE_RSS_MAX_TABLE_SIZE 256 +#define XGBE_RSS_LOOKUP_TABLE_TYPE 0 +#define XGBE_RSS_HASH_KEY_TYPE 1 + struct xgbe_prv_data; struct xgbe_packet_data { + struct sk_buff *skb; + unsigned int attributes; unsigned int errors; @@ -230,14 +253,53 @@ struct xgbe_packet_data { unsigned short vlan_ctag; u64 rx_tstamp; + + u32 rss_hash; + enum pkt_hash_types rss_hash_type; + + unsigned int tx_packets; + unsigned int tx_bytes; }; /* Common Rx and Tx descriptor mapping */ struct xgbe_ring_desc { - unsigned int desc0; - unsigned int desc1; - unsigned int desc2; - unsigned int desc3; + __le32 desc0; + __le32 desc1; + __le32 desc2; + __le32 desc3; +}; + +/* Page allocation related values */ +struct xgbe_page_alloc { + struct page *pages; + unsigned int pages_len; + unsigned int pages_offset; + + dma_addr_t pages_dma; +}; + +/* Ring entry buffer data */ +struct xgbe_buffer_data { + struct xgbe_page_alloc pa; + struct xgbe_page_alloc pa_unmap; + + dma_addr_t dma; + unsigned int dma_len; +}; + +/* Tx-related ring data */ +struct xgbe_tx_ring_data { + unsigned int packets; /* BQL packet count */ + unsigned int bytes; /* BQL byte count */ +}; + +/* Rx-related ring data */ +struct xgbe_rx_ring_data { + struct xgbe_buffer_data hdr; /* Header locations */ + struct xgbe_buffer_data buf; /* Payload locations */ + + unsigned short hdr_len; /* Length of received header */ + unsigned short len; /* Length of received packet */ }; /* Structure used to hold information related to the descriptor @@ -251,9 +313,9 @@ struct xgbe_ring_data { struct sk_buff *skb; /* Virtual address of SKB */ dma_addr_t skb_dma; /* DMA address of SKB data */ unsigned int skb_dma_len; /* Length of SKB DMA area */ - unsigned int tso_header; /* TSO header indicator */ - unsigned short len; /* Length of received Rx packet */ + struct xgbe_tx_ring_data tx; /* Tx-related data */ + struct xgbe_rx_ring_data rx; /* Rx-related data */ unsigned int interrupt; /* Interrupt indicator */ @@ -291,6 +353,10 @@ struct xgbe_ring { */ struct xgbe_ring_data *rdata; + /* Page allocation for RX buffers */ + struct xgbe_page_alloc rx_hdr_pa; + struct xgbe_page_alloc rx_buf_pa; + /* Ring index values * cur - Tx: index of descriptor to be used for current transfer * Rx: index of descriptor to check for packet availability @@ -307,6 +373,7 @@ struct xgbe_ring { union { struct { unsigned int queue_stopped; + unsigned int xmit_more; unsigned short cur_mss; unsigned short cur_vlan_ctag; } tx; @@ -331,6 +398,13 @@ struct xgbe_channel { unsigned int queue_index; void __iomem *dma_regs; + /* Per channel interrupt irq number */ + int dma_irq; + char dma_irq_name[IFNAMSIZ + 32]; + + /* Netdev related settings */ + struct napi_struct napi; + unsigned int saved_ier; unsigned int tx_timer_active; @@ -456,7 +530,7 @@ struct xgbe_hw_if { int (*enable_int)(struct xgbe_channel *, enum xgbe_int); int (*disable_int)(struct xgbe_channel *, enum xgbe_int); - void (*pre_xmit)(struct xgbe_channel *); + void (*dev_xmit)(struct xgbe_channel *); int (*dev_read)(struct xgbe_channel *); void (*tx_desc_init)(struct xgbe_channel *); void (*rx_desc_init)(struct xgbe_channel *); @@ -464,6 +538,7 @@ struct xgbe_hw_if { void (*tx_desc_reset)(struct xgbe_ring_data *); int (*is_last_desc)(struct xgbe_ring_desc *); int (*is_context_desc)(struct xgbe_ring_desc *); + void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *); /* For FLOW ctrl */ int (*config_tx_flow_control)(struct xgbe_prv_data *); @@ -509,14 +584,20 @@ struct 
xgbe_hw_if { /* For Data Center Bridging config */ void (*config_dcb_tc)(struct xgbe_prv_data *); void (*config_dcb_pfc)(struct xgbe_prv_data *); + + /* For Receive Side Scaling */ + int (*enable_rss)(struct xgbe_prv_data *); + int (*disable_rss)(struct xgbe_prv_data *); + int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *); + int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *); }; struct xgbe_desc_if { int (*alloc_ring_resources)(struct xgbe_prv_data *); void (*free_ring_resources)(struct xgbe_prv_data *); int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *); - void (*realloc_skb)(struct xgbe_channel *); - void (*unmap_skb)(struct xgbe_prv_data *, struct xgbe_ring_data *); + void (*realloc_rx_buffer)(struct xgbe_channel *); + void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *); void (*wrapper_tx_desc_init)(struct xgbe_prv_data *); void (*wrapper_rx_desc_init)(struct xgbe_prv_data *); }; @@ -581,7 +662,11 @@ struct xgbe_prv_data { /* XPCS indirect addressing mutex */ struct mutex xpcs_mutex; - int irq_number; + /* RSS addressing mutex */ + struct mutex rss_mutex; + + int dev_irq; + unsigned int per_channel_irq; struct xgbe_hw_if hw_if; struct xgbe_desc_if desc_if; @@ -624,7 +709,7 @@ struct xgbe_prv_data { unsigned int rx_riwt; unsigned int rx_frames; - /* Current MTU */ + /* Current Rx buffer size */ unsigned int rx_buf_size; /* Flow control settings */ @@ -632,6 +717,11 @@ struct xgbe_prv_data { unsigned int tx_pause; unsigned int rx_pause; + /* Receive Side Scaling settings */ + u8 rss_key[XGBE_RSS_HASH_KEY_SIZE]; + u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE]; + u32 rss_options; + /* MDIO settings */ struct module *phy_module; char *mii_bus_id; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 63ea1941e973..7ba83ffb08ac 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -575,10 +575,24 @@ static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata) xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN); } -static void xgene_enet_reset(struct xgene_enet_pdata *pdata) +bool xgene_ring_mgr_init(struct xgene_enet_pdata *p) +{ + if (!ioread32(p->ring_csr_addr + CLKEN_ADDR)) + return false; + + if (ioread32(p->ring_csr_addr + SRST_ADDR)) + return false; + + return true; +} + +static int xgene_enet_reset(struct xgene_enet_pdata *pdata) { u32 val; + if (!xgene_ring_mgr_init(pdata)) + return -ENODEV; + clk_prepare_enable(pdata->clk); clk_disable_unprepare(pdata->clk); clk_prepare_enable(pdata->clk); @@ -590,6 +604,8 @@ static void xgene_enet_reset(struct xgene_enet_pdata *pdata) val |= SCAN_AUTO_INCR; MGMT_CLOCK_SEL_SET(&val, 1); xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val); + + return 0; } static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 38558584080e..ec45f3256f0e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -104,6 +104,9 @@ enum xgene_enet_rm { #define BLOCK_ETH_MAC_OFFSET 0x0000 #define BLOCK_ETH_MAC_CSR_OFFSET 0x2800 +#define CLKEN_ADDR 0xc208 +#define SRST_ADDR 0xc200 + #define MAC_ADDR_REG_OFFSET 0x00 #define MAC_COMMAND_REG_OFFSET 0x04 #define MAC_WRITE_REG_OFFSET 0x08 @@ -318,6 +321,7 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, int xgene_enet_mdio_config(struct xgene_enet_pdata 
*pdata); void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata); +bool xgene_ring_mgr_init(struct xgene_enet_pdata *p); extern struct xgene_mac_ops xgene_gmac_ops; extern struct xgene_port_ops xgene_gport_ops; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 3c208cc6f6bb..83a50280bb70 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -639,9 +639,9 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) struct device *dev = ndev_to_dev(ndev); struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring; struct xgene_enet_desc_ring *buf_pool = NULL; - u8 cpu_bufnum = 0, eth_bufnum = 0; - u8 bp_bufnum = 0x20; - u16 ring_id, ring_num = 0; + u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM; + u8 bp_bufnum = START_BP_BUFNUM; + u16 ring_id, ring_num = START_RING_NUM; int ret; /* allocate rx descriptor ring */ @@ -761,10 +761,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) ndev = pdata->ndev; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr"); - if (!res) { - dev_err(dev, "Resource enet_csr not defined\n"); - return -ENODEV; - } pdata->base_addr = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->base_addr)) { dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); @@ -772,10 +768,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr"); - if (!res) { - dev_err(dev, "Resource ring_csr not defined\n"); - return -ENODEV; - } pdata->ring_csr_addr = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->ring_csr_addr)) { dev_err(dev, "Unable to retrieve ENET Ring CSR region\n"); @@ -783,10 +775,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd"); - if (!res) { - dev_err(dev, "Resource ring_cmd not defined\n"); - return -ENODEV; - } pdata->ring_cmd_addr = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->ring_cmd_addr)) { dev_err(dev, "Unable to retrieve ENET Ring command region\n"); @@ -852,7 +840,9 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) u16 dst_ring_num; int ret; - pdata->port_ops->reset(pdata); + ret = pdata->port_ops->reset(pdata); + if (ret) + return ret; ret = xgene_enet_create_desc_rings(ndev); if (ret) { @@ -954,6 +944,7 @@ static int xgene_enet_probe(struct platform_device *pdev) return ret; err: + unregister_netdev(ndev); free_netdev(ndev); return ret; } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 874e5a01161f..f9958fae6ffd 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -38,6 +38,9 @@ #define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN) #define NUM_PKT_BUF 64 #define NUM_BUFPOOL 32 +#define START_ETH_BUFNUM 2 +#define START_BP_BUFNUM 0x22 +#define START_RING_NUM 8 #define PHY_POLL_LINK_ON (10 * HZ) #define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5) @@ -83,7 +86,7 @@ struct xgene_mac_ops { }; struct xgene_port_ops { - void (*reset)(struct xgene_enet_pdata *pdata); + int (*reset)(struct xgene_enet_pdata *pdata); void (*cle_bypass)(struct xgene_enet_pdata *pdata, u32 dst_ring_num, u16 bufpool_id); void (*shutdown)(struct xgene_enet_pdata *pdata); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 
b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c index e6d24c210198..f5d4f68c288c 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -124,20 +124,18 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p) { struct net_device *ndev = p->ndev; u32 data; - int i; + int i = 0; xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0); - for (i = 0; i < 10 && data != ~0U ; i++) { + do { usleep_range(100, 110); data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR); - } - - if (data != ~0U) { - netdev_err(ndev, "Failed to release memory from shutdown\n"); - return -ENODEV; - } + if (data == ~0U) + return 0; + } while (++i < 10); - return 0; + netdev_err(ndev, "Failed to release memory from shutdown\n"); + return -ENODEV; } static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p) @@ -313,14 +311,19 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p) xgene_sgmac_rxtx(p, TX_EN, false); } -static void xgene_enet_reset(struct xgene_enet_pdata *p) +static int xgene_enet_reset(struct xgene_enet_pdata *p) { + if (!xgene_ring_mgr_init(p)) + return -ENODEV; + clk_prepare_enable(p->clk); clk_disable_unprepare(p->clk); clk_prepare_enable(p->clk); xgene_enet_ecc_init(p); xgene_enet_config_ring_if_assoc(p); + + return 0; } static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p, diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index 67d07206b3c7..a18a9d1f1143 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -252,14 +252,19 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata) xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN); } -static void xgene_enet_reset(struct xgene_enet_pdata *pdata) +static int xgene_enet_reset(struct xgene_enet_pdata *pdata) { + if (!xgene_ring_mgr_init(pdata)) + return -ENODEV; + clk_prepare_enable(pdata->clk); clk_disable_unprepare(pdata->clk); clk_prepare_enable(pdata->clk); xgene_enet_ecc_init(pdata); xgene_enet_config_ring_if_assoc(pdata); + + return 0; } static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata, diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index c3e260c21734..888247ad9068 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -62,7 +62,6 @@ config BCM63XX_ENET config BCMGENET tristate "Broadcom GENET internal MAC support" - depends on OF select MII select PHYLIB select FIXED_PHY if BCMGENET=y diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 9ae36979bdee..c1d255972dae 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -274,6 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { /* RBUF misc statistics */ STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), + STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed), + STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed), }; #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) @@ -477,6 +480,7 @@ static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, RX_BUF_LENGTH, DMA_FROM_DEVICE); ret = dma_mapping_error(kdev, mapping); if (ret) { + 
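/* Editorial aside: the xgene_enet_ecc_init() rework above also fixes a latent
 * bug - the old for-loop tested "data != ~0U" before "data" was ever
 * assigned. The corrected poll-until-ready idiom, in isolation:
 */
static int demo_wait_mem_ready(void __iomem *rdy_csr)
{
	int i = 0;

	do {
		usleep_range(100, 110);
		if (ioread32(rdy_csr) == ~0U)	/* all RAM banks released */
			return 0;
	} while (++i < 10);

	return -ENODEV;	/* still in shutdown after ~1 ms of polling */
}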
priv->mib.rx_dma_failed++; bcm_sysport_free_cb(cb); netif_err(priv, rx_err, ndev, "DMA mapping failure\n"); return ret; @@ -526,6 +530,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, unsigned int p_index; u16 len, status; struct bcm_rsb *rsb; + int ret; /* Determine how much we should process since last call */ p_index = rdma_readl(priv, RDMA_PROD_INDEX); @@ -620,7 +625,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, napi_gro_receive(&priv->napi, skb); refill: - bcm_sysport_rx_refill(priv, cb); + ret = bcm_sysport_rx_refill(priv, cb); + if (ret) + priv->mib.alloc_rx_buff_failed++; } return processed; @@ -731,9 +738,11 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) napi_complete(napi); /* re-enable TX interrupt */ intrl2_1_mask_clear(ring->priv, BIT(ring->index)); + + return 0; } - return 0; + return budget; } static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv) @@ -971,6 +980,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); if (dma_mapping_error(kdev, mapping)) { + priv->mib.tx_dma_failed++; netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n", skb->data, skb_len); ret = NETDEV_TX_OK; @@ -1110,7 +1120,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, /* We just need one DMA descriptor which is DMA-able, since writing to * the port will allocate a new descriptor in its internal linked-list */ - p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL); + p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, + GFP_KERNEL); if (!p) { netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); return -ENOMEM; @@ -1174,6 +1185,13 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, if (!(reg & TDMA_DISABLED)) netdev_warn(priv->netdev, "TDMA not stopped!\n"); + /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could + * fail, so by checking this pointer we know whether the TX ring was + * fully initialized or not. 
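/* Editorial aside: the bcm_sysport_tx_poll() change above restores the NAPI
 * contract - a poll routine returns less than its budget only when the work
 * is finished and napi_complete() has been called; otherwise it must return
 * the budget so the core keeps polling. Generic shape (demo_clean() and
 * demo_unmask() are hypothetical helpers):
 */
static int demo_poll(struct napi_struct *napi, int budget)
{
	int work = demo_clean(napi, budget);

	if (work < budget) {
		napi_complete(napi);
		demo_unmask(napi);	/* re-enable the interrupt source */
	}

	return work;
}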
+ */ + if (!ring->cbs) + return; + napi_disable(&ring->napi); netif_napi_del(&ring->napi); @@ -1183,7 +1201,8 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, ring->cbs = NULL; if (ring->desc_dma) { - dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma); + dma_free_coherent(kdev, sizeof(struct dma_desc), + ring->desc_cpu, ring->desc_dma); ring->desc_dma = 0; } ring->size = 0; @@ -1397,6 +1416,9 @@ static void bcm_sysport_netif_start(struct net_device *dev) /* Enable NAPI */ napi_enable(&priv->napi); + /* Enable RX interrupt and TX ring full interrupt */ + intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); + phy_start(priv->phydev); /* Enable TX interrupts for the 32 TXQs */ @@ -1499,9 +1521,6 @@ static int bcm_sysport_open(struct net_device *dev) if (ret) goto out_free_rx_ring; - /* Enable RX interrupt and TX ring full interrupt */ - intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); - /* Turn on TDMA */ ret = tdma_enable_set(priv, 1); if (ret) @@ -1858,6 +1877,8 @@ static int bcm_sysport_resume(struct device *d) if (!netif_running(dev)) return 0; + umac_reset(priv); + /* We may have been suspended and never received a WOL event that * would turn off MPD detection, take care of that now */ @@ -1885,9 +1906,6 @@ static int bcm_sysport_resume(struct device *d) netif_device_attach(dev); - /* Enable RX interrupt and TX ring full interrupt */ - intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); - /* RX pipe enable */ topctrl_writel(priv, 0, RX_FLUSH_CNTL); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index b08dab828101..fc19417d82a5 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -557,6 +557,9 @@ struct bcm_sysport_mib { u32 rxchk_other_pkt_disc; u32 rbuf_ovflow_cnt; u32 rbuf_err_cnt; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 tx_dma_failed; }; /* HW maintains a large list of counters */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 40beef5bca88..b4d71fd909ee 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1139,7 +1139,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) prefetch(fp->txdata_ptr[cos]->tx_cons_sb); prefetch(&fp->sb_running_index[SM_RX_ID]); - napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); return IRQ_HANDLED; } @@ -2099,7 +2099,7 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, if (config_hash) { /* RSS keys */ - prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4); + netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4); __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 1edc931b1458..ffe4e003e636 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3358,12 +3358,18 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev) return T_ETH_INDIRECTION_TABLE_SIZE; } -static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key) +static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) { struct bnx2x *bp = netdev_priv(dev); u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; size_t i; + if (hfunc) + *hfunc 
= ETH_RSS_HASH_TOP; + if (!indir) + return 0; + /* Get the current configuration of the RSS indirection table */ bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table); @@ -3383,11 +3389,21 @@ static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key) } static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir, - const u8 *key) + const u8 *key, const u8 hfunc) { struct bnx2x *bp = netdev_priv(dev); size_t i; + /* We require at least one supported parameter to be changed and no + * change in any of the unsupported parameters + */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + if (!indir) + return 0; + for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { /* * The same as in bnx2x_get_rxfh: we can't use a memcpy() diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 74fbf9ea7bd8..07c636815127 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -1931,7 +1931,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) for_each_cos_in_tx_queue(fp, cos) prefetch(fp->txdata_ptr[cos]->tx_cons_sb); prefetch(&fp->sb_running_index[SM_RX_ID]); - napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); status &= ~mask; } } @@ -3163,6 +3163,8 @@ static void bnx2x_pf_q_prep_general(struct bnx2x *bp, gen_init->mtu = bp->dev->mtu; gen_init->cos = cos; + + gen_init->fp_hsi = ETH_FP_HSI_VERSION; } static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, @@ -12537,7 +12539,7 @@ static int bnx2x_validate_addr(struct net_device *dev) } static int bnx2x_get_phys_port_id(struct net_device *netdev, - struct netdev_phys_port_id *ppid) + struct netdev_phys_item_id *ppid) { struct bnx2x *bp = netdev_priv(netdev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 7bc2924a7e24..07cdf9bbffef 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -4336,7 +4336,7 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, test_bit(BNX2X_Q_FLG_FCOE, flags) ? 
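/* Editorial aside: contract sketch for the .set_rxfh handler shown above -
 * reject any change the hardware cannot honour, treat NULL parameters as "no
 * change", and only then program what remains. demo_program_indir() is a
 * hypothetical stand-in for the device-specific update.
 */
static int demo_set_rxfh(struct net_device *dev, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;	/* only Toeplitz is supported */
	if (key)
		return -EOPNOTSUPP;	/* key is fixed on this device */
	if (indir)
		return demo_program_indir(dev, indir);
	return 0;
}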
LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; - gen_data->fp_hsi_ver = ETH_FP_HSI_VERSION; + gen_data->fp_hsi_ver = params->fp_hsi; DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n", gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index e97275f456c0..86baecb7c60c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -937,6 +937,8 @@ struct bnx2x_general_setup_params { u8 spcl_id; u16 mtu; u8 cos; + + u8 fp_hsi; }; struct bnx2x_rxq_setup_params { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index c88b20af87df..e5aca2de1871 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -193,6 +193,7 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp, /* Setup-op general parameters */ setup_p->gen_params.spcl_id = vf->sp_cl_id; setup_p->gen_params.stat_id = vfq_stat_id(vf, q); + setup_p->gen_params.fp_hsi = vf->fp_hsi; /* Setup-op pause params: * Nothing to do, the pause thresholds are set by default to 0 which diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 01bafa4ac045..66ee62a0401a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -205,6 +205,8 @@ struct bnx2x_virtf { /* slow-path operations */ struct mutex op_mutex; /* one vfop at a time mutex */ enum channel_tlvs op_current; + + u8 fp_hsi; }; #define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index b1d9c44aa56c..be40eabc5304 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -224,6 +224,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire; struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp; struct vfpf_port_phys_id_resp_tlv *phys_port_resp; + struct vfpf_fp_hsi_resp_tlv *fp_hsi_resp; u32 vf_id; bool resources_acquired = false; @@ -237,6 +238,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) req->vfdev_info.vf_id = vf_id; req->vfdev_info.vf_os = 0; + req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION; req->resc_request.num_rxqs = rx_count; req->resc_request.num_txqs = tx_count; @@ -316,9 +318,14 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) memset(&bp->vf2pf_mbox->resp, 0, sizeof(union pfvf_tlvs)); } else { - /* PF reports error */ - BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n", - bp->acquire_resp.hdr.status); + /* Determine reason of PF failure of acquire process */ + fp_hsi_resp = bnx2x_search_tlv_list(bp, resp, + CHANNEL_TLV_FP_HSI_SUPPORT); + if (fp_hsi_resp && !fp_hsi_resp->is_supported) + BNX2X_ERR("Old hypervisor - doesn't support current fastpath HSI version; Need to downgrade VF driver [or upgrade hypervisor]\n"); + else + BNX2X_ERR("Failed to get the requested amount of resources: %d. 
Breaking...\n", + bp->acquire_resp.hdr.status); rc = -EAGAIN; goto out; } @@ -333,6 +340,25 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) bp->flags |= HAS_PHYS_PORT_ID; } + /* Old Hypervisors might not even support the FP_HSI_SUPPORT TLV. + * If that's the case, we need to make certain the required FW was + * supported by such a hypervisor [i.e., v0-v2]. + */ + fp_hsi_resp = bnx2x_search_tlv_list(bp, resp, + CHANNEL_TLV_FP_HSI_SUPPORT); + if (!fp_hsi_resp && (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2)) { + BNX2X_ERR("Old hypervisor - need to downgrade VF's driver\n"); + + /* Since acquire succeeded on the PF side, we need to send a + * release message in order to allow future probes. + */ + bnx2x_vfpf_finalize(bp, &req->first_tlv); + bnx2x_vfpf_release(bp); + + rc = -EINVAL; + goto out; + } + /* get HW info */ bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff); bp->link_params.chip_id = bp->common.chip_id; @@ -1125,6 +1151,26 @@ static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, *offset += sizeof(struct vfpf_port_phys_id_resp_tlv); } +static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp, + struct bnx2x_virtf *vf, + void *buffer, + u16 *offset) +{ + struct vfpf_fp_hsi_resp_tlv *fp_hsi; + + bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT, + sizeof(struct vfpf_fp_hsi_resp_tlv)); + + fp_hsi = (struct vfpf_fp_hsi_resp_tlv *) + (((u8 *)buffer) + *offset); + fp_hsi->is_supported = (vf->fp_hsi > ETH_FP_HSI_VERSION) ? 0 : 1; + + /* Offset should continue representing the offset to the tail + * of TLV data (outside this function scope) + */ + *offset += sizeof(struct vfpf_fp_hsi_resp_tlv); +} + static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx, int vfop_status) { @@ -1219,6 +1265,12 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, CHANNEL_TLV_PHYS_PORT_ID)) bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length); + /* `New' vfs will want to know if fastpath HSI is supported, since + * if that's not the case they could print into the system log the fact + * that the driver version must be updated. + */ + bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length); + bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -1288,6 +1340,23 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, goto out; } + /* Verify the VF fastpath HSI can be supported by the loaded FW. + * Linux vfs should be oblivious to changes between v0 and v2.
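/* Editorial aside: the acquire handshake above degrades gracefully - a
 * capability is probed as an optional TLV in the response, and its absence is
 * read as "old peer", which is acceptable only if our own requirement
 * predates the TLV. Skeleton of that decision; demo_find_tlv(), the struct,
 * and the constants are hypothetical stand-ins for bnx2x_search_tlv_list()
 * and the HSI macros.
 */
struct demo_cap_tlv {
	u8 is_supported;
};

static int demo_check_peer_cap(const void *resp, u8 my_ver, u8 last_legacy_ver)
{
	const struct demo_cap_tlv *cap = demo_find_tlv(resp, DEMO_TLV_CAP);

	if (!cap)	/* old peer: fine only for legacy versions */
		return my_ver <= last_legacy_ver ? 0 : -EINVAL;

	return cap->is_supported ? 0 : -EINVAL;
}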
+ */ + if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire)) + vf->fp_hsi = acquire->vfdev_info.fp_hsi_ver; + else + vf->fp_hsi = max_t(u8, acquire->vfdev_info.fp_hsi_ver, + ETH_FP_HSI_VER_2); + if (vf->fp_hsi > ETH_FP_HSI_VERSION) { + DP(BNX2X_MSG_IOV, + "VF [%d] - Can't support acquire request since VF requests a FW version which is too new [%02x > %02x]\n", + vf->abs_vfid, acquire->vfdev_info.fp_hsi_ver, + ETH_FP_HSI_VERSION); + rc = -EINVAL; + goto out; + } + /* acquire the resources */ rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index 15670c499a20..b86479fc0d2f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -124,7 +124,7 @@ struct vfpf_acquire_tlv { #define VF_OS_UNDEFINED (0 << VF_OS_SHIFT) #define VF_OS_WINDOWS (1 << VF_OS_SHIFT) - u8 padding; + u8 fp_hsi_ver; u8 caps; #define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0) } vfdev_info; @@ -204,6 +204,12 @@ struct vfpf_port_phys_id_resp_tlv { u8 padding[2]; }; +struct vfpf_fp_hsi_resp_tlv { + struct channel_tlv tl; + u8 is_supported; + u8 padding[3]; +}; + #define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues * stats will be coalesced on * the leading RSS queue @@ -448,6 +454,7 @@ enum channel_tlvs { CHANNEL_TLV_UPDATE_RSS, CHANNEL_TLV_PHYS_PORT_ID, CHANNEL_TLV_UPDATE_TPA, + CHANNEL_TLV_FP_HSI_SUPPORT, CHANNEL_TLV_MAX }; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 23f23c97c2ad..f05fab65d78a 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -382,10 +382,8 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, if (l5_cid >= MAX_CM_SK_TBL_SZ) break; - rcu_read_lock(); if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) { rc = -ENODEV; - rcu_read_unlock(); break; } csk = &cp->csk_tbl[l5_cid]; @@ -414,7 +412,6 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, } } csk_put(csk); - rcu_read_unlock(); rc = 0; } } @@ -615,7 +612,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); mutex_lock(&cnic_lock); - if (rcu_dereference(cp->ulp_ops[ulp_type])) { + if (rcu_access_pointer(cp->ulp_ops[ulp_type])) { RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); cnic_put(dev); } else { diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index fdc9ec09e453..7078bd386fb7 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -42,6 +42,7 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/phy.h> +#include <linux/platform_data/bcmgenet.h> #include <asm/unaligned.h> @@ -613,6 +614,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { UMAC_RBUF_OVFL_CNT), STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), + STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed), + STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed), }; #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) @@ -711,6 +715,98 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev, } } +static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) +{ + struct 
bcmgenet_priv *priv = netdev_priv(dev);
+	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
+	u32 reg;
+
+	if (enable && !priv->clk_eee_enabled) {
+		clk_prepare_enable(priv->clk_eee);
+		priv->clk_eee_enabled = true;
+	}
+
+	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
+	if (enable)
+		reg |= EEE_EN;
+	else
+		reg &= ~EEE_EN;
+	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);
+
+	/* Enable EEE and switch to a 27MHz clock automatically */
+	reg = __raw_readl(priv->base + off);
+	if (enable)
+		reg |= TBUF_EEE_EN | TBUF_PM_EN;
+	else
+		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
+	__raw_writel(reg, priv->base + off);
+
+	/* Do the same thing for RBUF */
+	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
+	if (enable)
+		reg |= RBUF_EEE_EN | RBUF_PM_EN;
+	else
+		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
+	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);
+
+	if (!enable && priv->clk_eee_enabled) {
+		clk_disable_unprepare(priv->clk_eee);
+		priv->clk_eee_enabled = false;
+	}
+
+	priv->eee.eee_enabled = enable;
+	priv->eee.eee_active = enable;
+}
+
+static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct ethtool_eee *p = &priv->eee;
+
+	if (GENET_IS_V1(priv))
+		return -EOPNOTSUPP;
+
+	e->eee_enabled = p->eee_enabled;
+	e->eee_active = p->eee_active;
+	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
+
+	return phy_ethtool_get_eee(priv->phydev, e);
+}
+
+static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct ethtool_eee *p = &priv->eee;
+	int ret = 0;
+
+	if (GENET_IS_V1(priv))
+		return -EOPNOTSUPP;
+
+	p->eee_enabled = e->eee_enabled;
+
+	if (!p->eee_enabled) {
+		bcmgenet_eee_enable_set(dev, false);
+	} else {
+		ret = phy_init_eee(priv->phydev, 0);
+		if (ret) {
+			netif_err(priv, hw, dev, "EEE initialization failed\n");
+			return ret;
+		}
+
+		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
+		bcmgenet_eee_enable_set(dev, true);
+	}
+
+	return phy_ethtool_set_eee(priv->phydev, e);
+}
+
+static int bcmgenet_nway_reset(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	return genphy_restart_aneg(priv->phydev);
+}
+
 /* standard ethtool support functions. */
 static struct ethtool_ops bcmgenet_ethtool_ops = {
 	.get_strings		= bcmgenet_get_strings,
@@ -724,6 +820,9 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
 	.set_msglevel		= bcmgenet_set_msglevel,
 	.get_wol		= bcmgenet_get_wol,
 	.set_wol		= bcmgenet_set_wol,
+	.get_eee		= bcmgenet_get_eee,
+	.set_eee		= bcmgenet_set_eee,
+	.nway_reset		= bcmgenet_nway_reset,
 };
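The new get_eee/set_eee hooks are reached from user space through the SIOCETHTOOL ioctl, which is what "ethtool --show-eee" and "ethtool --set-eee" issue. A minimal sketch of such a caller; the interface name "eth0" and the error handling are illustrative assumptions, not part of the driver:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <net/if.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_eee eee = { .cmd = ETHTOOL_GEEE };
		struct ifreq ifr = { 0 };
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&eee;

		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			return 1;	/* GEEE lands in bcmgenet_get_eee() */
		printf("enabled=%u active=%u tx_lpi_timer=%u\n",
		       eee.eee_enabled, eee.eee_active, eee.tx_lpi_timer);

		eee.cmd = ETHTOOL_SEEE;	/* SEEE lands in bcmgenet_set_eee() */
		eee.eee_enabled = 1;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			return 1;
		close(fd);
		return 0;
	}

Note that the driver refuses both calls with -EOPNOTSUPP on GENET V1, which has no EEE hardware.

 /* Power down the unimac, based on mode.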
*/ @@ -989,6 +1088,7 @@ static int bcmgenet_xmit_single(struct net_device *dev, mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); ret = dma_mapping_error(kdev, mapping); if (ret) { + priv->mib.tx_dma_failed++; netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); dev_kfree_skb(skb); return ret; @@ -1035,6 +1135,7 @@ static int bcmgenet_xmit_frag(struct net_device *dev, skb_frag_size(frag), DMA_TO_DEVICE); ret = dma_mapping_error(kdev, mapping); if (ret) { + priv->mib.tx_dma_failed++; netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n", __func__); return ret; @@ -1231,6 +1332,7 @@ static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb) priv->rx_buf_len, DMA_FROM_DEVICE); ret = dma_mapping_error(kdev, mapping); if (ret) { + priv->mib.rx_dma_failed++; bcmgenet_free_cb(cb); netif_err(priv, rx_err, priv->dev, "%s DMA map failed\n", __func__); @@ -1397,8 +1499,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, /* refill RX path on the current control block */ refill: err = bcmgenet_rx_refill(priv, cb); - if (err) + if (err) { + priv->mib.alloc_rx_buff_failed++; netif_err(priv, rx_err, dev, "Rx refill failed\n"); + } rxpktprocessed++; priv->rx_read_ptr++; @@ -2140,6 +2244,12 @@ static int bcmgenet_open(struct net_device *dev) goto err_irq0; } + /* Re-configure the port multiplexer towards the PHY device */ + bcmgenet_mii_config(priv->dev, false); + + phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup, + priv->phy_interface); + bcmgenet_netif_start(dev); return 0; @@ -2184,6 +2294,9 @@ static int bcmgenet_close(struct net_device *dev) bcmgenet_netif_stop(dev); + /* Really kill the PHY state machine and disconnect from it */ + phy_disconnect(priv->phydev); + /* Disable MAC receive */ umac_enable_set(priv, CMD_RX_EN, false); @@ -2391,6 +2504,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) struct bcmgenet_hw_params *params; u32 reg; u8 major; + u16 gphy_rev; if (GENET_IS_V4(priv)) { bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; @@ -2439,8 +2553,29 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) * to pass this information to the PHY driver. The PHY driver expects * to find the PHY major revision in bits 15:8 while the GENET register * stores that information in bits 7:0, account for that. + * + * On newer chips, starting with PHY revision G0, a new scheme is + * deployed similar to the Starfighter 2 switch with GPHY major + * revision in bits 15:8 and patch level in bits 7:0. Major revision 0 + * is reserved as well as special value 0x01ff, we have a small + * heuristic to check for the new GPHY revision and re-arrange things + * so the GPHY driver is happy. 
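Restated as a small pure function, the heuristic this comment describes (and the branches that follow) classifies the 16-bit register value in three steps; the example encodings in the comments are illustrative assumptions, not values taken from a datasheet:

	#include <stdint.h>

	/* Mirrors the classification below: returns the value handed to the
	 * PHY driver, or 0 for a reserved encoding the caller must reject.
	 */
	static uint16_t gphy_rev_normalize(uint16_t gphy_rev)
	{
		if (gphy_rev & 0xf0)		/* old scheme: major in bits 7:0 */
			return gphy_rev << 8;	/* e.g. 0x00a0 becomes 0xa000 */
		if (gphy_rev & 0xff00)		/* new scheme: major 15:8, patch 7:0 */
			return gphy_rev;	/* e.g. 0x1002 (G0 patch 2) stays put */
		return 0;			/* reserved encoding, caller warns */
	}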
*/ - priv->gphy_rev = (reg & 0xffff) << 8; + gphy_rev = reg & 0xffff; + + /* This is the good old scheme, just GPHY major, no minor nor patch */ + if ((gphy_rev & 0xf0) != 0) + priv->gphy_rev = gphy_rev << 8; + + /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ + else if ((gphy_rev & 0xff00) != 0) + priv->gphy_rev = gphy_rev; + + /* This is reserved so should require special treatment */ + else if (gphy_rev == 0 || gphy_rev == 0x01ff) { + pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); + return; + } #ifdef CONFIG_PHYS_ADDR_T_64BIT if (!(params->flags & GENET_HAS_40BITS)) @@ -2474,8 +2609,9 @@ static const struct of_device_id bcmgenet_match[] = { static int bcmgenet_probe(struct platform_device *pdev) { + struct bcmgenet_platform_data *pd = pdev->dev.platform_data; struct device_node *dn = pdev->dev.of_node; - const struct of_device_id *of_id; + const struct of_device_id *of_id = NULL; struct bcmgenet_priv *priv; struct net_device *dev; const void *macaddr; @@ -2489,9 +2625,11 @@ static int bcmgenet_probe(struct platform_device *pdev) return -ENOMEM; } - of_id = of_match_node(bcmgenet_match, dn); - if (!of_id) - return -EINVAL; + if (dn) { + of_id = of_match_node(bcmgenet_match, dn); + if (!of_id) + return -EINVAL; + } priv = netdev_priv(dev); priv->irq0 = platform_get_irq(pdev, 0); @@ -2503,11 +2641,15 @@ static int bcmgenet_probe(struct platform_device *pdev) goto err; } - macaddr = of_get_mac_address(dn); - if (!macaddr) { - dev_err(&pdev->dev, "can't find MAC address\n"); - err = -EINVAL; - goto err; + if (dn) { + macaddr = of_get_mac_address(dn); + if (!macaddr) { + dev_err(&pdev->dev, "can't find MAC address\n"); + err = -EINVAL; + goto err; + } + } else { + macaddr = pd->mac_address; } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2547,7 +2689,10 @@ static int bcmgenet_probe(struct platform_device *pdev) priv->dev = dev; priv->pdev = pdev; - priv->version = (enum bcmgenet_version)of_id->data; + if (of_id) + priv->version = (enum bcmgenet_version)of_id->data; + else + priv->version = pd->genet_version; priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); if (IS_ERR(priv->clk)) @@ -2568,6 +2713,12 @@ static int bcmgenet_probe(struct platform_device *pdev) if (IS_ERR(priv->clk_wol)) dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); + priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee"); + if (IS_ERR(priv->clk_eee)) { + dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n"); + priv->clk_eee = NULL; + } + err = reset_umac(priv); if (err) goto err_clk_disable; @@ -2685,7 +2836,7 @@ static int bcmgenet_resume(struct device *d) phy_init_hw(priv->phydev); /* Speed settings must be restored */ - bcmgenet_mii_config(priv->dev); + bcmgenet_mii_config(priv->dev, false); /* disable ethernet MAC while updating its registers */ umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); @@ -2718,6 +2869,9 @@ static int bcmgenet_resume(struct device *d) phy_resume(priv->phydev); + if (priv->eee.eee_enabled) + bcmgenet_eee_enable_set(dev, true); + bcmgenet_netif_start(dev); return 0; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index dbf524ea3b19..b36ddec0cc0a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -143,6 +143,9 @@ struct bcmgenet_mib_counters { u32 rbuf_ovflow_cnt; u32 rbuf_err_cnt; u32 mdf_err_cnt; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 tx_dma_failed; }; #define UMAC_HD_BKP_CTRL 
0x004 @@ -182,6 +185,21 @@ struct bcmgenet_mib_counters { #define UMAC_MAC1 0x010 #define UMAC_MAX_FRAME_LEN 0x014 +#define UMAC_EEE_CTRL 0x064 +#define EN_LPI_RX_PAUSE (1 << 0) +#define EN_LPI_TX_PFC (1 << 1) +#define EN_LPI_TX_PAUSE (1 << 2) +#define EEE_EN (1 << 3) +#define RX_FIFO_CHECK (1 << 4) +#define EEE_TX_CLK_DIS (1 << 5) +#define DIS_EEE_10M (1 << 6) +#define LP_IDLE_PREDICTION_MODE (1 << 7) + +#define UMAC_EEE_LPI_TIMER 0x068 +#define UMAC_EEE_WAKE_TIMER 0x06C +#define UMAC_EEE_REF_COUNT 0x070 +#define EEE_REFERENCE_COUNT_MASK 0xffff + #define UMAC_TX_FLUSH 0x334 #define UMAC_MIB_START 0x400 @@ -229,6 +247,10 @@ struct bcmgenet_mib_counters { #define RBUF_RXCHK_EN (1 << 0) #define RBUF_SKIP_FCS (1 << 4) +#define RBUF_ENERGY_CTRL 0x9c +#define RBUF_EEE_EN (1 << 0) +#define RBUF_PM_EN (1 << 1) + #define RBUF_TBUF_SIZE_CTRL 0xb4 #define RBUF_HFB_CTRL_V1 0x38 @@ -244,6 +266,9 @@ struct bcmgenet_mib_counters { #define TBUF_CTRL 0x00 #define TBUF_BP_MC 0x0C +#define TBUF_ENERGY_CTRL 0x14 +#define TBUF_EEE_EN (1 << 0) +#define TBUF_PM_EN (1 << 1) #define TBUF_CTRL_V1 0x80 #define TBUF_BP_MC_V1 0xA0 @@ -548,6 +573,8 @@ struct bcmgenet_priv { struct device_node *phy_dn; struct mii_bus *mii_bus; u16 gphy_rev; + struct clk *clk_eee; + bool clk_eee_enabled; /* PHY device variables */ int old_link; @@ -584,6 +611,8 @@ struct bcmgenet_priv { u32 wolopts; struct bcmgenet_mib_counters mib; + + struct ethtool_eee eee; }; #define GENET_IO_MACRO(name, offset) \ @@ -617,9 +646,10 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); /* MDIO routines */ int bcmgenet_mii_init(struct net_device *dev); -int bcmgenet_mii_config(struct net_device *dev); +int bcmgenet_mii_config(struct net_device *dev, bool init); void bcmgenet_mii_exit(struct net_device *dev); void bcmgenet_mii_reset(struct net_device *dev); +void bcmgenet_mii_setup(struct net_device *dev); /* Wake-on-LAN routines */ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 9ff799a9f801..446889cc3c6a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -23,6 +23,7 @@ #include <linux/of.h> #include <linux/of_net.h> #include <linux/of_mdio.h> +#include <linux/platform_data/bcmgenet.h> #include "bcmgenet.h" @@ -77,7 +78,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id, /* setup netdev link state when PHY link status change and * update UMAC and RGMII block when link up */ -static void bcmgenet_mii_setup(struct net_device *dev) +void bcmgenet_mii_setup(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); struct phy_device *phydev = priv->phydev; @@ -211,7 +212,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); } -int bcmgenet_mii_config(struct net_device *dev) +int bcmgenet_mii_config(struct net_device *dev, bool init) { struct bcmgenet_priv *priv = netdev_priv(dev); struct phy_device *phydev = priv->phydev; @@ -298,7 +299,8 @@ int bcmgenet_mii_config(struct net_device *dev) return -EINVAL; } - dev_info(kdev, "configuring instance for %s\n", phy_name); + if (init) + dev_info(kdev, "configuring instance for %s\n", phy_name); return 0; } @@ -311,22 +313,6 @@ static int bcmgenet_mii_probe(struct net_device *dev) u32 phy_flags; int ret; - if (priv->phydev) { - pr_info("PHY already attached\n"); - return 0; - } - - /* In the case of a fixed PHY, the DT node associated - * 
to the PHY is the Ethernet MAC DT node. - */ - if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { - ret = of_phy_register_fixed_link(dn); - if (ret) - return ret; - - priv->phy_dn = of_node_get(dn); - } - /* Communicate the integrated PHY revision */ phy_flags = priv->gphy_rev; @@ -336,11 +322,39 @@ static int bcmgenet_mii_probe(struct net_device *dev) priv->old_duplex = -1; priv->old_pause = -1; - phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, - phy_flags, priv->phy_interface); - if (!phydev) { - pr_err("could not attach to PHY\n"); - return -ENODEV; + if (dn) { + if (priv->phydev) { + pr_info("PHY already attached\n"); + return 0; + } + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { + ret = of_phy_register_fixed_link(dn); + if (ret) + return ret; + + priv->phy_dn = of_node_get(dn); + } + + phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, + phy_flags, priv->phy_interface); + if (!phydev) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } else { + phydev = priv->phydev; + phydev->dev_flags = phy_flags; + + ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, + priv->phy_interface); + if (ret) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } } priv->phydev = phydev; @@ -350,7 +364,7 @@ static int bcmgenet_mii_probe(struct net_device *dev) * PHY speed which is needed for bcmgenet_mii_config() to configure * things appropriately. */ - ret = bcmgenet_mii_config(dev); + ret = bcmgenet_mii_config(dev, true); if (ret) { phy_disconnect(priv->phydev); return ret; @@ -437,6 +451,75 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) return 0; } +static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + struct mii_bus *mdio = priv->mii_bus; + struct phy_device *phydev; + int ret; + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + /* + * Internal or external PHY with MDIO access + */ + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + mdio->phy_mask = ~(1 << pd->phy_address); + else + mdio->phy_mask = 0; + + ret = mdiobus_register(mdio); + if (ret) { + dev_err(kdev, "failed to register MDIO bus\n"); + return ret; + } + + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + phydev = mdio->phy_map[pd->phy_address]; + else + phydev = phy_find_first(mdio); + + if (!phydev) { + dev_err(kdev, "failed to register PHY device\n"); + mdiobus_unregister(mdio); + return -ENODEV; + } + } else { + /* + * MoCA port or no MDIO access. + * Use fixed PHY to represent the link layer. 
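This non-DT path is fed from a board file rather than the device tree. A hypothetical registration showing the bcmgenet_platform_data fields the code above and below consumes (mac_address, genet_version, phy_interface, mdio_enabled, phy_address, phy_speed, phy_duplex); the concrete values, and the GENET_V3 enumerator, are illustrative assumptions:

	#include <linux/phy.h>
	#include <linux/platform_data/bcmgenet.h>

	/* Hypothetical board support: a GENET v3 MAC wired to a MoCA port,
	 * hence no MDIO access and a fixed 1000/full link.
	 */
	static struct bcmgenet_platform_data board_genet_pd = {
		.mac_address	= { 0x00, 0x10, 0x18, 0x00, 0x00, 0x01 },
		.genet_version	= GENET_V3,
		.phy_interface	= PHY_INTERFACE_MODE_MOCA,
		.mdio_enabled	= false,
		.phy_speed	= SPEED_1000,
		.phy_duplex	= DUPLEX_FULL,
	};

With mdio_enabled false (or a MoCA phy_interface), bcmgenet_mii_pd_init() takes the fixed-PHY branch shown next.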
+ */ + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = pd->phy_speed, + .duplex = pd->phy_duplex, + .pause = 0, + .asym_pause = 0, + }; + + phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + if (!phydev || IS_ERR(phydev)) { + dev_err(kdev, "failed to register fixed PHY device\n"); + return -ENODEV; + } + } + + priv->phydev = phydev; + priv->phy_interface = pd->phy_interface; + + return 0; +} + +static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + + if (dn) + return bcmgenet_mii_of_init(priv); + else + return bcmgenet_mii_pd_init(priv); +} + int bcmgenet_mii_init(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); @@ -446,7 +529,7 @@ int bcmgenet_mii_init(struct net_device *dev) if (ret) return ret; - ret = bcmgenet_mii_of_init(priv); + ret = bcmgenet_mii_bus_init(priv); if (ret) goto out_free; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index dbb41c1923e6..bb48a610b72a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8563,7 +8563,8 @@ static int tg3_init_rings(struct tg3 *tp) if (tnapi->rx_rcb) memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); - if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { + if (tnapi->prodring.rx_std && + tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { tg3_free_rings(tp); return -ENOMEM; } @@ -10540,19 +10541,14 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) udelay(100); if (tg3_flag(tp, ENABLE_RSS)) { + u32 rss_key[10]; + tg3_rss_write_indir_tbl(tp); - /* Setup the "secret" hash key. */ - tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); - tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc); - tw32(MAC_RSS_HASH_KEY_2, 0x50103a45); - tw32(MAC_RSS_HASH_KEY_3, 0x36621985); - tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8); - tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e); - tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556); - tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe); - tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7); - tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481); + netdev_rss_key_fill(rss_key, 10 * sizeof(u32)); + + for (i = 0; i < 10 ; i++) + tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]); } tp->rx_mode = RX_MODE_ENABLE; @@ -12565,22 +12561,38 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev) return size; } -static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key) +static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) { struct tg3 *tp = netdev_priv(dev); int i; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) indir[i] = tp->rss_ind_tbl[i]; return 0; } -static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key) +static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, + const u8 hfunc) { struct tg3 *tp = netdev_priv(dev); size_t i; + /* We require at least one supported parameter to be changed and no + * change in any of the unsupported parameters + */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + if (!indir) + return 0; + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) tp->rss_ind_tbl[i] = indir[i]; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index c3861de9dc81..323721838cf9 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -2054,7 +2054,7 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) 
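Both the tg3 hunk above and the bnad hunk below replace device-private hash key generation with netdev_rss_key_fill(), so every adapter in the system loads the same boot-time random Toeplitz key. A sketch of roughly what the core helper does (an approximation of the net core implementation, not a verbatim copy):

	#include <linux/net.h>
	#include <linux/netdevice.h>
	#include <linux/string.h>

	static u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];	/* 52 bytes */

	void netdev_rss_key_fill(void *buffer, size_t len)
	{
		BUG_ON(len > sizeof(netdev_rss_key));
		/* generate the shared key once, on first use */
		net_get_random_once(netdev_rss_key, sizeof(netdev_rss_key));
		memcpy(buffer, netdev_rss_key, len);
	}

tg3 consumes the first 40 bytes (ten 32-bit MAC_RSS_HASH_KEY registers); bnad sizes the request to its toeplitz_hash_key array.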
BFI_ENET_RSS_IPV4_TCP);
 		rx_config->rss_config.hash_mask =
 			bnad->num_rxp_per_rx - 1;
-		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
+		netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
 			sizeof(rx_config->rss_config.toeplitz_hash_key));
 	} else {
 		rx_config->rss_status = BNA_STATUS_T_DISABLED;
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 9e089d24466e..6932be08c62c 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -35,8 +35,8 @@ config MACB
 	---help---
 	  The Cadence MACB ethernet interface is found on many Atmel AT32 and
 	  AT91 parts.  This driver also supports the Cadence GEM (Gigabit
-	  Ethernet MAC found in some ARM SoC devices).  Note: the Gigabit mode
-	  is not yet supported.  Say Y to include support for the MACB/GEM chip.
+	  Ethernet MAC found in some ARM SoC devices).  Say Y to include
+	  support for the MACB/GEM chip.

 	  To compile this driver as a module, choose M here: the module
 	  will be called macb.
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 4c5879389003..86222a1bdb12 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -301,7 +301,7 @@ unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
 	struct sched_port *p = &s->p[port];
 	unsigned int max_avail_segs;

-	pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
+	pr_debug("%s mtu=%d speed=%d\n", __func__, mtu, speed);
 	if (speed)
 		p->speed = speed;
 	if (mtu)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 1df65c915b99..b85280775997 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o

 cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
 cxgb4-$(CONFIG_CHELSIO_T4_DCB) +=  cxgb4_dcb.o
+cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 3c481b260745..c38a93607ea2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -222,6 +222,12 @@ struct tp_err_stats {
 	u32 ofldCongDefer;
 };

+struct sge_params {
+	u32 hps;			/* host page size for our PF/VF */
+	u32 eq_qpp;			/* egress queues/page for our PF/VF */
+	u32 iq_qpp;			/* ingress queues/page for our PF/VF */
+};
+
 struct tp_params {
 	unsigned int ntxchan;        /* # of Tx channels */
 	unsigned int tre;            /* log2 of core clocks per TP tick */
@@ -285,6 +291,7 @@ enum chip_type {
 };

 struct adapter_params {
+	struct sge_params sge;
 	struct tp_params  tp;
 	struct vpd_params vpd;
 	struct pci_params pci;
@@ -318,10 +325,10 @@ struct adapter_params {
 #include "t4fw_api.h"

 #define FW_VERSION(chip) ( \
-		FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \
-		FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \
-		FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \
-		FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD))
+		FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
+		FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
+		FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
+		FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
 #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)

 struct fw_info {
@@ -354,7 +361,7 @@ struct link_config {
 	unsigned char  link_ok;          /* link up?
*/ }; -#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) +#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) enum { MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */ @@ -431,7 +438,8 @@ struct sge_fl { /* SGE free-buffer queue state */ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ __be64 *desc; /* address of HW Rx descriptor ring */ dma_addr_t addr; /* bus address of HW ring start */ - u64 udb; /* BAR2 offset of User Doorbell area */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ }; /* A packet gather list */ @@ -461,7 +469,8 @@ struct sge_rspq { /* state for an SGE response queue */ u16 abs_id; /* absolute SGE id for the response q */ __be64 *desc; /* address of HW response ring */ dma_addr_t phys_addr; /* physical address of the ring */ - u64 udb; /* BAR2 offset of User Doorbell area */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ unsigned int iqe_len; /* entry size */ unsigned int size; /* capacity of response queue */ struct adapter *adap; @@ -519,7 +528,8 @@ struct sge_txq { int db_disabled; unsigned short db_pidx; unsigned short db_pidx_inc; - u64 udb; /* BAR2 offset of User Doorbell area */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ }; struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ @@ -995,6 +1005,15 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, const u8 *fw_data, unsigned int fw_size, struct fw_hdr *card_fw, enum dev_state state, int *reset); int t4_prep_adapter(struct adapter *adapter); + +enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; +int t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + +int t4_init_sge_params(struct adapter *adapter); int t4_init_tp_params(struct adapter *adap); int t4_filter_field_shift(const struct adapter *adap, int filter_sel); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); @@ -1085,4 +1104,5 @@ void t4_db_dropped(struct adapter *adapter); int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val); void t4_sge_decode_idma_state(struct adapter *adapter, int state); +void t4_free_mem(void *addr); #endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 8edf0f5bd679..a35d1ec6950e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -60,6 +60,43 @@ void cxgb4_dcb_version_init(struct net_device *dev) dcb->dcb_version = FW_PORT_DCB_VER_AUTO; } +static void cxgb4_dcb_cleanup_apps(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + struct port_dcb_info *dcb = &pi->dcb; + struct dcb_app app; + int i, err; + + /* zero priority implies remove */ + app.priority = 0; + + for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { + /* Check if app list is exhausted */ + if (!dcb->app_priority[i].protocolid) + break; + + app.protocol = dcb->app_priority[i].protocolid; + + if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) { + app.priority = dcb->app_priority[i].user_prio_map; + app.selector = dcb->app_priority[i].sel_field + 1; + err = dcb_ieee_delapp(dev, &app); + } else { + 
app.selector = !!(dcb->app_priority[i].sel_field);
+			err = dcb_setapp(dev, &app);
+		}
+
+		if (err) {
+			dev_err(adap->pdev_dev,
+				"Failed DCB Clear %s Application Priority: sel=%d, prot=%d, err=%d\n",
+				dcb_ver_array[dcb->dcb_version], app.selector,
+				app.protocol, -err);
+			break;
+		}
+	}
+}
+
 /* Finite State machine for Data Center Bridging.
  */
 void cxgb4_dcb_state_fsm(struct net_device *dev,
@@ -80,14 +117,17 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
 			/* we're going to use Host DCB */
 			dcb->state = CXGB4_DCB_STATE_HOST;
 			dcb->supported = CXGB4_DCBX_HOST_SUPPORT;
-			dcb->enabled = 1;
 			break;
 		}

 		case CXGB4_DCB_INPUT_FW_ENABLED: {
 			/* we're going to use Firmware DCB */
 			dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
-			dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+			dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
+			if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
+				dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
+			else
+				dcb->supported |= DCB_CAP_DCBX_VER_CEE;
 			break;
 		}

@@ -145,6 +185,7 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
 			 * state.  We need to reset back to a ground state
 			 * of incomplete.
 			 */
+			cxgb4_dcb_cleanup_apps(dev);
 			cxgb4_dcb_state_init(dev);
 			dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
 			dcb->supported = CXGB4_DCBX_FW_SUPPORT;
@@ -202,7 +243,7 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
 			     const struct fw_port_cmd *pcmd)
 {
 	const union fw_port_dcb *fwdcb = &pcmd->u.dcb;
-	int port = FW_PORT_CMD_PORTID_GET(be32_to_cpu(pcmd->op_to_portid));
+	int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid));
 	struct net_device *dev = adap->port[port];
 	struct port_info *pi = netdev_priv(dev);
 	struct port_dcb_info *dcb = &pi->dcb;
@@ -215,12 +256,12 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
 	if (dcb_type == FW_PORT_DCB_TYPE_CONTROL) {
 		enum cxgb4_dcb_state_input input =
 			((pcmd->u.dcb.control.all_syncd_pkd &
-			  FW_PORT_CMD_ALL_SYNCD)
+			  FW_PORT_CMD_ALL_SYNCD_F)
 			 ? CXGB4_DCB_STATE_FW_ALLSYNCED
 			 : CXGB4_DCB_STATE_FW_INCOMPLETE);

 		if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) {
-			dcb_running_version = FW_PORT_CMD_DCB_VERSION_GET(
+			dcb_running_version = FW_PORT_CMD_DCB_VERSION_G(
 				be16_to_cpu(
 				pcmd->u.dcb.control.dcb_version_to_app_state));
 			if (dcb_running_version == FW_PORT_DCB_VER_CEE1D01 ||
@@ -349,6 +390,12 @@ static u8 cxgb4_setstate(struct net_device *dev, u8 enabled)
 {
 	struct port_info *pi = netdev2pinfo(dev);

+	/* If DCBx is host-managed, dcb is enabled by outside LLDP agents */
+	if (pi->dcb.state == CXGB4_DCB_STATE_HOST) {
+		pi->dcb.enabled = enabled;
+		return 0;
+	}
+
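A recurring detail in the hunks that follow: the firmware stores per-traffic-class state mirrored relative to the operating system's numbering, with TC 0 in the most significant nibble of pgid and priority 0 in the most significant bit of pfcen, hence the repeated "7 - x" translations. Hypothetical helpers (not present in the driver, which open-codes the arithmetic) that spell out the convention:

	/* Hypothetical helpers making the mirrored numbering explicit. */
	static inline int cxgb4_fw_tc(int tc)
	{
		return 7 - tc;			/* OS TC 0 -> firmware nibble 7 */
	}

	static inline u8 cxgb4_fw_pfc_bit(int priority)
	{
		return 1 << (7 - priority);	/* OS priority 0 -> pfcen bit 7 */
	}

So cxgb4_setpgtccfg_tx() masks _pgid at nibble 7 - tc, and cxgb4_setpfccfg() sets or clears 1 << (7 - priority), as the diffs below show.

 	/* Firmware doesn't provide any mechanism to control the DCB state.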
*/ if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED)) @@ -394,14 +441,17 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc, *up_tc_map = (1 << tc); /* prio_type is link strict */ - *prio_type = 0x2; + if (*pgid != 0xF) + *prio_type = 0x2; } static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc, u8 *prio_type, u8 *pgid, u8 *bw_per, u8 *up_tc_map) { - return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1); + /* tc 0 is written at MSB position */ + return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per, + up_tc_map, 1); } @@ -409,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc, u8 *prio_type, u8 *pgid, u8 *bw_per, u8 *up_tc_map) { - return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0); + /* tc 0 is written at MSB position */ + return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per, + up_tc_map, 0); } static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, @@ -419,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, struct fw_port_cmd pcmd; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = pi->adapter; + int fw_tc = 7 - tc; u32 _pgid; int err; @@ -437,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, } _pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid); - _pgid &= ~(0xF << (tc * 4)); - _pgid |= pgid << (tc * 4); + _pgid &= ~(0xF << (fw_tc * 4)); + _pgid |= pgid << (fw_tc * 4); pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid); INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); @@ -466,7 +519,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); if (pi->dcb.state == CXGB4_DCB_STATE_HOST) - pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY); + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); if (err != FW_PORT_DCB_CFG_SUCCESS) @@ -530,7 +583,7 @@ static void cxgb4_setpgbwgcfg_tx(struct net_device *dev, int pgid, INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); if (pi->dcb.state == CXGB4_DCB_STATE_HOST) - pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY); + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); @@ -551,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg) priority >= CXGB4_MAX_PRIORITY) *pfccfg = 0; else - *pfccfg = (pi->dcb.pfcen >> priority) & 1; + *pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1; } /* Enable/disable Priority Pause Frames for the specified Traffic Class @@ -570,15 +623,15 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg) INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); if (pi->dcb.state == CXGB4_DCB_STATE_HOST) - pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY); + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); pcmd.u.dcb.pfc.type = FW_PORT_DCB_TYPE_PFC; pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen; if (pfccfg) - pcmd.u.dcb.pfc.pfcen |= (1 << priority); + pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority)); else - pcmd.u.dcb.pfc.pfcen &= (~(1 << priority)); + pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority))); err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); if (err != FW_PORT_DCB_CFG_SUCCESS) { @@ -789,7 +842,7 @@ static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, /* write out new app table entry */ INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); if (pi->dcb.state == CXGB4_DCB_STATE_HOST) - pcmd.op_to_portid |= 
cpu_to_be32(FW_PORT_CMD_APPLY); + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID; pcmd.u.dcb.app_priority.protocolid = cpu_to_be16(app_id); @@ -833,11 +886,16 @@ static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, /* Return whether IEEE Data Center Bridging has been negotiated. */ -static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev) +static inline int +cxgb4_ieee_negotiation_complete(struct net_device *dev, + enum cxgb4_dcb_fw_msgs dcb_subtype) { struct port_info *pi = netdev2pinfo(dev); struct port_dcb_info *dcb = &pi->dcb; + if (dcb_subtype && !(dcb->msgs & dcb_subtype)) + return 0; + return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED && (dcb->supported & DCB_CAP_DCBX_VER_IEEE)); } @@ -850,7 +908,7 @@ static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app) { int prio; - if (!cxgb4_ieee_negotiation_complete(dev)) + if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID)) return -EINVAL; if (!(app->selector && app->protocol)) return -EINVAL; @@ -872,7 +930,7 @@ static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app) { int ret; - if (!cxgb4_ieee_negotiation_complete(dev)) + if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID)) return -EINVAL; if (!(app->selector && app->protocol)) return -EINVAL; @@ -1024,7 +1082,7 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg) pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid); for (i = 0; i < CXGB4_MAX_PRIORITY; i++) - pg->prio_pg[i] = (pgid >> (i * 4)) & 0xF; + pg->prio_pg[7 - i] = (pgid >> (i * 4)) & 0xF; INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h index 2a6aa88984f4..31ce425616c9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h @@ -42,12 +42,12 @@ do { \ memset(&(__pcmd), 0, sizeof(__pcmd)); \ (__pcmd).op_to_portid = \ - cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) | \ - FW_CMD_REQUEST | \ - FW_CMD_##__op | \ - FW_PORT_CMD_PORTID(__port)); \ + cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | \ + FW_CMD_REQUEST_F | \ + FW_CMD_##__op##_F | \ + FW_PORT_CMD_PORTID_V(__port)); \ (__pcmd).action_to_len16 = \ - cpu_to_be32(FW_PORT_CMD_ACTION(__action) | \ + cpu_to_be32(FW_PORT_CMD_ACTION_V(__action) | \ FW_LEN16(pcmd)); \ } while (0) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c new file mode 100644 index 000000000000..c98a350d857e --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -0,0 +1,158 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/string_helpers.h> +#include <linux/sort.h> + +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4fw_api.h" +#include "cxgb4_debugfs.h" +#include "l2t.h" + +static ssize_t mem_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + loff_t pos = *ppos; + loff_t avail = file_inode(file)->i_size; + unsigned int mem = (uintptr_t)file->private_data & 3; + struct adapter *adap = file->private_data - mem; + __be32 *data; + int ret; + + if (pos < 0) + return -EINVAL; + if (pos >= avail) + return 0; + if (count > avail - pos) + count = avail - pos; + + data = t4_alloc_mem(count); + if (!data) + return -ENOMEM; + + spin_lock(&adap->win0_lock); + ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ); + spin_unlock(&adap->win0_lock); + if (ret) { + t4_free_mem(data); + return ret; + } + ret = copy_to_user(buf, data, count); + + t4_free_mem(data); + if (ret) + return -EFAULT; + + *ppos = pos + count; + return count; +} + +static const struct file_operations mem_debugfs_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mem_read, + .llseek = default_llseek, +}; + +static void add_debugfs_mem(struct adapter *adap, const char *name, + unsigned int idx, unsigned int size_mb) +{ + struct dentry *de; + + de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, + (void *)adap + idx, &mem_debugfs_fops); + if (de && de->d_inode) + de->d_inode->i_size = size_mb << 20; +} + +/* Add an array of Debug FS files. 
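The memory-dump files above rely on a small pointer-tagging trick: add_debugfs_mem() stores (void *)adap + idx as the file's private data, and mem_read() recovers the memory index from the low two bits and the adapter pointer from the remainder. A minimal model of that encode/decode pair, assuming (as the code does) that struct adapter is at least 4-byte aligned and that the MEM_* indices fit in two bits:

	#include <stdint.h>

	struct adapter;	/* opaque here */

	static void *mem_tag(struct adapter *adap, unsigned int idx)
	{
		return (char *)adap + idx;	/* idx in 0..3 */
	}

	static unsigned int mem_tag_idx(void *p)
	{
		return (uintptr_t)p & 3;	/* low two bits carry the index */
	}

	static struct adapter *mem_tag_adap(void *p)
	{
		return (struct adapter *)((char *)p - mem_tag_idx(p));
	}

This avoids allocating a per-file context just to pair an adapter with a memory bank.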
+ */ +void add_debugfs_files(struct adapter *adap, + struct t4_debugfs_entry *files, + unsigned int nfiles) +{ + int i; + + /* debugfs support is best effort */ + for (i = 0; i < nfiles; i++) + debugfs_create_file(files[i].name, files[i].mode, + adap->debugfs_root, + (void *)adap + files[i].data, + files[i].ops); +} + +int t4_setup_debugfs(struct adapter *adap) +{ + int i; + u32 size; + + static struct t4_debugfs_entry t4_debugfs_files[] = { + { "l2t", &t4_l2t_fops, S_IRUSR, 0}, + }; + + add_debugfs_files(adap, + t4_debugfs_files, + ARRAY_SIZE(t4_debugfs_files)); + + i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (i & EDRAM0_ENABLE_F) { + size = t4_read_reg(adap, MA_EDRAM0_BAR_A); + add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM0_SIZE_G(size)); + } + if (i & EDRAM1_ENABLE_F) { + size = t4_read_reg(adap, MA_EDRAM1_BAR_A); + add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size)); + } + if (is_t4(adap->params.chip)) { + size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); + if (i & EXT_MEM_ENABLE_F) + add_debugfs_mem(adap, "mc", MEM_MC, + EXT_MEM_SIZE_G(size)); + } else { + if (i & EXT_MEM0_ENABLE_F) { + size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); + add_debugfs_mem(adap, "mc0", MEM_MC0, + EXT_MEM0_SIZE_G(size)); + } + if (i & EXT_MEM1_ENABLE_F) { + size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + add_debugfs_mem(adap, "mc1", MEM_MC1, + EXT_MEM1_SIZE_G(size)); + } + } + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h new file mode 100644 index 000000000000..a3d8867efd3d --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h @@ -0,0 +1,52 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_DEBUGFS_H +#define __CXGB4_DEBUGFS_H + +#include <linux/export.h> + +struct t4_debugfs_entry { + const char *name; + const struct file_operations *ops; + mode_t mode; + unsigned char data; +}; + +int t4_setup_debugfs(struct adapter *adap); +void add_debugfs_files(struct adapter *adap, + struct t4_debugfs_entry *files, + unsigned int nfiles); + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 3f60070f2519..4c26be97fc9a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -61,6 +61,7 @@ #include <net/neighbour.h> #include <net/netevent.h> #include <net/addrconf.h> +#include <net/bonding.h> #include <asm/uaccess.h> #include "cxgb4.h" @@ -68,10 +69,9 @@ #include "t4_msg.h" #include "t4fw_api.h" #include "cxgb4_dcb.h" +#include "cxgb4_debugfs.h" #include "l2t.h" -#include <../drivers/net/bonding/bonding.h> - #ifdef DRV_VERSION #undef DRV_VERSION #endif @@ -141,7 +141,7 @@ static unsigned int pfvfres_pmask(struct adapter *adapter, * Give PF's access to all of the ports. */ if (vf == 0) - return FW_PFVF_CMD_PMASK_MASK; + return FW_PFVF_CMD_PMASK_M; /* * For VFs, we'll assign them access to the ports based purely on the @@ -210,114 +210,25 @@ struct filter_entry { NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) -#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) } - -static const struct pci_device_id cxgb4_pci_tbl[] = { - CH_DEVICE(0xa000, 0), /* PE10K */ - CH_DEVICE(0x4001, -1), - CH_DEVICE(0x4002, -1), - CH_DEVICE(0x4003, -1), - CH_DEVICE(0x4004, -1), - CH_DEVICE(0x4005, -1), - CH_DEVICE(0x4006, -1), - CH_DEVICE(0x4007, -1), - CH_DEVICE(0x4008, -1), - CH_DEVICE(0x4009, -1), - CH_DEVICE(0x400a, -1), - CH_DEVICE(0x400d, -1), - CH_DEVICE(0x400e, -1), - CH_DEVICE(0x4080, -1), - CH_DEVICE(0x4081, -1), - CH_DEVICE(0x4082, -1), - CH_DEVICE(0x4083, -1), - CH_DEVICE(0x4084, -1), - CH_DEVICE(0x4085, -1), - CH_DEVICE(0x4086, -1), - CH_DEVICE(0x4087, -1), - CH_DEVICE(0x4088, -1), - CH_DEVICE(0x4401, 4), - CH_DEVICE(0x4402, 4), - CH_DEVICE(0x4403, 4), - CH_DEVICE(0x4404, 4), - CH_DEVICE(0x4405, 4), - CH_DEVICE(0x4406, 4), - CH_DEVICE(0x4407, 4), - CH_DEVICE(0x4408, 4), - CH_DEVICE(0x4409, 4), - CH_DEVICE(0x440a, 4), - CH_DEVICE(0x440d, 4), - CH_DEVICE(0x440e, 4), - CH_DEVICE(0x4480, 4), - CH_DEVICE(0x4481, 4), - CH_DEVICE(0x4482, 4), - CH_DEVICE(0x4483, 4), - CH_DEVICE(0x4484, 4), - CH_DEVICE(0x4485, 4), - CH_DEVICE(0x4486, 4), - CH_DEVICE(0x4487, 4), - CH_DEVICE(0x4488, 4), - CH_DEVICE(0x5001, 4), - CH_DEVICE(0x5002, 4), - CH_DEVICE(0x5003, 4), - CH_DEVICE(0x5004, 4), - CH_DEVICE(0x5005, 4), - CH_DEVICE(0x5006, 4), - CH_DEVICE(0x5007, 4), - CH_DEVICE(0x5008, 4), - CH_DEVICE(0x5009, 4), - CH_DEVICE(0x500A, 4), - CH_DEVICE(0x500B, 4), - CH_DEVICE(0x500C, 4), - CH_DEVICE(0x500D, 4), - CH_DEVICE(0x500E, 4), - CH_DEVICE(0x500F, 4), - CH_DEVICE(0x5010, 4), - CH_DEVICE(0x5011, 4), - CH_DEVICE(0x5012, 4), - CH_DEVICE(0x5013, 4), - CH_DEVICE(0x5014, 4), - CH_DEVICE(0x5015, 4), - CH_DEVICE(0x5080, 4), - CH_DEVICE(0x5081, 4), - CH_DEVICE(0x5082, 4), - CH_DEVICE(0x5083, 4), - CH_DEVICE(0x5084, 4), - CH_DEVICE(0x5085, 4), - CH_DEVICE(0x5086, 4), - CH_DEVICE(0x5087, 4), - CH_DEVICE(0x5088, 4), - CH_DEVICE(0x5401, 4), - CH_DEVICE(0x5402, 4), - CH_DEVICE(0x5403, 4), - CH_DEVICE(0x5404, 4), - CH_DEVICE(0x5405, 4), - CH_DEVICE(0x5406, 4), - CH_DEVICE(0x5407, 4), - CH_DEVICE(0x5408, 4), - 
CH_DEVICE(0x5409, 4), - CH_DEVICE(0x540A, 4), - CH_DEVICE(0x540B, 4), - CH_DEVICE(0x540C, 4), - CH_DEVICE(0x540D, 4), - CH_DEVICE(0x540E, 4), - CH_DEVICE(0x540F, 4), - CH_DEVICE(0x5410, 4), - CH_DEVICE(0x5411, 4), - CH_DEVICE(0x5412, 4), - CH_DEVICE(0x5413, 4), - CH_DEVICE(0x5414, 4), - CH_DEVICE(0x5415, 4), - CH_DEVICE(0x5480, 4), - CH_DEVICE(0x5481, 4), - CH_DEVICE(0x5482, 4), - CH_DEVICE(0x5483, 4), - CH_DEVICE(0x5484, 4), - CH_DEVICE(0x5485, 4), - CH_DEVICE(0x5486, 4), - CH_DEVICE(0x5487, 4), - CH_DEVICE(0x5488, 4), - { 0, } -}; +/* Macros needed to support the PCI Device ID Table ... + */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static struct pci_device_id cxgb4_pci_tbl[] = { +#define CH_PCI_DEVICE_ID_FUNCTION 0x4 + +/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is + * called for both. + */ +#define CH_PCI_DEVICE_ID_FUNCTION2 0x0 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + {PCI_VDEVICE(CHELSIO, (devid)), 4} + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \ + { 0, } \ + } + +#include "t4_pci_id_tbl.h" #define FW4_FNAME "cxgb4/t4fw.bin" #define FW5_FNAME "cxgb4/t5fw.bin" @@ -512,9 +423,10 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable) u32 name, value; int err; - name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) | - FW_PARAMS_PARAM_YZ(txq->q.cntxt_id)); + name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V( + FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) | + FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id)); value = enable ? i : 0xffffffff; /* Since we can be called while atomic (from "interrupt @@ -694,7 +606,11 @@ int cxgb4_dcb_enabled(const struct net_device *dev) #ifdef CONFIG_CHELSIO_T4_DCB struct port_info *pi = netdev_priv(dev); - return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED; + if (!pi->dcb.enabled) + return 0; + + return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) || + (pi->dcb.state == CXGB4_DCB_STATE_HOST)); #else return 0; #endif @@ -705,7 +621,7 @@ EXPORT_SYMBOL(cxgb4_dcb_enabled); /* Handle a Data Center Bridging update message from the firmware. */ static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd) { - int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid)); + int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid)); struct net_device *dev = adap->port[port]; int old_dcb_enabled = cxgb4_dcb_enabled(dev); int new_dcb_enabled; @@ -828,17 +744,17 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, #ifdef CONFIG_CHELSIO_T4_DCB const struct fw_port_cmd *pcmd = (const void *)p->data; - unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid)); + unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid)); unsigned int action = - FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16)); + FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16)); if (cmd == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) { - int port = FW_PORT_CMD_PORTID_GET( + int port = FW_PORT_CMD_PORTID_G( be32_to_cpu(pcmd->op_to_portid)); struct net_device *dev = q->adap->port[port]; int state_input = ((pcmd->u.info.dcbxdis_pkd & - FW_PORT_CMD_DCBXDIS) + FW_PORT_CMD_DCBXDIS_F) ? CXGB4_DCB_INPUT_FW_DISABLED : CXGB4_DCB_INPUT_FW_ENABLED); @@ -1283,7 +1199,7 @@ void *t4_alloc_mem(size_t size) /* * Free memory allocated through alloc_mem(). 
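The free routine below (made non-static so the new debugfs code can call it) checks is_vmalloc_addr() because its counterpart t4_alloc_mem() may satisfy large requests from vmalloc space. The allocator's body is not part of this hunk; a sketch of the usual pairing, stated as an assumption about its implementation:

	void *t4_alloc_mem(size_t size)
	{
		/* try the slab allocator first, quietly */
		void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

		if (!p)
			p = vzalloc(size);	/* fall back for large buffers */
		return p;
	}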
*/ -static void t4_free_mem(void *addr) +void t4_free_mem(void *addr) { if (is_vmalloc_addr(addr)) vfree(addr); @@ -1335,52 +1251,52 @@ static int set_filter_wr(struct adapter *adapter, int fidx) * filter specification structure but for now it's easiest to simply * put this fairly direct code in line ... */ - fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR)); - fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16)); + fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR)); + fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16)); fwr->tid_to_iq = - htonl(V_FW_FILTER_WR_TID(ftid) | - V_FW_FILTER_WR_RQTYPE(f->fs.type) | - V_FW_FILTER_WR_NOREPLY(0) | - V_FW_FILTER_WR_IQ(f->fs.iq)); + htonl(FW_FILTER_WR_TID_V(ftid) | + FW_FILTER_WR_RQTYPE_V(f->fs.type) | + FW_FILTER_WR_NOREPLY_V(0) | + FW_FILTER_WR_IQ_V(f->fs.iq)); fwr->del_filter_to_l2tix = - htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | - V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | - V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | - V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | - V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | - V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | - V_FW_FILTER_WR_DMAC(f->fs.newdmac) | - V_FW_FILTER_WR_SMAC(f->fs.newsmac) | - V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || + htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) | + FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) | + FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) | + FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) | + FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) | + FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) | + FW_FILTER_WR_DMAC_V(f->fs.newdmac) | + FW_FILTER_WR_SMAC_V(f->fs.newsmac) | + FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) | - V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || + FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE || f->fs.newvlan == VLAN_REWRITE) | - V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | - V_FW_FILTER_WR_TXCHAN(f->fs.eport) | - V_FW_FILTER_WR_PRIO(f->fs.prio) | - V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0)); + FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) | + FW_FILTER_WR_TXCHAN_V(f->fs.eport) | + FW_FILTER_WR_PRIO_V(f->fs.prio) | + FW_FILTER_WR_L2TIX_V(f->l2t ? 
f->l2t->idx : 0)); fwr->ethtype = htons(f->fs.val.ethtype); fwr->ethtypem = htons(f->fs.mask.ethtype); fwr->frag_to_ovlan_vldm = - (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | - V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | - V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) | - V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) | - V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) | - V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld)); + (FW_FILTER_WR_FRAG_V(f->fs.val.frag) | + FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) | + FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) | + FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) | + FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) | + FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld)); fwr->smac_sel = 0; fwr->rx_chan_rx_rpl_iq = - htons(V_FW_FILTER_WR_RX_CHAN(0) | - V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id)); + htons(FW_FILTER_WR_RX_CHAN_V(0) | + FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id)); fwr->maci_to_matchtypem = - htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | - V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | - V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | - V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | - V_FW_FILTER_WR_PORT(f->fs.val.iport) | - V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | - V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | - V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); + htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) | + FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) | + FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) | + FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) | + FW_FILTER_WR_PORT_V(f->fs.val.iport) | + FW_FILTER_WR_PORTM_V(f->fs.mask.iport) | + FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) | + FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype)); fwr->ptcl = f->fs.val.proto; fwr->ptclm = f->fs.mask.proto; fwr->ttyp = f->fs.val.tos; @@ -1611,14 +1527,14 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) if (adapter->params.fw_vers) snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u.%u, TP %u.%u.%u.%u", - FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers), - FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers), - FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers), - FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers)); + FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); } static void get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -2713,9 +2629,10 @@ static int set_rspq_intr_params(struct sge_rspq *q, new_idx = closest_thres(&adap->sge, cnt); if (q->desc && q->pktcnt_idx != new_idx) { /* the queue has already been created, update it */ - v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | - FW_PARAMS_PARAM_YZ(q->cntxt_id); + v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V( + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | + FW_PARAMS_PARAM_YZ_V(q->cntxt_id); err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v, &new_idx); if (err) @@ -2929,7 +2846,7 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) int 
ret; const struct firmware *fw; struct adapter *adap = netdev2adap(netdev); - unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1; + unsigned int mbox = PCIE_FW_MASTER_M + 1; ef->data[sizeof(ef->data) - 1] = '\0'; ret = request_firmware(&fw, ef->data, adap->pdev_dev); @@ -3006,21 +2923,35 @@ static u32 get_rss_table_size(struct net_device *dev) return pi->rss_size; } -static int get_rss_table(struct net_device *dev, u32 *p, u8 *key) +static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc) { const struct port_info *pi = netdev_priv(dev); unsigned int n = pi->rss_size; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!p) + return 0; while (n--) p[n] = pi->rss[n]; return 0; } -static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key) +static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key, + const u8 hfunc) { unsigned int i; struct port_info *pi = netdev_priv(dev); + /* We require at least one supported parameter to be changed and no + * change in any of the unsupported parameters + */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!p) + return 0; + for (i = 0; i < pi->rss_size; i++) pi->rss[i] = p[i]; if (pi->adapter->flags & FULL_INIT_DONE) @@ -3040,45 +2971,45 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, info->data = 0; switch (info->flow_type) { case TCP_V4_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) + if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case UDP_V4_FLOW: - if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) && - (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) + if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case IPV4_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case TCP_V6_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) + if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case UDP_V6_FLOW: - if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) && - (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) + if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case IPV6_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; } @@ -3123,102 +3054,14 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .flash_device = set_flash, }; -/* - * debugfs support - */ -static ssize_t mem_read(struct file *file, char __user *buf, size_t count, - loff_t *ppos) -{ - loff_t pos = *ppos; - loff_t avail = file_inode(file)->i_size; - 
unsigned int mem = (uintptr_t)file->private_data & 3; - struct adapter *adap = file->private_data - mem; - __be32 *data; - int ret; - - if (pos < 0) - return -EINVAL; - if (pos >= avail) - return 0; - if (count > avail - pos) - count = avail - pos; - - data = t4_alloc_mem(count); - if (!data) - return -ENOMEM; - - spin_lock(&adap->win0_lock); - ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ); - spin_unlock(&adap->win0_lock); - if (ret) { - t4_free_mem(data); - return ret; - } - ret = copy_to_user(buf, data, count); - - t4_free_mem(data); - if (ret) - return -EFAULT; - - *ppos = pos + count; - return count; -} - -static const struct file_operations mem_debugfs_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = mem_read, - .llseek = default_llseek, -}; - -static void add_debugfs_mem(struct adapter *adap, const char *name, - unsigned int idx, unsigned int size_mb) -{ - struct dentry *de; - - de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, - (void *)adap + idx, &mem_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = size_mb << 20; -} - static int setup_debugfs(struct adapter *adap) { - int i; - u32 size; - if (IS_ERR_OR_NULL(adap->debugfs_root)) return -1; - i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); - if (i & EDRAM0_ENABLE) { - size = t4_read_reg(adap, MA_EDRAM0_BAR); - add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size)); - } - if (i & EDRAM1_ENABLE) { - size = t4_read_reg(adap, MA_EDRAM1_BAR); - add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); - } - if (is_t4(adap->params.chip)) { - size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); - if (i & EXT_MEM_ENABLE) - add_debugfs_mem(adap, "mc", MEM_MC, - EXT_MEM_SIZE_GET(size)); - } else { - if (i & EXT_MEM_ENABLE) { - size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); - add_debugfs_mem(adap, "mc0", MEM_MC0, - EXT_MEM_SIZE_GET(size)); - } - if (i & EXT_MEM1_ENABLE) { - size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR); - add_debugfs_mem(adap, "mc1", MEM_MC1, - EXT_MEM_SIZE_GET(size)); - } - } - if (adap->l2t) - debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, - &t4_l2t_fops); +#ifdef CONFIG_DEBUG_FS + t4_setup_debugfs(adap); +#endif return 0; } @@ -3500,9 +3343,9 @@ int cxgb4_clip_get(const struct net_device *dev, adap = netdev2adap(dev); memset(&c, 0, sizeof(c)); - c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); - c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c)); + c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c)); c.ip_hi = *(__be64 *)(lip->s6_addr); c.ip_lo = *(__be64 *)(lip->s6_addr + 8); return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); @@ -3517,9 +3360,9 @@ int cxgb4_clip_release(const struct net_device *dev, adap = netdev2adap(dev); memset(&c, 0, sizeof(c)); - c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); - c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c)); + c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); + c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c)); c.ip_hi = *(__be64 *)(lip->s6_addr); c.ip_lo = *(__be64 *)(lip->s6_addr + 8); return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); @@ -3560,7 +3403,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid, req->local_ip = sip; req->peer_ip = htonl(0); chan = rxq_to_chan(&adap->sge, queue); - req->opt0 = 
cpu_to_be64(TX_CHAN(chan)); + req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); req->opt1 = cpu_to_be64(CONN_POLICY_ASK | SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); ret = t4_mgmt_tx(adap, skb); @@ -3603,7 +3446,7 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, req->peer_ip_hi = cpu_to_be64(0); req->peer_ip_lo = cpu_to_be64(0); chan = rxq_to_chan(&adap->sge, queue); - req->opt0 = cpu_to_be64(TX_CHAN(chan)); + req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); req->opt1 = cpu_to_be64(CONN_POLICY_ASK | SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); ret = t4_mgmt_tx(adap, skb); @@ -3885,7 +3728,7 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) { struct adapter *adap; u32 offset, memtype, memaddr; - u32 edc0_size, edc1_size, mc0_size, mc1_size; + u32 edc0_size, edc1_size, mc0_size, mc1_size, size; u32 edc0_end, edc1_end, mc0_end, mc1_end; int ret; @@ -3899,9 +3742,12 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) * and EDC1. Some cards will have neither MC0 nor MC1, most cards have * MC0, and some have both MC0 and MC1. */ - edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20; - edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20; - mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20; + size = t4_read_reg(adap, MA_EDRAM0_BAR_A); + edc0_size = EDRAM0_SIZE_G(size) << 20; + size = t4_read_reg(adap, MA_EDRAM1_BAR_A); + edc1_size = EDRAM1_SIZE_G(size) << 20; + size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); + mc0_size = EXT_MEM0_SIZE_G(size) << 20; edc0_end = edc0_size; edc1_end = edc0_end + edc1_size; @@ -3921,9 +3767,8 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) /* T4 only has a single memory channel */ goto err; } else { - mc1_size = EXT_MEM_SIZE_GET( - t4_read_reg(adap, - MA_EXT_MEMORY1_BAR)) << 20; + size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + mc1_size = EXT_MEM1_SIZE_G(size) << 20; mc1_end = mc0_end + mc1_size; if (offset < mc1_end) { memtype = MEM_MC1; @@ -3960,6 +3805,22 @@ u64 cxgb4_read_sge_timestamp(struct net_device *dev) } EXPORT_SYMBOL(cxgb4_read_sge_timestamp); +int cxgb4_bar2_sge_qregs(struct net_device *dev, + unsigned int qid, + enum cxgb4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid) +{ + return t4_bar2_sge_qregs(netdev2adap(dev), + qid, + (qtype == CXGB4_BAR2_QTYPE_EGRESS + ? 
T4_BAR2_QTYPE_EGRESS + : T4_BAR2_QTYPE_INGRESS), + pbar2_qoffset, + pbar2_qid); +} +EXPORT_SYMBOL(cxgb4_bar2_sge_qregs); + static struct pci_driver cxgb4_driver; static void check_neigh_update(struct neighbour *neigh) @@ -4142,31 +4003,18 @@ static void process_db_drop(struct work_struct *work) u32 dropped_db = t4_read_reg(adap, 0x010ac); u16 qid = (dropped_db >> 15) & 0x1ffff; u16 pidx_inc = dropped_db & 0x1fff; - unsigned int s_qpp; - unsigned short udb_density; - unsigned long qpshift; - int page; - u32 udb; + u64 bar2_qoffset; + unsigned int bar2_qid; + int ret; - dev_warn(adap->pdev_dev, - "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n", - dropped_db, qid, - (dropped_db >> 14) & 1, - (dropped_db >> 13) & 1, - pidx_inc); - - drain_db_fifo(adap, 1); - - s_qpp = QUEUESPERPAGEPF1 * adap->fn; - udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap, - SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); - qpshift = PAGE_SHIFT - ilog2(udb_density); - udb = qid << qpshift; - udb &= PAGE_MASK; - page = udb / PAGE_SIZE; - udb += (qid - (page * udb_density)) * 128; - - writel(PIDX(pidx_inc), adap->bar2 + udb + 8); + ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, + &bar2_qoffset, &bar2_qid); + if (ret) + dev_err(adap->pdev_dev, "doorbell drop recovery: " + "qid=%d, pidx_inc=%d\n", qid, pidx_inc); + else + writel(PIDX_T5(pidx_inc) | QID(bar2_qid), + adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); /* Re-enable BAR2 WC */ t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); @@ -4224,12 +4072,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.adapter_type = adap->params.chip; lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); lli.cclk_ps = 1000000000 / adap->params.vpd.cclk; - lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( - t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> - (adap->fn * 4)); - lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( - t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> - (adap->fn * 4)); + lli.udb_density = 1 << adap->params.sge.eq_qpp; + lli.ucq_density = 1 << adap->params.sge.iq_qpp; lli.filt_mode = adap->params.tp.vlan_pri_map; /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ for (i = 0; i < NCHAN; i++) @@ -4391,8 +4235,7 @@ static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa, if (cxgb4_netdev(event_dev)) { switch (event) { case NETDEV_UP: - ret = cxgb4_clip_get(event_dev, - (const struct in6_addr *)ifa->addr.s6_addr); + ret = cxgb4_clip_get(event_dev, &ifa->addr); if (ret < 0) { rcu_read_unlock(); return ret; @@ -4400,8 +4243,7 @@ static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa, ret = NOTIFY_OK; break; case NETDEV_DOWN: - cxgb4_clip_release(event_dev, - (const struct in6_addr *)ifa->addr.s6_addr); + cxgb4_clip_release(event_dev, &ifa->addr); ret = NOTIFY_OK; break; default: @@ -4470,8 +4312,7 @@ static int update_dev_clip(struct net_device *root_dev, struct net_device *dev) read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { - ret = cxgb4_clip_get(dev, - (const struct in6_addr *)ifa->addr.s6_addr); + ret = cxgb4_clip_get(dev, &ifa->addr); if (ret < 0) break; } @@ -4952,14 +4793,14 @@ static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg) */ memset(&ldst_cmd, 0, sizeof(ldst_cmd)); ldst_cmd.op_to_addrspace = - htonl(FW_CMD_OP(FW_LDST_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ | - FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE)); + htonl(FW_CMD_OP_V(FW_LDST_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE)); 
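[Most of the churn in these cxgb4 hunks is a mechanical rename of the firmware bitfield accessors to suffixed forms. A minimal sketch of the convention being migrated to, using a hypothetical multi-bit field FIELD and single-bit flag FLAG rather than any real definition from the firmware headers:

/* Sketch only: the suffix convention this patch adopts.
 * _S = shift, _M = unshifted mask, _V = insert a value,
 * _G = extract a value, _F = pre-shifted single-bit flag.
 */
#define FIELD_S		16
#define FIELD_M		0xffU
#define FIELD_V(x)	((x) << FIELD_S)
#define FIELD_G(x)	(((x) >> FIELD_S) & FIELD_M)

#define FLAG_S		23
#define FLAG_V(x)	((x) << FLAG_S)
#define FLAG_F		FLAG_V(1U)

Hence, as seen throughout the diff, FW_CMD_OP(v) becomes FW_CMD_OP_V(v), FW_CMD_OP_GET(w) becomes FW_CMD_OP_G(w), FW_PFVF_CMD_CMASK_MASK becomes FW_PFVF_CMD_CMASK_M, and bare request/flag bits such as FW_CMD_REQUEST gain an _F suffix.]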
ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd)); - ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1); + ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1); ldst_cmd.u.pcie.ctrl_to_fn = - (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn)); + (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn)); ldst_cmd.u.pcie.r = reg; ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); @@ -5046,8 +4887,8 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) /* get device capabilities */ memset(c, 0, sizeof(*c)); - c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); + c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); c->cfvalid_to_len16 = htonl(FW_LEN16(*c)); ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c); if (ret < 0) @@ -5063,16 +4904,16 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) dev_err(adap->pdev_dev, "virtualization ACLs not supported"); return ret; } - c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); + c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL); if (ret < 0) return ret; ret = t4_config_glbl_rss(adap, adap->fn, FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, - FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | - FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F | + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F); if (ret < 0) return ret; @@ -5233,8 +5074,8 @@ static int adap_init0_config(struct adapter *adapter, int reset) if (cf->size >= FLASH_CFG_MAX_SIZE) ret = -ENOMEM; else { - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); ret = t4_query_params(adapter, adapter->mbox, adapter->fn, 0, 1, params, val); if (ret == 0) { @@ -5252,8 +5093,8 @@ static int adap_init0_config(struct adapter *adapter, int reset) size_t size = cf->size & ~0x3; __be32 *data = (__be32 *)cf->data; - mtype = FW_PARAMS_PARAM_Y_GET(val[0]); - maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16; + mtype = FW_PARAMS_PARAM_Y_G(val[0]); + maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16; spin_lock(&adapter->win0_lock); ret = t4_memory_rw(adapter, 0, mtype, maddr, @@ -5290,13 +5131,13 @@ static int adap_init0_config(struct adapter *adapter, int reset) */ memset(&caps_cmd, 0, sizeof(caps_cmd)); caps_cmd.op_to_write = - htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); caps_cmd.cfvalid_to_len16 = - htonl(FW_CAPS_CONFIG_CMD_CFVALID | - FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | - FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | + htonl(FW_CAPS_CONFIG_CMD_CFVALID_F | + FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) | + FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) | FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); @@ -5310,9 +5151,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) if (ret == -ENOENT) { memset(&caps_cmd, 0, sizeof(caps_cmd)); caps_cmd.op_to_write = - htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); 
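[A condensed sketch of the device-parameter round trip performed in adap_init0_config() above, using only the calls and helpers visible in this hunk; error handling and the subsequent flash write are elided:

u32 param, val;
int ret;

/* Ask the firmware where the configuration file was written. */
param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF);
ret = t4_query_params(adapter, adapter->mbox, adapter->fn, 0, 1,
		      &param, &val);
if (ret == 0) {
	/* The reply word packs the file's location: memory type in
	 * the Y field, address (in 64KB units) in the Z field.
	 */
	unsigned int mtype = FW_PARAMS_PARAM_Y_G(val);
	u32 maddr = FW_PARAMS_PARAM_Z_G(val) << 16;

	/* ... then read/write the file via t4_memory_rw(..., mtype,
	 * maddr, ...) as done above under win0_lock.
	 */
}]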
@@ -5335,9 +5176,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) * And now tell the firmware to use the configuration we just loaded. */ caps_cmd.op_to_write = - htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE); + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F); caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), NULL); @@ -5408,8 +5249,8 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) * Get device capabilities and select which we'll be using. */ memset(&caps_cmd, 0, sizeof(caps_cmd)); - caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); + caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); @@ -5425,8 +5266,8 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) dev_err(adapter->pdev_dev, "virtualization ACLs not supported"); goto bye; } - caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); + caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), NULL); if (ret < 0) @@ -5448,10 +5289,10 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) adapter->flags |= RSS_TNLALLLOOKUP; ret = t4_config_glbl_rss(adapter, adapter->mbox, FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, - FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | - FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ | + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F | + FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F | ((adapter->flags & RSS_TNLALLLOOKUP) ? - FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0)); + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F : 0)); if (ret < 0) goto bye; @@ -5462,7 +5303,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) PFRES_NEQ, PFRES_NETHCTRL, PFRES_NIQFLINT, PFRES_NIQ, PFRES_TC, PFRES_NVI, - FW_PFVF_CMD_CMASK_MASK, + FW_PFVF_CMD_CMASK_M, pfvfres_pmask(adapter, adapter->fn, 0), PFRES_NEXACTF, PFRES_R_CAPS, PFRES_WX_CAPS); @@ -5507,7 +5348,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) VFRES_NEQ, VFRES_NETHCTRL, VFRES_NIQFLINT, VFRES_NIQ, VFRES_TC, VFRES_NVI, - FW_PFVF_CMD_CMASK_MASK, + FW_PFVF_CMD_CMASK_M, pfvfres_pmask( adapter, pf, vf), VFRES_NEXACTF, @@ -5771,8 +5612,8 @@ static int adap_init0(struct adapter *adap) * and portvec ... */ v = - FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC); + FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec); if (ret < 0) goto bye; @@ -5794,7 +5635,6 @@ static int adap_init0(struct adapter *adap) } else { dev_info(adap->pdev_dev, "Coming up as MASTER: "\ "Initializing adapter\n"); - /* * If the firmware doesn't support Configuration * Files warn user and exit, @@ -5809,8 +5649,9 @@ static int adap_init0(struct adapter *adap) * Find out whether we're dealing with a version of * the firmware which has configuration file support. 
*/ - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V( + FW_PARAMS_PARAM_DEV_CF)); ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, params, val); @@ -5870,14 +5711,14 @@ static int adap_init0(struct adapter *adap) * Grab some of our basic fundamental operating parameters. */ #define FW_PARAM_DEV(param) \ - (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param)) #define FW_PARAM_PFVF(param) \ - FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \ - FW_PARAMS_PARAM_Y(0) | \ - FW_PARAMS_PARAM_Z(0) + FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \ + FW_PARAMS_PARAM_Y_V(0) | \ + FW_PARAMS_PARAM_Z_V(0) params[0] = FW_PARAM_PFVF(EQ_START); params[1] = FW_PARAM_PFVF(L2T_START); @@ -5937,8 +5778,8 @@ static int adap_init0(struct adapter *adap) * to manage. */ memset(&caps_cmd, 0, sizeof(caps_cmd)); - caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); + caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); @@ -6084,6 +5925,7 @@ static int adap_init0(struct adapter *adap) t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, adap->params.b_wnd); } + t4_init_sge_params(adap); t4_init_tp_params(adap); adap->flags |= FW_OK; return 0; @@ -6610,6 +6452,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->stats_lock); spin_lock_init(&adapter->tid_release_lock); + spin_lock_init(&adapter->win0_lock); INIT_WORK(&adapter->tid_release_task, process_tid_release_list); INIT_WORK(&adapter->db_full_task, process_db_full); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 1366ba620c87..152b4c4c7809 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -52,10 +52,10 @@ enum { }; #define INIT_TP_WR(w, tid) do { \ - (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \ - FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \ - (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \ - FW_WR_FLOWID(tid)); \ + (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) | \ + FW_WR_IMMDLEN_V(sizeof(*w) - sizeof(w->wr))); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*w), 16)) | \ + FW_WR_FLOWID_V(tid)); \ (w)->wr.wr_lo = cpu_to_be64(0); \ } while (0) @@ -65,9 +65,10 @@ enum { } while (0) #define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \ - (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \ - (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \ - FW_WR_FLOWID(tid)); \ + (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | \ + FW_WR_ATOMIC_V(atomic)); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(wrlen, 16)) | \ + FW_WR_FLOWID_V(tid)); \ (w)->wr.wr_lo = cpu_to_be64(0); \ } while (0) @@ -304,4 +305,11 @@ void cxgb4_enable_db_coalescing(struct net_device *dev); int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte); u64 cxgb4_read_sge_timestamp(struct net_device *dev); +enum cxgb4_bar2_qtype { CXGB4_BAR2_QTYPE_EGRESS, CXGB4_BAR2_QTYPE_INGRESS }; 
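[A hedged usage sketch for the new cxgb4_bar2_sge_qregs() export: an upper-layer driver translating an egress queue ID into its BAR2 doorbell address. The qid and bar2_base variables (the caller's own mapping of BAR2) are assumed inputs, and the final doorbell write is illustrative only:

u64 bar2_qoffset;
unsigned int bar2_qid;
void __iomem *db;
int ret;

ret = cxgb4_bar2_sge_qregs(netdev, qid, CXGB4_BAR2_QTYPE_EGRESS,
			   &bar2_qoffset, &bar2_qid);
if (ret)
	return ret;	/* BAR2 SGE registers unavailable */

/* The offset is relative to the start of BAR2. A returned bar2_qid
 * of 0 means the queue supports Inferred Queue ID registers (e.g.
 * the Write Combining Doorbell Buffer); otherwise bar2_qid must be
 * OR'd into Kernel Doorbell writes, as the sge.c hunks below do.
 */
db = bar2_base + bar2_qoffset + SGE_UDB_KDOORBELL;
writel(PIDX(n) | QID(bar2_qid), db);]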
+int cxgb4_bar2_sge_qregs(struct net_device *dev, + unsigned int qid, + enum cxgb4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + #endif /* !__CXGB4_OFLD_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 96041397ee15..a047baa9fd04 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -435,9 +435,9 @@ u64 cxgb4_select_ntuple(struct net_device *dev, if (tp->vnic_shift >= 0) { u32 viid = cxgb4_port_viid(dev); - u32 vf = FW_VIID_VIN_GET(viid); - u32 pf = FW_VIID_PFN_GET(viid); - u32 vld = FW_VIID_VIVLD_GET(viid); + u32 vf = FW_VIID_VIN_G(viid); + u32 pf = FW_VIID_PFN_G(viid); + u32 vld = FW_VIID_VIVLD_G(viid); ntuple |= (u64)(V_FT_VNID_ID_VF(vf) | V_FT_VNID_ID_PF(pf) | diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 5e1b314e11af..f12debd98dac 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -527,14 +527,16 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) val |= DBPRIO(1); wmb(); - /* If we're on T4, use the old doorbell mechanism; otherwise - * use the new BAR2 mechanism. + /* If we don't have access to the new User Doorbell (T5+), use + * the old doorbell mechanism; otherwise use the new BAR2 + * mechanism. */ - if (is_t4(adap->params.chip)) { + if (unlikely(q->bar2_addr == NULL)) { t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), val | QID(q->cntxt_id)); } else { - writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL); + writel(val | QID(q->bar2_qid), + q->bar2_addr + SGE_UDB_KDOORBELL); /* This Write memory Barrier will force the write to * the User Doorbell area to be flushed. @@ -576,7 +578,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, __be64 *d = &q->desc[q->pidx]; struct rx_sw_desc *sd = &q->sdesc[q->pidx]; - gfp |= __GFP_NOWARN | __GFP_COLD; + gfp |= __GFP_NOWARN; if (s->fl_pg_order == 0) goto alloc_small_pages; @@ -585,7 +587,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, * Prefer large buffers */ while (n) { - pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order); + pg = __dev_alloc_pages(gfp, s->fl_pg_order); if (unlikely(!pg)) { q->large_alloc_failed++; break; /* fall back to single pages */ @@ -615,7 +617,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, alloc_small_pages: while (n--) { - pg = __skb_alloc_page(gfp, NULL); + pg = __dev_alloc_page(gfp); if (unlikely(!pg)) { q->alloc_failed++; break; @@ -816,7 +818,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, sgl->addr0 = cpu_to_be64(addr[1]); } - sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); + sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); if (likely(--nfrags == 0)) return; /* @@ -850,14 +852,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, *end = 0; } -/* This function copies a tx_desc struct to memory mapped BAR2 space(user space - * writes). For coalesced WR SGE, fetches data from the FIFO instead of from - * Host. +/* This function copies 64 byte coalesced work request to + * memory mapped BAR2 space. For coalesced WR SGE fetches + * data from the FIFO instead of from Host. 
*/ -static void cxgb_pio_copy(u64 __iomem *dst, struct tx_desc *desc) +static void cxgb_pio_copy(u64 __iomem *dst, u64 *src) { - int count = sizeof(*desc) / sizeof(u64); - u64 *src = (u64 *)desc; + int count = 8; while (count) { writeq(*src, dst); @@ -879,7 +880,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) { wmb(); /* write descriptors before telling HW */ - if (is_t4(adap->params.chip)) { + /* If we don't have access to the new User Doorbell (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(q->bar2_addr == NULL)) { u32 val = PIDX(n); unsigned long flags; @@ -905,21 +909,22 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) */ WARN_ON(val & DBPRIO(1)); - /* For T5 and later we use the Write-Combine mapped BAR2 User - * Doorbell mechanism. If we're only writing a single TX - * Descriptor and TX Write Combining hasn't been disabled, we - * can use the Write Combining Gather Buffer; otherwise we use - * the simple doorbell. + /* If we're only writing a single TX Descriptor and we can use + * Inferred QID registers, we can use the Write Combining + * Gather Buffer; otherwise we use the simple doorbell. */ - if (n == 1) { + if (n == 1 && q->bar2_qid == 0) { int index = (q->pidx ? (q->pidx - 1) : (q->size - 1)); + u64 *wr = (u64 *)&q->desc[index]; - cxgb_pio_copy(adap->bar2 + q->udb + SGE_UDB_WCDOORBELL, - q->desc + index); + cxgb_pio_copy((u64 __iomem *) + (q->bar2_addr + SGE_UDB_WCDOORBELL), + wr); } else { - writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL); + writel(val | QID(q->bar2_qid), + q->bar2_addr + SGE_UDB_KDOORBELL); } /* This Write Memory Barrier will force the write to the User @@ -1092,10 +1097,10 @@ out_free: dev_kfree_skb_any(skb); goto out_free; } - wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); if (unlikely(credits < ETHTXQ_STOP_THRES)) { eth_txq_stop(q); - wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } wr = (void *)&q->q.desc[q->q.pidx]; @@ -1112,8 +1117,8 @@ out_free: dev_kfree_skb_any(skb); int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; len += sizeof(*lso); - wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | - FW_WR_IMMDLEN(len)); + wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN_V(len)); lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | LSO_FIRST_SLICE | LSO_LAST_SLICE | LSO_IPV6(v6) | @@ -1135,8 +1140,8 @@ out_free: dev_kfree_skb_any(skb); q->tx_cso += ssi->gso_segs; } else { len += sizeof(*cpl); - wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | - FW_WR_IMMDLEN(len)); + wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN_V(len)); cpl = (void *)(wr + 1); if (skb->ip_summed == CHECKSUM_PARTIAL) { cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; @@ -1224,7 +1229,7 @@ static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) { reclaim_completed_tx_imm(&q->q); if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { - wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); + wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); q->q.stops++; q->full = 1; } @@ -1406,7 +1411,7 @@ static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) { struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data; - wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); + wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); q->q.stops++; q->full = 1; } @@ -1997,11 +2002,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) params = 
QINTR_TIMER_IDX(7); val = CIDXINC(work_done) | SEINTARM(params); - if (is_t4(q->adap->params.chip)) { + + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(q->bar2_addr == NULL)) { t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), val | INGRESSQID((u32)q->cntxt_id)); } else { - writel(val, q->adap->bar2 + q->udb + SGE_UDB_GTS); + writel(val | INGRESSQID(q->bar2_qid), + q->bar2_addr + SGE_UDB_GTS); wmb(); } return work_done; @@ -2047,11 +2057,16 @@ static unsigned int process_intrq(struct adapter *adap) } val = CIDXINC(credits) | SEINTARM(q->intr_params); - if (is_t4(adap->params.chip)) { + + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(q->bar2_addr == NULL)) { t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), val | INGRESSQID(q->cntxt_id)); } else { - writel(val, adap->bar2 + q->udb + SGE_UDB_GTS); + writel(val | INGRESSQID(q->bar2_qid), + q->bar2_addr + SGE_UDB_GTS); wmb(); } spin_unlock(&adap->sge.intrq_lock); @@ -2235,48 +2250,32 @@ static void sge_tx_timer_cb(unsigned long data) } /** - * udb_address - return the BAR2 User Doorbell address for a Queue - * @adap: the adapter - * @cntxt_id: the Queue Context ID - * @qpp: Queues Per Page (for all PFs) + * bar2_address - return the BAR2 address for an SGE Queue's Registers + * @adapter: the adapter + * @qid: the SGE Queue ID + * @qtype: the SGE Queue Type (Egress or Ingress) + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues * - * Returns the BAR2 address of the user Doorbell associated with the - * indicated Queue Context ID. Note that this is only applicable - * for T5 and later. - */ -static u64 udb_address(struct adapter *adap, unsigned int cntxt_id, - unsigned int qpp) -{ - u64 udb; - unsigned int s_qpp; - unsigned short udb_density; - unsigned long qpshift; - int page; - - BUG_ON(is_t4(adap->params.chip)); - - s_qpp = (QUEUESPERPAGEPF0 + - (QUEUESPERPAGEPF1 - QUEUESPERPAGEPF0) * adap->fn); - udb_density = 1 << ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); - qpshift = PAGE_SHIFT - ilog2(udb_density); - udb = (u64)cntxt_id << qpshift; - udb &= PAGE_MASK; - page = udb / PAGE_SIZE; - udb += (cntxt_id - (page * udb_density)) * SGE_UDB_SIZE; - - return udb; -} + * Returns the BAR2 address for the SGE Queue Registers associated with + * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also + * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE + * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" + * Registers are supported (e.g. the Write Combining Doorbell Buffer). 
+ */ +static void __iomem *bar2_address(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + unsigned int *pbar2_qid) +{ + u64 bar2_qoffset; + int ret; -static u64 udb_address_eq(struct adapter *adap, unsigned int cntxt_id) -{ - return udb_address(adap, cntxt_id, - t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF)); -} + ret = t4_bar2_sge_qregs(adapter, qid, qtype, + &bar2_qoffset, pbar2_qid); + if (ret) + return NULL; -static u64 udb_address_iq(struct adapter *adap, unsigned int cntxt_id) -{ - return udb_address(adap, cntxt_id, - t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF)); + return adapter->bar2 + bar2_qoffset; } int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, @@ -2297,20 +2296,20 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, return -ENOMEM; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) | + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F | FW_LEN16(c)); - c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | - FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) | - FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) | - FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx : + c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | + FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | + FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) | + FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? 
intr_idx : -intr_idx - 1)); - c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) | - FW_IQ_CMD_IQGTSMODE | - FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | - FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); + c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | + FW_IQ_CMD_IQGTSMODE_F | + FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | + FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); c.iqsize = htons(iq->size); c.iqaddr = cpu_to_be64(iq->phys_addr); @@ -2323,12 +2322,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, goto fl_nomem; flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); - c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN(1) | - FW_IQ_CMD_FL0FETCHRO(1) | - FW_IQ_CMD_FL0DATARO(1) | - FW_IQ_CMD_FL0PADEN(1)); - c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) | - FW_IQ_CMD_FL0FBMAX(3)); + c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F | + FW_IQ_CMD_FL0FETCHRO_F | + FW_IQ_CMD_FL0DATARO_F | + FW_IQ_CMD_FL0PADEN_F); + c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) | + FW_IQ_CMD_FL0FBMAX_V(3)); c.fl0size = htons(flsz); c.fl0addr = cpu_to_be64(fl->addr); } @@ -2344,8 +2343,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, iq->next_intr_params = iq->intr_params; iq->cntxt_id = ntohs(c.iqid); iq->abs_id = ntohs(c.physiqid); - if (!is_t4(adap->params.chip)) - iq->udb = udb_address_iq(adap, iq->cntxt_id); + iq->bar2_addr = bar2_address(adap, + iq->cntxt_id, + T4_BAR2_QTYPE_INGRESS, + &iq->bar2_qid); iq->size--; /* subtract status entry */ iq->netdev = dev; iq->handler = hnd; @@ -2362,11 +2363,13 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; - /* Note, we must initialize the Free List User Doorbell - * address before refilling the Free List! + /* Note, we must initialize the BAR2 Free List User Doorbell + * information before refilling the Free List! 
*/ - if (!is_t4(adap->params.chip)) - fl->udb = udb_address_eq(adap, fl->cntxt_id); + fl->bar2_addr = bar2_address(adap, + fl->cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &fl->bar2_qid); refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); } return 0; @@ -2392,9 +2395,10 @@ err: static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) { q->cntxt_id = id; - if (!is_t4(adap->params.chip)) - q->udb = udb_address_eq(adap, q->cntxt_id); - + q->bar2_addr = bar2_address(adap, + q->cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &q->bar2_qid); q->in_use = 0; q->cidx = q->pidx = 0; q->stops = q->restarts = 0; @@ -2423,21 +2427,22 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, return -ENOMEM; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | - FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); - c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE | - FW_EQ_ETH_CMD_VIID(pi->viid)); - c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | - FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | - FW_EQ_ETH_CMD_FETCHRO(1) | - FW_EQ_ETH_CMD_IQID(iqid)); - c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) | - FW_EQ_ETH_CMD_FBMAX(3) | - FW_EQ_ETH_CMD_CIDXFTHRESH(5) | - FW_EQ_ETH_CMD_EQSIZE(nentries)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_EQ_ETH_CMD_PFN_V(adap->fn) | + FW_EQ_ETH_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | + FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); + c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F | + FW_EQ_ETH_CMD_VIID_V(pi->viid)); + c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) | + FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | + FW_EQ_ETH_CMD_FETCHRO_V(1) | + FW_EQ_ETH_CMD_IQID_V(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) | + FW_EQ_ETH_CMD_FBMAX_V(3) | + FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) | + FW_EQ_ETH_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); @@ -2451,7 +2456,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, return ret; } - init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd))); + init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); txq->txq = netdevq; txq->tso = txq->tx_cso = txq->vlan_ins = 0; txq->mapping_err = 0; @@ -2476,22 +2481,22 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, if (!txq->q.desc) return -ENOMEM; - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_CTRL_CMD_PFN(adap->fn) | - FW_EQ_CTRL_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC | - FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); - c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_EQ_CTRL_CMD_PFN_V(adap->fn) | + FW_EQ_CTRL_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F | + FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); + c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid)); c.physeqid_pkd = htonl(0); - c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) | - FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | - FW_EQ_CTRL_CMD_FETCHRO | - FW_EQ_CTRL_CMD_IQID(iqid)); - c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) | - FW_EQ_CTRL_CMD_FBMAX(3) | - FW_EQ_CTRL_CMD_CIDXFTHRESH(5) | - 
FW_EQ_CTRL_CMD_EQSIZE(nentries)); + c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) | + FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | + FW_EQ_CTRL_CMD_FETCHRO_F | + FW_EQ_CTRL_CMD_IQID_V(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) | + FW_EQ_CTRL_CMD_FBMAX_V(3) | + FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) | + FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); @@ -2503,7 +2508,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, return ret; } - init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid))); + init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); txq->adap = adap; skb_queue_head_init(&txq->sendq); tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); @@ -2530,20 +2535,20 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, return -ENOMEM; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_OFLD_CMD_PFN(adap->fn) | - FW_EQ_OFLD_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC | - FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); - c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) | - FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) | - FW_EQ_OFLD_CMD_FETCHRO(1) | - FW_EQ_OFLD_CMD_IQID(iqid)); - c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) | - FW_EQ_OFLD_CMD_FBMAX(3) | - FW_EQ_OFLD_CMD_CIDXFTHRESH(5) | - FW_EQ_OFLD_CMD_EQSIZE(nentries)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_EQ_OFLD_CMD_PFN_V(adap->fn) | + FW_EQ_OFLD_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | + FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); + c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) | + FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | + FW_EQ_OFLD_CMD_FETCHRO_F | + FW_EQ_OFLD_CMD_IQID_V(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) | + FW_EQ_OFLD_CMD_FBMAX_V(3) | + FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) | + FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); @@ -2557,7 +2562,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, return ret; } - init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd))); + init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); txq->adap = adap; skb_queue_head_init(&txq->sendq); tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); @@ -2914,7 +2919,8 @@ static int t4_sge_init_hard(struct adapter *adap) int t4_sge_init(struct adapter *adap) { struct sge *s = &adap->sge; - u32 sge_control, sge_conm_ctrl; + u32 sge_control, sge_control2, sge_conm_ctrl; + unsigned int ingpadboundary, ingpackboundary; int ret, egress_threshold; /* @@ -2924,8 +2930,31 @@ int t4_sge_init(struct adapter *adap) sge_control = t4_read_reg(adap, SGE_CONTROL); s->pktshift = PKTSHIFT_GET(sge_control); s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64; - s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) + - X_INGPADBOUNDARY_SHIFT); + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. 
+ */ + ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) + + X_INGPADBOUNDARY_SHIFT); + if (is_t4(adap->params.chip)) { + s->fl_align = ingpadboundary; + } else { + /* T5 has a different interpretation of one of the PCIe Packing + * Boundary values. + */ + sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A); + ingpackboundary = INGPACKBOUNDARY_G(sge_control2); + if (ingpackboundary == INGPACKBOUNDARY_16B_X) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + INGPACKBOUNDARY_SHIFT_X); + + s->fl_align = max(ingpadboundary, ingpackboundary); + } if (adap->flags & USING_SOFT_PARAMS) ret = t4_sge_init_soft(adap); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index a9d9d74e4f09..a9323bdb3585 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -188,9 +188,9 @@ static void t4_report_fw_error(struct adapter *adap) u32 pcie_fw; pcie_fw = t4_read_reg(adap, MA_PCIE_FW); - if (pcie_fw & FW_PCIE_FW_ERR) + if (pcie_fw & PCIE_FW_ERR) dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", - reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]); + reason[PCIE_FW_EVAL_G(pcie_fw)]); } /* @@ -310,16 +310,17 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, } res = t4_read_reg64(adap, data_reg); - if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) { + if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) { fw_asrt(adap, data_reg); - res = FW_CMD_RETVAL(EIO); - } else if (rpl) + res = FW_CMD_RETVAL_V(EIO); + } else if (rpl) { get_mbox_rpl(adap, rpl, size / 8, data_reg); + } - if (FW_CMD_RETVAL_GET((int)res)) + if (FW_CMD_RETVAL_G((int)res)) dump_mbox(adap, mbox, data_reg); t4_write_reg(adap, ctl_reg, 0); - return -FW_CMD_RETVAL_GET((int)res); + return -FW_CMD_RETVAL_G((int)res); } } @@ -483,12 +484,12 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, * MEM_MC0 = 2 -- For T5 * MEM_MC1 = 3 -- For T5 */ - edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)); + edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A)); if (mtype != MEM_MC1) memoffset = (mtype * (edc_size * 1024 * 1024)); else { - mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, - MA_EXT_MEMORY_BAR)); + mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, + MA_EXT_MEMORY1_BAR_A)); memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; } @@ -710,8 +711,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) * Ask firmware for the Core Clock since it knows how to translate the * Reference Clock ('V2') VPD field into a Core Clock value ... 
*/ - cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); + cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK)); ret = t4_query_params(adapter, adapter->mbox, 0, 0, 1, &cclk_param, &cclk_val); @@ -992,10 +993,10 @@ static int should_install_fs_fw(struct adapter *adap, int card_fw_usable, install: dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, " "installing firmware %u.%u.%u.%u on card.\n", - FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), - FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason, - FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), - FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); + FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason, + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); return 1; } @@ -1067,12 +1068,12 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, "driver compiled with %d.%d.%d.%d, " "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", state, - FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d), - FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d), - FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), - FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), - FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), - FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); + FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d), + FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d), + FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); ret = EINVAL; goto bye; } @@ -1212,6 +1213,8 @@ out: if (ret) dev_err(adap->pdev_dev, "firmware download failed, error %d\n", ret); + else + ret = t4_get_fw_version(adap, &adap->params.fw_vers); return ret; } @@ -1236,7 +1239,7 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc) { struct fw_port_cmd c; - unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO); + unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO); lc->link_ok = 0; if (lc->requested_fc & PAUSE_RX) @@ -1245,9 +1248,9 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, fc |= FW_PORT_CAP_FC_TX; memset(&c, 0, sizeof(c)); - c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); - c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | + c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port)); + c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); if (!(lc->supported & FW_PORT_CAP_ANEG)) { @@ -1275,9 +1278,9 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) struct fw_port_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); - c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | + c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port)); + c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); return 
t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); @@ -1563,7 +1566,7 @@ static void cim_intr_handler(struct adapter *adapter) int fat; - if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR) + if (t4_read_reg(adapter, MA_PCIE_FW) & PCIE_FW_ERR) t4_report_fw_error(adapter); fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, @@ -2071,9 +2074,9 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, struct fw_rss_ind_tbl_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE | - FW_RSS_IND_TBL_CMD_VIID(viid)); + cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_RSS_IND_TBL_CMD_VIID_V(viid)); cmd.retval_len16 = htonl(FW_LEN16(cmd)); /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ @@ -2090,13 +2093,13 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, while (nq > 0) { unsigned int v; - v = FW_RSS_IND_TBL_CMD_IQ0(*rsp); + v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp); if (++rsp >= rsp_end) rsp = rspq; - v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp); + v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp); if (++rsp >= rsp_end) rsp = rspq; - v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp); + v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp); if (++rsp >= rsp_end) rsp = rspq; @@ -2126,14 +2129,14 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, struct fw_rss_glb_config_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); + c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); c.retval_len16 = htonl(FW_LEN16(c)); if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { - c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); + c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode)); } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { c.u.basicvirtual.mode_pkd = - htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); + htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode)); c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); } else return -EINVAL; @@ -2553,18 +2556,18 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid) { memset(wr, 0, sizeof(*wr)); - wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR)); - wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16)); - wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) | - V_FW_FILTER_WR_NOREPLY(qid < 0)); - wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER); + wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR)); + wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16)); + wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) | + FW_FILTER_WR_NOREPLY_V(qid < 0)); + wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F); if (qid >= 0) - wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid)); + wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid)); } #define INIT_CMD(var, cmd, rd_wr) do { \ - (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \ - FW_CMD_REQUEST | FW_CMD_##rd_wr); \ + (var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \ + FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \ (var).retval_len16 = htonl(FW_LEN16(var)); \ } while (0) @@ -2574,9 +2577,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | - 
FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE)); + c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE)); c.cycles_to_len16 = htonl(FW_LEN16(c)); c.u.addrval.addr = htonl(addr); c.u.addrval.val = htonl(val); @@ -2602,11 +2605,11 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | - FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); + c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | + FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO)); c.cycles_to_len16 = htonl(FW_LEN16(c)); - c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | - FW_LDST_CMD_MMD(mmd)); + c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) | + FW_LDST_CMD_MMD_V(mmd)); c.u.mdio.raddr = htons(reg); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); @@ -2632,11 +2635,11 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); + c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO)); c.cycles_to_len16 = htonl(FW_LEN16(c)); - c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | - FW_LDST_CMD_MMD(mmd)); + c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) | + FW_LDST_CMD_MMD_V(mmd)); c.u.mdio.raddr = htons(reg); c.u.mdio.rval = htons(val); @@ -2773,13 +2776,13 @@ retry: memset(&c, 0, sizeof(c)); INIT_CMD(c, HELLO, WRITE); c.err_to_clearinit = htonl( - FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | - FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | - FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : - FW_HELLO_CMD_MBMASTER_MASK) | - FW_HELLO_CMD_MBASYNCNOT(evt_mbox) | - FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) | - FW_HELLO_CMD_CLEARINIT); + FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) | + FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) | + FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox : + FW_HELLO_CMD_MBMASTER_M) | + FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) | + FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) | + FW_HELLO_CMD_CLEARINIT_F); /* * Issue the HELLO command to the firmware. If it's not successful @@ -2792,17 +2795,17 @@ retry: if (ret < 0) { if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) goto retry; - if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR) + if (t4_read_reg(adap, MA_PCIE_FW) & PCIE_FW_ERR) t4_report_fw_error(adap); return ret; } v = ntohl(c.err_to_clearinit); - master_mbox = FW_HELLO_CMD_MBMASTER_GET(v); + master_mbox = FW_HELLO_CMD_MBMASTER_G(v); if (state) { - if (v & FW_HELLO_CMD_ERR) + if (v & FW_HELLO_CMD_ERR_F) *state = DEV_STATE_ERR; - else if (v & FW_HELLO_CMD_INIT) + else if (v & FW_HELLO_CMD_INIT_F) *state = DEV_STATE_INIT; else *state = DEV_STATE_UNINIT; @@ -2817,9 +2820,9 @@ retry: * and we wouldn't want to fail pointlessly. (This can happen when an * OS loads lots of different drivers rapidly at the same time). In * this case, the Master PF returned by the firmware will be - * FW_PCIE_FW_MASTER_MASK so the test below will work ... + * PCIE_FW_MASTER_M so the test below will work ... 
*/ - if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 && + if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 && master_mbox != mbox) { int waiting = FW_CMD_HELLO_TIMEOUT; @@ -2843,7 +2846,7 @@ retry: * our retries ... */ pcie_fw = t4_read_reg(adap, MA_PCIE_FW); - if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) { + if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) { if (waiting <= 0) { if (retries-- > 0) goto retry; @@ -2858,9 +2861,9 @@ retry: * report errors preferentially. */ if (state) { - if (pcie_fw & FW_PCIE_FW_ERR) + if (pcie_fw & PCIE_FW_ERR) *state = DEV_STATE_ERR; - else if (pcie_fw & FW_PCIE_FW_INIT) + else if (pcie_fw & PCIE_FW_INIT) *state = DEV_STATE_INIT; } @@ -2869,9 +2872,9 @@ retry: * there's not a valid Master PF, grab its identity * for our caller. */ - if (master_mbox == FW_PCIE_FW_MASTER_MASK && - (pcie_fw & FW_PCIE_FW_MASTER_VLD)) - master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw); + if (master_mbox == PCIE_FW_MASTER_M && + (pcie_fw & PCIE_FW_MASTER_VLD)) + master_mbox = PCIE_FW_MASTER_G(pcie_fw); break; } } @@ -2939,7 +2942,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) * Issues a RESET command to firmware (if desired) with a HALT indication * and then puts the microprocessor into RESET state. The RESET command * will only be issued if a legitimate mailbox is provided (mbox <= - * FW_PCIE_FW_MASTER_MASK). + * PCIE_FW_MASTER_M). * * This is generally used in order for the host to safely manipulate the * adapter without fear of conflicting with whatever the firmware might @@ -2954,13 +2957,13 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) * If a legitimate mailbox is provided, issue a RESET command * with a HALT indication. */ - if (mbox <= FW_PCIE_FW_MASTER_MASK) { + if (mbox <= PCIE_FW_MASTER_M) { struct fw_reset_cmd c; memset(&c, 0, sizeof(c)); INIT_CMD(c, RESET, WRITE); c.val = htonl(PIORST | PIORSTMODE); - c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U)); + c.halt_pkd = htonl(FW_RESET_CMD_HALT_F); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -2979,8 +2982,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) */ if (ret == 0 || force) { t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST); - t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, - FW_PCIE_FW_HALT); + t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, + PCIE_FW_HALT_F); } /* @@ -3019,7 +3022,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) * doing it automatically, we need to clear the PCIE_FW.HALT * bit. */ - t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0); + t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 0); /* * If we've been given a valid mailbox, first try to get the @@ -3028,7 +3031,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) * valid mailbox or the RESET command failed, fall back to * hitting the chip with a hammer. 
*/ - if (mbox <= FW_PCIE_FW_MASTER_MASK) { + if (mbox <= PCIE_FW_MASTER_M) { t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); msleep(100); if (t4_fw_reset(adap, mbox, @@ -3043,7 +3046,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { - if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT)) + if (!(t4_read_reg(adap, PCIE_FW) & PCIE_FW_HALT_F)) return 0; msleep(100); ms += 100; @@ -3129,12 +3132,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, HOSTPAGESIZEPF6(sge_hps) | HOSTPAGESIZEPF7(sge_hps)); - t4_set_reg_field(adap, SGE_CONTROL, - INGPADBOUNDARY_MASK | - EGRSTATUSPAGESIZE_MASK, - INGPADBOUNDARY(fl_align_log - 5) | - EGRSTATUSPAGESIZE(stat_len != 64)); - + if (is_t4(adap->params.chip)) { + t4_set_reg_field(adap, SGE_CONTROL, + INGPADBOUNDARY_MASK | + EGRSTATUSPAGESIZE_MASK, + INGPADBOUNDARY(fl_align_log - 5) | + EGRSTATUSPAGESIZE(stat_len != 64)); + } else { + /* T5 introduced the separation of the Free List Padding and + * Packing Boundaries. Thus, we can select a smaller Padding + * Boundary to avoid uselessly chewing up PCIe Link and Memory + * Bandwidth, and use a Packing Boundary which is large enough + * to avoid false sharing between CPUs, etc. + * + * For the PCI Link, the smaller the Padding Boundary the + * better. For the Memory Controller, a smaller Padding + * Boundary is better until we cross under the Memory Line + * Size (the minimum unit of transfer to/from Memory). If we + * have a Padding Boundary which is smaller than the Memory + * Line Size, that'll involve a Read-Modify-Write cycle on the + * Memory Controller which is never good. For T5 the smallest + * Padding Boundary which we can select is 32 bytes which is + * larger than any known Memory Controller Line Size so we'll + * use that. + * + * T5 has a different interpretation of the "0" value for the + * Packing Boundary. This corresponds to 16 bytes instead of + * the expected 32 bytes. We never have a Packing Boundary + * less than 32 bytes so we can't use that special value but + * on the other hand, if we wanted 32 bytes, the best we can + * really do is 64 bytes. + */ + if (fl_align <= 32) { + fl_align = 64; + fl_align_log = 6; + } + t4_set_reg_field(adap, SGE_CONTROL, + INGPADBOUNDARY_MASK | + EGRSTATUSPAGESIZE_MASK, + INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) | + EGRSTATUSPAGESIZE(stat_len != 64)); + t4_set_reg_field(adap, SGE_CONTROL2_A, + INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), + INGPACKBOUNDARY_V(fl_align_log - + INGPACKBOUNDARY_SHIFT_X)); + } /* * Adjust various SGE Free List Host Buffer Sizes. 
* @@ -3211,9 +3253,9 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, return -EINVAL; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | - FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | - FW_PARAMS_CMD_VFN(vf)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F | + FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); c.retval_len16 = htonl(FW_LEN16(c)); for (i = 0; i < nparams; i++, p += 2) *p = htonl(*params++); @@ -3251,10 +3293,10 @@ int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox, return -EINVAL; memset(&c, 0, sizeof(c)); - c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE | - FW_PARAMS_CMD_PFN(pf) | - FW_PARAMS_CMD_VFN(vf)); + c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); c.retval_len16 = cpu_to_be32(FW_LEN16(c)); while (nparams--) { @@ -3289,9 +3331,9 @@ int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, return -EINVAL; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | - FW_PARAMS_CMD_VFN(vf)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); c.retval_len16 = htonl(FW_LEN16(c)); while (nparams--) { *p++ = htonl(*params++); @@ -3331,20 +3373,20 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_pfvf_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) | - FW_PFVF_CMD_VFN(vf)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) | + FW_PFVF_CMD_VFN_V(vf)); c.retval_len16 = htonl(FW_LEN16(c)); - c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | - FW_PFVF_CMD_NIQ(rxq)); - c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) | - FW_PFVF_CMD_PMASK(pmask) | - FW_PFVF_CMD_NEQ(txq)); - c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) | - FW_PFVF_CMD_NEXACTF(nexact)); - c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) | - FW_PFVF_CMD_WX_CAPS(wxcaps) | - FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); + c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) | + FW_PFVF_CMD_NIQ_V(rxq)); + c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) | + FW_PFVF_CMD_PMASK_V(pmask) | + FW_PFVF_CMD_NEQ_V(txq)); + c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) | + FW_PFVF_CMD_NEXACTF_V(nexact)); + c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) | + FW_PFVF_CMD_WX_CAPS_V(wxcaps) | + FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3373,11 +3415,11 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, struct fw_vi_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c)); - c.portid_pkd = FW_VI_CMD_PORTID(port); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c)); + c.portid_pkd = FW_VI_CMD_PORTID_V(port); c.nmac = nmac - 1; ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 
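Every rename in these hunks follows the same suffix convention; a self-contained sketch using hypothetical field names (FOO, three bits at 23:21; BAR, a single bit at 20) shows what each suffix denotes:

	/* _S is the field's bit shift, _M its right-justified (unshifted)
	 * mask, _V(x) shifts a value into place, _G(x) extracts the field
	 * from a register word, and _F is the set-to-one form that only
	 * single-bit fields get.
	 */
	#define FOO_S		21
	#define FOO_M		0x7U
	#define FOO_V(x)	((x) << FOO_S)
	#define FOO_G(x)	(((x) >> FOO_S) & FOO_M)

	#define BAR_S		20
	#define BAR_V(x)	((x) << BAR_S)
	#define BAR_F		BAR_V(1U)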
@@ -3398,8 +3440,8 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, } } if (rss_size) - *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd)); - return FW_VI_CMD_VIID_GET(ntohs(c.type_viid)); + *rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd)); + return FW_VI_CMD_VIID_G(ntohs(c.type_viid)); } /** @@ -3426,23 +3468,23 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, if (mtu < 0) mtu = FW_RXMODE_MTU_NO_CHG; if (promisc < 0) - promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; + promisc = FW_VI_RXMODE_CMD_PROMISCEN_M; if (all_multi < 0) - all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; + all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M; if (bcast < 0) - bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; + bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M; if (vlanex < 0) - vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; + vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid)); c.retval_len16 = htonl(FW_LEN16(c)); - c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | - FW_VI_RXMODE_CMD_PROMISCEN(promisc) | - FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | - FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | - FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); + c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) | + FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) | + FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) | + FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) | + FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex)); return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); } @@ -3483,15 +3525,15 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, return -EINVAL; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) | - FW_VI_MAC_CMD_VIID(viid)); - c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) | - FW_CMD_LEN16((naddr + 2) / 2)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) | + FW_VI_MAC_CMD_VIID_V(viid)); + c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) | + FW_CMD_LEN16_V((naddr + 2) / 2)); for (i = 0, p = c.u.exact; i < naddr; i++, p++) { - p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); + p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); } @@ -3500,7 +3542,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, return ret; for (i = 0, p = c.u.exact; i < naddr; i++, p++) { - u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); + u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx)); if (idx) idx[i] = index >= max_naddr ? 0xffff : index; @@ -3546,17 +3588,17 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, mode = add_smt ? 
FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid)); - c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1)); - p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_SMAC_RESULT(mode) | - FW_VI_MAC_CMD_IDX(idx)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid)); + c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1)); + p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_SMAC_RESULT_V(mode) | + FW_VI_MAC_CMD_IDX_V(idx)); memcpy(p->macaddr, addr, sizeof(p->macaddr)); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) { - ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); + ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx)); if (ret >= max_mac_addr) ret = -ENOMEM; } @@ -3580,11 +3622,11 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, struct fw_vi_mac_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid)); - c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN | - FW_VI_MAC_CMD_HASHUNIEN(ucast) | - FW_CMD_LEN16(1)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid)); + c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F | + FW_VI_MAC_CMD_HASHUNIEN_V(ucast) | + FW_CMD_LEN16_V(1)); c.u.hash.hashvec = cpu_to_be64(vec); return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); } @@ -3607,12 +3649,12 @@ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, struct fw_vi_enable_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid)); - c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) | - FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) | - FW_VI_ENABLE_CMD_DCB_INFO(dcb_en)); + c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) | + FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) | + FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en)); return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); } @@ -3647,9 +3689,9 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, struct fw_vi_enable_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); - c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid)); + c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c)); c.blinkdur = htons(nblinks); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3674,11 +3716,11 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_iq_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) | - FW_IQ_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c)); - c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) | + FW_IQ_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c)); + c.type_to_iqandstindex = 
htonl(FW_IQ_CMD_TYPE_V(iqtype)); c.iqid = htons(iqid); c.fl0id = htons(fl0id); c.fl1id = htons(fl1id); @@ -3701,11 +3743,11 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_eq_eth_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) | - FW_EQ_ETH_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); - c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) | + FW_EQ_ETH_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c)); + c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3725,11 +3767,11 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_eq_ctrl_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) | - FW_EQ_CTRL_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); - c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) | + FW_EQ_CTRL_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c)); + c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3749,11 +3791,11 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_eq_ofld_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) | - FW_EQ_OFLD_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); - c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) | + FW_EQ_OFLD_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c)); + c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3771,25 +3813,25 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) if (opcode == FW_PORT_CMD) { /* link/module state change message */ int speed = 0, fc = 0; const struct fw_port_cmd *p = (void *)rpl; - int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid)); + int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid)); int port = adap->chan_map[chan]; struct port_info *pi = adap2pinfo(adap, port); struct link_config *lc = &pi->link_cfg; u32 stat = ntohl(p->u.info.lstatus_to_modtype); - int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0; - u32 mod = FW_PORT_CMD_MODTYPE_GET(stat); + int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; + u32 mod = FW_PORT_CMD_MODTYPE_G(stat); - if (stat & FW_PORT_CMD_RXPAUSE) + if (stat & FW_PORT_CMD_RXPAUSE_F) fc |= PAUSE_RX; - if (stat & FW_PORT_CMD_TXPAUSE) + if (stat & FW_PORT_CMD_TXPAUSE_F) fc |= PAUSE_TX; - if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) + if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) speed = 100; - else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) speed = 1000; - else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) speed = 10000; - else if (stat & 
FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) speed = 40000; if (link_ok != lc->link_ok || speed != lc->speed || @@ -3963,6 +4005,126 @@ int t4_prep_adapter(struct adapter *adapter) } /** + * t4_bar2_sge_qregs - return BAR2 SGE Queue register information + * @adapter: the adapter + * @qid: the Queue ID + * @qtype: the Ingress or Egress type for @qid + * @pbar2_qoffset: BAR2 Queue Offset + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 SGE Queue Registers information associated with the + * indicated Absolute Queue ID. These are passed back in return value + * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue + * and T4_BAR2_QTYPE_INGRESS for Ingress Queues. + * + * This may return an error which indicates that BAR2 SGE Queue + * registers aren't available. If an error is not returned, then the + * following values are returned: + * + * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers + * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid + * + * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which + * require the "Inferred Queue ID" ability may be used. E.g. the + * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, + * then these "Inferred Queue ID" registers may not be used. + */ +int t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid) +{ + unsigned int page_shift, page_size, qpp_shift, qpp_mask; + u64 bar2_page_offset, bar2_qoffset; + unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred; + + /* T4 doesn't support BAR2 SGE Queue registers. + */ + if (is_t4(adapter->params.chip)) + return -EINVAL; + + /* Get our SGE Page Size parameters. + */ + page_shift = adapter->params.sge.hps + 10; + page_size = 1 << page_shift; + + /* Get the right Queues per Page parameters for our Queue. + */ + qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS + ? adapter->params.sge.eq_qpp + : adapter->params.sge.iq_qpp); + qpp_mask = (1 << qpp_shift) - 1; + + /* Calculate the basics of the BAR2 SGE Queue register area: + * o The BAR2 page the Queue registers will be in. + * o The BAR2 Queue ID. + * o The BAR2 Queue ID Offset into the BAR2 page. + */ + bar2_page_offset = ((qid >> qpp_shift) << page_shift); + bar2_qid = qid & qpp_mask; + bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; + + /* If the BAR2 Queue ID Offset is less than the Page Size, then the + * hardware will infer the Absolute Queue ID simply from the writes to + * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a + * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply + * write to the first BAR2 SGE Queue Area within the BAR2 Page with + * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID + * from the BAR2 Page and BAR2 Queue ID. + * + * One important consequence of this is that some BAR2 SGE registers + * have a "Queue ID" field and we can write the BAR2 SGE Queue ID + * there. But other registers synthesize the SGE Queue ID purely + * from the writes to the registers -- the Write Combined Doorbell + * Buffer is a good example. These BAR2 SGE Registers are only + * available for those BAR2 SGE Register areas where the SGE Absolute + * Queue ID can be inferred from simple writes.
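+ * + * A worked example with illustrative numbers (not taken from this patch): with a 4KB BAR2 page (page_shift = 12) and four Egress Queues per page (qpp_shift = 2), qid 11 lands in BAR2 page 11 >> 2 = 2, so bar2_page_offset = 2 << 12 = 0x2000, bar2_qid = 11 & 3 = 3 and bar2_qid_offset = 3 * SGE_UDB_SIZE. Provided the doorbell stride keeps that offset below the 4KB page size, the Queue ID can be inferred and the returned bar2_qid is 0.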
+ */ + bar2_qoffset = bar2_page_offset; + bar2_qinferred = (bar2_qid_offset < page_size); + if (bar2_qinferred) { + bar2_qoffset += bar2_qid_offset; + bar2_qid = 0; + } + + *pbar2_qoffset = bar2_qoffset; + *pbar2_qid = bar2_qid; + return 0; +} + +/** + * t4_init_sge_params - initialize adap->params.sge + * @adapter: the adapter + * + * Initialize various fields of the adapter's SGE Parameters structure. + */ +int t4_init_sge_params(struct adapter *adapter) +{ + struct sge_params *sge_params = &adapter->params.sge; + u32 hps, qpp; + unsigned int s_hps, s_qpp; + + /* Extract the SGE Page Size for our PF. + */ + hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE); + s_hps = (HOSTPAGESIZEPF0_S + + (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn); + sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M); + + /* Extract the SGE Egress and Ingress Queues Per Page for our PF. + */ + s_qpp = (QUEUESPERPAGEPF0_S + + (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn); + qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF); + sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); + qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF); + sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); + + return 0; +} + +/** * t4_init_tp_params - initialize adap->params.tp * @adap: the adapter * @@ -4082,11 +4244,11 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) while ((adap->params.portvec & (1 << j)) == 0) j++; - c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | - FW_CMD_REQUEST | FW_CMD_READ | - FW_PORT_CMD_PORTID(j)); + c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(j)); c.action_to_len16 = htonl( - FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | + FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | FW_LEN16(c)); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret) @@ -4104,13 +4266,13 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) adap->port[i]->dev_port = j; ret = ntohl(c.u.info.lstatus_to_modtype); - p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? - FW_PORT_CMD_MDIOADDR_GET(ret) : -1; - p->port_type = FW_PORT_CMD_PTYPE_GET(ret); + p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
+ FW_PORT_CMD_MDIOADDR_G(ret) : -1; + p->port_type = FW_PORT_CMD_PTYPE_G(ret); p->mod_type = FW_PORT_MOD_TYPE_NA; - rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ | + rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); rvc.retval_len16 = htonl(FW_LEN16(rvc)); ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 5f4db2398c71..0f89f68948ab 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -205,16 +205,62 @@ struct work_request_hdr { #define WR_HDR struct work_request_hdr wr /* option 0 fields */ -#define S_MSS_IDX 60 -#define M_MSS_IDX 0xF -#define V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX) -#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX) +#define TX_CHAN_S 2 +#define TX_CHAN_V(x) ((x) << TX_CHAN_S) + +#define ULP_MODE_S 8 +#define ULP_MODE_V(x) ((x) << ULP_MODE_S) + +#define RCV_BUFSIZ_S 12 +#define RCV_BUFSIZ_M 0x3FFU +#define RCV_BUFSIZ_V(x) ((x) << RCV_BUFSIZ_S) + +#define SMAC_SEL_S 28 +#define SMAC_SEL_V(x) ((__u64)(x) << SMAC_SEL_S) + +#define L2T_IDX_S 36 +#define L2T_IDX_V(x) ((__u64)(x) << L2T_IDX_S) + +#define WND_SCALE_S 50 +#define WND_SCALE_V(x) ((__u64)(x) << WND_SCALE_S) + +#define KEEP_ALIVE_S 54 +#define KEEP_ALIVE_V(x) ((__u64)(x) << KEEP_ALIVE_S) +#define KEEP_ALIVE_F KEEP_ALIVE_V(1ULL) + +#define MSS_IDX_S 60 +#define MSS_IDX_M 0xF +#define MSS_IDX_V(x) ((__u64)(x) << MSS_IDX_S) +#define MSS_IDX_G(x) (((x) >> MSS_IDX_S) & MSS_IDX_M) /* option 2 fields */ -#define S_RSS_QUEUE 0 -#define M_RSS_QUEUE 0x3FF -#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE) -#define G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE) +#define RSS_QUEUE_S 0 +#define RSS_QUEUE_M 0x3FF +#define RSS_QUEUE_V(x) ((x) << RSS_QUEUE_S) +#define RSS_QUEUE_G(x) (((x) >> RSS_QUEUE_S) & RSS_QUEUE_M) + +#define RSS_QUEUE_VALID_S 10 +#define RSS_QUEUE_VALID_V(x) ((x) << RSS_QUEUE_VALID_S) +#define RSS_QUEUE_VALID_F RSS_QUEUE_VALID_V(1U) + +#define RX_FC_DISABLE_S 20 +#define RX_FC_DISABLE_V(x) ((x) << RX_FC_DISABLE_S) +#define RX_FC_DISABLE_F RX_FC_DISABLE_V(1U) + +#define RX_FC_VALID_S 22 +#define RX_FC_VALID_V(x) ((x) << RX_FC_VALID_S) +#define RX_FC_VALID_F RX_FC_VALID_V(1U) + +#define RX_CHANNEL_S 26 +#define RX_CHANNEL_V(x) ((x) << RX_CHANNEL_S) + +#define WND_SCALE_EN_S 28 +#define WND_SCALE_EN_V(x) ((x) << WND_SCALE_EN_S) +#define WND_SCALE_EN_F WND_SCALE_EN_V(1U) + +#define T5_OPT_2_VALID_S 31 +#define T5_OPT_2_VALID_V(x) ((x) << T5_OPT_2_VALID_S) +#define T5_OPT_2_VALID_F T5_OPT_2_VALID_V(1U) struct cpl_pass_open_req { WR_HDR; @@ -224,20 +270,11 @@ struct cpl_pass_open_req { __be32 local_ip; __be32 peer_ip; __be64 opt0; -#define TX_CHAN(x) ((x) << 2) #define NO_CONG(x) ((x) << 4) #define DELACK(x) ((x) << 5) -#define ULP_MODE(x) ((x) << 8) -#define RCV_BUFSIZ(x) ((x) << 12) -#define RCV_BUFSIZ_MASK 0x3FFU #define DSCP(x) ((x) << 22) -#define SMAC_SEL(x) ((u64)(x) << 28) -#define L2T_IDX(x) ((u64)(x) << 36) #define TCAM_BYPASS(x) ((u64)(x) << 48) #define NAGLE(x) ((u64)(x) << 49) -#define WND_SCALE(x) ((u64)(x) << 50) -#define KEEP_ALIVE(x) ((u64)(x) << 54) -#define MSS_IDX(x) ((u64)(x) << 60) __be64 opt1; #define SYN_RSS_ENABLE (1 << 0) #define SYN_RSS_QUEUE(x) ((x) << 2) @@ -267,20 +304,13 @@ struct cpl_pass_accept_rpl { WR_HDR; union opcode_tid ot; __be32 opt2; -#define RSS_QUEUE(x) ((x) << 0) -#define 
RSS_QUEUE_VALID (1 << 10) #define RX_COALESCE_VALID(x) ((x) << 11) #define RX_COALESCE(x) ((x) << 12) #define PACE(x) ((x) << 16) -#define RX_FC_VALID ((1U) << 19) -#define RX_FC_DISABLE ((1U) << 20) #define TX_QUEUE(x) ((x) << 23) -#define RX_CHANNEL(x) ((x) << 26) #define CCTRL_ECN(x) ((x) << 27) -#define WND_SCALE_EN(x) ((x) << 28) #define TSTAMPS_EN(x) ((x) << 29) #define SACK_EN(x) ((x) << 30) -#define T5_OPT_2_VALID ((1U) << 31) __be64 opt0; }; @@ -305,10 +335,10 @@ struct cpl_act_open_req { __be32 opt2; }; -#define S_FILTER_TUPLE 24 -#define M_FILTER_TUPLE 0xFFFFFFFFFF -#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE) -#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE) +#define FILTER_TUPLE_S 24 +#define FILTER_TUPLE_M 0xFFFFFFFFFF +#define FILTER_TUPLE_V(x) ((x) << FILTER_TUPLE_S) +#define FILTER_TUPLE_G(x) (((x) >> FILTER_TUPLE_S) & FILTER_TUPLE_M) struct cpl_t5_act_open_req { WR_HDR; union opcode_tid ot; @@ -579,10 +609,16 @@ struct cpl_rx_data_ack { WR_HDR; union opcode_tid ot; __be32 credit_dack; -#define RX_CREDITS(x) ((x) << 0) -#define RX_FORCE_ACK(x) ((x) << 28) }; +/* cpl_rx_data_ack.ack_seq fields */ +#define RX_CREDITS_S 0 +#define RX_CREDITS_V(x) ((x) << RX_CREDITS_S) + +#define RX_FORCE_ACK_S 28 +#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S) +#define RX_FORCE_ACK_F RX_FORCE_ACK_V(1U) + struct cpl_rx_pkt { struct rss_header rsshdr; u8 opcode; @@ -803,6 +839,9 @@ enum { ULP_TX_SC_ISGL = 0x83 }; +#define ULPTX_CMD_S 24 +#define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S) + struct ulptx_sge_pair { __be32 len[2]; __be64 addr[2]; @@ -810,7 +849,6 @@ struct ulptx_sge_pair { struct ulptx_sgl { __be32 cmd_nsge; -#define ULPTX_CMD(x) ((x) << 24) #define ULPTX_NSGE(x) ((x) << 0) #define ULPTX_MORE (1U << 23) __be32 len0; @@ -821,15 +859,21 @@ struct ulptx_sgl { struct ulp_mem_io { WR_HDR; __be32 cmd; -#define ULP_MEMIO_ORDER(x) ((x) << 23) __be32 len16; /* command length */ __be32 dlen; /* data length in 32-byte units */ -#define ULP_MEMIO_DATA_LEN(x) ((x) << 0) __be32 lock_addr; -#define ULP_MEMIO_ADDR(x) ((x) << 0) #define ULP_MEMIO_LOCK(x) ((x) << 31) }; +/* additional ulp_mem_io.cmd fields */ +#define ULP_MEMIO_ORDER_S 23 +#define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S) +#define ULP_MEMIO_ORDER_F ULP_MEMIO_ORDER_V(1U) + +#define T5_ULP_MEMIO_IMM_S 23 +#define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S) +#define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U) + #define S_T5_ULP_MEMIO_IMM 23 #define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM) #define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U) @@ -838,4 +882,12 @@ struct ulp_mem_io { #define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER) #define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U) +/* ulp_mem_io.lock_addr fields */ +#define ULP_MEMIO_ADDR_S 0 +#define ULP_MEMIO_ADDR_V(x) ((x) << ULP_MEMIO_ADDR_S) + +/* ulp_mem_io.dlen fields */ +#define ULP_MEMIO_DATA_LEN_S 0 +#define ULP_MEMIO_DATA_LEN_V(x) ((x) << ULP_MEMIO_DATA_LEN_S) + #endif /* __T4_MSG_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h new file mode 100644 index 000000000000..9e4f95a91fb4 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -0,0 +1,160 @@ +/* + * This file is part of the Chelsio T4/T5 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __T4_PCI_ID_TBL_H__ +#define __T4_PCI_ID_TBL_H__ + +/* The code can define cpp macros for creating a PCI Device ID Table. This is + * useful because it allows the PCI ID Table to be maintained in a single place. + * + * The macros are: + * + * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + * -- Used to start the definition of the PCI ID Table. + * + * CH_PCI_DEVICE_ID_FUNCTION + * -- The PCI Function Number to use in the PCI Device ID Table. "0" + * -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4, + * -- "8" for drivers attaching to SR-IOV Virtual Functions, etc. + * + * CH_PCI_DEVICE_ID_FUNCTION2 [optional] + * -- If defined, create a PCI Device ID Table with both + * -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated. + * + * CH_PCI_ID_TABLE_ENTRY(DeviceID) + * -- Used for the individual PCI Device ID entries. Note that we will + * -- be adding a trailing comma (",") after all of the entries (and + * -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined). + * + * CH_PCI_DEVICE_ID_TABLE_DEFINE_END + * -- Used to finish the definition of the PCI ID Table. Note that we + * -- will be adding a trailing semi-colon (";") here. + */ +#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + +#ifndef CH_PCI_DEVICE_ID_FUNCTION +#error CH_PCI_DEVICE_ID_FUNCTION not defined! +#endif +#ifndef CH_PCI_ID_TABLE_ENTRY +#error CH_PCI_ID_TABLE_ENTRY not defined! +#endif +#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END +#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined! +#endif + +/* T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where: + * + * V = "4" for T4; "5" for T5, etc. + * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs + * PP = adapter product designation + * + * We use this consistency in order to create the proper PCI Device IDs + * for the specified CH_PCI_DEVICE_ID_FUNCTION.
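+ * + * For example (illustrative): with CH_PCI_DEVICE_ID_FUNCTION defined as 4, T5 product code 0x01 has base Device ID 0x5001, and CH_PCI_ID_TABLE_FENTRY below emits 0x5001 | (4 << 8) = 0x5401, the PF4 instance of that adapter.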
+ */ +#ifndef CH_PCI_DEVICE_ID_FUNCTION2 +#define CH_PCI_ID_TABLE_FENTRY(devid) \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION) << 8)) +#else +#define CH_PCI_ID_TABLE_FENTRY(devid) \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION2) << 8)) +#endif + +CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + /* T4 adapters: + */ + CH_PCI_ID_TABLE_FENTRY(0x4000), /* T440-dbg */ + CH_PCI_ID_TABLE_FENTRY(0x4001), /* T420-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4002), /* T422-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4003), /* T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4004), /* T420-bch */ + CH_PCI_ID_TABLE_FENTRY(0x4005), /* T440-bch */ + CH_PCI_ID_TABLE_FENTRY(0x4006), /* T440-ch */ + CH_PCI_ID_TABLE_FENTRY(0x4007), /* T420-so */ + CH_PCI_ID_TABLE_FENTRY(0x4008), /* T420-cx */ + CH_PCI_ID_TABLE_FENTRY(0x4009), /* T420-bt */ + CH_PCI_ID_TABLE_FENTRY(0x400a), /* T404-bt */ + CH_PCI_ID_TABLE_FENTRY(0x400b), /* B420-sr */ + CH_PCI_ID_TABLE_FENTRY(0x400c), /* B404-bt */ + CH_PCI_ID_TABLE_FENTRY(0x400d), /* T480-cr */ + CH_PCI_ID_TABLE_FENTRY(0x400e), /* T440-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4080), /* Custom T480-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4081), /* Custom T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4082), /* Custom T420-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4083), /* Custom T420-xaui */ + CH_PCI_ID_TABLE_FENTRY(0x4084), /* Custom T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4085), /* Custom T420-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4086), /* Custom T440-bt */ + CH_PCI_ID_TABLE_FENTRY(0x4087), /* Custom T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4088), /* Custom T440 2-xaui, 2-xfi */ + + /* T5 adapters: + */ + CH_PCI_ID_TABLE_FENTRY(0x5000), /* T580-dbg */ + CH_PCI_ID_TABLE_FENTRY(0x5001), /* T520-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5002), /* T522-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5003), /* T540-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5004), /* T520-bch */ + CH_PCI_ID_TABLE_FENTRY(0x5005), /* T540-bch */ + CH_PCI_ID_TABLE_FENTRY(0x5006), /* T540-ch */ + CH_PCI_ID_TABLE_FENTRY(0x5007), /* T520-so */ + CH_PCI_ID_TABLE_FENTRY(0x5008), /* T520-cx */ + CH_PCI_ID_TABLE_FENTRY(0x5009), /* T520-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500a), /* T504-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500b), /* B520-sr */ + CH_PCI_ID_TABLE_FENTRY(0x500c), /* B504-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500d), /* T580-cr */ + CH_PCI_ID_TABLE_FENTRY(0x500e), /* T540-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5010), /* T580-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5011), /* T520-LL-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5012), /* T560-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */ + CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */ + CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ + CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5083), /* Custom T540-LP-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5084), /* Custom T580-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5085), /* Custom 3x T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */ +CH_PCI_DEVICE_ID_TABLE_DEFINE_END; + +#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */ + +#endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index a1024db5dc13..d7bd34ee65bd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ 
b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -95,6 +95,7 @@ #define X_INGPADBOUNDARY_SHIFT 5 #define SGE_CONTROL 0x1008 +#define SGE_CONTROL2_A 0x1124 #define DCASYSTYPE 0x00080000U #define RXPKTCPLMODE_MASK 0x00040000U #define RXPKTCPLMODE_SHIFT 18 @@ -106,6 +107,7 @@ #define PKTSHIFT_SHIFT 10 #define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) #define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT) +#define INGPCIEBOUNDARY_32B_X 0 #define INGPCIEBOUNDARY_MASK 0x00000380U #define INGPCIEBOUNDARY_SHIFT 7 #define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) @@ -114,6 +116,14 @@ #define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) #define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \ >> INGPADBOUNDARY_SHIFT) +#define INGPACKBOUNDARY_16B_X 0 +#define INGPACKBOUNDARY_SHIFT_X 5 + +#define INGPACKBOUNDARY_S 16 +#define INGPACKBOUNDARY_M 0x7U +#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S) +#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \ + & INGPACKBOUNDARY_M) #define EGRPCIEBOUNDARY_MASK 0x0000000eU #define EGRPCIEBOUNDARY_SHIFT 1 #define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) @@ -145,17 +155,22 @@ #define HOSTPAGESIZEPF2_SHIFT 8 #define HOSTPAGESIZEPF2(x) ((x) << HOSTPAGESIZEPF2_SHIFT) -#define HOSTPAGESIZEPF1_MASK 0x0000000fU -#define HOSTPAGESIZEPF1_SHIFT 4 -#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_SHIFT) +#define HOSTPAGESIZEPF1_M 0x0000000fU +#define HOSTPAGESIZEPF1_S 4 +#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_S) -#define HOSTPAGESIZEPF0_MASK 0x0000000fU -#define HOSTPAGESIZEPF0_SHIFT 0 -#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT) +#define HOSTPAGESIZEPF0_M 0x0000000fU +#define HOSTPAGESIZEPF0_S 0 +#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_S) #define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 -#define QUEUESPERPAGEPF0_MASK 0x0000000fU -#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) +#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014 + +#define QUEUESPERPAGEPF1_S 4 + +#define QUEUESPERPAGEPF0_S 0 +#define QUEUESPERPAGEPF0_MASK 0x0000000fU +#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) #define QUEUESPERPAGEPF0 0 #define QUEUESPERPAGEPF1 4 @@ -313,6 +328,7 @@ #define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc #define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8 #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 +#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8 #define S_HP_INT_THRESH 28 #define M_HP_INT_THRESH 0xfU @@ -501,21 +517,62 @@ #define MC_BIST_STATUS_RDATA 0x7688 -#define MA_EDRAM0_BAR 0x77c0 -#define MA_EDRAM1_BAR 0x77c4 -#define EDRAM_SIZE_MASK 0xfffU -#define EDRAM_SIZE_GET(x) ((x) & EDRAM_SIZE_MASK) +#define MA_EDRAM0_BAR_A 0x77c0 + +#define EDRAM0_SIZE_S 0 +#define EDRAM0_SIZE_M 0xfffU +#define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S) +#define EDRAM0_SIZE_G(x) (((x) >> EDRAM0_SIZE_S) & EDRAM0_SIZE_M) + +#define MA_EDRAM1_BAR_A 0x77c4 + +#define EDRAM1_SIZE_S 0 +#define EDRAM1_SIZE_M 0xfffU +#define EDRAM1_SIZE_V(x) ((x) << EDRAM1_SIZE_S) +#define EDRAM1_SIZE_G(x) (((x) >> EDRAM1_SIZE_S) & EDRAM1_SIZE_M) + +#define MA_EXT_MEMORY_BAR_A 0x77c8 + +#define EXT_MEM_SIZE_S 0 +#define EXT_MEM_SIZE_M 0xfffU +#define EXT_MEM_SIZE_V(x) ((x) << EXT_MEM_SIZE_S) +#define EXT_MEM_SIZE_G(x) (((x) >> EXT_MEM_SIZE_S) & EXT_MEM_SIZE_M) + +#define MA_EXT_MEMORY1_BAR_A 0x7808 + +#define EXT_MEM1_SIZE_S 0 +#define EXT_MEM1_SIZE_M 0xfffU +#define EXT_MEM1_SIZE_V(x) ((x) << EXT_MEM1_SIZE_S) +#define EXT_MEM1_SIZE_G(x) (((x) >> EXT_MEM1_SIZE_S) & EXT_MEM1_SIZE_M) + +#define MA_EXT_MEMORY0_BAR_A 0x77c8 + +#define 
EXT_MEM0_SIZE_S 0 +#define EXT_MEM0_SIZE_M 0xfffU +#define EXT_MEM0_SIZE_V(x) ((x) << EXT_MEM0_SIZE_S) +#define EXT_MEM0_SIZE_G(x) (((x) >> EXT_MEM0_SIZE_S) & EXT_MEM0_SIZE_M) + +#define MA_TARGET_MEM_ENABLE_A 0x77d8 + +#define EXT_MEM_ENABLE_S 2 +#define EXT_MEM_ENABLE_V(x) ((x) << EXT_MEM_ENABLE_S) +#define EXT_MEM_ENABLE_F EXT_MEM_ENABLE_V(1U) + +#define EDRAM1_ENABLE_S 1 +#define EDRAM1_ENABLE_V(x) ((x) << EDRAM1_ENABLE_S) +#define EDRAM1_ENABLE_F EDRAM1_ENABLE_V(1U) + +#define EDRAM0_ENABLE_S 0 +#define EDRAM0_ENABLE_V(x) ((x) << EDRAM0_ENABLE_S) +#define EDRAM0_ENABLE_F EDRAM0_ENABLE_V(1U) -#define MA_EXT_MEMORY_BAR 0x77c8 -#define EXT_MEM_SIZE_MASK 0x00000fffU -#define EXT_MEM_SIZE_SHIFT 0 -#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT) +#define EXT_MEM1_ENABLE_S 4 +#define EXT_MEM1_ENABLE_V(x) ((x) << EXT_MEM1_ENABLE_S) +#define EXT_MEM1_ENABLE_F EXT_MEM1_ENABLE_V(1U) -#define MA_TARGET_MEM_ENABLE 0x77d8 -#define EXT_MEM1_ENABLE 0x00000010U -#define EXT_MEM_ENABLE 0x00000004U -#define EDRAM1_ENABLE 0x00000002U -#define EDRAM0_ENABLE 0x00000001U +#define EXT_MEM0_ENABLE_S 2 +#define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S) +#define EXT_MEM0_ENABLE_F EXT_MEM0_ENABLE_V(1U) #define MA_INT_CAUSE 0x77e0 #define MEM_PERR_INT_CAUSE 0x00000002U @@ -532,7 +589,6 @@ #define MA_PARITY_ERROR_STATUS 0x77f4 #define MA_PARITY_ERROR_STATUS2 0x7804 -#define MA_EXT_MEMORY1_BAR 0x7808 #define EDC_0_BASE_ADDR 0x7900 #define EDC_BIST_CMD 0x7904 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 3409756a85b9..beaf80a6214b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -109,18 +109,49 @@ struct fw_wr_hdr { __be32 lo; }; -#define FW_WR_OP(x) ((x) << 24) -#define FW_WR_OP_GET(x) (((x) >> 24) & 0xff) -#define FW_WR_ATOMIC(x) ((x) << 23) -#define FW_WR_FLUSH(x) ((x) << 22) -#define FW_WR_COMPL(x) ((x) << 21) -#define FW_WR_IMMDLEN_MASK 0xff -#define FW_WR_IMMDLEN(x) ((x) << 0) - -#define FW_WR_EQUIQ (1U << 31) -#define FW_WR_EQUEQ (1U << 30) -#define FW_WR_FLOWID(x) ((x) << 8) -#define FW_WR_LEN16(x) ((x) << 0) +/* work request opcode (hi) */ +#define FW_WR_OP_S 24 +#define FW_WR_OP_M 0xff +#define FW_WR_OP_V(x) ((x) << FW_WR_OP_S) +#define FW_WR_OP_G(x) (((x) >> FW_WR_OP_S) & FW_WR_OP_M) + +/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER */ +#define FW_WR_ATOMIC_S 23 +#define FW_WR_ATOMIC_V(x) ((x) << FW_WR_ATOMIC_S) + +/* flush flag (hi) - firmware flushes flushable work request buffered + * in the flow context. 
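+ * + * Illustrative usage (not part of this patch): a driver typically composes a work request header with these macros roughly as wr->hi = htonl(FW_WR_OP_V(opcode) | FW_WR_COMPL_F | FW_WR_IMMDLEN_V(immdlen)); wr->lo = htonl(FW_WR_FLOWID_V(tid) | FW_WR_LEN16_V(len16)); where opcode, immdlen, tid and len16 are supplied by the caller.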
+ */ +#define FW_WR_FLUSH_S 22 +#define FW_WR_FLUSH_V(x) ((x) << FW_WR_FLUSH_S) + +/* completion flag (hi) - firmware generates a cpl_fw6_ack */ +#define FW_WR_COMPL_S 21 +#define FW_WR_COMPL_V(x) ((x) << FW_WR_COMPL_S) +#define FW_WR_COMPL_F FW_WR_COMPL_V(1U) + +/* work request immediate data length (hi) */ +#define FW_WR_IMMDLEN_S 0 +#define FW_WR_IMMDLEN_M 0xff +#define FW_WR_IMMDLEN_V(x) ((x) << FW_WR_IMMDLEN_S) + +/* egress queue status update to associated ingress queue entry (lo) */ +#define FW_WR_EQUIQ_S 31 +#define FW_WR_EQUIQ_V(x) ((x) << FW_WR_EQUIQ_S) +#define FW_WR_EQUIQ_F FW_WR_EQUIQ_V(1U) + +/* egress queue status update to egress queue status entry (lo) */ +#define FW_WR_EQUEQ_S 30 +#define FW_WR_EQUEQ_V(x) ((x) << FW_WR_EQUEQ_S) +#define FW_WR_EQUEQ_F FW_WR_EQUEQ_V(1U) + +/* flow context identifier (lo) */ +#define FW_WR_FLOWID_S 8 +#define FW_WR_FLOWID_V(x) ((x) << FW_WR_FLOWID_S) + +/* length in units of 16-bytes (lo) */ +#define FW_WR_LEN16_S 0 +#define FW_WR_LEN16_V(x) ((x) << FW_WR_LEN16_S) #define HW_TPL_FR_MT_PR_IV_P_FC 0X32B #define HW_TPL_FR_MT_PR_OV_P_FC 0X327 @@ -166,239 +197,239 @@ struct fw_filter_wr { __u8 sma[6]; }; -#define S_FW_FILTER_WR_TID 12 -#define M_FW_FILTER_WR_TID 0xfffff -#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID) -#define G_FW_FILTER_WR_TID(x) \ - (((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID) - -#define S_FW_FILTER_WR_RQTYPE 11 -#define M_FW_FILTER_WR_RQTYPE 0x1 -#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE) -#define G_FW_FILTER_WR_RQTYPE(x) \ - (((x) >> S_FW_FILTER_WR_RQTYPE) & M_FW_FILTER_WR_RQTYPE) -#define F_FW_FILTER_WR_RQTYPE V_FW_FILTER_WR_RQTYPE(1U) - -#define S_FW_FILTER_WR_NOREPLY 10 -#define M_FW_FILTER_WR_NOREPLY 0x1 -#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY) -#define G_FW_FILTER_WR_NOREPLY(x) \ - (((x) >> S_FW_FILTER_WR_NOREPLY) & M_FW_FILTER_WR_NOREPLY) -#define F_FW_FILTER_WR_NOREPLY V_FW_FILTER_WR_NOREPLY(1U) - -#define S_FW_FILTER_WR_IQ 0 -#define M_FW_FILTER_WR_IQ 0x3ff -#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ) -#define G_FW_FILTER_WR_IQ(x) \ - (((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ) - -#define S_FW_FILTER_WR_DEL_FILTER 31 -#define M_FW_FILTER_WR_DEL_FILTER 0x1 -#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER) -#define G_FW_FILTER_WR_DEL_FILTER(x) \ - (((x) >> S_FW_FILTER_WR_DEL_FILTER) & M_FW_FILTER_WR_DEL_FILTER) -#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U) - -#define S_FW_FILTER_WR_RPTTID 25 -#define M_FW_FILTER_WR_RPTTID 0x1 -#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID) -#define G_FW_FILTER_WR_RPTTID(x) \ - (((x) >> S_FW_FILTER_WR_RPTTID) & M_FW_FILTER_WR_RPTTID) -#define F_FW_FILTER_WR_RPTTID V_FW_FILTER_WR_RPTTID(1U) - -#define S_FW_FILTER_WR_DROP 24 -#define M_FW_FILTER_WR_DROP 0x1 -#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP) -#define G_FW_FILTER_WR_DROP(x) \ - (((x) >> S_FW_FILTER_WR_DROP) & M_FW_FILTER_WR_DROP) -#define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U) - -#define S_FW_FILTER_WR_DIRSTEER 23 -#define M_FW_FILTER_WR_DIRSTEER 0x1 -#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER) -#define G_FW_FILTER_WR_DIRSTEER(x) \ - (((x) >> S_FW_FILTER_WR_DIRSTEER) & M_FW_FILTER_WR_DIRSTEER) -#define F_FW_FILTER_WR_DIRSTEER V_FW_FILTER_WR_DIRSTEER(1U) - -#define S_FW_FILTER_WR_MASKHASH 22 -#define M_FW_FILTER_WR_MASKHASH 0x1 -#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH) -#define G_FW_FILTER_WR_MASKHASH(x) \ - (((x) >> 
S_FW_FILTER_WR_MASKHASH) & M_FW_FILTER_WR_MASKHASH) -#define F_FW_FILTER_WR_MASKHASH V_FW_FILTER_WR_MASKHASH(1U) - -#define S_FW_FILTER_WR_DIRSTEERHASH 21 -#define M_FW_FILTER_WR_DIRSTEERHASH 0x1 -#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH) -#define G_FW_FILTER_WR_DIRSTEERHASH(x) \ - (((x) >> S_FW_FILTER_WR_DIRSTEERHASH) & M_FW_FILTER_WR_DIRSTEERHASH) -#define F_FW_FILTER_WR_DIRSTEERHASH V_FW_FILTER_WR_DIRSTEERHASH(1U) - -#define S_FW_FILTER_WR_LPBK 20 -#define M_FW_FILTER_WR_LPBK 0x1 -#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK) -#define G_FW_FILTER_WR_LPBK(x) \ - (((x) >> S_FW_FILTER_WR_LPBK) & M_FW_FILTER_WR_LPBK) -#define F_FW_FILTER_WR_LPBK V_FW_FILTER_WR_LPBK(1U) - -#define S_FW_FILTER_WR_DMAC 19 -#define M_FW_FILTER_WR_DMAC 0x1 -#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC) -#define G_FW_FILTER_WR_DMAC(x) \ - (((x) >> S_FW_FILTER_WR_DMAC) & M_FW_FILTER_WR_DMAC) -#define F_FW_FILTER_WR_DMAC V_FW_FILTER_WR_DMAC(1U) - -#define S_FW_FILTER_WR_SMAC 18 -#define M_FW_FILTER_WR_SMAC 0x1 -#define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC) -#define G_FW_FILTER_WR_SMAC(x) \ - (((x) >> S_FW_FILTER_WR_SMAC) & M_FW_FILTER_WR_SMAC) -#define F_FW_FILTER_WR_SMAC V_FW_FILTER_WR_SMAC(1U) - -#define S_FW_FILTER_WR_INSVLAN 17 -#define M_FW_FILTER_WR_INSVLAN 0x1 -#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN) -#define G_FW_FILTER_WR_INSVLAN(x) \ - (((x) >> S_FW_FILTER_WR_INSVLAN) & M_FW_FILTER_WR_INSVLAN) -#define F_FW_FILTER_WR_INSVLAN V_FW_FILTER_WR_INSVLAN(1U) - -#define S_FW_FILTER_WR_RMVLAN 16 -#define M_FW_FILTER_WR_RMVLAN 0x1 -#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN) -#define G_FW_FILTER_WR_RMVLAN(x) \ - (((x) >> S_FW_FILTER_WR_RMVLAN) & M_FW_FILTER_WR_RMVLAN) -#define F_FW_FILTER_WR_RMVLAN V_FW_FILTER_WR_RMVLAN(1U) - -#define S_FW_FILTER_WR_HITCNTS 15 -#define M_FW_FILTER_WR_HITCNTS 0x1 -#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS) -#define G_FW_FILTER_WR_HITCNTS(x) \ - (((x) >> S_FW_FILTER_WR_HITCNTS) & M_FW_FILTER_WR_HITCNTS) -#define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U) - -#define S_FW_FILTER_WR_TXCHAN 13 -#define M_FW_FILTER_WR_TXCHAN 0x3 -#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN) -#define G_FW_FILTER_WR_TXCHAN(x) \ - (((x) >> S_FW_FILTER_WR_TXCHAN) & M_FW_FILTER_WR_TXCHAN) - -#define S_FW_FILTER_WR_PRIO 12 -#define M_FW_FILTER_WR_PRIO 0x1 -#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO) -#define G_FW_FILTER_WR_PRIO(x) \ - (((x) >> S_FW_FILTER_WR_PRIO) & M_FW_FILTER_WR_PRIO) -#define F_FW_FILTER_WR_PRIO V_FW_FILTER_WR_PRIO(1U) - -#define S_FW_FILTER_WR_L2TIX 0 -#define M_FW_FILTER_WR_L2TIX 0xfff -#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX) -#define G_FW_FILTER_WR_L2TIX(x) \ - (((x) >> S_FW_FILTER_WR_L2TIX) & M_FW_FILTER_WR_L2TIX) - -#define S_FW_FILTER_WR_FRAG 7 -#define M_FW_FILTER_WR_FRAG 0x1 -#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG) -#define G_FW_FILTER_WR_FRAG(x) \ - (((x) >> S_FW_FILTER_WR_FRAG) & M_FW_FILTER_WR_FRAG) -#define F_FW_FILTER_WR_FRAG V_FW_FILTER_WR_FRAG(1U) - -#define S_FW_FILTER_WR_FRAGM 6 -#define M_FW_FILTER_WR_FRAGM 0x1 -#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM) -#define G_FW_FILTER_WR_FRAGM(x) \ - (((x) >> S_FW_FILTER_WR_FRAGM) & M_FW_FILTER_WR_FRAGM) -#define F_FW_FILTER_WR_FRAGM V_FW_FILTER_WR_FRAGM(1U) - -#define S_FW_FILTER_WR_IVLAN_VLD 5 -#define M_FW_FILTER_WR_IVLAN_VLD 0x1 -#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << 
S_FW_FILTER_WR_IVLAN_VLD) -#define G_FW_FILTER_WR_IVLAN_VLD(x) \ - (((x) >> S_FW_FILTER_WR_IVLAN_VLD) & M_FW_FILTER_WR_IVLAN_VLD) -#define F_FW_FILTER_WR_IVLAN_VLD V_FW_FILTER_WR_IVLAN_VLD(1U) - -#define S_FW_FILTER_WR_OVLAN_VLD 4 -#define M_FW_FILTER_WR_OVLAN_VLD 0x1 -#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD) -#define G_FW_FILTER_WR_OVLAN_VLD(x) \ - (((x) >> S_FW_FILTER_WR_OVLAN_VLD) & M_FW_FILTER_WR_OVLAN_VLD) -#define F_FW_FILTER_WR_OVLAN_VLD V_FW_FILTER_WR_OVLAN_VLD(1U) - -#define S_FW_FILTER_WR_IVLAN_VLDM 3 -#define M_FW_FILTER_WR_IVLAN_VLDM 0x1 -#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM) -#define G_FW_FILTER_WR_IVLAN_VLDM(x) \ - (((x) >> S_FW_FILTER_WR_IVLAN_VLDM) & M_FW_FILTER_WR_IVLAN_VLDM) -#define F_FW_FILTER_WR_IVLAN_VLDM V_FW_FILTER_WR_IVLAN_VLDM(1U) - -#define S_FW_FILTER_WR_OVLAN_VLDM 2 -#define M_FW_FILTER_WR_OVLAN_VLDM 0x1 -#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM) -#define G_FW_FILTER_WR_OVLAN_VLDM(x) \ - (((x) >> S_FW_FILTER_WR_OVLAN_VLDM) & M_FW_FILTER_WR_OVLAN_VLDM) -#define F_FW_FILTER_WR_OVLAN_VLDM V_FW_FILTER_WR_OVLAN_VLDM(1U) - -#define S_FW_FILTER_WR_RX_CHAN 15 -#define M_FW_FILTER_WR_RX_CHAN 0x1 -#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN) -#define G_FW_FILTER_WR_RX_CHAN(x) \ - (((x) >> S_FW_FILTER_WR_RX_CHAN) & M_FW_FILTER_WR_RX_CHAN) -#define F_FW_FILTER_WR_RX_CHAN V_FW_FILTER_WR_RX_CHAN(1U) - -#define S_FW_FILTER_WR_RX_RPL_IQ 0 -#define M_FW_FILTER_WR_RX_RPL_IQ 0x3ff -#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ) -#define G_FW_FILTER_WR_RX_RPL_IQ(x) \ - (((x) >> S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ) - -#define S_FW_FILTER_WR_MACI 23 -#define M_FW_FILTER_WR_MACI 0x1ff -#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI) -#define G_FW_FILTER_WR_MACI(x) \ - (((x) >> S_FW_FILTER_WR_MACI) & M_FW_FILTER_WR_MACI) - -#define S_FW_FILTER_WR_MACIM 14 -#define M_FW_FILTER_WR_MACIM 0x1ff -#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM) -#define G_FW_FILTER_WR_MACIM(x) \ - (((x) >> S_FW_FILTER_WR_MACIM) & M_FW_FILTER_WR_MACIM) - -#define S_FW_FILTER_WR_FCOE 13 -#define M_FW_FILTER_WR_FCOE 0x1 -#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE) -#define G_FW_FILTER_WR_FCOE(x) \ - (((x) >> S_FW_FILTER_WR_FCOE) & M_FW_FILTER_WR_FCOE) -#define F_FW_FILTER_WR_FCOE V_FW_FILTER_WR_FCOE(1U) - -#define S_FW_FILTER_WR_FCOEM 12 -#define M_FW_FILTER_WR_FCOEM 0x1 -#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM) -#define G_FW_FILTER_WR_FCOEM(x) \ - (((x) >> S_FW_FILTER_WR_FCOEM) & M_FW_FILTER_WR_FCOEM) -#define F_FW_FILTER_WR_FCOEM V_FW_FILTER_WR_FCOEM(1U) - -#define S_FW_FILTER_WR_PORT 9 -#define M_FW_FILTER_WR_PORT 0x7 -#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT) -#define G_FW_FILTER_WR_PORT(x) \ - (((x) >> S_FW_FILTER_WR_PORT) & M_FW_FILTER_WR_PORT) - -#define S_FW_FILTER_WR_PORTM 6 -#define M_FW_FILTER_WR_PORTM 0x7 -#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM) -#define G_FW_FILTER_WR_PORTM(x) \ - (((x) >> S_FW_FILTER_WR_PORTM) & M_FW_FILTER_WR_PORTM) - -#define S_FW_FILTER_WR_MATCHTYPE 3 -#define M_FW_FILTER_WR_MATCHTYPE 0x7 -#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE) -#define G_FW_FILTER_WR_MATCHTYPE(x) \ - (((x) >> S_FW_FILTER_WR_MATCHTYPE) & M_FW_FILTER_WR_MATCHTYPE) - -#define S_FW_FILTER_WR_MATCHTYPEM 0 -#define M_FW_FILTER_WR_MATCHTYPEM 0x7 -#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM) -#define 
G_FW_FILTER_WR_MATCHTYPEM(x) \ - (((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM) +#define FW_FILTER_WR_TID_S 12 +#define FW_FILTER_WR_TID_M 0xfffff +#define FW_FILTER_WR_TID_V(x) ((x) << FW_FILTER_WR_TID_S) +#define FW_FILTER_WR_TID_G(x) \ + (((x) >> FW_FILTER_WR_TID_S) & FW_FILTER_WR_TID_M) + +#define FW_FILTER_WR_RQTYPE_S 11 +#define FW_FILTER_WR_RQTYPE_M 0x1 +#define FW_FILTER_WR_RQTYPE_V(x) ((x) << FW_FILTER_WR_RQTYPE_S) +#define FW_FILTER_WR_RQTYPE_G(x) \ + (((x) >> FW_FILTER_WR_RQTYPE_S) & FW_FILTER_WR_RQTYPE_M) +#define FW_FILTER_WR_RQTYPE_F FW_FILTER_WR_RQTYPE_V(1U) + +#define FW_FILTER_WR_NOREPLY_S 10 +#define FW_FILTER_WR_NOREPLY_M 0x1 +#define FW_FILTER_WR_NOREPLY_V(x) ((x) << FW_FILTER_WR_NOREPLY_S) +#define FW_FILTER_WR_NOREPLY_G(x) \ + (((x) >> FW_FILTER_WR_NOREPLY_S) & FW_FILTER_WR_NOREPLY_M) +#define FW_FILTER_WR_NOREPLY_F FW_FILTER_WR_NOREPLY_V(1U) + +#define FW_FILTER_WR_IQ_S 0 +#define FW_FILTER_WR_IQ_M 0x3ff +#define FW_FILTER_WR_IQ_V(x) ((x) << FW_FILTER_WR_IQ_S) +#define FW_FILTER_WR_IQ_G(x) \ + (((x) >> FW_FILTER_WR_IQ_S) & FW_FILTER_WR_IQ_M) + +#define FW_FILTER_WR_DEL_FILTER_S 31 +#define FW_FILTER_WR_DEL_FILTER_M 0x1 +#define FW_FILTER_WR_DEL_FILTER_V(x) ((x) << FW_FILTER_WR_DEL_FILTER_S) +#define FW_FILTER_WR_DEL_FILTER_G(x) \ + (((x) >> FW_FILTER_WR_DEL_FILTER_S) & FW_FILTER_WR_DEL_FILTER_M) +#define FW_FILTER_WR_DEL_FILTER_F FW_FILTER_WR_DEL_FILTER_V(1U) + +#define FW_FILTER_WR_RPTTID_S 25 +#define FW_FILTER_WR_RPTTID_M 0x1 +#define FW_FILTER_WR_RPTTID_V(x) ((x) << FW_FILTER_WR_RPTTID_S) +#define FW_FILTER_WR_RPTTID_G(x) \ + (((x) >> FW_FILTER_WR_RPTTID_S) & FW_FILTER_WR_RPTTID_M) +#define FW_FILTER_WR_RPTTID_F FW_FILTER_WR_RPTTID_V(1U) + +#define FW_FILTER_WR_DROP_S 24 +#define FW_FILTER_WR_DROP_M 0x1 +#define FW_FILTER_WR_DROP_V(x) ((x) << FW_FILTER_WR_DROP_S) +#define FW_FILTER_WR_DROP_G(x) \ + (((x) >> FW_FILTER_WR_DROP_S) & FW_FILTER_WR_DROP_M) +#define FW_FILTER_WR_DROP_F FW_FILTER_WR_DROP_V(1U) + +#define FW_FILTER_WR_DIRSTEER_S 23 +#define FW_FILTER_WR_DIRSTEER_M 0x1 +#define FW_FILTER_WR_DIRSTEER_V(x) ((x) << FW_FILTER_WR_DIRSTEER_S) +#define FW_FILTER_WR_DIRSTEER_G(x) \ + (((x) >> FW_FILTER_WR_DIRSTEER_S) & FW_FILTER_WR_DIRSTEER_M) +#define FW_FILTER_WR_DIRSTEER_F FW_FILTER_WR_DIRSTEER_V(1U) + +#define FW_FILTER_WR_MASKHASH_S 22 +#define FW_FILTER_WR_MASKHASH_M 0x1 +#define FW_FILTER_WR_MASKHASH_V(x) ((x) << FW_FILTER_WR_MASKHASH_S) +#define FW_FILTER_WR_MASKHASH_G(x) \ + (((x) >> FW_FILTER_WR_MASKHASH_S) & FW_FILTER_WR_MASKHASH_M) +#define FW_FILTER_WR_MASKHASH_F FW_FILTER_WR_MASKHASH_V(1U) + +#define FW_FILTER_WR_DIRSTEERHASH_S 21 +#define FW_FILTER_WR_DIRSTEERHASH_M 0x1 +#define FW_FILTER_WR_DIRSTEERHASH_V(x) ((x) << FW_FILTER_WR_DIRSTEERHASH_S) +#define FW_FILTER_WR_DIRSTEERHASH_G(x) \ + (((x) >> FW_FILTER_WR_DIRSTEERHASH_S) & FW_FILTER_WR_DIRSTEERHASH_M) +#define FW_FILTER_WR_DIRSTEERHASH_F FW_FILTER_WR_DIRSTEERHASH_V(1U) + +#define FW_FILTER_WR_LPBK_S 20 +#define FW_FILTER_WR_LPBK_M 0x1 +#define FW_FILTER_WR_LPBK_V(x) ((x) << FW_FILTER_WR_LPBK_S) +#define FW_FILTER_WR_LPBK_G(x) \ + (((x) >> FW_FILTER_WR_LPBK_S) & FW_FILTER_WR_LPBK_M) +#define FW_FILTER_WR_LPBK_F FW_FILTER_WR_LPBK_V(1U) + +#define FW_FILTER_WR_DMAC_S 19 +#define FW_FILTER_WR_DMAC_M 0x1 +#define FW_FILTER_WR_DMAC_V(x) ((x) << FW_FILTER_WR_DMAC_S) +#define FW_FILTER_WR_DMAC_G(x) \ + (((x) >> FW_FILTER_WR_DMAC_S) & FW_FILTER_WR_DMAC_M) +#define FW_FILTER_WR_DMAC_F FW_FILTER_WR_DMAC_V(1U) + +#define FW_FILTER_WR_SMAC_S 18 +#define FW_FILTER_WR_SMAC_M 0x1 +#define 
FW_FILTER_WR_SMAC_V(x) ((x) << FW_FILTER_WR_SMAC_S) +#define FW_FILTER_WR_SMAC_G(x) \ + (((x) >> FW_FILTER_WR_SMAC_S) & FW_FILTER_WR_SMAC_M) +#define FW_FILTER_WR_SMAC_F FW_FILTER_WR_SMAC_V(1U) + +#define FW_FILTER_WR_INSVLAN_S 17 +#define FW_FILTER_WR_INSVLAN_M 0x1 +#define FW_FILTER_WR_INSVLAN_V(x) ((x) << FW_FILTER_WR_INSVLAN_S) +#define FW_FILTER_WR_INSVLAN_G(x) \ + (((x) >> FW_FILTER_WR_INSVLAN_S) & FW_FILTER_WR_INSVLAN_M) +#define FW_FILTER_WR_INSVLAN_F FW_FILTER_WR_INSVLAN_V(1U) + +#define FW_FILTER_WR_RMVLAN_S 16 +#define FW_FILTER_WR_RMVLAN_M 0x1 +#define FW_FILTER_WR_RMVLAN_V(x) ((x) << FW_FILTER_WR_RMVLAN_S) +#define FW_FILTER_WR_RMVLAN_G(x) \ + (((x) >> FW_FILTER_WR_RMVLAN_S) & FW_FILTER_WR_RMVLAN_M) +#define FW_FILTER_WR_RMVLAN_F FW_FILTER_WR_RMVLAN_V(1U) + +#define FW_FILTER_WR_HITCNTS_S 15 +#define FW_FILTER_WR_HITCNTS_M 0x1 +#define FW_FILTER_WR_HITCNTS_V(x) ((x) << FW_FILTER_WR_HITCNTS_S) +#define FW_FILTER_WR_HITCNTS_G(x) \ + (((x) >> FW_FILTER_WR_HITCNTS_S) & FW_FILTER_WR_HITCNTS_M) +#define FW_FILTER_WR_HITCNTS_F FW_FILTER_WR_HITCNTS_V(1U) + +#define FW_FILTER_WR_TXCHAN_S 13 +#define FW_FILTER_WR_TXCHAN_M 0x3 +#define FW_FILTER_WR_TXCHAN_V(x) ((x) << FW_FILTER_WR_TXCHAN_S) +#define FW_FILTER_WR_TXCHAN_G(x) \ + (((x) >> FW_FILTER_WR_TXCHAN_S) & FW_FILTER_WR_TXCHAN_M) + +#define FW_FILTER_WR_PRIO_S 12 +#define FW_FILTER_WR_PRIO_M 0x1 +#define FW_FILTER_WR_PRIO_V(x) ((x) << FW_FILTER_WR_PRIO_S) +#define FW_FILTER_WR_PRIO_G(x) \ + (((x) >> FW_FILTER_WR_PRIO_S) & FW_FILTER_WR_PRIO_M) +#define FW_FILTER_WR_PRIO_F FW_FILTER_WR_PRIO_V(1U) + +#define FW_FILTER_WR_L2TIX_S 0 +#define FW_FILTER_WR_L2TIX_M 0xfff +#define FW_FILTER_WR_L2TIX_V(x) ((x) << FW_FILTER_WR_L2TIX_S) +#define FW_FILTER_WR_L2TIX_G(x) \ + (((x) >> FW_FILTER_WR_L2TIX_S) & FW_FILTER_WR_L2TIX_M) + +#define FW_FILTER_WR_FRAG_S 7 +#define FW_FILTER_WR_FRAG_M 0x1 +#define FW_FILTER_WR_FRAG_V(x) ((x) << FW_FILTER_WR_FRAG_S) +#define FW_FILTER_WR_FRAG_G(x) \ + (((x) >> FW_FILTER_WR_FRAG_S) & FW_FILTER_WR_FRAG_M) +#define FW_FILTER_WR_FRAG_F FW_FILTER_WR_FRAG_V(1U) + +#define FW_FILTER_WR_FRAGM_S 6 +#define FW_FILTER_WR_FRAGM_M 0x1 +#define FW_FILTER_WR_FRAGM_V(x) ((x) << FW_FILTER_WR_FRAGM_S) +#define FW_FILTER_WR_FRAGM_G(x) \ + (((x) >> FW_FILTER_WR_FRAGM_S) & FW_FILTER_WR_FRAGM_M) +#define FW_FILTER_WR_FRAGM_F FW_FILTER_WR_FRAGM_V(1U) + +#define FW_FILTER_WR_IVLAN_VLD_S 5 +#define FW_FILTER_WR_IVLAN_VLD_M 0x1 +#define FW_FILTER_WR_IVLAN_VLD_V(x) ((x) << FW_FILTER_WR_IVLAN_VLD_S) +#define FW_FILTER_WR_IVLAN_VLD_G(x) \ + (((x) >> FW_FILTER_WR_IVLAN_VLD_S) & FW_FILTER_WR_IVLAN_VLD_M) +#define FW_FILTER_WR_IVLAN_VLD_F FW_FILTER_WR_IVLAN_VLD_V(1U) + +#define FW_FILTER_WR_OVLAN_VLD_S 4 +#define FW_FILTER_WR_OVLAN_VLD_M 0x1 +#define FW_FILTER_WR_OVLAN_VLD_V(x) ((x) << FW_FILTER_WR_OVLAN_VLD_S) +#define FW_FILTER_WR_OVLAN_VLD_G(x) \ + (((x) >> FW_FILTER_WR_OVLAN_VLD_S) & FW_FILTER_WR_OVLAN_VLD_M) +#define FW_FILTER_WR_OVLAN_VLD_F FW_FILTER_WR_OVLAN_VLD_V(1U) + +#define FW_FILTER_WR_IVLAN_VLDM_S 3 +#define FW_FILTER_WR_IVLAN_VLDM_M 0x1 +#define FW_FILTER_WR_IVLAN_VLDM_V(x) ((x) << FW_FILTER_WR_IVLAN_VLDM_S) +#define FW_FILTER_WR_IVLAN_VLDM_G(x) \ + (((x) >> FW_FILTER_WR_IVLAN_VLDM_S) & FW_FILTER_WR_IVLAN_VLDM_M) +#define FW_FILTER_WR_IVLAN_VLDM_F FW_FILTER_WR_IVLAN_VLDM_V(1U) + +#define FW_FILTER_WR_OVLAN_VLDM_S 2 +#define FW_FILTER_WR_OVLAN_VLDM_M 0x1 +#define FW_FILTER_WR_OVLAN_VLDM_V(x) ((x) << FW_FILTER_WR_OVLAN_VLDM_S) +#define FW_FILTER_WR_OVLAN_VLDM_G(x) \ + (((x) >> FW_FILTER_WR_OVLAN_VLDM_S) & 
FW_FILTER_WR_OVLAN_VLDM_M) +#define FW_FILTER_WR_OVLAN_VLDM_F FW_FILTER_WR_OVLAN_VLDM_V(1U) + +#define FW_FILTER_WR_RX_CHAN_S 15 +#define FW_FILTER_WR_RX_CHAN_M 0x1 +#define FW_FILTER_WR_RX_CHAN_V(x) ((x) << FW_FILTER_WR_RX_CHAN_S) +#define FW_FILTER_WR_RX_CHAN_G(x) \ + (((x) >> FW_FILTER_WR_RX_CHAN_S) & FW_FILTER_WR_RX_CHAN_M) +#define FW_FILTER_WR_RX_CHAN_F FW_FILTER_WR_RX_CHAN_V(1U) + +#define FW_FILTER_WR_RX_RPL_IQ_S 0 +#define FW_FILTER_WR_RX_RPL_IQ_M 0x3ff +#define FW_FILTER_WR_RX_RPL_IQ_V(x) ((x) << FW_FILTER_WR_RX_RPL_IQ_S) +#define FW_FILTER_WR_RX_RPL_IQ_G(x) \ + (((x) >> FW_FILTER_WR_RX_RPL_IQ_S) & FW_FILTER_WR_RX_RPL_IQ_M) + +#define FW_FILTER_WR_MACI_S 23 +#define FW_FILTER_WR_MACI_M 0x1ff +#define FW_FILTER_WR_MACI_V(x) ((x) << FW_FILTER_WR_MACI_S) +#define FW_FILTER_WR_MACI_G(x) \ + (((x) >> FW_FILTER_WR_MACI_S) & FW_FILTER_WR_MACI_M) + +#define FW_FILTER_WR_MACIM_S 14 +#define FW_FILTER_WR_MACIM_M 0x1ff +#define FW_FILTER_WR_MACIM_V(x) ((x) << FW_FILTER_WR_MACIM_S) +#define FW_FILTER_WR_MACIM_G(x) \ + (((x) >> FW_FILTER_WR_MACIM_S) & FW_FILTER_WR_MACIM_M) + +#define FW_FILTER_WR_FCOE_S 13 +#define FW_FILTER_WR_FCOE_M 0x1 +#define FW_FILTER_WR_FCOE_V(x) ((x) << FW_FILTER_WR_FCOE_S) +#define FW_FILTER_WR_FCOE_G(x) \ + (((x) >> FW_FILTER_WR_FCOE_S) & FW_FILTER_WR_FCOE_M) +#define FW_FILTER_WR_FCOE_F FW_FILTER_WR_FCOE_V(1U) + +#define FW_FILTER_WR_FCOEM_S 12 +#define FW_FILTER_WR_FCOEM_M 0x1 +#define FW_FILTER_WR_FCOEM_V(x) ((x) << FW_FILTER_WR_FCOEM_S) +#define FW_FILTER_WR_FCOEM_G(x) \ + (((x) >> FW_FILTER_WR_FCOEM_S) & FW_FILTER_WR_FCOEM_M) +#define FW_FILTER_WR_FCOEM_F FW_FILTER_WR_FCOEM_V(1U) + +#define FW_FILTER_WR_PORT_S 9 +#define FW_FILTER_WR_PORT_M 0x7 +#define FW_FILTER_WR_PORT_V(x) ((x) << FW_FILTER_WR_PORT_S) +#define FW_FILTER_WR_PORT_G(x) \ + (((x) >> FW_FILTER_WR_PORT_S) & FW_FILTER_WR_PORT_M) + +#define FW_FILTER_WR_PORTM_S 6 +#define FW_FILTER_WR_PORTM_M 0x7 +#define FW_FILTER_WR_PORTM_V(x) ((x) << FW_FILTER_WR_PORTM_S) +#define FW_FILTER_WR_PORTM_G(x) \ + (((x) >> FW_FILTER_WR_PORTM_S) & FW_FILTER_WR_PORTM_M) + +#define FW_FILTER_WR_MATCHTYPE_S 3 +#define FW_FILTER_WR_MATCHTYPE_M 0x7 +#define FW_FILTER_WR_MATCHTYPE_V(x) ((x) << FW_FILTER_WR_MATCHTYPE_S) +#define FW_FILTER_WR_MATCHTYPE_G(x) \ + (((x) >> FW_FILTER_WR_MATCHTYPE_S) & FW_FILTER_WR_MATCHTYPE_M) + +#define FW_FILTER_WR_MATCHTYPEM_S 0 +#define FW_FILTER_WR_MATCHTYPEM_M 0x7 +#define FW_FILTER_WR_MATCHTYPEM_V(x) ((x) << FW_FILTER_WR_MATCHTYPEM_S) +#define FW_FILTER_WR_MATCHTYPEM_G(x) \ + (((x) >> FW_FILTER_WR_MATCHTYPEM_S) & FW_FILTER_WR_MATCHTYPEM_M) struct fw_ulptx_wr { __be32 op_to_compl; @@ -460,65 +491,65 @@ struct fw_ofld_connection_wr { } tcb; }; -#define S_FW_OFLD_CONNECTION_WR_VERSION 31 -#define M_FW_OFLD_CONNECTION_WR_VERSION 0x1 -#define V_FW_OFLD_CONNECTION_WR_VERSION(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_VERSION) -#define G_FW_OFLD_CONNECTION_WR_VERSION(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_VERSION) & \ - M_FW_OFLD_CONNECTION_WR_VERSION) -#define F_FW_OFLD_CONNECTION_WR_VERSION \ - V_FW_OFLD_CONNECTION_WR_VERSION(1U) - -#define S_FW_OFLD_CONNECTION_WR_CPL 30 -#define M_FW_OFLD_CONNECTION_WR_CPL 0x1 -#define V_FW_OFLD_CONNECTION_WR_CPL(x) ((x) << S_FW_OFLD_CONNECTION_WR_CPL) -#define G_FW_OFLD_CONNECTION_WR_CPL(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_CPL) & M_FW_OFLD_CONNECTION_WR_CPL) -#define F_FW_OFLD_CONNECTION_WR_CPL V_FW_OFLD_CONNECTION_WR_CPL(1U) - -#define S_FW_OFLD_CONNECTION_WR_T_STATE 28 -#define M_FW_OFLD_CONNECTION_WR_T_STATE 0xf -#define V_FW_OFLD_CONNECTION_WR_T_STATE(x) \ 
- ((x) << S_FW_OFLD_CONNECTION_WR_T_STATE) -#define G_FW_OFLD_CONNECTION_WR_T_STATE(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_T_STATE) & \ - M_FW_OFLD_CONNECTION_WR_T_STATE) - -#define S_FW_OFLD_CONNECTION_WR_RCV_SCALE 24 -#define M_FW_OFLD_CONNECTION_WR_RCV_SCALE 0xf -#define V_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_RCV_SCALE) -#define G_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_RCV_SCALE) & \ - M_FW_OFLD_CONNECTION_WR_RCV_SCALE) - -#define S_FW_OFLD_CONNECTION_WR_ASTID 0 -#define M_FW_OFLD_CONNECTION_WR_ASTID 0xffffff -#define V_FW_OFLD_CONNECTION_WR_ASTID(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_ASTID) -#define G_FW_OFLD_CONNECTION_WR_ASTID(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_ASTID) & M_FW_OFLD_CONNECTION_WR_ASTID) - -#define S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 15 -#define M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 0x1 -#define V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) -#define G_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) & \ - M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) -#define F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK \ - V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(1U) - -#define S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 14 -#define M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 0x1 -#define V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) -#define G_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) & \ - M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) -#define F_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL \ - V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(1U) +#define FW_OFLD_CONNECTION_WR_VERSION_S 31 +#define FW_OFLD_CONNECTION_WR_VERSION_M 0x1 +#define FW_OFLD_CONNECTION_WR_VERSION_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_VERSION_S) +#define FW_OFLD_CONNECTION_WR_VERSION_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_VERSION_S) & \ + FW_OFLD_CONNECTION_WR_VERSION_M) +#define FW_OFLD_CONNECTION_WR_VERSION_F \ + FW_OFLD_CONNECTION_WR_VERSION_V(1U) + +#define FW_OFLD_CONNECTION_WR_CPL_S 30 +#define FW_OFLD_CONNECTION_WR_CPL_M 0x1 +#define FW_OFLD_CONNECTION_WR_CPL_V(x) ((x) << FW_OFLD_CONNECTION_WR_CPL_S) +#define FW_OFLD_CONNECTION_WR_CPL_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_CPL_S) & FW_OFLD_CONNECTION_WR_CPL_M) +#define FW_OFLD_CONNECTION_WR_CPL_F FW_OFLD_CONNECTION_WR_CPL_V(1U) + +#define FW_OFLD_CONNECTION_WR_T_STATE_S 28 +#define FW_OFLD_CONNECTION_WR_T_STATE_M 0xf +#define FW_OFLD_CONNECTION_WR_T_STATE_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_T_STATE_S) +#define FW_OFLD_CONNECTION_WR_T_STATE_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_T_STATE_S) & \ + FW_OFLD_CONNECTION_WR_T_STATE_M) + +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_S 24 +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_M 0xf +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_RCV_SCALE_S) +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_RCV_SCALE_S) & \ + FW_OFLD_CONNECTION_WR_RCV_SCALE_M) + +#define FW_OFLD_CONNECTION_WR_ASTID_S 0 +#define FW_OFLD_CONNECTION_WR_ASTID_M 0xffffff +#define FW_OFLD_CONNECTION_WR_ASTID_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_ASTID_S) +#define FW_OFLD_CONNECTION_WR_ASTID_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_ASTID_S) & FW_OFLD_CONNECTION_WR_ASTID_M) + +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S 15 +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_M 0x1 +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S) 
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S) & \ + FW_OFLD_CONNECTION_WR_CPLRXDATAACK_M) +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F \ + FW_OFLD_CONNECTION_WR_CPLRXDATAACK_V(1U) + +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S 14 +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_M 0x1 +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S) +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S) & \ + FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_M) +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_F \ + FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(1U) enum fw_flowc_mnem { FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ @@ -539,33 +570,56 @@ struct fw_flowc_mnemval { struct fw_flowc_wr { __be32 op_to_nparams; -#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0) __be32 flowid_len16; struct fw_flowc_mnemval mnemval[0]; }; +#define FW_FLOWC_WR_NPARAMS_S 0 +#define FW_FLOWC_WR_NPARAMS_V(x) ((x) << FW_FLOWC_WR_NPARAMS_S) + struct fw_ofld_tx_data_wr { __be32 op_to_immdlen; __be32 flowid_len16; __be32 plen; __be32 tunnel_to_proxy; -#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19) -#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18) -#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17) -#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16) -#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15) -#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14) -#define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10) -#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6) }; +#define FW_OFLD_TX_DATA_WR_TUNNEL_S 19 +#define FW_OFLD_TX_DATA_WR_TUNNEL_V(x) ((x) << FW_OFLD_TX_DATA_WR_TUNNEL_S) + +#define FW_OFLD_TX_DATA_WR_SAVE_S 18 +#define FW_OFLD_TX_DATA_WR_SAVE_V(x) ((x) << FW_OFLD_TX_DATA_WR_SAVE_S) + +#define FW_OFLD_TX_DATA_WR_FLUSH_S 17 +#define FW_OFLD_TX_DATA_WR_FLUSH_V(x) ((x) << FW_OFLD_TX_DATA_WR_FLUSH_S) +#define FW_OFLD_TX_DATA_WR_FLUSH_F FW_OFLD_TX_DATA_WR_FLUSH_V(1U) + +#define FW_OFLD_TX_DATA_WR_URGENT_S 16 +#define FW_OFLD_TX_DATA_WR_URGENT_V(x) ((x) << FW_OFLD_TX_DATA_WR_URGENT_S) + +#define FW_OFLD_TX_DATA_WR_MORE_S 15 +#define FW_OFLD_TX_DATA_WR_MORE_V(x) ((x) << FW_OFLD_TX_DATA_WR_MORE_S) + +#define FW_OFLD_TX_DATA_WR_SHOVE_S 14 +#define FW_OFLD_TX_DATA_WR_SHOVE_V(x) ((x) << FW_OFLD_TX_DATA_WR_SHOVE_S) +#define FW_OFLD_TX_DATA_WR_SHOVE_F FW_OFLD_TX_DATA_WR_SHOVE_V(1U) + +#define FW_OFLD_TX_DATA_WR_ULPMODE_S 10 +#define FW_OFLD_TX_DATA_WR_ULPMODE_V(x) ((x) << FW_OFLD_TX_DATA_WR_ULPMODE_S) + +#define FW_OFLD_TX_DATA_WR_ULPSUBMODE_S 6 +#define FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(x) \ + ((x) << FW_OFLD_TX_DATA_WR_ULPSUBMODE_S) + struct fw_cmd_wr { __be32 op_dma; -#define FW_CMD_WR_DMA (1U << 17) __be32 len16_pkd; __be64 cookie_daddr; }; +#define FW_CMD_WR_DMA_S 17 +#define FW_CMD_WR_DMA_V(x) ((x) << FW_CMD_WR_DMA_S) + struct fw_eth_tx_pkt_vm_wr { __be32 op_immdlen; __be32 equiq_to_len16; @@ -641,18 +695,39 @@ struct fw_cmd_hdr { __be32 lo; }; -#define FW_CMD_OP(x) ((x) << 24) -#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff) -#define FW_CMD_REQUEST (1U << 23) -#define FW_CMD_REQUEST_GET(x) (((x) >> 23) & 0x1) -#define FW_CMD_READ (1U << 22) -#define FW_CMD_WRITE (1U << 21) -#define FW_CMD_EXEC (1U << 20) -#define FW_CMD_RAMASK(x) ((x) << 20) -#define FW_CMD_RETVAL(x) ((x) << 8) -#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff) -#define FW_CMD_LEN16(x) ((x) << 0) -#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) +#define FW_CMD_OP_S 24 +#define FW_CMD_OP_M 0xff +#define FW_CMD_OP_V(x) ((x) 
<< FW_CMD_OP_S) +#define FW_CMD_OP_G(x) (((x) >> FW_CMD_OP_S) & FW_CMD_OP_M) + +#define FW_CMD_REQUEST_S 23 +#define FW_CMD_REQUEST_V(x) ((x) << FW_CMD_REQUEST_S) +#define FW_CMD_REQUEST_F FW_CMD_REQUEST_V(1U) + +#define FW_CMD_READ_S 22 +#define FW_CMD_READ_V(x) ((x) << FW_CMD_READ_S) +#define FW_CMD_READ_F FW_CMD_READ_V(1U) + +#define FW_CMD_WRITE_S 21 +#define FW_CMD_WRITE_V(x) ((x) << FW_CMD_WRITE_S) +#define FW_CMD_WRITE_F FW_CMD_WRITE_V(1U) + +#define FW_CMD_EXEC_S 20 +#define FW_CMD_EXEC_V(x) ((x) << FW_CMD_EXEC_S) +#define FW_CMD_EXEC_F FW_CMD_EXEC_V(1U) + +#define FW_CMD_RAMASK_S 20 +#define FW_CMD_RAMASK_V(x) ((x) << FW_CMD_RAMASK_S) + +#define FW_CMD_RETVAL_S 8 +#define FW_CMD_RETVAL_M 0xff +#define FW_CMD_RETVAL_V(x) ((x) << FW_CMD_RETVAL_S) +#define FW_CMD_RETVAL_G(x) (((x) >> FW_CMD_RETVAL_S) & FW_CMD_RETVAL_M) + +#define FW_CMD_LEN16_S 0 +#define FW_CMD_LEN16_V(x) ((x) << FW_CMD_LEN16_S) + +#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) enum fw_ldst_addrspc { FW_LDST_ADDRSPC_FIRMWARE = 0x0001, @@ -685,7 +760,8 @@ enum fw_ldst_func_mod_index { struct fw_ldst_cmd { __be32 op_to_addrspace; -#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0) +#define FW_LDST_CMD_ADDRSPACE_S 0 +#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S) __be32 cycles_to_len16; union fw_ldst { struct fw_ldst_addrval { @@ -741,15 +817,33 @@ struct fw_ldst_cmd { } u; }; -#define FW_LDST_CMD_MSG(x) ((x) << 31) -#define FW_LDST_CMD_PADDR(x) ((x) << 8) -#define FW_LDST_CMD_MMD(x) ((x) << 0) -#define FW_LDST_CMD_FID(x) ((x) << 15) -#define FW_LDST_CMD_CTL(x) ((x) << 0) -#define FW_LDST_CMD_RPLCPF(x) ((x) << 0) -#define FW_LDST_CMD_LC (1U << 4) -#define FW_LDST_CMD_NACCESS(x) ((x) << 0) -#define FW_LDST_CMD_FN(x) ((x) << 0) +#define FW_LDST_CMD_MSG_S 31 +#define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S) + +#define FW_LDST_CMD_PADDR_S 8 +#define FW_LDST_CMD_PADDR_V(x) ((x) << FW_LDST_CMD_PADDR_S) + +#define FW_LDST_CMD_MMD_S 0 +#define FW_LDST_CMD_MMD_V(x) ((x) << FW_LDST_CMD_MMD_S) + +#define FW_LDST_CMD_FID_S 15 +#define FW_LDST_CMD_FID_V(x) ((x) << FW_LDST_CMD_FID_S) + +#define FW_LDST_CMD_CTL_S 0 +#define FW_LDST_CMD_CTL_V(x) ((x) << FW_LDST_CMD_CTL_S) + +#define FW_LDST_CMD_RPLCPF_S 0 +#define FW_LDST_CMD_RPLCPF_V(x) ((x) << FW_LDST_CMD_RPLCPF_S) + +#define FW_LDST_CMD_LC_S 4 +#define FW_LDST_CMD_LC_V(x) ((x) << FW_LDST_CMD_LC_S) +#define FW_LDST_CMD_LC_F FW_LDST_CMD_LC_V(1U) + +#define FW_LDST_CMD_FN_S 0 +#define FW_LDST_CMD_FN_V(x) ((x) << FW_LDST_CMD_FN_S) + +#define FW_LDST_CMD_NACCESS_S 0 +#define FW_LDST_CMD_NACCESS_V(x) ((x) << FW_LDST_CMD_NACCESS_S) struct fw_reset_cmd { __be32 op_to_write; @@ -758,11 +852,12 @@ struct fw_reset_cmd { __be32 halt_pkd; }; -#define FW_RESET_CMD_HALT_SHIFT 31 -#define FW_RESET_CMD_HALT_MASK 0x1 -#define FW_RESET_CMD_HALT(x) ((x) << FW_RESET_CMD_HALT_SHIFT) -#define FW_RESET_CMD_HALT_GET(x) \ - (((x) >> FW_RESET_CMD_HALT_SHIFT) & FW_RESET_CMD_HALT_MASK) +#define FW_RESET_CMD_HALT_S 31 +#define FW_RESET_CMD_HALT_M 0x1 +#define FW_RESET_CMD_HALT_V(x) ((x) << FW_RESET_CMD_HALT_S) +#define FW_RESET_CMD_HALT_G(x) \ + (((x) >> FW_RESET_CMD_HALT_S) & FW_RESET_CMD_HALT_M) +#define FW_RESET_CMD_HALT_F FW_RESET_CMD_HALT_V(1U) enum fw_hellow_cmd { fw_hello_cmd_stage_os = 0x0 @@ -772,22 +867,42 @@ struct fw_hello_cmd { __be32 op_to_write; __be32 retval_len16; __be32 err_to_clearinit; -#define FW_HELLO_CMD_ERR (1U << 31) -#define FW_HELLO_CMD_INIT (1U << 30) -#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29) -#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 
28) -#define FW_HELLO_CMD_MBMASTER_MASK 0xfU -#define FW_HELLO_CMD_MBMASTER_SHIFT 24 -#define FW_HELLO_CMD_MBMASTER(x) ((x) << FW_HELLO_CMD_MBMASTER_SHIFT) -#define FW_HELLO_CMD_MBMASTER_GET(x) \ - (((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK) -#define FW_HELLO_CMD_MBASYNCNOTINT(x) ((x) << 23) -#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20) -#define FW_HELLO_CMD_STAGE(x) ((x) << 17) -#define FW_HELLO_CMD_CLEARINIT (1U << 16) __be32 fwrev; }; +#define FW_HELLO_CMD_ERR_S 31 +#define FW_HELLO_CMD_ERR_V(x) ((x) << FW_HELLO_CMD_ERR_S) +#define FW_HELLO_CMD_ERR_F FW_HELLO_CMD_ERR_V(1U) + +#define FW_HELLO_CMD_INIT_S 30 +#define FW_HELLO_CMD_INIT_V(x) ((x) << FW_HELLO_CMD_INIT_S) +#define FW_HELLO_CMD_INIT_F FW_HELLO_CMD_INIT_V(1U) + +#define FW_HELLO_CMD_MASTERDIS_S 29 +#define FW_HELLO_CMD_MASTERDIS_V(x) ((x) << FW_HELLO_CMD_MASTERDIS_S) + +#define FW_HELLO_CMD_MASTERFORCE_S 28 +#define FW_HELLO_CMD_MASTERFORCE_V(x) ((x) << FW_HELLO_CMD_MASTERFORCE_S) + +#define FW_HELLO_CMD_MBMASTER_S 24 +#define FW_HELLO_CMD_MBMASTER_M 0xfU +#define FW_HELLO_CMD_MBMASTER_V(x) ((x) << FW_HELLO_CMD_MBMASTER_S) +#define FW_HELLO_CMD_MBMASTER_G(x) \ + (((x) >> FW_HELLO_CMD_MBMASTER_S) & FW_HELLO_CMD_MBMASTER_M) + +#define FW_HELLO_CMD_MBASYNCNOTINT_S 23 +#define FW_HELLO_CMD_MBASYNCNOTINT_V(x) ((x) << FW_HELLO_CMD_MBASYNCNOTINT_S) + +#define FW_HELLO_CMD_MBASYNCNOT_S 20 +#define FW_HELLO_CMD_MBASYNCNOT_V(x) ((x) << FW_HELLO_CMD_MBASYNCNOT_S) + +#define FW_HELLO_CMD_STAGE_S 17 +#define FW_HELLO_CMD_STAGE_V(x) ((x) << FW_HELLO_CMD_STAGE_S) + +#define FW_HELLO_CMD_CLEARINIT_S 16 +#define FW_HELLO_CMD_CLEARINIT_V(x) ((x) << FW_HELLO_CMD_CLEARINIT_S) +#define FW_HELLO_CMD_CLEARINIT_F FW_HELLO_CMD_CLEARINIT_V(1U) + struct fw_bye_cmd { __be32 op_to_write; __be32 retval_len16; @@ -898,9 +1013,17 @@ struct fw_caps_config_cmd { __be32 finicsum; }; -#define FW_CAPS_CONFIG_CMD_CFVALID (1U << 27) -#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) ((x) << 24) -#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) ((x) << 16) +#define FW_CAPS_CONFIG_CMD_CFVALID_S 27 +#define FW_CAPS_CONFIG_CMD_CFVALID_V(x) ((x) << FW_CAPS_CONFIG_CMD_CFVALID_S) +#define FW_CAPS_CONFIG_CMD_CFVALID_F FW_CAPS_CONFIG_CMD_CFVALID_V(1U) + +#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF_S 24 +#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(x) \ + ((x) << FW_CAPS_CONFIG_CMD_MEMTYPE_CF_S) + +#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_S 16 +#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(x) \ + ((x) << FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_S) /* * params command mnemonics @@ -996,20 +1119,29 @@ enum fw_params_param_dmaq { FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13, }; -#define FW_PARAMS_MNEM(x) ((x) << 24) -#define FW_PARAMS_PARAM_X(x) ((x) << 16) -#define FW_PARAMS_PARAM_Y_SHIFT 8 -#define FW_PARAMS_PARAM_Y_MASK 0xffU -#define FW_PARAMS_PARAM_Y(x) ((x) << FW_PARAMS_PARAM_Y_SHIFT) -#define FW_PARAMS_PARAM_Y_GET(x) (((x) >> FW_PARAMS_PARAM_Y_SHIFT) &\ - FW_PARAMS_PARAM_Y_MASK) -#define FW_PARAMS_PARAM_Z_SHIFT 0 -#define FW_PARAMS_PARAM_Z_MASK 0xffu -#define FW_PARAMS_PARAM_Z(x) ((x) << FW_PARAMS_PARAM_Z_SHIFT) -#define FW_PARAMS_PARAM_Z_GET(x) (((x) >> FW_PARAMS_PARAM_Z_SHIFT) &\ - FW_PARAMS_PARAM_Z_MASK) -#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0) -#define FW_PARAMS_PARAM_YZ(x) ((x) << 0) +#define FW_PARAMS_MNEM_S 24 +#define FW_PARAMS_MNEM_V(x) ((x) << FW_PARAMS_MNEM_S) + +#define FW_PARAMS_PARAM_X_S 16 +#define FW_PARAMS_PARAM_X_V(x) ((x) << FW_PARAMS_PARAM_X_S) + +#define FW_PARAMS_PARAM_Y_S 8 +#define FW_PARAMS_PARAM_Y_M 0xffU +#define FW_PARAMS_PARAM_Y_V(x) ((x) << 
FW_PARAMS_PARAM_Y_S) +#define FW_PARAMS_PARAM_Y_G(x) (((x) >> FW_PARAMS_PARAM_Y_S) &\ + FW_PARAMS_PARAM_Y_M) + +#define FW_PARAMS_PARAM_Z_S 0 +#define FW_PARAMS_PARAM_Z_M 0xffu +#define FW_PARAMS_PARAM_Z_V(x) ((x) << FW_PARAMS_PARAM_Z_S) +#define FW_PARAMS_PARAM_Z_G(x) (((x) >> FW_PARAMS_PARAM_Z_S) &\ + FW_PARAMS_PARAM_Z_M) + +#define FW_PARAMS_PARAM_XYZ_S 0 +#define FW_PARAMS_PARAM_XYZ_V(x) ((x) << FW_PARAMS_PARAM_XYZ_S) + +#define FW_PARAMS_PARAM_YZ_S 0 +#define FW_PARAMS_PARAM_YZ_V(x) ((x) << FW_PARAMS_PARAM_YZ_S) struct fw_params_cmd { __be32 op_to_vfn; @@ -1020,8 +1152,11 @@ struct fw_params_cmd { } param[7]; }; -#define FW_PARAMS_CMD_PFN(x) ((x) << 8) -#define FW_PARAMS_CMD_VFN(x) ((x) << 0) +#define FW_PARAMS_CMD_PFN_S 8 +#define FW_PARAMS_CMD_PFN_V(x) ((x) << FW_PARAMS_CMD_PFN_S) + +#define FW_PARAMS_CMD_VFN_S 0 +#define FW_PARAMS_CMD_VFN_V(x) ((x) << FW_PARAMS_CMD_VFN_S) struct fw_pfvf_cmd { __be32 op_to_vfn; @@ -1035,46 +1170,82 @@ struct fw_pfvf_cmd { __be32 r4; }; -#define FW_PFVF_CMD_PFN(x) ((x) << 8) -#define FW_PFVF_CMD_VFN(x) ((x) << 0) - -#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20) -#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff) - -#define FW_PFVF_CMD_NIQ(x) ((x) << 0) -#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_PFVF_CMD_TYPE (1 << 31) -#define FW_PFVF_CMD_TYPE_GET(x) (((x) >> 31) & 0x1) - -#define FW_PFVF_CMD_CMASK(x) ((x) << 24) -#define FW_PFVF_CMD_CMASK_MASK 0xf -#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & FW_PFVF_CMD_CMASK_MASK) - -#define FW_PFVF_CMD_PMASK(x) ((x) << 20) -#define FW_PFVF_CMD_PMASK_MASK 0xf -#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & FW_PFVF_CMD_PMASK_MASK) - -#define FW_PFVF_CMD_NEQ(x) ((x) << 0) -#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_PFVF_CMD_TC(x) ((x) << 24) -#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff) - -#define FW_PFVF_CMD_NVI(x) ((x) << 16) -#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff) - -#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0) -#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff) - -#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24) -#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff) - -#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16) -#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff) - -#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0) -#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff) +#define FW_PFVF_CMD_PFN_S 8 +#define FW_PFVF_CMD_PFN_V(x) ((x) << FW_PFVF_CMD_PFN_S) + +#define FW_PFVF_CMD_VFN_S 0 +#define FW_PFVF_CMD_VFN_V(x) ((x) << FW_PFVF_CMD_VFN_S) + +#define FW_PFVF_CMD_NIQFLINT_S 20 +#define FW_PFVF_CMD_NIQFLINT_M 0xfff +#define FW_PFVF_CMD_NIQFLINT_V(x) ((x) << FW_PFVF_CMD_NIQFLINT_S) +#define FW_PFVF_CMD_NIQFLINT_G(x) \ + (((x) >> FW_PFVF_CMD_NIQFLINT_S) & FW_PFVF_CMD_NIQFLINT_M) + +#define FW_PFVF_CMD_NIQ_S 0 +#define FW_PFVF_CMD_NIQ_M 0xfffff +#define FW_PFVF_CMD_NIQ_V(x) ((x) << FW_PFVF_CMD_NIQ_S) +#define FW_PFVF_CMD_NIQ_G(x) \ + (((x) >> FW_PFVF_CMD_NIQ_S) & FW_PFVF_CMD_NIQ_M) + +#define FW_PFVF_CMD_TYPE_S 31 +#define FW_PFVF_CMD_TYPE_M 0x1 +#define FW_PFVF_CMD_TYPE_V(x) ((x) << FW_PFVF_CMD_TYPE_S) +#define FW_PFVF_CMD_TYPE_G(x) \ + (((x) >> FW_PFVF_CMD_TYPE_S) & FW_PFVF_CMD_TYPE_M) +#define FW_PFVF_CMD_TYPE_F FW_PFVF_CMD_TYPE_V(1U) + +#define FW_PFVF_CMD_CMASK_S 24 +#define FW_PFVF_CMD_CMASK_M 0xf +#define FW_PFVF_CMD_CMASK_V(x) ((x) << FW_PFVF_CMD_CMASK_S) +#define FW_PFVF_CMD_CMASK_G(x) \ + (((x) >> FW_PFVF_CMD_CMASK_S) & FW_PFVF_CMD_CMASK_M) + +#define FW_PFVF_CMD_PMASK_S 20 +#define FW_PFVF_CMD_PMASK_M 0xf +#define 
FW_PFVF_CMD_PMASK_V(x) ((x) << FW_PFVF_CMD_PMASK_S) +#define FW_PFVF_CMD_PMASK_G(x) \ + (((x) >> FW_PFVF_CMD_PMASK_S) & FW_PFVF_CMD_PMASK_M) + +#define FW_PFVF_CMD_NEQ_S 0 +#define FW_PFVF_CMD_NEQ_M 0xfffff +#define FW_PFVF_CMD_NEQ_V(x) ((x) << FW_PFVF_CMD_NEQ_S) +#define FW_PFVF_CMD_NEQ_G(x) \ + (((x) >> FW_PFVF_CMD_NEQ_S) & FW_PFVF_CMD_NEQ_M) + +#define FW_PFVF_CMD_TC_S 24 +#define FW_PFVF_CMD_TC_M 0xff +#define FW_PFVF_CMD_TC_V(x) ((x) << FW_PFVF_CMD_TC_S) +#define FW_PFVF_CMD_TC_G(x) (((x) >> FW_PFVF_CMD_TC_S) & FW_PFVF_CMD_TC_M) + +#define FW_PFVF_CMD_NVI_S 16 +#define FW_PFVF_CMD_NVI_M 0xff +#define FW_PFVF_CMD_NVI_V(x) ((x) << FW_PFVF_CMD_NVI_S) +#define FW_PFVF_CMD_NVI_G(x) (((x) >> FW_PFVF_CMD_NVI_S) & FW_PFVF_CMD_NVI_M) + +#define FW_PFVF_CMD_NEXACTF_S 0 +#define FW_PFVF_CMD_NEXACTF_M 0xffff +#define FW_PFVF_CMD_NEXACTF_V(x) ((x) << FW_PFVF_CMD_NEXACTF_S) +#define FW_PFVF_CMD_NEXACTF_G(x) \ + (((x) >> FW_PFVF_CMD_NEXACTF_S) & FW_PFVF_CMD_NEXACTF_M) + +#define FW_PFVF_CMD_R_CAPS_S 24 +#define FW_PFVF_CMD_R_CAPS_M 0xff +#define FW_PFVF_CMD_R_CAPS_V(x) ((x) << FW_PFVF_CMD_R_CAPS_S) +#define FW_PFVF_CMD_R_CAPS_G(x) \ + (((x) >> FW_PFVF_CMD_R_CAPS_S) & FW_PFVF_CMD_R_CAPS_M) + +#define FW_PFVF_CMD_WX_CAPS_S 16 +#define FW_PFVF_CMD_WX_CAPS_M 0xff +#define FW_PFVF_CMD_WX_CAPS_V(x) ((x) << FW_PFVF_CMD_WX_CAPS_S) +#define FW_PFVF_CMD_WX_CAPS_G(x) \ + (((x) >> FW_PFVF_CMD_WX_CAPS_S) & FW_PFVF_CMD_WX_CAPS_M) + +#define FW_PFVF_CMD_NETHCTRL_S 0 +#define FW_PFVF_CMD_NETHCTRL_M 0xffff +#define FW_PFVF_CMD_NETHCTRL_V(x) ((x) << FW_PFVF_CMD_NETHCTRL_S) +#define FW_PFVF_CMD_NETHCTRL_G(x) \ + (((x) >> FW_PFVF_CMD_NETHCTRL_S) & FW_PFVF_CMD_NETHCTRL_M) enum fw_iq_type { FW_IQ_TYPE_FL_INT_CAP, @@ -1102,85 +1273,239 @@ struct fw_iq_cmd { __be64 fl1addr; }; -#define FW_IQ_CMD_PFN(x) ((x) << 8) -#define FW_IQ_CMD_VFN(x) ((x) << 0) - -#define FW_IQ_CMD_ALLOC (1U << 31) -#define FW_IQ_CMD_FREE (1U << 30) -#define FW_IQ_CMD_MODIFY (1U << 29) -#define FW_IQ_CMD_IQSTART(x) ((x) << 28) -#define FW_IQ_CMD_IQSTOP(x) ((x) << 27) - -#define FW_IQ_CMD_TYPE(x) ((x) << 29) -#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28) -#define FW_IQ_CMD_VIID(x) ((x) << 16) -#define FW_IQ_CMD_IQANDST(x) ((x) << 15) -#define FW_IQ_CMD_IQANUS(x) ((x) << 14) -#define FW_IQ_CMD_IQANUD(x) ((x) << 12) -#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0) - -#define FW_IQ_CMD_IQDROPRSS (1U << 15) -#define FW_IQ_CMD_IQGTSMODE (1U << 14) -#define FW_IQ_CMD_IQPCIECH(x) ((x) << 12) -#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11) -#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6) -#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4) -#define FW_IQ_CMD_IQO (1U << 3) -#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2) -#define FW_IQ_CMD_IQESIZE(x) ((x) << 0) - -#define FW_IQ_CMD_IQNS(x) ((x) << 31) -#define FW_IQ_CMD_IQRO(x) ((x) << 30) -#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28) -#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27) -#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26) -#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20) -#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15) -#define FW_IQ_CMD_FL0DBP(x) ((x) << 14) -#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13) -#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12) -#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11) -#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10) -#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9) -#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8) -#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7) -#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6) -#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4) -#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3) -#define FW_IQ_CMD_FL0PADEN(x) ((x) << 2) 
-#define FW_IQ_CMD_FL0PACKEN(x) ((x) << 1) -#define FW_IQ_CMD_FL0CONGEN (1U << 0) - -#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15) -#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10) -#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7) -#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4) -#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3) -#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0) - -#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20) -#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15) -#define FW_IQ_CMD_FL1DBP(x) ((x) << 14) -#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13) -#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12) -#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11) -#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10) -#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9) -#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8) -#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7) -#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6) -#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4) -#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3) -#define FW_IQ_CMD_FL1PADEN (1U << 2) -#define FW_IQ_CMD_FL1PACKEN (1U << 1) -#define FW_IQ_CMD_FL1CONGEN (1U << 0) - -#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15) -#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10) -#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7) -#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4) -#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3) -#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0) +#define FW_IQ_CMD_PFN_S 8 +#define FW_IQ_CMD_PFN_V(x) ((x) << FW_IQ_CMD_PFN_S) + +#define FW_IQ_CMD_VFN_S 0 +#define FW_IQ_CMD_VFN_V(x) ((x) << FW_IQ_CMD_VFN_S) + +#define FW_IQ_CMD_ALLOC_S 31 +#define FW_IQ_CMD_ALLOC_V(x) ((x) << FW_IQ_CMD_ALLOC_S) +#define FW_IQ_CMD_ALLOC_F FW_IQ_CMD_ALLOC_V(1U) + +#define FW_IQ_CMD_FREE_S 30 +#define FW_IQ_CMD_FREE_V(x) ((x) << FW_IQ_CMD_FREE_S) +#define FW_IQ_CMD_FREE_F FW_IQ_CMD_FREE_V(1U) + +#define FW_IQ_CMD_MODIFY_S 29 +#define FW_IQ_CMD_MODIFY_V(x) ((x) << FW_IQ_CMD_MODIFY_S) +#define FW_IQ_CMD_MODIFY_F FW_IQ_CMD_MODIFY_V(1U) + +#define FW_IQ_CMD_IQSTART_S 28 +#define FW_IQ_CMD_IQSTART_V(x) ((x) << FW_IQ_CMD_IQSTART_S) +#define FW_IQ_CMD_IQSTART_F FW_IQ_CMD_IQSTART_V(1U) + +#define FW_IQ_CMD_IQSTOP_S 27 +#define FW_IQ_CMD_IQSTOP_V(x) ((x) << FW_IQ_CMD_IQSTOP_S) +#define FW_IQ_CMD_IQSTOP_F FW_IQ_CMD_IQSTOP_V(1U) + +#define FW_IQ_CMD_TYPE_S 29 +#define FW_IQ_CMD_TYPE_V(x) ((x) << FW_IQ_CMD_TYPE_S) + +#define FW_IQ_CMD_IQASYNCH_S 28 +#define FW_IQ_CMD_IQASYNCH_V(x) ((x) << FW_IQ_CMD_IQASYNCH_S) + +#define FW_IQ_CMD_VIID_S 16 +#define FW_IQ_CMD_VIID_V(x) ((x) << FW_IQ_CMD_VIID_S) + +#define FW_IQ_CMD_IQANDST_S 15 +#define FW_IQ_CMD_IQANDST_V(x) ((x) << FW_IQ_CMD_IQANDST_S) + +#define FW_IQ_CMD_IQANUS_S 14 +#define FW_IQ_CMD_IQANUS_V(x) ((x) << FW_IQ_CMD_IQANUS_S) + +#define FW_IQ_CMD_IQANUD_S 12 +#define FW_IQ_CMD_IQANUD_V(x) ((x) << FW_IQ_CMD_IQANUD_S) + +#define FW_IQ_CMD_IQANDSTINDEX_S 0 +#define FW_IQ_CMD_IQANDSTINDEX_V(x) ((x) << FW_IQ_CMD_IQANDSTINDEX_S) + +#define FW_IQ_CMD_IQDROPRSS_S 15 +#define FW_IQ_CMD_IQDROPRSS_V(x) ((x) << FW_IQ_CMD_IQDROPRSS_S) +#define FW_IQ_CMD_IQDROPRSS_F FW_IQ_CMD_IQDROPRSS_V(1U) + +#define FW_IQ_CMD_IQGTSMODE_S 14 +#define FW_IQ_CMD_IQGTSMODE_V(x) ((x) << FW_IQ_CMD_IQGTSMODE_S) +#define FW_IQ_CMD_IQGTSMODE_F FW_IQ_CMD_IQGTSMODE_V(1U) + +#define FW_IQ_CMD_IQPCIECH_S 12 +#define FW_IQ_CMD_IQPCIECH_V(x) ((x) << FW_IQ_CMD_IQPCIECH_S) + +#define FW_IQ_CMD_IQDCAEN_S 11 +#define FW_IQ_CMD_IQDCAEN_V(x) ((x) << FW_IQ_CMD_IQDCAEN_S) + +#define FW_IQ_CMD_IQDCACPU_S 6 +#define FW_IQ_CMD_IQDCACPU_V(x) ((x) << FW_IQ_CMD_IQDCACPU_S) + +#define FW_IQ_CMD_IQINTCNTTHRESH_S 4 +#define FW_IQ_CMD_IQINTCNTTHRESH_V(x) ((x) << 
FW_IQ_CMD_IQINTCNTTHRESH_S) + +#define FW_IQ_CMD_IQO_S 3 +#define FW_IQ_CMD_IQO_V(x) ((x) << FW_IQ_CMD_IQO_S) +#define FW_IQ_CMD_IQO_F FW_IQ_CMD_IQO_V(1U) + +#define FW_IQ_CMD_IQCPRIO_S 2 +#define FW_IQ_CMD_IQCPRIO_V(x) ((x) << FW_IQ_CMD_IQCPRIO_S) + +#define FW_IQ_CMD_IQESIZE_S 0 +#define FW_IQ_CMD_IQESIZE_V(x) ((x) << FW_IQ_CMD_IQESIZE_S) + +#define FW_IQ_CMD_IQNS_S 31 +#define FW_IQ_CMD_IQNS_V(x) ((x) << FW_IQ_CMD_IQNS_S) + +#define FW_IQ_CMD_IQRO_S 30 +#define FW_IQ_CMD_IQRO_V(x) ((x) << FW_IQ_CMD_IQRO_S) + +#define FW_IQ_CMD_IQFLINTIQHSEN_S 28 +#define FW_IQ_CMD_IQFLINTIQHSEN_V(x) ((x) << FW_IQ_CMD_IQFLINTIQHSEN_S) + +#define FW_IQ_CMD_IQFLINTCONGEN_S 27 +#define FW_IQ_CMD_IQFLINTCONGEN_V(x) ((x) << FW_IQ_CMD_IQFLINTCONGEN_S) + +#define FW_IQ_CMD_IQFLINTISCSIC_S 26 +#define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S) + +#define FW_IQ_CMD_FL0CNGCHMAP_S 20 +#define FW_IQ_CMD_FL0CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL0CNGCHMAP_S) + +#define FW_IQ_CMD_FL0CACHELOCK_S 15 +#define FW_IQ_CMD_FL0CACHELOCK_V(x) ((x) << FW_IQ_CMD_FL0CACHELOCK_S) + +#define FW_IQ_CMD_FL0DBP_S 14 +#define FW_IQ_CMD_FL0DBP_V(x) ((x) << FW_IQ_CMD_FL0DBP_S) + +#define FW_IQ_CMD_FL0DATANS_S 13 +#define FW_IQ_CMD_FL0DATANS_V(x) ((x) << FW_IQ_CMD_FL0DATANS_S) + +#define FW_IQ_CMD_FL0DATARO_S 12 +#define FW_IQ_CMD_FL0DATARO_V(x) ((x) << FW_IQ_CMD_FL0DATARO_S) +#define FW_IQ_CMD_FL0DATARO_F FW_IQ_CMD_FL0DATARO_V(1U) + +#define FW_IQ_CMD_FL0CONGCIF_S 11 +#define FW_IQ_CMD_FL0CONGCIF_V(x) ((x) << FW_IQ_CMD_FL0CONGCIF_S) + +#define FW_IQ_CMD_FL0ONCHIP_S 10 +#define FW_IQ_CMD_FL0ONCHIP_V(x) ((x) << FW_IQ_CMD_FL0ONCHIP_S) + +#define FW_IQ_CMD_FL0STATUSPGNS_S 9 +#define FW_IQ_CMD_FL0STATUSPGNS_V(x) ((x) << FW_IQ_CMD_FL0STATUSPGNS_S) + +#define FW_IQ_CMD_FL0STATUSPGRO_S 8 +#define FW_IQ_CMD_FL0STATUSPGRO_V(x) ((x) << FW_IQ_CMD_FL0STATUSPGRO_S) + +#define FW_IQ_CMD_FL0FETCHNS_S 7 +#define FW_IQ_CMD_FL0FETCHNS_V(x) ((x) << FW_IQ_CMD_FL0FETCHNS_S) + +#define FW_IQ_CMD_FL0FETCHRO_S 6 +#define FW_IQ_CMD_FL0FETCHRO_V(x) ((x) << FW_IQ_CMD_FL0FETCHRO_S) +#define FW_IQ_CMD_FL0FETCHRO_F FW_IQ_CMD_FL0FETCHRO_V(1U) + +#define FW_IQ_CMD_FL0HOSTFCMODE_S 4 +#define FW_IQ_CMD_FL0HOSTFCMODE_V(x) ((x) << FW_IQ_CMD_FL0HOSTFCMODE_S) + +#define FW_IQ_CMD_FL0CPRIO_S 3 +#define FW_IQ_CMD_FL0CPRIO_V(x) ((x) << FW_IQ_CMD_FL0CPRIO_S) + +#define FW_IQ_CMD_FL0PADEN_S 2 +#define FW_IQ_CMD_FL0PADEN_V(x) ((x) << FW_IQ_CMD_FL0PADEN_S) +#define FW_IQ_CMD_FL0PADEN_F FW_IQ_CMD_FL0PADEN_V(1U) + +#define FW_IQ_CMD_FL0PACKEN_S 1 +#define FW_IQ_CMD_FL0PACKEN_V(x) ((x) << FW_IQ_CMD_FL0PACKEN_S) +#define FW_IQ_CMD_FL0PACKEN_F FW_IQ_CMD_FL0PACKEN_V(1U) + +#define FW_IQ_CMD_FL0CONGEN_S 0 +#define FW_IQ_CMD_FL0CONGEN_V(x) ((x) << FW_IQ_CMD_FL0CONGEN_S) +#define FW_IQ_CMD_FL0CONGEN_F FW_IQ_CMD_FL0CONGEN_V(1U) + +#define FW_IQ_CMD_FL0DCAEN_S 15 +#define FW_IQ_CMD_FL0DCAEN_V(x) ((x) << FW_IQ_CMD_FL0DCAEN_S) + +#define FW_IQ_CMD_FL0DCACPU_S 10 +#define FW_IQ_CMD_FL0DCACPU_V(x) ((x) << FW_IQ_CMD_FL0DCACPU_S) + +#define FW_IQ_CMD_FL0FBMIN_S 7 +#define FW_IQ_CMD_FL0FBMIN_V(x) ((x) << FW_IQ_CMD_FL0FBMIN_S) + +#define FW_IQ_CMD_FL0FBMAX_S 4 +#define FW_IQ_CMD_FL0FBMAX_V(x) ((x) << FW_IQ_CMD_FL0FBMAX_S) + +#define FW_IQ_CMD_FL0CIDXFTHRESHO_S 3 +#define FW_IQ_CMD_FL0CIDXFTHRESHO_V(x) ((x) << FW_IQ_CMD_FL0CIDXFTHRESHO_S) +#define FW_IQ_CMD_FL0CIDXFTHRESHO_F FW_IQ_CMD_FL0CIDXFTHRESHO_V(1U) + +#define FW_IQ_CMD_FL0CIDXFTHRESH_S 0 +#define FW_IQ_CMD_FL0CIDXFTHRESH_V(x) ((x) << FW_IQ_CMD_FL0CIDXFTHRESH_S) + +#define FW_IQ_CMD_FL1CNGCHMAP_S 20 +#define 
FW_IQ_CMD_FL1CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL1CNGCHMAP_S) + +#define FW_IQ_CMD_FL1CACHELOCK_S 15 +#define FW_IQ_CMD_FL1CACHELOCK_V(x) ((x) << FW_IQ_CMD_FL1CACHELOCK_S) + +#define FW_IQ_CMD_FL1DBP_S 14 +#define FW_IQ_CMD_FL1DBP_V(x) ((x) << FW_IQ_CMD_FL1DBP_S) + +#define FW_IQ_CMD_FL1DATANS_S 13 +#define FW_IQ_CMD_FL1DATANS_V(x) ((x) << FW_IQ_CMD_FL1DATANS_S) + +#define FW_IQ_CMD_FL1DATARO_S 12 +#define FW_IQ_CMD_FL1DATARO_V(x) ((x) << FW_IQ_CMD_FL1DATARO_S) + +#define FW_IQ_CMD_FL1CONGCIF_S 11 +#define FW_IQ_CMD_FL1CONGCIF_V(x) ((x) << FW_IQ_CMD_FL1CONGCIF_S) + +#define FW_IQ_CMD_FL1ONCHIP_S 10 +#define FW_IQ_CMD_FL1ONCHIP_V(x) ((x) << FW_IQ_CMD_FL1ONCHIP_S) + +#define FW_IQ_CMD_FL1STATUSPGNS_S 9 +#define FW_IQ_CMD_FL1STATUSPGNS_V(x) ((x) << FW_IQ_CMD_FL1STATUSPGNS_S) + +#define FW_IQ_CMD_FL1STATUSPGRO_S 8 +#define FW_IQ_CMD_FL1STATUSPGRO_V(x) ((x) << FW_IQ_CMD_FL1STATUSPGRO_S) + +#define FW_IQ_CMD_FL1FETCHNS_S 7 +#define FW_IQ_CMD_FL1FETCHNS_V(x) ((x) << FW_IQ_CMD_FL1FETCHNS_S) + +#define FW_IQ_CMD_FL1FETCHRO_S 6 +#define FW_IQ_CMD_FL1FETCHRO_V(x) ((x) << FW_IQ_CMD_FL1FETCHRO_S) + +#define FW_IQ_CMD_FL1HOSTFCMODE_S 4 +#define FW_IQ_CMD_FL1HOSTFCMODE_V(x) ((x) << FW_IQ_CMD_FL1HOSTFCMODE_S) + +#define FW_IQ_CMD_FL1CPRIO_S 3 +#define FW_IQ_CMD_FL1CPRIO_V(x) ((x) << FW_IQ_CMD_FL1CPRIO_S) + +#define FW_IQ_CMD_FL1PADEN_S 2 +#define FW_IQ_CMD_FL1PADEN_V(x) ((x) << FW_IQ_CMD_FL1PADEN_S) +#define FW_IQ_CMD_FL1PADEN_F FW_IQ_CMD_FL1PADEN_V(1U) + +#define FW_IQ_CMD_FL1PACKEN_S 1 +#define FW_IQ_CMD_FL1PACKEN_V(x) ((x) << FW_IQ_CMD_FL1PACKEN_S) +#define FW_IQ_CMD_FL1PACKEN_F FW_IQ_CMD_FL1PACKEN_V(1U) + +#define FW_IQ_CMD_FL1CONGEN_S 0 +#define FW_IQ_CMD_FL1CONGEN_V(x) ((x) << FW_IQ_CMD_FL1CONGEN_S) +#define FW_IQ_CMD_FL1CONGEN_F FW_IQ_CMD_FL1CONGEN_V(1U) + +#define FW_IQ_CMD_FL1DCAEN_S 15 +#define FW_IQ_CMD_FL1DCAEN_V(x) ((x) << FW_IQ_CMD_FL1DCAEN_S) + +#define FW_IQ_CMD_FL1DCACPU_S 10 +#define FW_IQ_CMD_FL1DCACPU_V(x) ((x) << FW_IQ_CMD_FL1DCACPU_S) + +#define FW_IQ_CMD_FL1FBMIN_S 7 +#define FW_IQ_CMD_FL1FBMIN_V(x) ((x) << FW_IQ_CMD_FL1FBMIN_S) + +#define FW_IQ_CMD_FL1FBMAX_S 4 +#define FW_IQ_CMD_FL1FBMAX_V(x) ((x) << FW_IQ_CMD_FL1FBMAX_S) + +#define FW_IQ_CMD_FL1CIDXFTHRESHO_S 3 +#define FW_IQ_CMD_FL1CIDXFTHRESHO_V(x) ((x) << FW_IQ_CMD_FL1CIDXFTHRESHO_S) +#define FW_IQ_CMD_FL1CIDXFTHRESHO_F FW_IQ_CMD_FL1CIDXFTHRESHO_V(1U) + +#define FW_IQ_CMD_FL1CIDXFTHRESH_S 0 +#define FW_IQ_CMD_FL1CIDXFTHRESH_V(x) ((x) << FW_IQ_CMD_FL1CIDXFTHRESH_S) struct fw_eq_eth_cmd { __be32 op_to_vfn; @@ -1195,40 +1520,102 @@ struct fw_eq_eth_cmd { __be64 r9; }; -#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8) -#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0) -#define FW_EQ_ETH_CMD_ALLOC (1U << 31) -#define FW_EQ_ETH_CMD_FREE (1U << 30) -#define FW_EQ_ETH_CMD_MODIFY (1U << 29) -#define FW_EQ_ETH_CMD_EQSTART (1U << 28) -#define FW_EQ_ETH_CMD_EQSTOP (1U << 27) - -#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0) -#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) -#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0) -#define FW_EQ_ETH_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26) -#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25) -#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24) -#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23) -#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22) -#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20) -#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19) -#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18) -#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16) -#define FW_EQ_ETH_CMD_IQID(x) ((x) << 0) - -#define 
FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31) -#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26) -#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23) -#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20) -#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19) -#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) -#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) - -#define FW_EQ_ETH_CMD_AUTOEQUEQE (1U << 30) -#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) +#define FW_EQ_ETH_CMD_PFN_S 8 +#define FW_EQ_ETH_CMD_PFN_V(x) ((x) << FW_EQ_ETH_CMD_PFN_S) + +#define FW_EQ_ETH_CMD_VFN_S 0 +#define FW_EQ_ETH_CMD_VFN_V(x) ((x) << FW_EQ_ETH_CMD_VFN_S) + +#define FW_EQ_ETH_CMD_ALLOC_S 31 +#define FW_EQ_ETH_CMD_ALLOC_V(x) ((x) << FW_EQ_ETH_CMD_ALLOC_S) +#define FW_EQ_ETH_CMD_ALLOC_F FW_EQ_ETH_CMD_ALLOC_V(1U) + +#define FW_EQ_ETH_CMD_FREE_S 30 +#define FW_EQ_ETH_CMD_FREE_V(x) ((x) << FW_EQ_ETH_CMD_FREE_S) +#define FW_EQ_ETH_CMD_FREE_F FW_EQ_ETH_CMD_FREE_V(1U) + +#define FW_EQ_ETH_CMD_MODIFY_S 29 +#define FW_EQ_ETH_CMD_MODIFY_V(x) ((x) << FW_EQ_ETH_CMD_MODIFY_S) +#define FW_EQ_ETH_CMD_MODIFY_F FW_EQ_ETH_CMD_MODIFY_V(1U) + +#define FW_EQ_ETH_CMD_EQSTART_S 28 +#define FW_EQ_ETH_CMD_EQSTART_V(x) ((x) << FW_EQ_ETH_CMD_EQSTART_S) +#define FW_EQ_ETH_CMD_EQSTART_F FW_EQ_ETH_CMD_EQSTART_V(1U) + +#define FW_EQ_ETH_CMD_EQSTOP_S 27 +#define FW_EQ_ETH_CMD_EQSTOP_V(x) ((x) << FW_EQ_ETH_CMD_EQSTOP_S) +#define FW_EQ_ETH_CMD_EQSTOP_F FW_EQ_ETH_CMD_EQSTOP_V(1U) + +#define FW_EQ_ETH_CMD_EQID_S 0 +#define FW_EQ_ETH_CMD_EQID_M 0xfffff +#define FW_EQ_ETH_CMD_EQID_V(x) ((x) << FW_EQ_ETH_CMD_EQID_S) +#define FW_EQ_ETH_CMD_EQID_G(x) \ + (((x) >> FW_EQ_ETH_CMD_EQID_S) & FW_EQ_ETH_CMD_EQID_M) + +#define FW_EQ_ETH_CMD_PHYSEQID_S 0 +#define FW_EQ_ETH_CMD_PHYSEQID_M 0xfffff +#define FW_EQ_ETH_CMD_PHYSEQID_V(x) ((x) << FW_EQ_ETH_CMD_PHYSEQID_S) +#define FW_EQ_ETH_CMD_PHYSEQID_G(x) \ + (((x) >> FW_EQ_ETH_CMD_PHYSEQID_S) & FW_EQ_ETH_CMD_PHYSEQID_M) + +#define FW_EQ_ETH_CMD_FETCHSZM_S 26 +#define FW_EQ_ETH_CMD_FETCHSZM_V(x) ((x) << FW_EQ_ETH_CMD_FETCHSZM_S) +#define FW_EQ_ETH_CMD_FETCHSZM_F FW_EQ_ETH_CMD_FETCHSZM_V(1U) + +#define FW_EQ_ETH_CMD_STATUSPGNS_S 25 +#define FW_EQ_ETH_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_ETH_CMD_STATUSPGNS_S) + +#define FW_EQ_ETH_CMD_STATUSPGRO_S 24 +#define FW_EQ_ETH_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_ETH_CMD_STATUSPGRO_S) + +#define FW_EQ_ETH_CMD_FETCHNS_S 23 +#define FW_EQ_ETH_CMD_FETCHNS_V(x) ((x) << FW_EQ_ETH_CMD_FETCHNS_S) + +#define FW_EQ_ETH_CMD_FETCHRO_S 22 +#define FW_EQ_ETH_CMD_FETCHRO_V(x) ((x) << FW_EQ_ETH_CMD_FETCHRO_S) + +#define FW_EQ_ETH_CMD_HOSTFCMODE_S 20 +#define FW_EQ_ETH_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_ETH_CMD_HOSTFCMODE_S) + +#define FW_EQ_ETH_CMD_CPRIO_S 19 +#define FW_EQ_ETH_CMD_CPRIO_V(x) ((x) << FW_EQ_ETH_CMD_CPRIO_S) + +#define FW_EQ_ETH_CMD_ONCHIP_S 18 +#define FW_EQ_ETH_CMD_ONCHIP_V(x) ((x) << FW_EQ_ETH_CMD_ONCHIP_S) + +#define FW_EQ_ETH_CMD_PCIECHN_S 16 +#define FW_EQ_ETH_CMD_PCIECHN_V(x) ((x) << FW_EQ_ETH_CMD_PCIECHN_S) + +#define FW_EQ_ETH_CMD_IQID_S 0 +#define FW_EQ_ETH_CMD_IQID_V(x) ((x) << FW_EQ_ETH_CMD_IQID_S) + +#define FW_EQ_ETH_CMD_DCAEN_S 31 +#define FW_EQ_ETH_CMD_DCAEN_V(x) ((x) << FW_EQ_ETH_CMD_DCAEN_S) + +#define FW_EQ_ETH_CMD_DCACPU_S 26 +#define FW_EQ_ETH_CMD_DCACPU_V(x) ((x) << FW_EQ_ETH_CMD_DCACPU_S) + +#define FW_EQ_ETH_CMD_FBMIN_S 23 +#define FW_EQ_ETH_CMD_FBMIN_V(x) ((x) << FW_EQ_ETH_CMD_FBMIN_S) + +#define FW_EQ_ETH_CMD_FBMAX_S 20 +#define FW_EQ_ETH_CMD_FBMAX_V(x) ((x) << FW_EQ_ETH_CMD_FBMAX_S) + +#define FW_EQ_ETH_CMD_CIDXFTHRESHO_S 19 +#define FW_EQ_ETH_CMD_CIDXFTHRESHO_V(x) ((x) << FW_EQ_ETH_CMD_CIDXFTHRESHO_S) + 
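/*
 * Editorial sketch, not part of this patch: the rename in these hunks is
 * purely mechanical, and every field follows one convention -- _S is the
 * field's bit offset, _M its unshifted mask, _V(x) shifts a value into
 * place, _G(x) extracts it, and _F is the _V(1U) shorthand for single-bit
 * flags.  The EXAMPLE_* names below are hypothetical, chosen only to show
 * the pattern for a made-up field at bits [11:4] and a flag at bit 12.
 */
#define EXAMPLE_FIELD_S		4
#define EXAMPLE_FIELD_M		0xffU
#define EXAMPLE_FIELD_V(x)	((x) << EXAMPLE_FIELD_S)
#define EXAMPLE_FIELD_G(x)	(((x) >> EXAMPLE_FIELD_S) & EXAMPLE_FIELD_M)

#define EXAMPLE_FLAG_S		12
#define EXAMPLE_FLAG_V(x)	((x) << EXAMPLE_FLAG_S)
#define EXAMPLE_FLAG_F		EXAMPLE_FLAG_V(1U)

static inline void example_usage(void)
{
	/* encode: place the field value and set the flag bit */
	u32 word = EXAMPLE_FIELD_V(0x5aU) | EXAMPLE_FLAG_F;	/* 0x15a0 */
	/* decode: shift down and mask off neighbouring fields */
	u32 val = EXAMPLE_FIELD_G(word);			/* 0x5a */

	(void)val;
}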
+#define FW_EQ_ETH_CMD_CIDXFTHRESH_S 16 +#define FW_EQ_ETH_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_ETH_CMD_CIDXFTHRESH_S) + +#define FW_EQ_ETH_CMD_EQSIZE_S 0 +#define FW_EQ_ETH_CMD_EQSIZE_V(x) ((x) << FW_EQ_ETH_CMD_EQSIZE_S) + +#define FW_EQ_ETH_CMD_AUTOEQUEQE_S 30 +#define FW_EQ_ETH_CMD_AUTOEQUEQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUEQE_S) +#define FW_EQ_ETH_CMD_AUTOEQUEQE_F FW_EQ_ETH_CMD_AUTOEQUEQE_V(1U) + +#define FW_EQ_ETH_CMD_VIID_S 16 +#define FW_EQ_ETH_CMD_VIID_V(x) ((x) << FW_EQ_ETH_CMD_VIID_S) struct fw_eq_ctrl_cmd { __be32 op_to_vfn; @@ -1240,38 +1627,102 @@ struct fw_eq_ctrl_cmd { __be64 eqaddr; }; -#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8) -#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0) - -#define FW_EQ_CTRL_CMD_ALLOC (1U << 31) -#define FW_EQ_CTRL_CMD_FREE (1U << 30) -#define FW_EQ_CTRL_CMD_MODIFY (1U << 29) -#define FW_EQ_CTRL_CMD_EQSTART (1U << 28) -#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27) - -#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20) -#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0) -#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) -#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26) -#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25) -#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24) -#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23) -#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22) -#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20) -#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19) -#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18) -#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16) -#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0) - -#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31) -#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26) -#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23) -#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20) -#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19) -#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16) -#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0) +#define FW_EQ_CTRL_CMD_PFN_S 8 +#define FW_EQ_CTRL_CMD_PFN_V(x) ((x) << FW_EQ_CTRL_CMD_PFN_S) + +#define FW_EQ_CTRL_CMD_VFN_S 0 +#define FW_EQ_CTRL_CMD_VFN_V(x) ((x) << FW_EQ_CTRL_CMD_VFN_S) + +#define FW_EQ_CTRL_CMD_ALLOC_S 31 +#define FW_EQ_CTRL_CMD_ALLOC_V(x) ((x) << FW_EQ_CTRL_CMD_ALLOC_S) +#define FW_EQ_CTRL_CMD_ALLOC_F FW_EQ_CTRL_CMD_ALLOC_V(1U) + +#define FW_EQ_CTRL_CMD_FREE_S 30 +#define FW_EQ_CTRL_CMD_FREE_V(x) ((x) << FW_EQ_CTRL_CMD_FREE_S) +#define FW_EQ_CTRL_CMD_FREE_F FW_EQ_CTRL_CMD_FREE_V(1U) + +#define FW_EQ_CTRL_CMD_MODIFY_S 29 +#define FW_EQ_CTRL_CMD_MODIFY_V(x) ((x) << FW_EQ_CTRL_CMD_MODIFY_S) +#define FW_EQ_CTRL_CMD_MODIFY_F FW_EQ_CTRL_CMD_MODIFY_V(1U) + +#define FW_EQ_CTRL_CMD_EQSTART_S 28 +#define FW_EQ_CTRL_CMD_EQSTART_V(x) ((x) << FW_EQ_CTRL_CMD_EQSTART_S) +#define FW_EQ_CTRL_CMD_EQSTART_F FW_EQ_CTRL_CMD_EQSTART_V(1U) + +#define FW_EQ_CTRL_CMD_EQSTOP_S 27 +#define FW_EQ_CTRL_CMD_EQSTOP_V(x) ((x) << FW_EQ_CTRL_CMD_EQSTOP_S) +#define FW_EQ_CTRL_CMD_EQSTOP_F FW_EQ_CTRL_CMD_EQSTOP_V(1U) + +#define FW_EQ_CTRL_CMD_CMPLIQID_S 20 +#define FW_EQ_CTRL_CMD_CMPLIQID_V(x) ((x) << FW_EQ_CTRL_CMD_CMPLIQID_S) + +#define FW_EQ_CTRL_CMD_EQID_S 0 +#define FW_EQ_CTRL_CMD_EQID_M 0xfffff +#define FW_EQ_CTRL_CMD_EQID_V(x) ((x) << FW_EQ_CTRL_CMD_EQID_S) +#define FW_EQ_CTRL_CMD_EQID_G(x) \ + (((x) >> FW_EQ_CTRL_CMD_EQID_S) & FW_EQ_CTRL_CMD_EQID_M) + +#define FW_EQ_CTRL_CMD_PHYSEQID_S 0 +#define FW_EQ_CTRL_CMD_PHYSEQID_M 0xfffff +#define FW_EQ_CTRL_CMD_PHYSEQID_G(x) \ + (((x) >> FW_EQ_CTRL_CMD_PHYSEQID_S) & FW_EQ_CTRL_CMD_PHYSEQID_M) + +#define FW_EQ_CTRL_CMD_FETCHSZM_S 26 +#define FW_EQ_CTRL_CMD_FETCHSZM_V(x) ((x) << 
FW_EQ_CTRL_CMD_FETCHSZM_S) +#define FW_EQ_CTRL_CMD_FETCHSZM_F FW_EQ_CTRL_CMD_FETCHSZM_V(1U) + +#define FW_EQ_CTRL_CMD_STATUSPGNS_S 25 +#define FW_EQ_CTRL_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_CTRL_CMD_STATUSPGNS_S) +#define FW_EQ_CTRL_CMD_STATUSPGNS_F FW_EQ_CTRL_CMD_STATUSPGNS_V(1U) + +#define FW_EQ_CTRL_CMD_STATUSPGRO_S 24 +#define FW_EQ_CTRL_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_CTRL_CMD_STATUSPGRO_S) +#define FW_EQ_CTRL_CMD_STATUSPGRO_F FW_EQ_CTRL_CMD_STATUSPGRO_V(1U) + +#define FW_EQ_CTRL_CMD_FETCHNS_S 23 +#define FW_EQ_CTRL_CMD_FETCHNS_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHNS_S) +#define FW_EQ_CTRL_CMD_FETCHNS_F FW_EQ_CTRL_CMD_FETCHNS_V(1U) + +#define FW_EQ_CTRL_CMD_FETCHRO_S 22 +#define FW_EQ_CTRL_CMD_FETCHRO_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHRO_S) +#define FW_EQ_CTRL_CMD_FETCHRO_F FW_EQ_CTRL_CMD_FETCHRO_V(1U) + +#define FW_EQ_CTRL_CMD_HOSTFCMODE_S 20 +#define FW_EQ_CTRL_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_CTRL_CMD_HOSTFCMODE_S) + +#define FW_EQ_CTRL_CMD_CPRIO_S 19 +#define FW_EQ_CTRL_CMD_CPRIO_V(x) ((x) << FW_EQ_CTRL_CMD_CPRIO_S) + +#define FW_EQ_CTRL_CMD_ONCHIP_S 18 +#define FW_EQ_CTRL_CMD_ONCHIP_V(x) ((x) << FW_EQ_CTRL_CMD_ONCHIP_S) + +#define FW_EQ_CTRL_CMD_PCIECHN_S 16 +#define FW_EQ_CTRL_CMD_PCIECHN_V(x) ((x) << FW_EQ_CTRL_CMD_PCIECHN_S) + +#define FW_EQ_CTRL_CMD_IQID_S 0 +#define FW_EQ_CTRL_CMD_IQID_V(x) ((x) << FW_EQ_CTRL_CMD_IQID_S) + +#define FW_EQ_CTRL_CMD_DCAEN_S 31 +#define FW_EQ_CTRL_CMD_DCAEN_V(x) ((x) << FW_EQ_CTRL_CMD_DCAEN_S) + +#define FW_EQ_CTRL_CMD_DCACPU_S 26 +#define FW_EQ_CTRL_CMD_DCACPU_V(x) ((x) << FW_EQ_CTRL_CMD_DCACPU_S) + +#define FW_EQ_CTRL_CMD_FBMIN_S 23 +#define FW_EQ_CTRL_CMD_FBMIN_V(x) ((x) << FW_EQ_CTRL_CMD_FBMIN_S) + +#define FW_EQ_CTRL_CMD_FBMAX_S 20 +#define FW_EQ_CTRL_CMD_FBMAX_V(x) ((x) << FW_EQ_CTRL_CMD_FBMAX_S) + +#define FW_EQ_CTRL_CMD_CIDXFTHRESHO_S 19 +#define FW_EQ_CTRL_CMD_CIDXFTHRESHO_V(x) \ + ((x) << FW_EQ_CTRL_CMD_CIDXFTHRESHO_S) + +#define FW_EQ_CTRL_CMD_CIDXFTHRESH_S 16 +#define FW_EQ_CTRL_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_CTRL_CMD_CIDXFTHRESH_S) + +#define FW_EQ_CTRL_CMD_EQSIZE_S 0 +#define FW_EQ_CTRL_CMD_EQSIZE_V(x) ((x) << FW_EQ_CTRL_CMD_EQSIZE_S) struct fw_eq_ofld_cmd { __be32 op_to_vfn; @@ -1283,45 +1734,112 @@ struct fw_eq_ofld_cmd { __be64 eqaddr; }; -#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8) -#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0) - -#define FW_EQ_OFLD_CMD_ALLOC (1U << 31) -#define FW_EQ_OFLD_CMD_FREE (1U << 30) -#define FW_EQ_OFLD_CMD_MODIFY (1U << 29) -#define FW_EQ_OFLD_CMD_EQSTART (1U << 28) -#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27) - -#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0) -#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) -#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26) -#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25) -#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24) -#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23) -#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22) -#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20) -#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19) -#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18) -#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16) -#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0) - -#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31) -#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26) -#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23) -#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20) -#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19) -#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16) -#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0) +#define FW_EQ_OFLD_CMD_PFN_S 8 +#define 
FW_EQ_OFLD_CMD_PFN_V(x) ((x) << FW_EQ_OFLD_CMD_PFN_S) + +#define FW_EQ_OFLD_CMD_VFN_S 0 +#define FW_EQ_OFLD_CMD_VFN_V(x) ((x) << FW_EQ_OFLD_CMD_VFN_S) + +#define FW_EQ_OFLD_CMD_ALLOC_S 31 +#define FW_EQ_OFLD_CMD_ALLOC_V(x) ((x) << FW_EQ_OFLD_CMD_ALLOC_S) +#define FW_EQ_OFLD_CMD_ALLOC_F FW_EQ_OFLD_CMD_ALLOC_V(1U) + +#define FW_EQ_OFLD_CMD_FREE_S 30 +#define FW_EQ_OFLD_CMD_FREE_V(x) ((x) << FW_EQ_OFLD_CMD_FREE_S) +#define FW_EQ_OFLD_CMD_FREE_F FW_EQ_OFLD_CMD_FREE_V(1U) + +#define FW_EQ_OFLD_CMD_MODIFY_S 29 +#define FW_EQ_OFLD_CMD_MODIFY_V(x) ((x) << FW_EQ_OFLD_CMD_MODIFY_S) +#define FW_EQ_OFLD_CMD_MODIFY_F FW_EQ_OFLD_CMD_MODIFY_V(1U) + +#define FW_EQ_OFLD_CMD_EQSTART_S 28 +#define FW_EQ_OFLD_CMD_EQSTART_V(x) ((x) << FW_EQ_OFLD_CMD_EQSTART_S) +#define FW_EQ_OFLD_CMD_EQSTART_F FW_EQ_OFLD_CMD_EQSTART_V(1U) + +#define FW_EQ_OFLD_CMD_EQSTOP_S 27 +#define FW_EQ_OFLD_CMD_EQSTOP_V(x) ((x) << FW_EQ_OFLD_CMD_EQSTOP_S) +#define FW_EQ_OFLD_CMD_EQSTOP_F FW_EQ_OFLD_CMD_EQSTOP_V(1U) + +#define FW_EQ_OFLD_CMD_EQID_S 0 +#define FW_EQ_OFLD_CMD_EQID_M 0xfffff +#define FW_EQ_OFLD_CMD_EQID_V(x) ((x) << FW_EQ_OFLD_CMD_EQID_S) +#define FW_EQ_OFLD_CMD_EQID_G(x) \ + (((x) >> FW_EQ_OFLD_CMD_EQID_S) & FW_EQ_OFLD_CMD_EQID_M) + +#define FW_EQ_OFLD_CMD_PHYSEQID_S 0 +#define FW_EQ_OFLD_CMD_PHYSEQID_M 0xfffff +#define FW_EQ_OFLD_CMD_PHYSEQID_G(x) \ + (((x) >> FW_EQ_OFLD_CMD_PHYSEQID_S) & FW_EQ_OFLD_CMD_PHYSEQID_M) + +#define FW_EQ_OFLD_CMD_FETCHSZM_S 26 +#define FW_EQ_OFLD_CMD_FETCHSZM_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHSZM_S) + +#define FW_EQ_OFLD_CMD_STATUSPGNS_S 25 +#define FW_EQ_OFLD_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_OFLD_CMD_STATUSPGNS_S) + +#define FW_EQ_OFLD_CMD_STATUSPGRO_S 24 +#define FW_EQ_OFLD_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_OFLD_CMD_STATUSPGRO_S) + +#define FW_EQ_OFLD_CMD_FETCHNS_S 23 +#define FW_EQ_OFLD_CMD_FETCHNS_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHNS_S) + +#define FW_EQ_OFLD_CMD_FETCHRO_S 22 +#define FW_EQ_OFLD_CMD_FETCHRO_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHRO_S) +#define FW_EQ_OFLD_CMD_FETCHRO_F FW_EQ_OFLD_CMD_FETCHRO_V(1U) + +#define FW_EQ_OFLD_CMD_HOSTFCMODE_S 20 +#define FW_EQ_OFLD_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_OFLD_CMD_HOSTFCMODE_S) + +#define FW_EQ_OFLD_CMD_CPRIO_S 19 +#define FW_EQ_OFLD_CMD_CPRIO_V(x) ((x) << FW_EQ_OFLD_CMD_CPRIO_S) + +#define FW_EQ_OFLD_CMD_ONCHIP_S 18 +#define FW_EQ_OFLD_CMD_ONCHIP_V(x) ((x) << FW_EQ_OFLD_CMD_ONCHIP_S) + +#define FW_EQ_OFLD_CMD_PCIECHN_S 16 +#define FW_EQ_OFLD_CMD_PCIECHN_V(x) ((x) << FW_EQ_OFLD_CMD_PCIECHN_S) + +#define FW_EQ_OFLD_CMD_IQID_S 0 +#define FW_EQ_OFLD_CMD_IQID_V(x) ((x) << FW_EQ_OFLD_CMD_IQID_S) + +#define FW_EQ_OFLD_CMD_DCAEN_S 31 +#define FW_EQ_OFLD_CMD_DCAEN_V(x) ((x) << FW_EQ_OFLD_CMD_DCAEN_S) + +#define FW_EQ_OFLD_CMD_DCACPU_S 26 +#define FW_EQ_OFLD_CMD_DCACPU_V(x) ((x) << FW_EQ_OFLD_CMD_DCACPU_S) + +#define FW_EQ_OFLD_CMD_FBMIN_S 23 +#define FW_EQ_OFLD_CMD_FBMIN_V(x) ((x) << FW_EQ_OFLD_CMD_FBMIN_S) + +#define FW_EQ_OFLD_CMD_FBMAX_S 20 +#define FW_EQ_OFLD_CMD_FBMAX_V(x) ((x) << FW_EQ_OFLD_CMD_FBMAX_S) + +#define FW_EQ_OFLD_CMD_CIDXFTHRESHO_S 19 +#define FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(x) \ + ((x) << FW_EQ_OFLD_CMD_CIDXFTHRESHO_S) + +#define FW_EQ_OFLD_CMD_CIDXFTHRESH_S 16 +#define FW_EQ_OFLD_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_OFLD_CMD_CIDXFTHRESH_S) + +#define FW_EQ_OFLD_CMD_EQSIZE_S 0 +#define FW_EQ_OFLD_CMD_EQSIZE_V(x) ((x) << FW_EQ_OFLD_CMD_EQSIZE_S) /* * Macros for VIID parsing: * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number */ -#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7) -#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1) -#define 
FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F) + +#define FW_VIID_PFN_S 8 +#define FW_VIID_PFN_M 0x7 +#define FW_VIID_PFN_G(x) (((x) >> FW_VIID_PFN_S) & FW_VIID_PFN_M) + +#define FW_VIID_VIVLD_S 7 +#define FW_VIID_VIVLD_M 0x1 +#define FW_VIID_VIVLD_G(x) (((x) >> FW_VIID_VIVLD_S) & FW_VIID_VIVLD_M) + +#define FW_VIID_VIN_S 0 +#define FW_VIID_VIN_M 0x7F +#define FW_VIID_VIN_G(x) (((x) >> FW_VIID_VIN_S) & FW_VIID_VIN_M) struct fw_vi_cmd { __be32 op_to_vfn; @@ -1341,15 +1859,35 @@ struct fw_vi_cmd { __be64 r10; }; -#define FW_VI_CMD_PFN(x) ((x) << 8) -#define FW_VI_CMD_VFN(x) ((x) << 0) -#define FW_VI_CMD_ALLOC (1U << 31) -#define FW_VI_CMD_FREE (1U << 30) -#define FW_VI_CMD_VIID(x) ((x) << 0) -#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff) -#define FW_VI_CMD_PORTID(x) ((x) << 4) -#define FW_VI_CMD_PORTID_GET(x) (((x) >> 4) & 0xf) -#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff) +#define FW_VI_CMD_PFN_S 8 +#define FW_VI_CMD_PFN_V(x) ((x) << FW_VI_CMD_PFN_S) + +#define FW_VI_CMD_VFN_S 0 +#define FW_VI_CMD_VFN_V(x) ((x) << FW_VI_CMD_VFN_S) + +#define FW_VI_CMD_ALLOC_S 31 +#define FW_VI_CMD_ALLOC_V(x) ((x) << FW_VI_CMD_ALLOC_S) +#define FW_VI_CMD_ALLOC_F FW_VI_CMD_ALLOC_V(1U) + +#define FW_VI_CMD_FREE_S 30 +#define FW_VI_CMD_FREE_V(x) ((x) << FW_VI_CMD_FREE_S) +#define FW_VI_CMD_FREE_F FW_VI_CMD_FREE_V(1U) + +#define FW_VI_CMD_VIID_S 0 +#define FW_VI_CMD_VIID_M 0xfff +#define FW_VI_CMD_VIID_V(x) ((x) << FW_VI_CMD_VIID_S) +#define FW_VI_CMD_VIID_G(x) (((x) >> FW_VI_CMD_VIID_S) & FW_VI_CMD_VIID_M) + +#define FW_VI_CMD_PORTID_S 4 +#define FW_VI_CMD_PORTID_M 0xf +#define FW_VI_CMD_PORTID_V(x) ((x) << FW_VI_CMD_PORTID_S) +#define FW_VI_CMD_PORTID_G(x) \ + (((x) >> FW_VI_CMD_PORTID_S) & FW_VI_CMD_PORTID_M) + +#define FW_VI_CMD_RSSSIZE_S 0 +#define FW_VI_CMD_RSSSIZE_M 0x7ff +#define FW_VI_CMD_RSSSIZE_G(x) \ + (((x) >> FW_VI_CMD_RSSSIZE_S) & FW_VI_CMD_RSSSIZE_M) /* Special VI_MAC command index ids */ #define FW_VI_MAC_ADD_MAC 0x3FF @@ -1385,16 +1923,37 @@ struct fw_vi_mac_cmd { } u; }; -#define FW_VI_MAC_CMD_VIID(x) ((x) << 0) -#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31) -#define FW_VI_MAC_CMD_HASHVECEN (1U << 23) -#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22) -#define FW_VI_MAC_CMD_VALID (1U << 15) -#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12) -#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10) -#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3) -#define FW_VI_MAC_CMD_IDX(x) ((x) << 0) -#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff) +#define FW_VI_MAC_CMD_VIID_S 0 +#define FW_VI_MAC_CMD_VIID_V(x) ((x) << FW_VI_MAC_CMD_VIID_S) + +#define FW_VI_MAC_CMD_FREEMACS_S 31 +#define FW_VI_MAC_CMD_FREEMACS_V(x) ((x) << FW_VI_MAC_CMD_FREEMACS_S) + +#define FW_VI_MAC_CMD_HASHVECEN_S 23 +#define FW_VI_MAC_CMD_HASHVECEN_V(x) ((x) << FW_VI_MAC_CMD_HASHVECEN_S) +#define FW_VI_MAC_CMD_HASHVECEN_F FW_VI_MAC_CMD_HASHVECEN_V(1U) + +#define FW_VI_MAC_CMD_HASHUNIEN_S 22 +#define FW_VI_MAC_CMD_HASHUNIEN_V(x) ((x) << FW_VI_MAC_CMD_HASHUNIEN_S) + +#define FW_VI_MAC_CMD_VALID_S 15 +#define FW_VI_MAC_CMD_VALID_V(x) ((x) << FW_VI_MAC_CMD_VALID_S) +#define FW_VI_MAC_CMD_VALID_F FW_VI_MAC_CMD_VALID_V(1U) + +#define FW_VI_MAC_CMD_PRIO_S 12 +#define FW_VI_MAC_CMD_PRIO_V(x) ((x) << FW_VI_MAC_CMD_PRIO_S) + +#define FW_VI_MAC_CMD_SMAC_RESULT_S 10 +#define FW_VI_MAC_CMD_SMAC_RESULT_M 0x3 +#define FW_VI_MAC_CMD_SMAC_RESULT_V(x) ((x) << FW_VI_MAC_CMD_SMAC_RESULT_S) +#define FW_VI_MAC_CMD_SMAC_RESULT_G(x) \ + (((x) >> FW_VI_MAC_CMD_SMAC_RESULT_S) & FW_VI_MAC_CMD_SMAC_RESULT_M) + +#define FW_VI_MAC_CMD_IDX_S 0 +#define 
FW_VI_MAC_CMD_IDX_M 0x3ff +#define FW_VI_MAC_CMD_IDX_V(x) ((x) << FW_VI_MAC_CMD_IDX_S) +#define FW_VI_MAC_CMD_IDX_G(x) \ + (((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M) #define FW_RXMODE_MTU_NO_CHG 65535 @@ -1405,17 +1964,30 @@ struct fw_vi_rxmode_cmd { __be32 r4_lo; }; -#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0) -#define FW_VI_RXMODE_CMD_MTU_MASK 0xffff -#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16) -#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14) -#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12) -#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10) -#define FW_VI_RXMODE_CMD_VLANEXEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << 8) +#define FW_VI_RXMODE_CMD_VIID_S 0 +#define FW_VI_RXMODE_CMD_VIID_V(x) ((x) << FW_VI_RXMODE_CMD_VIID_S) + +#define FW_VI_RXMODE_CMD_MTU_S 16 +#define FW_VI_RXMODE_CMD_MTU_M 0xffff +#define FW_VI_RXMODE_CMD_MTU_V(x) ((x) << FW_VI_RXMODE_CMD_MTU_S) + +#define FW_VI_RXMODE_CMD_PROMISCEN_S 14 +#define FW_VI_RXMODE_CMD_PROMISCEN_M 0x3 +#define FW_VI_RXMODE_CMD_PROMISCEN_V(x) ((x) << FW_VI_RXMODE_CMD_PROMISCEN_S) + +#define FW_VI_RXMODE_CMD_ALLMULTIEN_S 12 +#define FW_VI_RXMODE_CMD_ALLMULTIEN_M 0x3 +#define FW_VI_RXMODE_CMD_ALLMULTIEN_V(x) \ + ((x) << FW_VI_RXMODE_CMD_ALLMULTIEN_S) + +#define FW_VI_RXMODE_CMD_BROADCASTEN_S 10 +#define FW_VI_RXMODE_CMD_BROADCASTEN_M 0x3 +#define FW_VI_RXMODE_CMD_BROADCASTEN_V(x) \ + ((x) << FW_VI_RXMODE_CMD_BROADCASTEN_S) + +#define FW_VI_RXMODE_CMD_VLANEXEN_S 8 +#define FW_VI_RXMODE_CMD_VLANEXEN_M 0x3 +#define FW_VI_RXMODE_CMD_VLANEXEN_V(x) ((x) << FW_VI_RXMODE_CMD_VLANEXEN_S) struct fw_vi_enable_cmd { __be32 op_to_viid; @@ -1425,11 +1997,21 @@ struct fw_vi_enable_cmd { __be32 r4; }; -#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0) -#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31) -#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30) -#define FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << 28) -#define FW_VI_ENABLE_CMD_LED (1U << 29) +#define FW_VI_ENABLE_CMD_VIID_S 0 +#define FW_VI_ENABLE_CMD_VIID_V(x) ((x) << FW_VI_ENABLE_CMD_VIID_S) + +#define FW_VI_ENABLE_CMD_IEN_S 31 +#define FW_VI_ENABLE_CMD_IEN_V(x) ((x) << FW_VI_ENABLE_CMD_IEN_S) + +#define FW_VI_ENABLE_CMD_EEN_S 30 +#define FW_VI_ENABLE_CMD_EEN_V(x) ((x) << FW_VI_ENABLE_CMD_EEN_S) + +#define FW_VI_ENABLE_CMD_LED_S 29 +#define FW_VI_ENABLE_CMD_LED_V(x) ((x) << FW_VI_ENABLE_CMD_LED_S) +#define FW_VI_ENABLE_CMD_LED_F FW_VI_ENABLE_CMD_LED_V(1U) + +#define FW_VI_ENABLE_CMD_DCB_INFO_S 28 +#define FW_VI_ENABLE_CMD_DCB_INFO_V(x) ((x) << FW_VI_ENABLE_CMD_DCB_INFO_S) /* VI VF stats offset definitions */ #define VI_VF_NUM_STATS 16 @@ -1529,9 +2111,14 @@ struct fw_vi_stats_cmd { } u; }; -#define FW_VI_STATS_CMD_VIID(x) ((x) << 0) -#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12) -#define FW_VI_STATS_CMD_IX(x) ((x) << 0) +#define FW_VI_STATS_CMD_VIID_S 0 +#define FW_VI_STATS_CMD_VIID_V(x) ((x) << FW_VI_STATS_CMD_VIID_S) + +#define FW_VI_STATS_CMD_NSTATS_S 12 +#define FW_VI_STATS_CMD_NSTATS_V(x) ((x) << FW_VI_STATS_CMD_NSTATS_S) + +#define FW_VI_STATS_CMD_IX_S 0 +#define FW_VI_STATS_CMD_IX_V(x) ((x) << FW_VI_STATS_CMD_IX_S) struct fw_acl_mac_cmd { __be32 op_to_vfn; @@ -1548,9 +2135,14 @@ struct fw_acl_mac_cmd { u8 macaddr3[6]; }; -#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8) -#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0) -#define FW_ACL_MAC_CMD_EN(x) ((x) << 31) +#define FW_ACL_MAC_CMD_PFN_S 8 +#define FW_ACL_MAC_CMD_PFN_V(x) ((x) << FW_ACL_MAC_CMD_PFN_S) + +#define 
FW_ACL_MAC_CMD_VFN_S 0 +#define FW_ACL_MAC_CMD_VFN_V(x) ((x) << FW_ACL_MAC_CMD_VFN_S) + +#define FW_ACL_MAC_CMD_EN_S 31 +#define FW_ACL_MAC_CMD_EN_V(x) ((x) << FW_ACL_MAC_CMD_EN_S) struct fw_acl_vlan_cmd { __be32 op_to_vfn; @@ -1561,11 +2153,20 @@ struct fw_acl_vlan_cmd { __be16 vlanid[16]; }; -#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8) -#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0) -#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31) -#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7) -#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6) +#define FW_ACL_VLAN_CMD_PFN_S 8 +#define FW_ACL_VLAN_CMD_PFN_V(x) ((x) << FW_ACL_VLAN_CMD_PFN_S) + +#define FW_ACL_VLAN_CMD_VFN_S 0 +#define FW_ACL_VLAN_CMD_VFN_V(x) ((x) << FW_ACL_VLAN_CMD_VFN_S) + +#define FW_ACL_VLAN_CMD_EN_S 31 +#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S) + +#define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7 +#define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S) + +#define FW_ACL_VLAN_CMD_FM_S 6 +#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S) enum fw_port_cap { FW_PORT_CAP_SPEED_100M = 0x0001, @@ -1587,13 +2188,14 @@ enum fw_port_cap { }; enum fw_port_mdi { - FW_PORT_MDI_UNCHANGED, - FW_PORT_MDI_AUTO, - FW_PORT_MDI_F_STRAIGHT, - FW_PORT_MDI_F_CROSSOVER + FW_PORT_CAP_MDI_UNCHANGED, + FW_PORT_CAP_MDI_AUTO, + FW_PORT_CAP_MDI_F_STRAIGHT, + FW_PORT_CAP_MDI_F_CROSSOVER }; -#define FW_PORT_MDI(x) ((x) << 9) +#define FW_PORT_CAP_MDI_S 9 +#define FW_PORT_CAP_MDI_V(x) ((x) << FW_PORT_CAP_MDI_S) enum fw_port_action { FW_PORT_ACTION_L1_CFG = 0x0001, @@ -1753,52 +2355,105 @@ struct fw_port_cmd { } u; }; -#define FW_PORT_CMD_READ (1U << 22) - -#define FW_PORT_CMD_PORTID(x) ((x) << 0) -#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) - -#define FW_PORT_CMD_ACTION(x) ((x) << 16) -#define FW_PORT_CMD_ACTION_GET(x) (((x) >> 16) & 0xffff) - -#define FW_PORT_CMD_CTLBF(x) ((x) << 10) -#define FW_PORT_CMD_OVLAN3(x) ((x) << 7) -#define FW_PORT_CMD_OVLAN2(x) ((x) << 6) -#define FW_PORT_CMD_OVLAN1(x) ((x) << 5) -#define FW_PORT_CMD_OVLAN0(x) ((x) << 4) -#define FW_PORT_CMD_IVLAN0(x) ((x) << 3) - -#define FW_PORT_CMD_TXIPG(x) ((x) << 19) - -#define FW_PORT_CMD_LSTATUS (1U << 31) -#define FW_PORT_CMD_LSTATUS_GET(x) (((x) >> 31) & 0x1) -#define FW_PORT_CMD_LSPEED(x) ((x) << 24) -#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f) -#define FW_PORT_CMD_TXPAUSE (1U << 23) -#define FW_PORT_CMD_RXPAUSE (1U << 22) -#define FW_PORT_CMD_MDIOCAP (1U << 21) -#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f) -#define FW_PORT_CMD_LPTXPAUSE (1U << 15) -#define FW_PORT_CMD_LPRXPAUSE (1U << 14) -#define FW_PORT_CMD_PTYPE_MASK 0x1f -#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK) -#define FW_PORT_CMD_MODTYPE_MASK 0x1f -#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK) - -#define FW_PORT_CMD_DCBXDIS (1U << 7) -#define FW_PORT_CMD_APPLY (1U << 7) -#define FW_PORT_CMD_ALL_SYNCD (1U << 7) -#define FW_PORT_CMD_DCB_VERSION_GET(x) (((x) >> 8) & 0xf) - -#define FW_PORT_CMD_PPPEN(x) ((x) << 31) -#define FW_PORT_CMD_TPSRC(x) ((x) << 28) -#define FW_PORT_CMD_NCSISRC(x) ((x) << 24) - -#define FW_PORT_CMD_CH0(x) ((x) << 20) -#define FW_PORT_CMD_CH1(x) ((x) << 16) -#define FW_PORT_CMD_CH2(x) ((x) << 12) -#define FW_PORT_CMD_CH3(x) ((x) << 8) -#define FW_PORT_CMD_NCSICH(x) ((x) << 4) +#define FW_PORT_CMD_READ_S 22 +#define FW_PORT_CMD_READ_V(x) ((x) << FW_PORT_CMD_READ_S) +#define FW_PORT_CMD_READ_F FW_PORT_CMD_READ_V(1U) + +#define FW_PORT_CMD_PORTID_S 0 +#define FW_PORT_CMD_PORTID_M 0xf +#define 
FW_PORT_CMD_PORTID_V(x) ((x) << FW_PORT_CMD_PORTID_S) +#define FW_PORT_CMD_PORTID_G(x) \ + (((x) >> FW_PORT_CMD_PORTID_S) & FW_PORT_CMD_PORTID_M) + +#define FW_PORT_CMD_ACTION_S 16 +#define FW_PORT_CMD_ACTION_M 0xffff +#define FW_PORT_CMD_ACTION_V(x) ((x) << FW_PORT_CMD_ACTION_S) +#define FW_PORT_CMD_ACTION_G(x) \ + (((x) >> FW_PORT_CMD_ACTION_S) & FW_PORT_CMD_ACTION_M) + +#define FW_PORT_CMD_OVLAN3_S 7 +#define FW_PORT_CMD_OVLAN3_V(x) ((x) << FW_PORT_CMD_OVLAN3_S) + +#define FW_PORT_CMD_OVLAN2_S 6 +#define FW_PORT_CMD_OVLAN2_V(x) ((x) << FW_PORT_CMD_OVLAN2_S) + +#define FW_PORT_CMD_OVLAN1_S 5 +#define FW_PORT_CMD_OVLAN1_V(x) ((x) << FW_PORT_CMD_OVLAN1_S) + +#define FW_PORT_CMD_OVLAN0_S 4 +#define FW_PORT_CMD_OVLAN0_V(x) ((x) << FW_PORT_CMD_OVLAN0_S) + +#define FW_PORT_CMD_IVLAN0_S 3 +#define FW_PORT_CMD_IVLAN0_V(x) ((x) << FW_PORT_CMD_IVLAN0_S) + +#define FW_PORT_CMD_TXIPG_S 3 +#define FW_PORT_CMD_TXIPG_V(x) ((x) << FW_PORT_CMD_TXIPG_S) + +#define FW_PORT_CMD_LSTATUS_S 31 +#define FW_PORT_CMD_LSTATUS_M 0x1 +#define FW_PORT_CMD_LSTATUS_V(x) ((x) << FW_PORT_CMD_LSTATUS_S) +#define FW_PORT_CMD_LSTATUS_G(x) \ + (((x) >> FW_PORT_CMD_LSTATUS_S) & FW_PORT_CMD_LSTATUS_M) +#define FW_PORT_CMD_LSTATUS_F FW_PORT_CMD_LSTATUS_V(1U) + +#define FW_PORT_CMD_LSPEED_S 24 +#define FW_PORT_CMD_LSPEED_M 0x3f +#define FW_PORT_CMD_LSPEED_V(x) ((x) << FW_PORT_CMD_LSPEED_S) +#define FW_PORT_CMD_LSPEED_G(x) \ + (((x) >> FW_PORT_CMD_LSPEED_S) & FW_PORT_CMD_LSPEED_M) + +#define FW_PORT_CMD_TXPAUSE_S 23 +#define FW_PORT_CMD_TXPAUSE_V(x) ((x) << FW_PORT_CMD_TXPAUSE_S) +#define FW_PORT_CMD_TXPAUSE_F FW_PORT_CMD_TXPAUSE_V(1U) + +#define FW_PORT_CMD_RXPAUSE_S 22 +#define FW_PORT_CMD_RXPAUSE_V(x) ((x) << FW_PORT_CMD_RXPAUSE_S) +#define FW_PORT_CMD_RXPAUSE_F FW_PORT_CMD_RXPAUSE_V(1U) + +#define FW_PORT_CMD_MDIOCAP_S 21 +#define FW_PORT_CMD_MDIOCAP_V(x) ((x) << FW_PORT_CMD_MDIOCAP_S) +#define FW_PORT_CMD_MDIOCAP_F FW_PORT_CMD_MDIOCAP_V(1U) + +#define FW_PORT_CMD_MDIOADDR_S 16 +#define FW_PORT_CMD_MDIOADDR_M 0x1f +#define FW_PORT_CMD_MDIOADDR_G(x) \ + (((x) >> FW_PORT_CMD_MDIOADDR_S) & FW_PORT_CMD_MDIOADDR_M) + +#define FW_PORT_CMD_LPTXPAUSE_S 15 +#define FW_PORT_CMD_LPTXPAUSE_V(x) ((x) << FW_PORT_CMD_LPTXPAUSE_S) +#define FW_PORT_CMD_LPTXPAUSE_F FW_PORT_CMD_LPTXPAUSE_V(1U) + +#define FW_PORT_CMD_LPRXPAUSE_S 14 +#define FW_PORT_CMD_LPRXPAUSE_V(x) ((x) << FW_PORT_CMD_LPRXPAUSE_S) +#define FW_PORT_CMD_LPRXPAUSE_F FW_PORT_CMD_LPRXPAUSE_V(1U) + +#define FW_PORT_CMD_PTYPE_S 8 +#define FW_PORT_CMD_PTYPE_M 0x1f +#define FW_PORT_CMD_PTYPE_G(x) \ + (((x) >> FW_PORT_CMD_PTYPE_S) & FW_PORT_CMD_PTYPE_M) + +#define FW_PORT_CMD_MODTYPE_S 0 +#define FW_PORT_CMD_MODTYPE_M 0x1f +#define FW_PORT_CMD_MODTYPE_V(x) ((x) << FW_PORT_CMD_MODTYPE_S) +#define FW_PORT_CMD_MODTYPE_G(x) \ + (((x) >> FW_PORT_CMD_MODTYPE_S) & FW_PORT_CMD_MODTYPE_M) + +#define FW_PORT_CMD_DCBXDIS_S 7 +#define FW_PORT_CMD_DCBXDIS_V(x) ((x) << FW_PORT_CMD_DCBXDIS_S) +#define FW_PORT_CMD_DCBXDIS_F FW_PORT_CMD_DCBXDIS_V(1U) + +#define FW_PORT_CMD_APPLY_S 7 +#define FW_PORT_CMD_APPLY_V(x) ((x) << FW_PORT_CMD_APPLY_S) +#define FW_PORT_CMD_APPLY_F FW_PORT_CMD_APPLY_V(1U) + +#define FW_PORT_CMD_ALL_SYNCD_S 7 +#define FW_PORT_CMD_ALL_SYNCD_V(x) ((x) << FW_PORT_CMD_ALL_SYNCD_S) +#define FW_PORT_CMD_ALL_SYNCD_F FW_PORT_CMD_ALL_SYNCD_V(1U) + +#define FW_PORT_CMD_DCB_VERSION_S 12 +#define FW_PORT_CMD_DCB_VERSION_M 0x7 +#define FW_PORT_CMD_DCB_VERSION_G(x) \ + (((x) >> FW_PORT_CMD_DCB_VERSION_S) & FW_PORT_CMD_DCB_VERSION_M) enum fw_port_type { FW_PORT_TYPE_FIBER_XFI, @@ -1817,7 +2472,7 
@@ enum fw_port_type { FW_PORT_TYPE_QSFP, FW_PORT_TYPE_BP40_BA, - FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK + FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M }; enum fw_port_module_type { @@ -1828,11 +2483,11 @@ enum fw_port_module_type { FW_PORT_MOD_TYPE_TWINAX_PASSIVE, FW_PORT_MOD_TYPE_TWINAX_ACTIVE, FW_PORT_MOD_TYPE_LRM, - FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_MASK - 3, - FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_MASK - 2, - FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_MASK - 1, + FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_M - 3, + FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_M - 2, + FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_M - 1, - FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK + FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_M }; enum fw_port_mod_sub_type { @@ -1988,11 +2643,6 @@ struct fw_port_stats_cmd { } u; }; -#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4) -#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0) -#define FW_PORT_STATS_CMD_TX(x) ((x) << 7) -#define FW_PORT_STATS_CMD_IX(x) ((x) << 0) - /* port loopback stats */ #define FW_NUM_LB_STATS 16 enum fw_port_lb_stats_index { @@ -2048,22 +2698,13 @@ struct fw_port_lb_stats_cmd { } u; }; -#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0) -#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4) -#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0) -#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0) - struct fw_rss_ind_tbl_cmd { __be32 op_to_viid; -#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0) __be32 retval_len16; __be16 niqid; __be16 startidx; __be32 r3; __be32 iq0_to_iq2; -#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20) -#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10) -#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0) __be32 iq3_to_iq5; __be32 iq6_to_iq8; __be32 iq9_to_iq11; @@ -2077,6 +2718,18 @@ struct fw_rss_ind_tbl_cmd { __be32 r15_lo; }; +#define FW_RSS_IND_TBL_CMD_VIID_S 0 +#define FW_RSS_IND_TBL_CMD_VIID_V(x) ((x) << FW_RSS_IND_TBL_CMD_VIID_S) + +#define FW_RSS_IND_TBL_CMD_IQ0_S 20 +#define FW_RSS_IND_TBL_CMD_IQ0_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ0_S) + +#define FW_RSS_IND_TBL_CMD_IQ1_S 10 +#define FW_RSS_IND_TBL_CMD_IQ1_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ1_S) + +#define FW_RSS_IND_TBL_CMD_IQ2_S 0 +#define FW_RSS_IND_TBL_CMD_IQ2_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ2_S) + struct fw_rss_glb_config_cmd { __be32 op_to_write; __be32 retval_len16; @@ -2090,27 +2743,75 @@ struct fw_rss_glb_config_cmd { struct fw_rss_glb_config_basicvirtual { __be32 mode_pkd; __be32 synmapen_to_hashtoeplitz; -#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8) -#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7) -#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6) -#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5) -#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4) -#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3) -#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2) -#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1) -#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0) __be64 r8; __be64 r9; } basicvirtual; } u; }; -#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28) -#define FW_RSS_GLB_CONFIG_CMD_MODE_GET(x) (((x) >> 28) & 0xf) +#define FW_RSS_GLB_CONFIG_CMD_MODE_S 28 +#define FW_RSS_GLB_CONFIG_CMD_MODE_M 0xf +#define FW_RSS_GLB_CONFIG_CMD_MODE_V(x) ((x) << FW_RSS_GLB_CONFIG_CMD_MODE_S) +#define FW_RSS_GLB_CONFIG_CMD_MODE_G(x) \ + (((x) >> FW_RSS_GLB_CONFIG_CMD_MODE_S) & FW_RSS_GLB_CONFIG_CMD_MODE_M) #define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 #define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_S 8 +#define 
FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_S) +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F \ + FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_S 7 +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F \ + FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_S 6 +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F \ + FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_S 5 +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F \ + FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_S 4 +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F \ + FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_S 3 +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_S) +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F \ + FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_S 2 +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_S) +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F \ + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_S 1 +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_S) +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F \ + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_S 0 +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_S) +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F \ + FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_V(1U) + struct fw_rss_vi_config_cmd { __be32 op_to_viid; #define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0) @@ -2124,19 +2825,51 @@ struct fw_rss_vi_config_cmd { struct fw_rss_vi_config_basicvirtual { __be32 r6; __be32 defaultq_to_udpen; -#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16) -#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(x) (((x) >> 16) & 0x3ff) -#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4) -#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3) -#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2) -#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1) -#define FW_RSS_VI_CONFIG_CMD_UDPEN (1U << 0) __be64 r9; __be64 r10; } basicvirtual; } u; }; +#define FW_RSS_VI_CONFIG_CMD_VIID_S 0 +#define FW_RSS_VI_CONFIG_CMD_VIID_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_VIID_S) + +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S 16 +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_M 0x3ff +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S) +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(x) \ + (((x) >> FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S) & \ + FW_RSS_VI_CONFIG_CMD_DEFAULTQ_M) + +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_S 4 +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_S 3 +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F \ 
+ FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_S 2 +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_S 1 +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_UDPEN_S 0 +#define FW_RSS_VI_CONFIG_CMD_UDPEN_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_UDPEN_S) +#define FW_RSS_VI_CONFIG_CMD_UDPEN_F FW_RSS_VI_CONFIG_CMD_UDPEN_V(1U) + struct fw_clip_cmd { __be32 op_to_write; __be32 alloc_to_len16; @@ -2145,19 +2878,13 @@ struct fw_clip_cmd { __be32 r4[2]; }; -#define S_FW_CLIP_CMD_ALLOC 31 -#define M_FW_CLIP_CMD_ALLOC 0x1 -#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC) -#define G_FW_CLIP_CMD_ALLOC(x) \ - (((x) >> S_FW_CLIP_CMD_ALLOC) & M_FW_CLIP_CMD_ALLOC) -#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U) +#define FW_CLIP_CMD_ALLOC_S 31 +#define FW_CLIP_CMD_ALLOC_V(x) ((x) << FW_CLIP_CMD_ALLOC_S) +#define FW_CLIP_CMD_ALLOC_F FW_CLIP_CMD_ALLOC_V(1U) -#define S_FW_CLIP_CMD_FREE 30 -#define M_FW_CLIP_CMD_FREE 0x1 -#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE) -#define G_FW_CLIP_CMD_FREE(x) \ - (((x) >> S_FW_CLIP_CMD_FREE) & M_FW_CLIP_CMD_FREE) -#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U) +#define FW_CLIP_CMD_FREE_S 30 +#define FW_CLIP_CMD_FREE_V(x) ((x) << FW_CLIP_CMD_FREE_S) +#define FW_CLIP_CMD_FREE_F FW_CLIP_CMD_FREE_V(1U) enum fw_error_type { FW_ERROR_TYPE_EXCEPTION = 0x0, @@ -2196,7 +2923,6 @@ struct fw_error_cmd { struct fw_debug_cmd { __be32 op_type; -#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff) __be32 len16_pkd; union fw_debug { struct fw_debug_assert { @@ -2219,19 +2945,35 @@ struct fw_debug_cmd { } u; }; -#define FW_PCIE_FW_ERR (1U << 31) -#define FW_PCIE_FW_INIT (1U << 30) -#define FW_PCIE_FW_HALT (1U << 29) -#define FW_PCIE_FW_MASTER_VLD (1U << 15) -#define FW_PCIE_FW_MASTER_MASK 0x7 -#define FW_PCIE_FW_MASTER_SHIFT 12 -#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT) -#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \ - FW_PCIE_FW_MASTER_MASK) -#define FW_PCIE_FW_EVAL_MASK 0x7 -#define FW_PCIE_FW_EVAL_SHIFT 24 -#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \ - FW_PCIE_FW_EVAL_MASK) +#define FW_DEBUG_CMD_TYPE_S 0 +#define FW_DEBUG_CMD_TYPE_M 0xff +#define FW_DEBUG_CMD_TYPE_G(x) \ + (((x) >> FW_DEBUG_CMD_TYPE_S) & FW_DEBUG_CMD_TYPE_M) + +#define PCIE_FW_ERR_S 31 +#define PCIE_FW_ERR_V(x) ((x) << PCIE_FW_ERR_S) +#define PCIE_FW_ERR_F PCIE_FW_ERR_V(1U) + +#define PCIE_FW_INIT_S 30 +#define PCIE_FW_INIT_V(x) ((x) << PCIE_FW_INIT_S) +#define PCIE_FW_INIT_F PCIE_FW_INIT_V(1U) + +#define PCIE_FW_HALT_S 29 +#define PCIE_FW_HALT_V(x) ((x) << PCIE_FW_HALT_S) +#define PCIE_FW_HALT_F PCIE_FW_HALT_V(1U) + +#define PCIE_FW_EVAL_S 24 +#define PCIE_FW_EVAL_M 0x7 +#define PCIE_FW_EVAL_G(x) (((x) >> PCIE_FW_EVAL_S) & PCIE_FW_EVAL_M) + +#define PCIE_FW_MASTER_VLD_S 15 +#define PCIE_FW_MASTER_VLD_V(x) ((x) << PCIE_FW_MASTER_VLD_S) +#define PCIE_FW_MASTER_VLD_F PCIE_FW_MASTER_VLD_V(1U) + +#define PCIE_FW_MASTER_S 12 +#define PCIE_FW_MASTER_M 0x7 +#define PCIE_FW_MASTER_V(x) ((x) << PCIE_FW_MASTER_S) +#define PCIE_FW_MASTER_G(x) (((x) >> PCIE_FW_MASTER_S) & PCIE_FW_MASTER_M) struct fw_hdr { u8 ver; @@ -2259,10 +3001,25 @@ enum fw_hdr_chip { FW_HDR_CHIP_T5 }; 
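All of these conversions follow a single suffix convention: _S is the field's bit shift, _M is its post-shift mask, _V(x) packs a value into place, _G(x) extracts one, and _F is the single-bit flag form (always _V(1U)). A minimal sketch of how the pieces compose, reusing the PCIE_FW_MASTER field defined above (the local variables are illustrative only):

	u32 pcie_fw = PCIE_FW_MASTER_VLD_F | PCIE_FW_MASTER_V(3U);	/* pack */
	unsigned int master = (pcie_fw & PCIE_FW_MASTER_VLD_F)		/* test flag */
			      ? PCIE_FW_MASTER_G(pcie_fw) : 0;		/* unpack: 3 */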
-#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) -#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) -#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) -#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) +#define FW_HDR_FW_VER_MAJOR_S 24 +#define FW_HDR_FW_VER_MAJOR_M 0xff +#define FW_HDR_FW_VER_MAJOR_G(x) \ + (((x) >> FW_HDR_FW_VER_MAJOR_S) & FW_HDR_FW_VER_MAJOR_M) + +#define FW_HDR_FW_VER_MINOR_S 16 +#define FW_HDR_FW_VER_MINOR_M 0xff +#define FW_HDR_FW_VER_MINOR_G(x) \ + (((x) >> FW_HDR_FW_VER_MINOR_S) & FW_HDR_FW_VER_MINOR_M) + +#define FW_HDR_FW_VER_MICRO_S 8 +#define FW_HDR_FW_VER_MICRO_M 0xff +#define FW_HDR_FW_VER_MICRO_G(x) \ + (((x) >> FW_HDR_FW_VER_MICRO_S) & FW_HDR_FW_VER_MICRO_M) + +#define FW_HDR_FW_VER_BUILD_S 0 +#define FW_HDR_FW_VER_BUILD_M 0xff +#define FW_HDR_FW_VER_BUILD_G(x) \ + (((x) >> FW_HDR_FW_VER_BUILD_S) & FW_HDR_FW_VER_BUILD_M) enum fw_hdr_intfver { FW_HDR_INTFVER_NIC = 0x00, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 68eaa9c88c7d..d00a751f0588 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -138,6 +138,8 @@ struct sge_fl { struct rx_sw_desc *sdesc; /* address of SW RX descriptor ring */ __be64 *desc; /* address of HW RX descriptor ring */ dma_addr_t addr; /* PCI bus address of hardware ring */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ }; /* @@ -178,6 +180,8 @@ struct sge_rspq { u16 abs_id; /* SGE abs QID for the response Q */ __be64 *desc; /* address of hardware response ring */ dma_addr_t phys_addr; /* PCI bus address of ring */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ unsigned int iqe_len; /* entry size */ unsigned int size; /* capacity of response Q */ struct adapter *adapter; /* our adapter */ @@ -240,6 +244,8 @@ struct sge_txq { struct tx_sw_desc *sdesc; /* address of SW TX descriptor ring */ struct sge_qstat *stat; /* queue status entry */ dma_addr_t phys_addr; /* PCI bus address of hardware ring */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ }; /* @@ -299,6 +305,14 @@ struct sge { u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */ u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */ + /* Decoded Adapter Parameters. + */ + u32 fl_pg_order; /* large page allocation size */ + u32 stat_len; /* length of status page at ring end */ + u32 pktshift; /* padding between CPL & packet data */ + u32 fl_align; /* response queue message alignment */ + u32 fl_starve_thres; /* Free List starvation threshold */ + /* * Reverse maps from Absolute Queue IDs to associated queue pointers.
* The absolute Queue IDs are in a compact range which start at a @@ -337,6 +351,7 @@ struct sge { struct adapter { /* PCI resources */ void __iomem *regs; + void __iomem *bar2; struct pci_dev *pdev; struct device *pdev_dev; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index bfa398d91826..aa74ec34a467 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1030,10 +1030,10 @@ static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq, pktcnt_idx = closest_thres(&adapter->sge, cnt); if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) { - v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | - FW_PARAMS_PARAM_X( + v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V( FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | - FW_PARAMS_PARAM_YZ(rspq->cntxt_id); + FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id); err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx); if (err) return err; @@ -1230,14 +1230,14 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev, sizeof(drvinfo->bus_info)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%u.%u.%u.%u, TP %u.%u.%u.%u", - FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev), - FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev), - FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev), - FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev)); + FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev), + FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev), + FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev), + FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev)); } /* @@ -2095,7 +2095,6 @@ static int adap_init0(struct adapter *adapter) unsigned int ethqsets; int err; u32 param, val = 0; - unsigned int chipid; /* * Wait for the device to become ready before proceeding ... @@ -2123,17 +2122,6 @@ static int adap_init0(struct adapter *adapter) return err; } - adapter->params.chip = 0; - switch (adapter->pdev->device >> 12) { - case CHELSIO_T4: - adapter->params.chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); - break; - case CHELSIO_T5: - chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); - adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); - break; - } - /* * Grab basic operational parameters. These will predominantly have * been set up by the Physical Function Driver or will be hard coded @@ -2184,8 +2172,8 @@ static int adap_init0(struct adapter *adapter) * firmware won't understand this and we'll just get * unencapsulated messages ... */ - param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP); + param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP); val = 1; (void) t4vf_set_params(adapter, 1, ¶m, &val); @@ -2594,6 +2582,27 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, goto err_free_adapter; } + /* Wait for the device to become ready before proceeding ... 
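+ * (t4vf_prep_adapter() also takes over the chip-version decode that
+ * the switch on (pdev->device >> 12) removed from adap_init0() above
+ * used to do -- presumably via the new CHELSIO_PCI_ID_VER() helper --
+ * and the conditional BAR2 mapping below depends on that result
+ * through is_t4().)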
+ */ + err = t4vf_prep_adapter(adapter); + if (err) { + dev_err(adapter->pdev_dev, "device didn't become ready:" + " err=%d\n", err); + goto err_unmap_bar0; + } + + /* For T5 and later we want to use the new BAR-based User Doorbells, + * so we need to map BAR2 here ... + */ + if (!is_t4(adapter->params.chip)) { + adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); + if (!adapter->bar2) { + dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n"); + err = -ENOMEM; + goto err_unmap_bar0; + } + } /* * Initialize adapter level features. */ @@ -2786,6 +2795,10 @@ err_free_dev: } err_unmap_bar: + if (!is_t4(adapter->params.chip)) + iounmap(adapter->bar2); + +err_unmap_bar0: iounmap(adapter->regs); err_free_adapter: @@ -2856,6 +2869,8 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev) free_netdev(netdev); } iounmap(adapter->regs); + if (!is_t4(adapter->params.chip)) + iounmap(adapter->bar2); kfree(adapter); } @@ -2908,67 +2923,18 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } -/* - * PCI Device registration data structures. - */ -#define CH_DEVICE(devid) \ - { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 } - -static const struct pci_device_id cxgb4vf_pci_tbl[] = { - CH_DEVICE(0xb000), /* PE10K FPGA */ - CH_DEVICE(0x4801), /* T420-cr */ - CH_DEVICE(0x4802), /* T422-cr */ - CH_DEVICE(0x4803), /* T440-cr */ - CH_DEVICE(0x4804), /* T420-bch */ - CH_DEVICE(0x4805), /* T440-bch */ - CH_DEVICE(0x4806), /* T460-ch */ - CH_DEVICE(0x4807), /* T420-so */ - CH_DEVICE(0x4808), /* T420-cx */ - CH_DEVICE(0x4809), /* T420-bt */ - CH_DEVICE(0x480a), /* T404-bt */ - CH_DEVICE(0x480d), /* T480-cr */ - CH_DEVICE(0x480e), /* T440-lp-cr */ - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x5801), /* T520-cr */ - CH_DEVICE(0x5802), /* T522-cr */ - CH_DEVICE(0x5803), /* T540-cr */ - CH_DEVICE(0x5804), /* T520-bch */ - CH_DEVICE(0x5805), /* T540-bch */ - CH_DEVICE(0x5806), /* T540-ch */ - CH_DEVICE(0x5807), /* T520-so */ - CH_DEVICE(0x5808), /* T520-cx */ - CH_DEVICE(0x5809), /* T520-bt */ - CH_DEVICE(0x580a), /* T504-bt */ - CH_DEVICE(0x580b), /* T520-sr */ - CH_DEVICE(0x580c), /* T504-bt */ - CH_DEVICE(0x580d), /* T580-cr */ - CH_DEVICE(0x580e), /* T540-lp-cr */ - CH_DEVICE(0x580f), /* Amsterdam */ - CH_DEVICE(0x5810), /* T580-lp-cr */ - CH_DEVICE(0x5811), /* T520-lp-cr */ - CH_DEVICE(0x5812), /* T560-cr */ - CH_DEVICE(0x5813), /* T580-cr */ - CH_DEVICE(0x5814), /* T580-so-cr */ - CH_DEVICE(0x5815), /* T502-bt */ - CH_DEVICE(0x5880), - CH_DEVICE(0x5881), - CH_DEVICE(0x5882), - CH_DEVICE(0x5883), - CH_DEVICE(0x5884), - CH_DEVICE(0x5885), - CH_DEVICE(0x5886), - CH_DEVICE(0x5887), - CH_DEVICE(0x5888), - { 0, } -}; +/* Macros needed to support the PCI Device ID Table ... 
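+ * For instance (an illustrative expansion only, assuming the shared
+ * t4_pci_id_tbl.h emits one CH_PCI_ID_TABLE_ENTRY() per supported
+ * device), the #include below produces something like:
+ *
+ *	static struct pci_device_id cxgb4vf_pci_tbl[] = {
+ *		{ PCI_VDEVICE(CHELSIO, 0x4801), 0 },	-- T420-cr
+ *		...
+ *		{ 0, }
+ *	};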
+ */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static struct pci_device_id cxgb4vf_pci_tbl[] = { +#define CH_PCI_DEVICE_ID_FUNCTION 0x8 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + { PCI_VDEVICE(CHELSIO, (devid)), 0 } + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } } + +#include "../cxgb4/t4_pci_id_tbl.h" MODULE_DESCRIPTION(DRV_DESC); MODULE_AUTHOR("Chelsio Communications"); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 85036e6b42c4..f7fd1317d996 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -51,14 +51,6 @@ #include "../cxgb4/t4_msg.h" /* - * Decoded Adapter Parameters. - */ -static u32 FL_PG_ORDER; /* large page allocation size */ -static u32 STAT_LEN; /* length of status page at ring end */ -static u32 PKTSHIFT; /* padding between CPL and packet data */ -static u32 FL_ALIGN; /* response queue message alignment */ - -/* * Constants ... */ enum { @@ -102,12 +94,6 @@ enum { MAX_TIMER_TX_RECLAIM = 100, /* - * An FL with <= FL_STARVE_THRES buffers is starving and a periodic - * timer will attempt to refill it. - */ - FL_STARVE_THRES = 4, - - /* * Suspend an Ethernet TX queue with fewer available descriptors than * this. We always want to have room for a maximum sized packet: * inline immediate data + MAX_SKB_FRAGS. This is the same as @@ -132,7 +118,7 @@ enum { * we can specify for immediate data in the firmware Ethernet TX * Work Request. */ - MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK, + MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M, /* * Max size of a WR sent through a control TX queue. @@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl) /** * fl_starving - return whether a Free List is starving. + * @adapter: pointer to the adapter * @fl: the Free List * * Tests specified Free List to see whether the number of buffers * available to the hardware has fallen below our "starvation" * threshold. */ -static inline bool fl_starving(const struct sge_fl *fl) { - return fl->avail - fl->pend_cred <= FL_STARVE_THRES; +static inline bool fl_starving(const struct adapter *adapter, + const struct sge_fl *fl) { + const struct sge *s = &adapter->sge; + + return fl->avail - fl->pend_cred <= s->fl_starve_thres; } /** @@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter, /** * get_buf_size - return the size of an RX Free List buffer. + * @adapter: pointer to the associated adapter * @sdesc: pointer to the software buffer descriptor */ -static inline int get_buf_size(const struct rx_sw_desc *sdesc) { - return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF) - ? (PAGE_SIZE << FL_PG_ORDER) - : PAGE_SIZE; +static inline int get_buf_size(const struct adapter *adapter, + const struct rx_sw_desc *sdesc) { + const struct sge *s = &adapter->sge; + + return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF) + ?
(PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE); } /** @@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) if (is_buf_mapped(sdesc)) dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), - get_buf_size(sdesc), PCI_DMA_FROMDEVICE); + get_buf_size(adapter, sdesc), + PCI_DMA_FROMDEVICE); put_page(sdesc->page); sdesc->page = NULL; if (++fl->cidx == fl->size) @@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) if (is_buf_mapped(sdesc)) dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), - get_buf_size(sdesc), PCI_DMA_FROMDEVICE); + get_buf_size(adapter, sdesc), + PCI_DMA_FROMDEVICE); sdesc->page = NULL; if (++fl->cidx == fl->size) fl->cidx = 0; @@ -530,19 +525,40 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) { u32 val; - /* - * The SGE keeps track of its Producer and Consumer Indices in terms + /* The SGE keeps track of its Producer and Consumer Indices in terms * of Egress Queue Units so we can only tell it about integral numbers * of multiples of Free List Entries per Egress Queue Units ... */ if (fl->pend_cred >= FL_PER_EQ_UNIT) { - val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); - if (!is_t4(adapter->params.chip)) - val |= DBTYPE(1); + if (is_t4(adapter->params.chip)) + val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); + else + val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) | + DBTYPE(1); + val |= DBPRIO(1); + + /* Make sure all memory writes to the Free List queue are + * committed before we tell the hardware about them. + */ wmb(); - t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, - DBPRIO(1) | - QID(fl->cntxt_id) | val); + + /* If we don't have access to the new User Doorbell (T5+), use + * the old doorbell mechanism; otherwise use the new BAR2 + * mechanism. + */ + if (unlikely(fl->bar2_addr == NULL)) { + t4_write_reg(adapter, + T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, + QID(fl->cntxt_id) | val); + } else { + writel(val | QID(fl->bar2_qid), + fl->bar2_addr + SGE_UDB_KDOORBELL); + + /* This Write memory Barrier will force the write to + * the User Doorbell area to be flushed. + */ + wmb(); + } fl->pend_cred %= FL_PER_EQ_UNIT; } } @@ -589,6 +605,7 @@ static inline void poison_buf(struct page *page, size_t sz) static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, int n, gfp_t gfp) { + struct sge *s = &adapter->sge; struct page *page; dma_addr_t dma_addr; unsigned int cred = fl->avail; @@ -602,18 +619,19 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, */ BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT); + gfp |= __GFP_NOWARN; + /* * If we support large pages, prefer large buffers and fail over to * small pages if we can't allocate large pages to satisfy the refill. * If we don't support large pages, drop directly into the small page * allocation code. 
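 * Large buffers are distinguished downstream by tagging the buffer's
 * DMA address with the RX_LARGE_BUF bit -- see the "dma_addr |=
 * RX_LARGE_BUF" below and its consumer get_buf_size() above.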
*/ - if (FL_PG_ORDER == 0) + if (s->fl_pg_order == 0) goto alloc_small_pages; while (n) { - page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, - FL_PG_ORDER); + page = __dev_alloc_pages(gfp, s->fl_pg_order); if (unlikely(!page)) { /* * We've failed in our attempt to allocate a "large @@ -623,10 +641,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, */ fl->large_alloc_failed++; break; } - poison_buf(page, PAGE_SIZE << FL_PG_ORDER); + poison_buf(page, PAGE_SIZE << s->fl_pg_order); dma_addr = dma_map_page(adapter->pdev_dev, page, 0, - PAGE_SIZE << FL_PG_ORDER, + PAGE_SIZE << s->fl_pg_order, PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { /* @@ -637,7 +655,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, * because DMA mapping resources are typically * critical resources once they become scarce. */ - __free_pages(page, FL_PG_ORDER); + __free_pages(page, s->fl_pg_order); goto out; } dma_addr |= RX_LARGE_BUF; @@ -657,7 +675,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, alloc_small_pages: while (n--) { - page = __skb_alloc_page(gfp | __GFP_NOWARN, NULL); + page = __dev_alloc_page(gfp); if (unlikely(!page)) { fl->alloc_failed++; break; } @@ -693,7 +711,7 @@ out: fl->pend_cred += cred; ring_fl_db(adapter, fl); - if (unlikely(fl_starving(fl))) { + if (unlikely(fl_starving(adapter, fl))) { smp_wmb(); set_bit(fl->cntxt_id, adapter->sge.starving_fl); } @@ -906,7 +924,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq, sgl->addr0 = cpu_to_be64(addr[1]); } - sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | + sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); if (likely(--nfrags == 0)) return; @@ -952,14 +970,74 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq, static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, int n) { - /* - * Warn if we write doorbells with the wrong priority and write - * descriptors before telling HW. + /* Make sure that all writes to the TX Descriptors are committed + * before we tell the hardware about them. */ - WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO(1)); wmb(); - t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, - QID(tq->cntxt_id) | PIDX(n)); + + /* If we don't have access to the new User Doorbell (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(tq->bar2_addr == NULL)) { + u32 val = PIDX(n); + + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, + QID(tq->cntxt_id) | val); + } else { + u32 val = PIDX_T5(n); + + /* T4 and later chips share the same PIDX field offset within + * the doorbell, but T5 and later shrank the field in order to + * gain a bit for Doorbell Priority. The field was absurdly + * large in the first place (14 bits) so we just use the T5 + * and later limits and warn if a Queue ID is too large. + */ + WARN_ON(val & DBPRIO(1)); + + /* If we're only writing a single Egress Unit and the BAR2 + * Queue ID is 0, we can use the Write Combining Doorbell + * Gather Buffer; otherwise we use the simple doorbell. + */ + if (n == 1 && tq->bar2_qid == 0) { + unsigned int index = (tq->pidx ?
(tq->pidx - 1) + : (tq->size - 1)); + __be64 *src = (__be64 *)&tq->desc[index]; + __be64 __iomem *dst = (__be64 *)(tq->bar2_addr + + SGE_UDB_WCDOORBELL); + unsigned int count = EQ_UNIT / sizeof(__be64); + + /* Copy the TX Descriptor in a tight loop in order to + * try to get it to the adapter in a single Write + * Combined transfer on the PCI-E Bus. If the Write + * Combine fails (say because of an interrupt, etc.) + * the hardware will simply take the last write as a + * simple doorbell write with a PIDX Increment of 1 + * and will fetch the TX Descriptor from memory via + * DMA. + */ + while (count) { + writeq(*src, dst); + src++; + dst++; + count--; + } + } else + writel(val | QID(tq->bar2_qid), + tq->bar2_addr + SGE_UDB_KDOORBELL); + + /* This Write Memory Barrier will force the write to the User + * Doorbell area to be flushed. This is needed to prevent + * writes on different CPUs for the same queue from hitting + * the adapter out of order. This is required when some Work + * Requests take the Write Combine Gather Buffer path (user + * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some + * take the traditional path where we simply increment the + * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the + * hardware DMA read the actual Work Request. + */ + wmb(); + } } /** @@ -1149,7 +1227,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) goto out_free; } - wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); if (unlikely(credits < ETHTXQ_STOP_THRES)) { /* * After we're done injecting the Work Request for this @@ -1161,7 +1239,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) * has opened up. */ txq_stop(txq); - wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } /* @@ -1191,9 +1269,9 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; wr->op_immdlen = - cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | - FW_WR_IMMDLEN(sizeof(*lso) + - sizeof(*cpl))); + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(sizeof(*lso) + + sizeof(*cpl))); /* * Fill in the LSO CPL message. */ @@ -1228,8 +1306,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) len = is_eth_imm(skb) ? 
skb->len + sizeof(*cpl) : sizeof(*cpl); wr->op_immdlen = - cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | - FW_WR_IMMDLEN(len)); + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(len)); /* * Set up TX Packet CPL pointer, control word and perform @@ -1468,6 +1546,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl) static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, const struct cpl_rx_pkt *pkt) { + struct adapter *adapter = rxq->rspq.adapter; + struct sge *s = &adapter->sge; int ret; struct sk_buff *skb; @@ -1478,8 +1558,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, return; } - copy_frags(skb, gl, PKTSHIFT); - skb->len = gl->tot_len - PKTSHIFT; + copy_frags(skb, gl, s->pktshift); + skb->len = gl->tot_len - s->pktshift; skb->data_len = skb->len; skb->truesize += skb->data_len; skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -1516,6 +1596,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, bool csum_ok = pkt->csum_calc && !pkt->err_vec && (rspq->netdev->features & NETIF_F_RXCSUM); struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); + struct adapter *adapter = rspq->adapter; + struct sge *s = &adapter->sge; /* * If this is a good TCP packet and we have Generic Receive Offload @@ -1537,7 +1619,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, rxq->stats.rx_drops++; return 0; } - __skb_pull(skb, PKTSHIFT); + __skb_pull(skb, s->pktshift); skb->protocol = eth_type_trans(skb, rspq->netdev); skb_record_rx_queue(skb, rspq->idx); rxq->stats.pkts++; @@ -1648,6 +1730,8 @@ static inline void rspq_next(struct sge_rspq *rspq) static int process_responses(struct sge_rspq *rspq, int budget) { struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); + struct adapter *adapter = rspq->adapter; + struct sge *s = &adapter->sge; int budget_left = budget; while (likely(budget_left)) { @@ -1697,7 +1781,7 @@ static int process_responses(struct sge_rspq *rspq, int budget) BUG_ON(frag >= MAX_SKB_FRAGS); BUG_ON(rxq->fl.avail == 0); sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; - bufsz = get_buf_size(sdesc); + bufsz = get_buf_size(adapter, sdesc); fp->page = sdesc->page; fp->offset = rspq->offset; fp->size = min(bufsz, len); @@ -1726,7 +1810,7 @@ static int process_responses(struct sge_rspq *rspq, int budget) */ ret = rspq->handler(rspq, rspq->cur_desc, &gl); if (likely(ret == 0)) - rspq->offset += ALIGN(fp->size, FL_ALIGN); + rspq->offset += ALIGN(fp->size, s->fl_align); else restore_rx_bufs(&gl, &rxq->fl, frag); } else if (likely(rsp_type == RSP_TYPE_CPL)) { @@ -1779,6 +1863,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) unsigned int intr_params; struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi); int work_done = process_responses(rspq, budget); + u32 val; if (likely(work_done < budget)) { napi_complete(napi); @@ -1790,11 +1875,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) if (unlikely(work_done == 0)) rspq->unhandled_irqs++; - t4_write_reg(rspq->adapter, - T4VF_SGE_BASE_ADDR + SGE_VF_GTS, - CIDXINC(work_done) | - INGRESSQID((u32)rspq->cntxt_id) | - SEINTARM(intr_params)); + val = CIDXINC(work_done) | SEINTARM(intr_params); + if (is_t4(rspq->adapter->params.chip)) { + t4_write_reg(rspq->adapter, + T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + val | INGRESSQID((u32)rspq->cntxt_id)); + } else { + writel(val | INGRESSQID(rspq->bar2_qid), + rspq->bar2_addr + SGE_UDB_GTS); + wmb(); + } return work_done; } @@ -1819,6 +1909,7 @@ static unsigned int 
process_intrq(struct adapter *adapter) struct sge *s = &adapter->sge; struct sge_rspq *intrq = &s->intrq; unsigned int work_done; + u32 val; spin_lock(&adapter->sge.intrq_lock); for (work_done = 0; ; work_done++) { @@ -1884,10 +1975,15 @@ static unsigned int process_intrq(struct adapter *adapter) rspq_next(intrq); } - t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, - CIDXINC(work_done) | - INGRESSQID(intrq->cntxt_id) | - SEINTARM(intrq->intr_params)); + val = CIDXINC(work_done) | SEINTARM(intrq->intr_params); + if (is_t4(adapter->params.chip)) + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + val | INGRESSQID(intrq->cntxt_id)); + else { + writel(val | INGRESSQID(intrq->bar2_qid), + intrq->bar2_addr + SGE_UDB_GTS); + wmb(); + } spin_unlock(&adapter->sge.intrq_lock); @@ -1963,7 +2059,7 @@ static void sge_rx_timer_cb(unsigned long data) * schedule napi but the FL is no longer starving. * No biggie. */ - if (fl_starving(fl)) { + if (fl_starving(adapter, fl)) { struct sge_eth_rxq *rxq; rxq = container_of(fl, struct sge_eth_rxq, fl); @@ -2033,6 +2129,35 @@ static void sge_tx_timer_cb(unsigned long data) } /** + * bar2_address - return the BAR2 address for an SGE Queue's Registers + * @adapter: the adapter + * @qid: the SGE Queue ID + * @qtype: the SGE Queue Type (Egress or Ingress) + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 address for the SGE Queue Registers associated with + * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also + * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE + * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" + * Registers are supported (e.g. the Write Combining Doorbell Buffer). + */ +static void __iomem *bar2_address(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + unsigned int *pbar2_qid) +{ + u64 bar2_qoffset; + int ret; + + ret = t4_bar2_sge_qregs(adapter, qid, qtype, + &bar2_qoffset, pbar2_qid); + if (ret) + return NULL; + + return adapter->bar2 + bar2_qoffset; +} + +/** * t4vf_sge_alloc_rxq - allocate an SGE RX Queue * @adapter: the adapter * @rspq: pointer to the new rxq's Response Queue to be filled in @@ -2047,6 +2172,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, int intr_dest, struct sge_fl *fl, rspq_handler_t hnd) { + struct sge *s = &adapter->sge; struct port_info *pi = netdev_priv(dev); struct fw_iq_cmd cmd, rpl; int ret, iqandst, flsz = 0; @@ -2084,26 +2210,26 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, * into OS-independent common code ...
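 * (One note on the conversion here: FW_LEN16(cmd), used just below,
 * now expands to FW_CMD_LEN16_V(sizeof(cmd) / 16) -- see the
 * t4vf_common.h hunk later in this patch.)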
*/ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC | - FW_IQ_CMD_IQSTART(1) | + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F | + FW_IQ_CMD_IQSTART_F | FW_LEN16(cmd)); cmd.type_to_iqandstindex = - cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | - FW_IQ_CMD_IQASYNCH(iqasynch) | - FW_IQ_CMD_VIID(pi->viid) | - FW_IQ_CMD_IQANDST(iqandst) | - FW_IQ_CMD_IQANUS(1) | - FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) | - FW_IQ_CMD_IQANDSTINDEX(intr_dest)); + cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | + FW_IQ_CMD_IQASYNCH_V(iqasynch) | + FW_IQ_CMD_VIID_V(pi->viid) | + FW_IQ_CMD_IQANDST_V(iqandst) | + FW_IQ_CMD_IQANUS_V(1) | + FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) | + FW_IQ_CMD_IQANDSTINDEX_V(intr_dest)); cmd.iqdroprss_to_iqesize = - cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) | - FW_IQ_CMD_IQGTSMODE | - FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) | - FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4)); + cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) | + FW_IQ_CMD_IQGTSMODE_F | + FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) | + FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4)); cmd.iqsize = cpu_to_be16(rspq->size); cmd.iqaddr = cpu_to_be64(rspq->phys_addr); @@ -2117,7 +2243,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, fl->size = roundup(fl->size, FL_PER_EQ_UNIT); fl->desc = alloc_ring(adapter->pdev_dev, fl->size, sizeof(__be64), sizeof(struct rx_sw_desc), - &fl->addr, &fl->sdesc, STAT_LEN); + &fl->addr, &fl->sdesc, s->stat_len); if (!fl->desc) { ret = -ENOMEM; goto err; @@ -2129,7 +2255,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, * free list ring) in Egress Queue Units. */ flsz = (fl->size / FL_PER_EQ_UNIT + - STAT_LEN / EQ_UNIT); + s->stat_len / EQ_UNIT); /* * Fill in all the relevant firmware Ingress Queue Command @@ -2137,13 +2263,13 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, */ cmd.iqns_to_fl0congen = cpu_to_be32( - FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) | - FW_IQ_CMD_FL0PACKEN(1) | - FW_IQ_CMD_FL0PADEN(1)); + FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) | + FW_IQ_CMD_FL0PACKEN_F | + FW_IQ_CMD_FL0PADEN_F); cmd.fl0dcaen_to_fl0cidxfthresh = cpu_to_be16( - FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) | - FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B)); + FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) | + FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B)); cmd.fl0size = cpu_to_be16(flsz); cmd.fl0addr = cpu_to_be64(fl->addr); } @@ -2162,6 +2288,10 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, rspq->gen = 1; rspq->next_intr_params = rspq->intr_params; rspq->cntxt_id = be16_to_cpu(rpl.iqid); + rspq->bar2_addr = bar2_address(adapter, + rspq->cntxt_id, + T4_BAR2_QTYPE_INGRESS, + &rspq->bar2_qid); rspq->abs_id = be16_to_cpu(rpl.physiqid); rspq->size--; /* subtract status entry */ rspq->adapter = adapter; @@ -2180,6 +2310,15 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, fl->alloc_failed = 0; fl->large_alloc_failed = 0; fl->starving = 0; + + /* Note, we must initialize the BAR2 Free List User Doorbell + * information before refilling the Free List! 
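+ * (The ordering is load-bearing: refill_fl() ends by calling
+ * ring_fl_db(), which writes through fl->bar2_addr whenever it is
+ * non-NULL.)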
+ */ + fl->bar2_addr = bar2_address(adapter, + fl->cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &fl->bar2_qid); + refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL); } @@ -2217,6 +2356,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, struct net_device *dev, struct netdev_queue *devq, unsigned int iqid) { + struct sge *s = &adapter->sge; int ret, nentries; struct fw_eq_eth_cmd cmd, rpl; struct port_info *pi = netdev_priv(dev); @@ -2225,7 +2365,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, * Calculate the size of the hardware TX Queue (including the Status * Page on the end of the TX Queue) in units of TX Descriptors. */ - nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); /* * Allocate the hardware ring for the TX ring (with space for its @@ -2234,7 +2374,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, sizeof(struct tx_desc), sizeof(struct tx_sw_desc), - &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); + &txq->q.phys_addr, &txq->q.sdesc, s->stat_len); if (!txq->q.desc) return -ENOMEM; @@ -2246,24 +2386,25 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, * into the common code ... */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC | - FW_EQ_ETH_CMD_EQSTART | + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F | + FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(cmd)); - cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE | - FW_EQ_ETH_CMD_VIID(pi->viid)); + cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F | + FW_EQ_ETH_CMD_VIID_V(pi->viid)); cmd.fetchszm_to_iqid = - cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) | - FW_EQ_ETH_CMD_PCIECHN(pi->port_id) | - FW_EQ_ETH_CMD_IQID(iqid)); + cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) | + FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) | + FW_EQ_ETH_CMD_IQID_V(iqid)); cmd.dcaen_to_eqsize = - cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) | - FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) | - FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) | - FW_EQ_ETH_CMD_EQSIZE(nentries)); + cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) | + FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) | + FW_EQ_ETH_CMD_CIDXFTHRESH_V( + SGE_CIDXFLUSHTHRESH_32) | + FW_EQ_ETH_CMD_EQSIZE_V(nentries)); cmd.eqaddr = cpu_to_be64(txq->q.phys_addr); /* @@ -2289,9 +2430,13 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, txq->q.cidx = 0; txq->q.pidx = 0; txq->q.stat = (void *)&txq->q.desc[txq->q.size]; - txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd)); + txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd)); + txq->q.bar2_addr = bar2_address(adapter, + txq->q.cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &txq->q.bar2_qid); txq->q.abs_id = - FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd)); + FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd)); txq->txq = devq; txq->tso = 0; txq->tx_cso = 0; @@ -2307,8 +2452,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, */ static void free_txq(struct adapter *adapter, struct sge_txq *tq) { + struct sge *s = &adapter->sge; + 
dma_free_coherent(adapter->pdev_dev, - tq->size * sizeof(*tq->desc) + STAT_LEN, + tq->size * sizeof(*tq->desc) + s->stat_len, tq->desc, tq->phys_addr); tq->cntxt_id = 0; tq->sdesc = NULL; @@ -2322,6 +2469,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq) static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, struct sge_fl *fl) { + struct sge *s = &adapter->sge; unsigned int flid = fl ? fl->cntxt_id : 0xffff; t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, @@ -2337,7 +2485,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, if (fl) { free_rx_bufs(adapter, fl, fl->avail); dma_free_coherent(adapter->pdev_dev, - fl->size * sizeof(*fl->desc) + STAT_LEN, + fl->size * sizeof(*fl->desc) + s->stat_len, fl->desc, fl->addr); kfree(fl->sdesc); fl->sdesc = NULL; @@ -2423,6 +2571,7 @@ int t4vf_sge_init(struct adapter *adapter) u32 fl0 = sge_params->sge_fl_buffer_size[0]; u32 fl1 = sge_params->sge_fl_buffer_size[1]; struct sge *s = &adapter->sge; + unsigned int ingpadboundary, ingpackboundary; /* * Start by vetting the basic SGE parameters which have been set up by @@ -2443,12 +2592,48 @@ int t4vf_sge_init(struct adapter *adapter) * Now translate the adapter parameters into our internal forms. */ if (fl1) - FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT; - STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) - ? 128 : 64); - PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control); - FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + - SGE_INGPADBOUNDARY_SHIFT); + s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; + s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) + ? 128 : 64); + s->pktshift = PKTSHIFT_GET(sge_params->sge_control); + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. (Note that it makes no real practical sense to + * have the Padding Boundary be larger than the Packing Boundary but you + * could set the chip up that way and, in fact, legacy T4 code would + * end up doing this because it would initialize the Padding Boundary and + * leave the Packing Boundary initialized to 0 (16 bytes).) + */ + ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + + X_INGPADBOUNDARY_SHIFT); + if (is_t4(adapter->params.chip)) { + s->fl_align = ingpadboundary; + } else { + /* T5 has a different interpretation of one of the PCIe Packing + * Boundary values. + */ + ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2); + if (ingpackboundary == INGPACKBOUNDARY_16B_X) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + INGPACKBOUNDARY_SHIFT_X); + + s->fl_align = max(ingpadboundary, ingpackboundary); + } + + /* A FL with <= fl_starve_thres buffers is starving and a periodic + * timer will attempt to refill it. This needs to be larger than the + * SGE's Egress Congestion Threshold. If it isn't, then we can get + * stuck waiting for new packets while the SGE is waiting for us to + * give it more Free List entries. (Note that the SGE's Egress + * Congestion Threshold is in units of 2 Free List pointers.) + */ + s->fl_starve_thres + = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1; /* * Set up tasklet timers.
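To make the decode above concrete, here is a worked example with hypothetical register field values; the 5-bit base shifts are assumptions about X_INGPADBOUNDARY_SHIFT and INGPACKBOUNDARY_SHIFT_X, not values shown in this patch:

	/* Hypothetical T5 decode, assuming both base shifts are 5:
	 *   INGPADBOUNDARY_GET(sge_control) == 0 -> ingpadboundary = 1 << (0 + 5) = 32
	 *   INGPACKBOUNDARY_G(sge_control2) == 1 -> ingpackboundary = 1 << (1 + 5) = 64
	 *   s->fl_align = max(32, 64) = 64 bytes
	 *
	 * And for the starvation threshold: with
	 * EGRTHRESHOLD_GET(sge_congestion_control) == 16 (i.e. 32 Free List
	 * pointers), s->fl_starve_thres = 16 * 2 + 1 = 33 buffers.
	 */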
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 95df61dcb4ce..8d3237f5e364 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -67,7 +67,7 @@ enum chip_type { /* * The "len16" field of a Firmware Command Structure ... */ -#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) +#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) /* * Per-VF statistics. @@ -134,11 +134,16 @@ struct dev_params { */ struct sge_params { u32 sge_control; /* padding, boundaries, lengths, etc. */ - u32 sge_host_page_size; /* RDMA page sizes */ - u32 sge_queues_per_page; /* RDMA queues/page */ - u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */ + u32 sge_control2; /* T5: more of the same */ + u32 sge_host_page_size; /* PF0-7 page sizes */ + u32 sge_egress_queues_per_page; /* PF0-7 egress queues/page */ + u32 sge_ingress_queues_per_page;/* PF0-7 ingress queues/page */ + u32 sge_vf_hps; /* host page size for our vf */ + u32 sge_vf_eq_qpp; /* egress queues/page for our VF */ + u32 sge_vf_iq_qpp; /* ingress queues/page for our VF */ u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ + u32 sge_congestion_control; /* congestion thresholds, etc. */ u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */ u32 sge_timer_value_2_and_3; u32 sge_timer_value_4_and_5; @@ -265,6 +270,8 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false); } +#define CHELSIO_PCI_ID_VER(dev_id) ((dev_id) >> 12) + static inline int is_t4(enum chip_type chip) { return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; @@ -276,6 +283,13 @@ int t4vf_port_init(struct adapter *, int); int t4vf_fw_reset(struct adapter *); int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); +enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; +int t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + int t4vf_get_sge_params(struct adapter *); int t4vf_get_vpd_params(struct adapter *); int t4vf_get_dev_params(struct adapter *); @@ -307,5 +321,6 @@ int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int, int t4vf_eth_eq_free(struct adapter *, unsigned int); int t4vf_handle_fw_rpl(struct adapter *, const __be64 *); +int t4vf_prep_adapter(struct adapter *); #endif /* __T4VF_COMMON_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index e984fdc48ba2..02e8833b7797 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -204,20 +204,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, /* return value in low-order little-endian word */ v = t4_read_reg(adapter, mbox_data); - if (FW_CMD_RETVAL_GET(v)) + if (FW_CMD_RETVAL_G(v)) dump_mbox(adapter, "FW Error", mbox_data); if (rpl) { /* request bit in high-order BE word */ WARN_ON((be32_to_cpu(*(const u32 *)cmd) - & FW_CMD_REQUEST) == 0); + & FW_CMD_REQUEST_F) == 0); get_mbox_rpl(adapter, rpl, size, mbox_data); WARN_ON((be32_to_cpu(*(u32 *)rpl) - & FW_CMD_REQUEST) != 0); + & FW_CMD_REQUEST_F) != 0); } t4_write_reg(adapter, mbox_ctl, MBOWNER(MBOX_OWNER_NONE)); - return -FW_CMD_RETVAL_GET(v); + 
return -FW_CMD_RETVAL_G(v); } } @@ -287,17 +287,17 @@ int t4vf_port_init(struct adapter *adapter, int pidx) * like MAC address, etc. */ memset(&vi_cmd, 0, sizeof(vi_cmd)); - vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd)); - vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(pi->viid)); + vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid)); v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl); if (v) return v; - BUG_ON(pi->port_id != FW_VI_CMD_PORTID_GET(vi_rpl.portid_pkd)); - pi->rss_size = FW_VI_CMD_RSSSIZE_GET(be16_to_cpu(vi_rpl.rsssize_pkd)); + BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd)); + pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd)); t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac); /* @@ -308,12 +308,12 @@ int t4vf_port_init(struct adapter *adapter, int pidx) return 0; memset(&port_cmd, 0, sizeof(port_cmd)); - port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ | - FW_PORT_CMD_PORTID(pi->port_id)); + port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(pi->port_id)); port_cmd.action_to_len16 = - cpu_to_be32(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | + cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | FW_LEN16(port_cmd)); v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl); if (v) @@ -349,8 +349,8 @@ int t4vf_fw_reset(struct adapter *adapter) struct fw_reset_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) | - FW_CMD_WRITE); + cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) | + FW_CMD_WRITE_F); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } @@ -377,12 +377,12 @@ static int t4vf_query_params(struct adapter *adapter, unsigned int nparams, return -EINVAL; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, param[nparams].mnem), 16); - cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) p->mnem = htonl(*params++); @@ -415,12 +415,12 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams, return -EINVAL; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F); len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, param[nparams]), 16); - cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) { p->mnem = cpu_to_be32(*params++); p->val = cpu_to_be32(*vals++); @@ -430,6 +430,95 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams, } /** + * t4_bar2_sge_qregs - return BAR2 SGE Queue register information + * @adapter: the adapter + * @qid: the Queue ID + * @qtype: the Ingress or Egress type for @qid + * @pbar2_qoffset: BAR2 Queue Offset + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID 
inferred SGE Queues + * + * Returns the BAR2 SGE Queue Registers information associated with the + * indicated Absolute Queue ID. These are passed back in return value + * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue + * and T4_BAR2_QTYPE_INGRESS for Ingress Queues. + * + * This may return an error which indicates that BAR2 SGE Queue + * registers aren't available. If an error is not returned, then the + * following values are returned: + * + * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers + * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid + * + * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which + * require the "Inferred Queue ID" ability may be used. E.g. the + * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, + * then these "Inferred Queue ID" registers may not be used. + */ +int t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid) +{ + unsigned int page_shift, page_size, qpp_shift, qpp_mask; + u64 bar2_page_offset, bar2_qoffset; + unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred; + + /* T4 doesn't support BAR2 SGE Queue registers. + */ + if (is_t4(adapter->params.chip)) + return -EINVAL; + + /* Get our SGE Page Size parameters. + */ + page_shift = adapter->params.sge.sge_vf_hps + 10; + page_size = 1 << page_shift; + + /* Get the right Queues per Page parameters for our Queue. + */ + qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS + ? adapter->params.sge.sge_vf_eq_qpp + : adapter->params.sge.sge_vf_iq_qpp); + qpp_mask = (1 << qpp_shift) - 1; + + /* Calculate the basics of the BAR2 SGE Queue register area: + * o The BAR2 page the Queue registers will be in. + * o The BAR2 Queue ID. + * o The BAR2 Queue ID Offset into the BAR2 page. + */ + bar2_page_offset = ((qid >> qpp_shift) << page_shift); + bar2_qid = qid & qpp_mask; + bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; + + /* If the BAR2 Queue ID Offset is less than the Page Size, then the + * hardware will infer the Absolute Queue ID simply from the writes to + * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a + * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply + * write to the first BAR2 SGE Queue Area within the BAR2 Page with + * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID + * from the BAR2 Page and BAR2 Queue ID. + * + * One important consequence of this is that some BAR2 SGE registers + * have a "Queue ID" field and we can write the BAR2 SGE Queue ID + * there. But other registers synthesize the SGE Queue ID purely + * from the writes to the registers -- the Write Combined Doorbell + * Buffer is a good example. These BAR2 SGE Registers are only + * available for those BAR2 SGE Register areas where the SGE Absolute + * Queue ID can be inferred from simple writes.
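+ * A worked example (hypothetical values): with a 4KB SGE page
+ * (page_shift == 12), 16 queues per page (qpp_shift == 4) and an
+ * assumed SGE_UDB_SIZE of 128 bytes, qid == 35 gives
+ *   bar2_page_offset == (35 >> 4) << 12 == 0x2000,
+ *   bar2_qid == 35 & 15 == 3, bar2_qid_offset == 3 * 128 == 384.
+ * Since 384 < 4096 the Queue ID can be inferred, so we return
+ *   *pbar2_qoffset == 0x2180 and *pbar2_qid == 0.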
+ */ + bar2_qoffset = bar2_page_offset; + bar2_qinferred = (bar2_qid_offset < page_size); + if (bar2_qinferred) { + bar2_qoffset += bar2_qid_offset; + bar2_qid = 0; + } + + *pbar2_qoffset = bar2_qoffset; + *pbar2_qid = bar2_qid; + return 0; +} + +/** * t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters * @adapter: the adapter * @@ -443,20 +532,20 @@ int t4vf_get_sge_params(struct adapter *adapter) u32 params[7], vals[7]; int v; - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_CONTROL)); - params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_HOST_PAGE_SIZE)); - params[2] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE0)); - params[3] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE1)); - params[4] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_0_AND_1)); - params[5] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_2_AND_3)); - params[6] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_4_AND_5)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE)); + params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0)); + params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1)); + params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1)); + params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3)); + params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5)); v = t4vf_query_params(adapter, 7, params, vals); if (v) return v; @@ -468,12 +557,87 @@ int t4vf_get_sge_params(struct adapter *adapter) sge_params->sge_timer_value_2_and_3 = vals[5]; sge_params->sge_timer_value_4_and_5 = vals[6]; - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD)); - v = t4vf_query_params(adapter, 1, params, vals); + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately with the Padding Boundary in SGE_CONTROL and and Packing + * Boundary in SGE_CONTROL2. So for T5 and later we need to grab + * SGE_CONTROL in order to determine how ingress packet data will be + * laid out in Packed Buffer Mode. Unfortunately, older versions of + * the firmware won't let us retrieve SGE_CONTROL2 so if we get a + * failure grabbing it we throw an error since we can't figure out the + * right value. 
+ */ + if (!is_t4(adapter->params.chip)) { + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A)); + v = t4vf_query_params(adapter, 1, params, vals); + if (v != FW_SUCCESS) { + dev_err(adapter->pdev_dev, + "Unable to get SGE Control2; " + "probably old firmware.\n"); + return v; + } + sge_params->sge_control2 = vals[0]; + } + + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL)); + v = t4vf_query_params(adapter, 2, params, vals); if (v) return v; sge_params->sge_ingress_rx_threshold = vals[0]; + sge_params->sge_congestion_control = vals[1]; + + /* For T5 and later we want to use the new BAR2 Doorbells. + * Unfortunately, older firmware didn't allow the this register to be + * read. + */ + if (!is_t4(adapter->params.chip)) { + u32 whoami; + unsigned int pf, s_hps, s_qpp; + + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V( + SGE_EGRESS_QUEUES_PER_PAGE_VF_A)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V( + SGE_INGRESS_QUEUES_PER_PAGE_VF_A)); + v = t4vf_query_params(adapter, 2, params, vals); + if (v != FW_SUCCESS) { + dev_warn(adapter->pdev_dev, + "Unable to get VF SGE Queues/Page; " + "probably old firmware.\n"); + return v; + } + sge_params->sge_egress_queues_per_page = vals[0]; + sge_params->sge_ingress_queues_per_page = vals[1]; + + /* We need the Queues/Page for our VF. This is based on the + * PF from which we're instantiated and is indexed in the + * register we just read. Do it once here so other code in + * the driver can just use it. + */ + whoami = t4_read_reg(adapter, + T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI); + pf = SOURCEPF_GET(whoami); + + s_hps = (HOSTPAGESIZEPF0_S + + (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf); + sge_params->sge_vf_hps = + ((sge_params->sge_host_page_size >> s_hps) + & HOSTPAGESIZEPF0_M); + + s_qpp = (QUEUESPERPAGEPF0_S + + (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf); + sge_params->sge_vf_eq_qpp = + ((sge_params->sge_egress_queues_per_page >> s_qpp) + & QUEUESPERPAGEPF0_MASK); + sge_params->sge_vf_iq_qpp = + ((sge_params->sge_ingress_queues_per_page >> s_qpp) + & QUEUESPERPAGEPF0_MASK); + } return 0; } @@ -491,8 +655,8 @@ int t4vf_get_vpd_params(struct adapter *adapter) u32 params[7], vals[7]; int v; - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK)); v = t4vf_query_params(adapter, 1, params, vals); if (v) return v; @@ -514,10 +678,10 @@ int t4vf_get_dev_params(struct adapter *adapter) u32 params[7], vals[7]; int v; - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV)); - params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV)); v = t4vf_query_params(adapter, 2, params, vals); if (v) return v; @@ -545,9 +709,9 @@ int t4vf_get_rss_glb_config(struct adapter *adapter) * our RSS configuration. 
*/ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); if (v) @@ -559,7 +723,7 @@ int t4vf_get_rss_glb_config(struct adapter *adapter) * filtering at this point to weed out modes which don't support * VF Drivers ... */ - rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_GET( + rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G( be32_to_cpu(rpl.u.manual.mode_pkd)); switch (rss->mode) { case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { @@ -567,26 +731,26 @@ int t4vf_get_rss_glb_config(struct adapter *adapter) rpl.u.basicvirtual.synmapen_to_hashtoeplitz); rss->u.basicvirtual.synmapen = - ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0); rss->u.basicvirtual.syn4tupenipv6 = - ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0); rss->u.basicvirtual.syn2tupenipv6 = - ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0); rss->u.basicvirtual.syn4tupenipv4 = - ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0); rss->u.basicvirtual.syn2tupenipv4 = - ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0); rss->u.basicvirtual.ofdmapen = - ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0); rss->u.basicvirtual.tnlmapen = - ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0); rss->u.basicvirtual.tnlalllookup = - ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0); rss->u.basicvirtual.hashtoeplitz = - ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0); /* we need at least Tunnel Map Enable to be set */ if (!rss->u.basicvirtual.tnlmapen) @@ -621,9 +785,9 @@ int t4vf_get_vfres(struct adapter *adapter) * with error on command failure. */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PFVF_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); if (v) @@ -633,22 +797,22 @@ int t4vf_get_vfres(struct adapter *adapter) * Extract VF resource limits and return success. 
*/ word = be32_to_cpu(rpl.niqflint_niq); - vfres->niqflint = FW_PFVF_CMD_NIQFLINT_GET(word); - vfres->niq = FW_PFVF_CMD_NIQ_GET(word); + vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word); + vfres->niq = FW_PFVF_CMD_NIQ_G(word); word = be32_to_cpu(rpl.type_to_neq); - vfres->neq = FW_PFVF_CMD_NEQ_GET(word); - vfres->pmask = FW_PFVF_CMD_PMASK_GET(word); + vfres->neq = FW_PFVF_CMD_NEQ_G(word); + vfres->pmask = FW_PFVF_CMD_PMASK_G(word); word = be32_to_cpu(rpl.tc_to_nexactf); - vfres->tc = FW_PFVF_CMD_TC_GET(word); - vfres->nvi = FW_PFVF_CMD_NVI_GET(word); - vfres->nexactf = FW_PFVF_CMD_NEXACTF_GET(word); + vfres->tc = FW_PFVF_CMD_TC_G(word); + vfres->nvi = FW_PFVF_CMD_NVI_G(word); + vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word); word = be32_to_cpu(rpl.r_caps_to_nethctrl); - vfres->r_caps = FW_PFVF_CMD_R_CAPS_GET(word); - vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_GET(word); - vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_GET(word); + vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word); + vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word); + vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word); return 0; } @@ -669,9 +833,9 @@ int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid, int v; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ | + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | FW_RSS_VI_CONFIG_CMD_VIID(viid)); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); @@ -683,17 +847,17 @@ int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid, u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen); config->basicvirtual.ip6fourtupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0); config->basicvirtual.ip6twotupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0); config->basicvirtual.ip4fourtupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0); config->basicvirtual.ip4twotupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0); config->basicvirtual.udpen = - ((word & FW_RSS_VI_CONFIG_CMD_UDPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0); config->basicvirtual.defaultq = - FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(word); + FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word); break; } @@ -719,9 +883,9 @@ int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid, struct fw_rss_vi_config_cmd cmd, rpl; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_RSS_VI_CONFIG_CMD_VIID(viid)); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); switch (adapter->params.rss.mode) { @@ -729,16 +893,16 @@ int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid, u32 word = 0; if (config->basicvirtual.ip6fourtupen) - word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; + word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F; if (config->basicvirtual.ip6twotupen) - word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; + word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F; if (config->basicvirtual.ip4fourtupen) - word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; + word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F; if (config->basicvirtual.ip4twotupen) - word 
|= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; + word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F; if (config->basicvirtual.udpen) - word |= FW_RSS_VI_CONFIG_CMD_UDPEN; - word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ( + word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F; + word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V( config->basicvirtual.defaultq); cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word); break; @@ -777,10 +941,10 @@ int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid, * Initialize firmware command template to write the RSS table. */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_RSS_IND_TBL_CMD_VIID(viid)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_RSS_IND_TBL_CMD_VIID_V(viid)); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); /* @@ -831,9 +995,9 @@ int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid, if (rsp >= rsp_end) rsp = rspq; } - *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) | - FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) | - FW_RSS_IND_TBL_CMD_IQ2(qbuf[2])); + *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) | + FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) | + FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2])); } /* @@ -866,18 +1030,18 @@ int t4vf_alloc_vi(struct adapter *adapter, int port_id) * VIID. */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_CMD_EXEC); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_F); cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | - FW_VI_CMD_ALLOC); - cmd.portid_pkd = FW_VI_CMD_PORTID(port_id); + FW_VI_CMD_ALLOC_F); + cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id); v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); if (v) return v; - return FW_VI_CMD_VIID_GET(be16_to_cpu(rpl.type_viid)); + return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid)); } /** @@ -896,12 +1060,12 @@ int t4vf_free_vi(struct adapter *adapter, int viid) * Execute a VI command to free the Virtual Interface. 
*/ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F); cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | - FW_VI_CMD_FREE); - cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(viid)); + FW_VI_CMD_FREE_F); + cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid)); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } @@ -920,12 +1084,12 @@ int t4vf_enable_vi(struct adapter *adapter, unsigned int viid, struct fw_vi_enable_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC | - FW_VI_ENABLE_CMD_VIID(viid)); - cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(rx_en) | - FW_VI_ENABLE_CMD_EEN(tx_en) | + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | + FW_VI_ENABLE_CMD_VIID_V(viid)); + cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) | + FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(cmd)); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } @@ -944,11 +1108,11 @@ int t4vf_identify_port(struct adapter *adapter, unsigned int viid, struct fw_vi_enable_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC | - FW_VI_ENABLE_CMD_VIID(viid)); - cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED | + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | + FW_VI_ENABLE_CMD_VIID_V(viid)); + cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(cmd)); cmd.blinkdur = cpu_to_be16(nblinks); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); @@ -975,28 +1139,28 @@ int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid, /* convert to FW values */ if (mtu < 0) - mtu = FW_VI_RXMODE_CMD_MTU_MASK; + mtu = FW_VI_RXMODE_CMD_MTU_M; if (promisc < 0) - promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; + promisc = FW_VI_RXMODE_CMD_PROMISCEN_M; if (all_multi < 0) - all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; + all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M; if (bcast < 0) - bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; + bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M; if (vlanex < 0) - vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; + vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_RXMODE_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_VI_RXMODE_CMD_VIID(viid)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_VI_RXMODE_CMD_VIID_V(viid)); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); cmd.mtu_to_vlanexen = - cpu_to_be32(FW_VI_RXMODE_CMD_MTU(mtu) | - FW_VI_RXMODE_CMD_PROMISCEN(promisc) | - FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | - FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | - FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); + cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) | + FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) | + FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) | + FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) | + FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex)); return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); } @@ -1046,19 +1210,19 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, int i; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - (free ? 
FW_CMD_EXEC : 0) | - FW_VI_MAC_CMD_VIID(viid)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + (free ? FW_CMD_EXEC_F : 0) | + FW_VI_MAC_CMD_VIID_V(viid)); cmd.freemacs_to_len16 = - cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) | - FW_CMD_LEN16(len16)); + cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) | + FW_CMD_LEN16_V(len16)); for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { p->valid_to_idx = cpu_to_be16( - FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); + FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); } @@ -1069,7 +1233,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, break; for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) { - u16 index = FW_VI_MAC_CMD_IDX_GET( + u16 index = FW_VI_MAC_CMD_IDX_G( be16_to_cpu(p->valid_to_idx)); if (idx) @@ -1135,19 +1299,19 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid, idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_VI_MAC_CMD_VIID(viid)); - cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); - p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_IDX(idx)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_VI_MAC_CMD_VIID_V(viid)); + cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); + p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(idx)); memcpy(p->macaddr, addr, sizeof(p->macaddr)); ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); if (ret == 0) { p = &rpl.u.exact[0]; - ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); + ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx)); if (ret >= max_naddr) ret = -ENOMEM; } @@ -1172,13 +1336,13 @@ int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid, u.exact[0]), 16); memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_VI_ENABLE_CMD_VIID(viid)); - cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN | - FW_VI_MAC_CMD_HASHUNIEN(ucast) | - FW_CMD_LEN16(len16)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_VI_ENABLE_CMD_VIID_V(viid)); + cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F | + FW_VI_MAC_CMD_HASHUNIEN_V(ucast) | + FW_CMD_LEN16_V(len16)); cmd.u.hash.hashvec = cpu_to_be64(vec); return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); } @@ -1214,14 +1378,14 @@ int t4vf_get_port_stats(struct adapter *adapter, int pidx, int ret; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_STATS_CMD) | - FW_VI_STATS_CMD_VIID(pi->viid) | - FW_CMD_REQUEST | - FW_CMD_READ); - cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) | + FW_VI_STATS_CMD_VIID_V(pi->viid) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); cmd.u.ctl.nstats_ix = - cpu_to_be16(FW_VI_STATS_CMD_IX(ix) | - FW_VI_STATS_CMD_NSTATS(nstats)); + cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) | + FW_VI_STATS_CMD_NSTATS_V(nstats)); ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl); if (ret) return ret; @@ -1273,13 +1437,13 @@ int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype, struct 
fw_iq_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE | + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(cmd)); cmd.type_to_iqandstindex = - cpu_to_be32(FW_IQ_CMD_TYPE(iqtype)); + cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype)); cmd.iqid = cpu_to_be16(iqid); cmd.fl0id = cpu_to_be16(fl0id); @@ -1299,12 +1463,12 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) struct fw_eq_eth_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE | + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(cmd)); - cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID(eqid)); + cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid)); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } @@ -1318,7 +1482,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) { const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl; - u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi)); + u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi)); switch (opcode) { case FW_PORT_CMD: { @@ -1333,7 +1497,7 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) /* * Extract various fields from port status change message. */ - action = FW_PORT_CMD_ACTION_GET( + action = FW_PORT_CMD_ACTION_G( be32_to_cpu(port_cmd->action_to_len16)); if (action != FW_PORT_ACTION_GET_PORT_INFO) { dev_err(adapter->pdev_dev, @@ -1342,24 +1506,24 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) break; } - port_id = FW_PORT_CMD_PORTID_GET( + port_id = FW_PORT_CMD_PORTID_G( be32_to_cpu(port_cmd->op_to_portid)); word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); - link_ok = (word & FW_PORT_CMD_LSTATUS) != 0; + link_ok = (word & FW_PORT_CMD_LSTATUS_F) != 0; speed = 0; fc = 0; - if (word & FW_PORT_CMD_RXPAUSE) + if (word & FW_PORT_CMD_RXPAUSE_F) fc |= PAUSE_RX; - if (word & FW_PORT_CMD_TXPAUSE) + if (word & FW_PORT_CMD_TXPAUSE_F) fc |= PAUSE_TX; - if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) + if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) speed = 100; - else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) + else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) speed = 1000; - else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) + else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) speed = 10000; - else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) + else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) speed = 40000; /* @@ -1394,3 +1558,38 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) } return 0; } + +/** + */ +int t4vf_prep_adapter(struct adapter *adapter) +{ + int err; + unsigned int chipid; + + /* Wait for the device to become ready before proceeding ... + */ + err = t4vf_wait_dev_ready(adapter); + if (err) + return err; + + /* Default port and clock for debugging in case we can't reach + * firmware. 
+ */ + adapter->params.nports = 1; + adapter->params.vfres.pmask = 1; + adapter->params.vpd.cclk = 50000; + + adapter->params.chip = 0; + switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) { + case CHELSIO_T4: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0); + break; + + case CHELSIO_T5: + chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); + break; + } + + return 0; +} diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index e285f384b096..07719676c305 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c @@ -216,14 +216,10 @@ struct net_device * __init mac89x0_probe(int unit) ioaddr = (unsigned long) nubus_slot_addr(slot) | (((slot&0xf) << 20) + DEFAULTIOBASE); { - unsigned long flags; int card_present; - local_irq_save(flags); - card_present = (hwreg_present((void*) ioaddr+4) && - hwreg_present((void*) ioaddr + DATA_PORT)); - local_irq_restore(flags); - + card_present = (hwreg_present((void *)ioaddr + 4) && + hwreg_present((void *)ioaddr + DATA_PORT)); if (!card_present) goto out; } diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c index 69dfd3c9e529..0be6850be8a2 100644 --- a/drivers/net/ethernet/cisco/enic/enic_clsf.c +++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c @@ -86,7 +86,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic) int i; enic_rfs_timer_stop(enic); - spin_lock(&enic->rfs_h.lock); + spin_lock_bh(&enic->rfs_h.lock); enic->rfs_h.free = 0; for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) { struct hlist_head *hhead; @@ -100,7 +100,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic) kfree(n); } } - spin_unlock(&enic->rfs_h.lock); + spin_unlock_bh(&enic->rfs_h.lock); } struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id) @@ -128,7 +128,7 @@ void enic_flow_may_expire(unsigned long data) bool res; int j; - spin_lock(&enic->rfs_h.lock); + spin_lock_bh(&enic->rfs_h.lock); for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) { struct hlist_head *hhead; struct hlist_node *tmp; @@ -148,7 +148,7 @@ void enic_flow_may_expire(unsigned long data) } } } - spin_unlock(&enic->rfs_h.lock); + spin_unlock_bh(&enic->rfs_h.lock); mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4); } @@ -183,7 +183,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, return -EPROTONOSUPPORT; tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK; - spin_lock(&enic->rfs_h.lock); + spin_lock_bh(&enic->rfs_h.lock); n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys); if (n) { /* entry already present */ @@ -277,7 +277,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, } ret_unlock: - spin_unlock(&enic->rfs_h.lock); + spin_unlock_bh(&enic->rfs_h.lock); return res; } diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 929bfe70080a..86ee350e57f0 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -283,12 +283,10 @@ static irqreturn_t enic_isr_legacy(int irq, void *data) return IRQ_HANDLED; } - if (ENIC_TEST_INTR(pba, io_intr)) { - if (napi_schedule_prep(&enic->napi[0])) - __napi_schedule(&enic->napi[0]); - } else { + if (ENIC_TEST_INTR(pba, io_intr)) + napi_schedule_irqoff(&enic->napi[0]); + else vnic_intr_unmask(&enic->intr[io_intr]); - } return IRQ_HANDLED; } @@ -313,7 +311,7 @@ static irqreturn_t enic_isr_msi(int irq, void 
*data) * writes). */ - napi_schedule(&enic->napi[0]); + napi_schedule_irqoff(&enic->napi[0]); return IRQ_HANDLED; } @@ -322,7 +320,7 @@ static irqreturn_t enic_isr_msix(int irq, void *data) { struct napi_struct *napi = data; - napi_schedule(napi); + napi_schedule_irqoff(napi); return IRQ_HANDLED; } @@ -531,8 +529,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, { struct enic *enic = netdev_priv(netdev); struct vnic_wq *wq; - unsigned long flags; unsigned int txq_map; + struct netdev_queue *txq; if (skb->len <= 0) { dev_kfree_skb_any(skb); @@ -541,6 +539,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, txq_map = skb_get_queue_mapping(skb) % enic->wq_count; wq = &enic->wq[txq_map]; + txq = netdev_get_tx_queue(netdev, txq_map); /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, * which is very likely. In the off chance it's going to take @@ -554,23 +553,25 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - spin_lock_irqsave(&enic->wq_lock[txq_map], flags); + spin_lock(&enic->wq_lock[txq_map]); if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { - netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map)); + netif_tx_stop_queue(txq); /* This is a hard error, log it */ netdev_err(netdev, "BUG! Tx ring full when queue awake!\n"); - spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); + spin_unlock(&enic->wq_lock[txq_map]); return NETDEV_TX_BUSY; } enic_queue_wq_skb(enic, wq, skb); if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) - netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map)); + netif_tx_stop_queue(txq); + if (!skb->xmit_more || netif_xmit_stopped(txq)) + vnic_wq_doorbell(wq); - spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); + spin_unlock(&enic->wq_lock[txq_map]); return NETDEV_TX_OK; } @@ -940,18 +941,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq) struct vnic_rq_buf *buf = rq->to_use; if (buf->os_buf) { - buf = buf->next; - rq->to_use = buf; - rq->ring.desc_avail--; - if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) { - /* Adding write memory barrier prevents compiler and/or - * CPU reordering, thus avoiding descriptor posting - * before descriptor is initialized. Otherwise, hardware - * can read stale descriptor fields. 
- */ - wmb(); - iowrite32(buf->index, &rq->ctrl->posted_index); - } + enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr, + buf->len); return 0; } @@ -1037,7 +1028,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, enic->rq_truncated_pkts++; } + pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, + PCI_DMA_FROMDEVICE); dev_kfree_skb_any(skb); + buf->os_buf = NULL; return; } @@ -1088,7 +1082,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, /* Buffer overflow */ + pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, + PCI_DMA_FROMDEVICE); dev_kfree_skb_any(skb); + buf->os_buf = NULL; } } @@ -1316,9 +1313,10 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget) if (!wq_work_done) { napi_complete(napi); vnic_intr_unmask(&enic->intr[intr]); + return 0; } - return 0; + return budget; } static int enic_poll_msix_rq(struct napi_struct *napi, int budget) @@ -1674,13 +1672,13 @@ static int enic_stop(struct net_device *netdev) enic_dev_disable(enic); - local_bh_disable(); for (i = 0; i < enic->rq_count; i++) { napi_disable(&enic->napi[i]); + local_bh_disable(); while (!enic_poll_lock_napi(&enic->rq[i])) mdelay(1); + local_bh_enable(); } - local_bh_enable(); netif_carrier_off(netdev); netif_tx_disable(netdev); @@ -1892,23 +1890,23 @@ static int enic_dev_hang_reset(struct enic *enic) static int enic_set_rsskey(struct enic *enic) { + union vnic_rss_key *rss_key_buf_va; dma_addr_t rss_key_buf_pa; - union vnic_rss_key *rss_key_buf_va = NULL; - union vnic_rss_key rss_key = { - .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}, - .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}, - .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}, - .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}, - }; - int err; + u8 rss_key[ENIC_RSS_LEN]; + int i, kidx, bidx, err; - rss_key_buf_va = pci_alloc_consistent(enic->pdev, - sizeof(union vnic_rss_key), &rss_key_buf_pa); + rss_key_buf_va = pci_zalloc_consistent(enic->pdev, + sizeof(union vnic_rss_key), + &rss_key_buf_pa); if (!rss_key_buf_va) return -ENOMEM; - memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key)); - + netdev_rss_key_fill(rss_key, ENIC_RSS_LEN); + for (i = 0; i < ENIC_RSS_LEN; i++) { + kidx = i / ENIC_RSS_BYTES_PER_KEY; + bidx = i % ENIC_RSS_BYTES_PER_KEY; + rss_key_buf_va->key[kidx].b[bidx] = rss_key[i]; + } spin_lock_bh(&enic->devcmd_lock); err = enic_set_rss_key(enic, rss_key_buf_pa, diff --git a/drivers/net/ethernet/cisco/enic/vnic_rss.h b/drivers/net/ethernet/cisco/enic/vnic_rss.h index fa421baf45b8..881fa18542b3 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rss.h +++ b/drivers/net/ethernet/cisco/enic/vnic_rss.h @@ -20,11 +20,16 @@ #define _VNIC_RSS_H_ /* RSS key array */ + +#define ENIC_RSS_BYTES_PER_KEY 10 +#define ENIC_RSS_KEYS 4 +#define ENIC_RSS_LEN (ENIC_RSS_BYTES_PER_KEY * ENIC_RSS_KEYS) + union vnic_rss_key { struct { - u8 b[10]; + u8 b[ENIC_RSS_BYTES_PER_KEY]; u8 b_pad[6]; - } key[4]; + } key[ENIC_RSS_KEYS]; u64 raw[8]; }; diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h index 2c6c70804a39..816f1ad6072f 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_wq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h @@ -104,6 +104,17 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq) return wq->to_use->desc; } +static inline void vnic_wq_doorbell(struct vnic_wq *wq) +{ + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is 
initialized. Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); + iowrite32(wq->to_use->index, &wq->ctrl->posted_index); +} + static inline void vnic_wq_post(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, int sop, int eop, @@ -122,15 +133,6 @@ static inline void vnic_wq_post(struct vnic_wq *wq, buf->wr_id = wrid; buf = buf->next; - if (eop) { - /* Adding write memory barrier prevents compiler and/or CPU - * reordering, thus avoiding descriptor posting before - * descriptor is initialized. Otherwise, hardware can read - * stale descriptor fields. - */ - wmb(); - iowrite32(buf->index, &wq->ctrl->posted_index); - } wq->to_use = buf; wq->ring.desc_avail -= desc_skip_cnt; diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index cf8b6ff21613..badff181e719 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -995,7 +995,6 @@ static void de4x5_dbg_mii(struct net_device *dev, int k); static void de4x5_dbg_media(struct net_device *dev); static void de4x5_dbg_srom(struct de4x5_srom *p); static void de4x5_dbg_rx(struct sk_buff *skb, int len); -static int de4x5_strncmp(char *a, char *b, int n); static int dc21041_infoleaf(struct net_device *dev); static int dc21140_infoleaf(struct net_device *dev); static int dc21142_infoleaf(struct net_device *dev); @@ -4102,8 +4101,7 @@ get_hw_addr(struct net_device *dev) } /* -** Test for enet addresses in the first 32 bytes. The built-in strncmp -** didn't seem to work here...? +** Test for enet addresses in the first 32 bytes. */ static int de4x5_bad_srom(struct de4x5_private *lp) @@ -4111,8 +4109,8 @@ de4x5_bad_srom(struct de4x5_private *lp) int i, status = 0; for (i = 0; i < ARRAY_SIZE(enet_det); i++) { - if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) && - !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) { + if (!memcmp(&lp->srom, &enet_det[i], 3) && + !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) { if (i == 0) { status = SMC; } else if (i == 1) { @@ -4125,18 +4123,6 @@ de4x5_bad_srom(struct de4x5_private *lp) return status; } -static int -de4x5_strncmp(char *a, char *b, int n) -{ - int ret=0; - - for (;n && !ret; n--) { - ret = *a++ - *b++; - } - - return ret; -} - static void srom_repair(struct net_device *dev, int card) { diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index c8205606c775..50a00777228e 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -2265,7 +2265,7 @@ static int __init dmfe_init_module(void) static void __exit dmfe_cleanup_module(void) { - DMFE_DBUG(0, "dmfe_clean_module() ", debug); + DMFE_DBUG(0, "dmfe_cleanup_module() ", debug); pci_unregister_driver(&dmfe_driver); } diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index 4061f9b22812..1c5916b13778 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -1837,7 +1837,7 @@ static int __init uli526x_init_module(void) static void __exit uli526x_cleanup_module(void) { - ULI526X_DBUG(0, "uli526x_clean_module() ", debug); + ULI526X_DBUG(0, "uli526x_cleanup_module() ", debug); pci_unregister_driver(&uli526x_driver); } diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index e42a791c1835..73a500ccbf69 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ 
b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1171,7 +1171,8 @@ static u32 be_get_rxfh_key_size(struct net_device *netdev) return RSS_HASH_KEY_LEN; } -static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey) +static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey, + u8 *hfunc) { struct be_adapter *adapter = netdev_priv(netdev); int i; @@ -1185,16 +1186,23 @@ static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey) if (hkey) memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN); + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + return 0; } static int be_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *hkey) + const u8 *hkey, const u8 hfunc) { int rc = 0, i, j; struct be_adapter *adapter = netdev_priv(netdev); u8 rsstable[RSS_INDIR_TABLE_LEN]; + /* We do not allow change in unsupported parameters */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + if (indir) { struct be_rx_obj *rxo; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 9a18e7930b31..9461ad8d837b 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -887,7 +887,8 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, } if (vlan_tag) { - skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q), + vlan_tag); if (unlikely(!skb)) return skb; skb->vlan_tci = 0; @@ -896,7 +897,8 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, /* Insert the outer VLAN, if any */ if (adapter->qnq_vid) { vlan_tag = adapter->qnq_vid; - skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q), + vlan_tag); if (unlikely(!skb)) return skb; if (skip_hw_vlan) @@ -1015,9 +1017,8 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, * to pad short packets (<= 32 bytes) to a 36-byte length. 
*/ if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { - if (skb_padto(skb, 36)) + if (skb_put_padto(skb, 36)) return NULL; - skb->len = 36; } if (BEx_chip(adapter) || lancer_chip(adapter)) { @@ -2853,10 +2854,10 @@ static int be_close(struct net_device *netdev) static int be_rx_qs_create(struct be_adapter *adapter) { + struct rss_info *rss = &adapter->rss_info; + u8 rss_key[RSS_HASH_KEY_LEN]; struct be_rx_obj *rxo; int rc, i, j; - u8 rss_hkey[RSS_HASH_KEY_LEN]; - struct rss_info *rss = &adapter->rss_info; for_all_rx_queues(adapter, rxo, i) { rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN, @@ -2901,15 +2902,15 @@ static int be_rx_qs_create(struct be_adapter *adapter) rss->rss_flags = RSS_ENABLE_NONE; } - get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN); + netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN); rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, - 128, rss_hkey); + 128, rss_key); if (rc) { rss->rss_flags = RSS_ENABLE_NONE; return rc; } - memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN); + memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN); /* First time posting */ for_all_rx_queues(adapter, rxo, i) @@ -4309,11 +4310,16 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) return -EOPNOTSUPP; br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; + if (nla_len(attr) < sizeof(mode)) + return -EINVAL; + mode = nla_get_u16(attr); if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) return -EINVAL; @@ -4360,7 +4366,8 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, return ndo_dflt_bridge_getlink(skb, pid, seq, dev, hsw_mode == PORT_FWD_TYPE_VEPA ? 
- BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB); + BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB, + 0, 0); } #ifdef CONFIG_BE2NET_VXLAN @@ -4421,6 +4428,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, "Disabled VxLAN offloads for UDP port %d\n", be16_to_cpu(port)); } + +static bool be_gso_check(struct sk_buff *skb, struct net_device *dev) +{ + return vxlan_gso_check(skb); +} #endif static const struct net_device_ops be_netdev_ops = { @@ -4450,6 +4462,7 @@ static const struct net_device_ops be_netdev_ops = { #ifdef CONFIG_BE2NET_VXLAN .ndo_add_vxlan_port = be_add_vxlan_port, .ndo_del_vxlan_port = be_del_vxlan_port, + .ndo_gso_check = be_gso_check, #endif }; diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 9af296a1ca99..469691ad4a1e 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -38,9 +38,9 @@ #define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */ #define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */ #define FEC_OPD 0x0ec /* Opcode + Pause duration */ -#define FEC_TXIC0 0xF0 /* Tx Interrupt Coalescing for ring 0 */ -#define FEC_TXIC1 0xF4 /* Tx Interrupt Coalescing for ring 1 */ -#define FEC_TXIC2 0xF8 /* Tx Interrupt Coalescing for ring 2 */ +#define FEC_TXIC0 0x0f0 /* Tx Interrupt Coalescing for ring 0 */ +#define FEC_TXIC1 0x0f4 /* Tx Interrupt Coalescing for ring 1 */ +#define FEC_TXIC2 0x0f8 /* Tx Interrupt Coalescing for ring 2 */ #define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */ #define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */ #define FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */ @@ -53,16 +53,18 @@ #define FEC_R_FSTART 0x150 /* FIFO receive start reg */ #define FEC_R_DES_START_1 0x160 /* Receive descriptor ring 1 */ #define FEC_X_DES_START_1 0x164 /* Transmit descriptor ring 1 */ +#define FEC_R_BUFF_SIZE_1 0x168 /* Maximum receive buff ring1 size */ #define FEC_R_DES_START_2 0x16c /* Receive descriptor ring 2 */ #define FEC_X_DES_START_2 0x170 /* Transmit descriptor ring 2 */ +#define FEC_R_BUFF_SIZE_2 0x174 /* Maximum receive buff ring2 size */ #define FEC_R_DES_START_0 0x180 /* Receive descriptor ring */ #define FEC_X_DES_START_0 0x184 /* Transmit descriptor ring */ -#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */ +#define FEC_R_BUFF_SIZE_0 0x188 /* Maximum receive buff size */ #define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */ #define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */ #define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */ #define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */ -#define FEC_RACC 0x1C4 /* Receive Accelerator function */ +#define FEC_RACC 0x1c4 /* Receive Accelerator function */ #define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */ #define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */ #define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */ @@ -82,57 +84,57 @@ #define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */ #define RMON_T_PACKETS 0x204 /* RMON TX packet count */ #define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */ -#define RMON_T_MC_PKT 0x20C /* RMON TX multicast pkts */ +#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */ #define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */ #define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */ #define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */ -#define 
RMON_T_FRAG 0x21C /* RMON TX pkts < 64 bytes, bad CRC */ +#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */ #define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */ #define RMON_T_COL 0x224 /* RMON TX collision count */ #define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */ -#define RMON_T_P65TO127 0x22C /* RMON TX 65 to 127 byte pkts */ +#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */ #define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */ #define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */ #define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */ -#define RMON_T_P1024TO2047 0x23C /* RMON TX 1024 to 2047 byte pkts */ +#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */ #define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */ #define RMON_T_OCTETS 0x244 /* RMON TX octets */ #define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */ -#define IEEE_T_FRAME_OK 0x24C /* Frames tx'd OK */ +#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */ #define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */ #define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */ #define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */ -#define IEEE_T_LCOL 0x25C /* Frames tx'd with late collision */ +#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */ #define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */ #define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */ #define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */ -#define IEEE_T_SQE 0x26C /* Frames tx'd with SQE err */ +#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */ #define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */ #define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */ #define RMON_R_PACKETS 0x284 /* RMON RX packet count */ #define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */ -#define RMON_R_MC_PKT 0x28C /* RMON RX multicast pkts */ +#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */ #define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */ #define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */ #define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */ -#define RMON_R_FRAG 0x29C /* RMON RX pkts < 64 bytes, bad CRC */ -#define RMON_R_JAB 0x2A0 /* RMON RX pkts > MAX_FL bytes, bad CRC */ -#define RMON_R_RESVD_O 0x2A4 /* Reserved */ -#define RMON_R_P64 0x2A8 /* RMON RX 64 byte pkts */ -#define RMON_R_P65TO127 0x2AC /* RMON RX 65 to 127 byte pkts */ -#define RMON_R_P128TO255 0x2B0 /* RMON RX 128 to 255 byte pkts */ -#define RMON_R_P256TO511 0x2B4 /* RMON RX 256 to 511 byte pkts */ -#define RMON_R_P512TO1023 0x2B8 /* RMON RX 512 to 1023 byte pkts */ -#define RMON_R_P1024TO2047 0x2BC /* RMON RX 1024 to 2047 byte pkts */ -#define RMON_R_P_GTE2048 0x2C0 /* RMON RX pkts > 2048 bytes */ -#define RMON_R_OCTETS 0x2C4 /* RMON RX octets */ -#define IEEE_R_DROP 0x2C8 /* Count frames not counted correctly */ -#define IEEE_R_FRAME_OK 0x2CC /* Frames rx'd OK */ -#define IEEE_R_CRC 0x2D0 /* Frames rx'd with CRC err */ -#define IEEE_R_ALIGN 0x2D4 /* Frames rx'd with alignment err */ -#define IEEE_R_MACERR 0x2D8 /* Receive FIFO overflow count */ -#define IEEE_R_FDXFC 0x2DC /* Flow control pause frames rx'd */ -#define IEEE_R_OCTETS_OK 0x2E0 /* Octet cnt for frames rx'd w/o err */ +#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */ +#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */ +#define 
RMON_R_RESVD_O 0x2a4 /* Reserved */ +#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */ +#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */ +#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */ +#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */ +#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */ +#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */ +#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */ +#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */ +#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */ +#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */ +#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */ +#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */ +#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */ +#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */ +#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */ #else @@ -165,21 +167,23 @@ #define FEC_X_DES_START_0 0x3d4 /* Transmit descriptor ring */ #define FEC_X_DES_START_1 FEC_X_DES_START_0 #define FEC_X_DES_START_2 FEC_X_DES_START_0 -#define FEC_R_BUFF_SIZE 0x3d8 /* Maximum receive buff size */ +#define FEC_R_BUFF_SIZE_0 0x3d8 /* Maximum receive buff size */ +#define FEC_R_BUFF_SIZE_1 FEC_R_BUFF_SIZE_0 +#define FEC_R_BUFF_SIZE_2 FEC_R_BUFF_SIZE_0 #define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */ /* Not existed in real chip * Just for pass build. */ -#define FEC_RCMR_1 0xFFF -#define FEC_RCMR_2 0xFFF -#define FEC_DMA_CFG_1 0xFFF -#define FEC_DMA_CFG_2 0xFFF -#define FEC_TXIC0 0xFFF -#define FEC_TXIC1 0xFFF -#define FEC_TXIC2 0xFFF -#define FEC_RXIC0 0xFFF -#define FEC_RXIC1 0xFFF -#define FEC_RXIC2 0xFFF +#define FEC_RCMR_1 0xfff +#define FEC_RCMR_2 0xfff +#define FEC_DMA_CFG_1 0xfff +#define FEC_DMA_CFG_2 0xfff +#define FEC_TXIC0 0xfff +#define FEC_TXIC1 0xfff +#define FEC_TXIC2 0xfff +#define FEC_RXIC0 0xfff +#define FEC_RXIC1 0xfff +#define FEC_RXIC2 0xfff #endif /* CONFIG_M5272 */ @@ -213,60 +217,60 @@ struct bufdesc_ex { * The following definitions courtesy of commproc.h, which where * Copyright (c) 1997 Dan Malek (dmalek@jlc.net). */ -#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */ -#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */ -#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */ -#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */ -#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */ -#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */ -#define BD_SC_P ((ushort)0x0100) /* xmt preamble */ -#define BD_SC_BR ((ushort)0x0020) /* Break received */ -#define BD_SC_FR ((ushort)0x0010) /* Framing error */ -#define BD_SC_PR ((ushort)0x0008) /* Parity error */ -#define BD_SC_OV ((ushort)0x0002) /* Overrun */ -#define BD_SC_CD ((ushort)0x0001) /* ?? */ +#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */ +#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */ +#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */ +#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */ +#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */ +#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */ +#define BD_SC_P ((ushort)0x0100) /* xmt preamble */ +#define BD_SC_BR ((ushort)0x0020) /* Break received */ +#define BD_SC_FR ((ushort)0x0010) /* Framing error */ +#define BD_SC_PR ((ushort)0x0008) /* Parity error */ +#define BD_SC_OV ((ushort)0x0002) /* Overrun */ +#define BD_SC_CD ((ushort)0x0001) /* ?? 
*/ /* Buffer descriptor control/status used by Ethernet receive. -*/ -#define BD_ENET_RX_EMPTY ((ushort)0x8000) -#define BD_ENET_RX_WRAP ((ushort)0x2000) -#define BD_ENET_RX_INTR ((ushort)0x1000) -#define BD_ENET_RX_LAST ((ushort)0x0800) -#define BD_ENET_RX_FIRST ((ushort)0x0400) -#define BD_ENET_RX_MISS ((ushort)0x0100) -#define BD_ENET_RX_LG ((ushort)0x0020) -#define BD_ENET_RX_NO ((ushort)0x0010) -#define BD_ENET_RX_SH ((ushort)0x0008) -#define BD_ENET_RX_CR ((ushort)0x0004) -#define BD_ENET_RX_OV ((ushort)0x0002) -#define BD_ENET_RX_CL ((ushort)0x0001) -#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */ + */ +#define BD_ENET_RX_EMPTY ((ushort)0x8000) +#define BD_ENET_RX_WRAP ((ushort)0x2000) +#define BD_ENET_RX_INTR ((ushort)0x1000) +#define BD_ENET_RX_LAST ((ushort)0x0800) +#define BD_ENET_RX_FIRST ((ushort)0x0400) +#define BD_ENET_RX_MISS ((ushort)0x0100) +#define BD_ENET_RX_LG ((ushort)0x0020) +#define BD_ENET_RX_NO ((ushort)0x0010) +#define BD_ENET_RX_SH ((ushort)0x0008) +#define BD_ENET_RX_CR ((ushort)0x0004) +#define BD_ENET_RX_OV ((ushort)0x0002) +#define BD_ENET_RX_CL ((ushort)0x0001) +#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */ /* Enhanced buffer descriptor control/status used by Ethernet receive */ -#define BD_ENET_RX_VLAN 0x00000004 +#define BD_ENET_RX_VLAN 0x00000004 /* Buffer descriptor control/status used by Ethernet transmit. -*/ -#define BD_ENET_TX_READY ((ushort)0x8000) -#define BD_ENET_TX_PAD ((ushort)0x4000) -#define BD_ENET_TX_WRAP ((ushort)0x2000) -#define BD_ENET_TX_INTR ((ushort)0x1000) -#define BD_ENET_TX_LAST ((ushort)0x0800) -#define BD_ENET_TX_TC ((ushort)0x0400) -#define BD_ENET_TX_DEF ((ushort)0x0200) -#define BD_ENET_TX_HB ((ushort)0x0100) -#define BD_ENET_TX_LC ((ushort)0x0080) -#define BD_ENET_TX_RL ((ushort)0x0040) -#define BD_ENET_TX_RCMASK ((ushort)0x003c) -#define BD_ENET_TX_UN ((ushort)0x0002) -#define BD_ENET_TX_CSL ((ushort)0x0001) -#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */ - -/*enhanced buffer descriptor control/status used by Ethernet transmit*/ -#define BD_ENET_TX_INT 0x40000000 -#define BD_ENET_TX_TS 0x20000000 -#define BD_ENET_TX_PINS 0x10000000 -#define BD_ENET_TX_IINS 0x08000000 + */ +#define BD_ENET_TX_READY ((ushort)0x8000) +#define BD_ENET_TX_PAD ((ushort)0x4000) +#define BD_ENET_TX_WRAP ((ushort)0x2000) +#define BD_ENET_TX_INTR ((ushort)0x1000) +#define BD_ENET_TX_LAST ((ushort)0x0800) +#define BD_ENET_TX_TC ((ushort)0x0400) +#define BD_ENET_TX_DEF ((ushort)0x0200) +#define BD_ENET_TX_HB ((ushort)0x0100) +#define BD_ENET_TX_LC ((ushort)0x0080) +#define BD_ENET_TX_RL ((ushort)0x0040) +#define BD_ENET_TX_RCMASK ((ushort)0x003c) +#define BD_ENET_TX_UN ((ushort)0x0002) +#define BD_ENET_TX_CSL ((ushort)0x0001) +#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */ + +/* enhanced buffer descriptor control/status used by Ethernet transmit */ +#define BD_ENET_TX_INT 0x40000000 +#define BD_ENET_TX_TS 0x20000000 +#define BD_ENET_TX_PINS 0x10000000 +#define BD_ENET_TX_IINS 0x08000000 /* This device has up to three irqs on some platforms */ @@ -279,36 +283,40 @@ struct bufdesc_ex { #define FEC_ENET_MAX_TX_QS 3 #define FEC_ENET_MAX_RX_QS 3 -#define FEC_R_DES_START(X) ((X == 1) ? FEC_R_DES_START_1 : \ - ((X == 2) ? \ +#define FEC_R_DES_START(X) (((X) == 1) ? FEC_R_DES_START_1 : \ + (((X) == 2) ? \ FEC_R_DES_START_2 : FEC_R_DES_START_0)) -#define FEC_X_DES_START(X) ((X == 1) ? FEC_X_DES_START_1 : \ - ((X == 2) ? \ +#define FEC_X_DES_START(X) (((X) == 1) ? 
FEC_X_DES_START_1 : \ + (((X) == 2) ? \ FEC_X_DES_START_2 : FEC_X_DES_START_0)) -#define FEC_R_DES_ACTIVE(X) ((X == 1) ? FEC_R_DES_ACTIVE_1 : \ - ((X == 2) ? \ +#define FEC_R_BUFF_SIZE(X) (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \ + (((X) == 2) ? \ + FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0)) +#define FEC_R_DES_ACTIVE(X) (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \ + (((X) == 2) ? \ FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0)) -#define FEC_X_DES_ACTIVE(X) ((X == 1) ? FEC_X_DES_ACTIVE_1 : \ - ((X == 2) ? \ +#define FEC_X_DES_ACTIVE(X) (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \ + (((X) == 2) ? \ FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0)) -#define FEC_DMA_CFG(X) ((X == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1) +#define FEC_DMA_CFG(X) (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1) #define DMA_CLASS_EN (1 << 16) -#define FEC_RCMR(X) ((X == 2) ? FEC_RCMR_2 : FEC_RCMR_1) -#define IDLE_SLOPE_MASK 0xFFFF +#define FEC_RCMR(X) (((X) == 2) ? FEC_RCMR_2 : FEC_RCMR_1) +#define IDLE_SLOPE_MASK 0xffff #define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */ #define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */ -#define IDLE_SLOPE(X) ((X == 1) ? (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \ +#define IDLE_SLOPE(X) (((X) == 1) ? \ + (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \ (IDLE_SLOPE_2 & IDLE_SLOPE_MASK)) -#define RCMR_MATCHEN (0x1 << 16) -#define RCMR_CMP_CFG(v, n) ((v & 0x7) << (n << 2)) +#define RCMR_MATCHEN (0x1 << 16) +#define RCMR_CMP_CFG(v, n) (((v) & 0x7) << (n << 2)) #define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \ RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3)) #define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \ RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3)) -#define RCMR_CMP(X) ((X == 1) ? RCMR_CMP_1 : RCMR_CMP_2) -#define FEC_TX_BD_FTYPE(X) ((X & 0xF) << 20) +#define RCMR_CMP(X) (((X) == 1) ? RCMR_CMP_1 : RCMR_CMP_2) +#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20) /* The number of Tx and Rx buffers. These are allocated from the page * pool. The code may assume these are power of two, so it it best @@ -326,8 +334,8 @@ struct bufdesc_ex { #define TX_RING_SIZE 512 /* Must be power of two */ #define TX_RING_MOD_MASK 511 /* for this to work */ -#define BD_ENET_RX_INT 0x00800000 -#define BD_ENET_RX_PTP ((ushort)0x0400) +#define BD_ENET_RX_INT 0x00800000 +#define BD_ENET_RX_PTP ((ushort)0x0400) #define BD_ENET_RX_ICE 0x00000020 #define BD_ENET_RX_PCR 0x00000010 #define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR) @@ -359,13 +367,13 @@ struct bufdesc_ex { /* ENET interrupt coalescing macro define */ #define FEC_ITR_CLK_SEL (0x1 << 30) #define FEC_ITR_EN (0x1 << 31) -#define FEC_ITR_ICFT(X) ((X & 0xFF) << 20) -#define FEC_ITR_ICTT(X) ((X) & 0xFFFF) +#define FEC_ITR_ICFT(X) (((X) & 0xff) << 20) +#define FEC_ITR_ICTT(X) ((X) & 0xffff) #define FEC_ITR_ICFT_DEFAULT 200 /* Set 200 frame count threshold */ #define FEC_ITR_ICTT_DEFAULT 1000 /* Set 1000us timer threshold */ -#define FEC_VLAN_TAG_LEN 0x04 -#define FEC_ETHTYPE_LEN 0x02 +#define FEC_VLAN_TAG_LEN 0x04 +#define FEC_ETHTYPE_LEN 0x02 /* Controller is ENET-MAC */ #define FEC_QUIRK_ENET_MAC (1 << 0) @@ -390,7 +398,7 @@ struct bufdesc_ex { * frames not being transmitted until there is a 0-to-1 transition on * ENET_TDAR[TDAR]. */ -#define FEC_QUIRK_ERR006358 (1 << 7) +#define FEC_QUIRK_ERR006358 (1 << 7) /* ENET IP hw AVB * * i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support. 
@@ -501,8 +509,9 @@ struct fec_enet_private { int speed; struct completion mdio_done; int irq[FEC_IRQ_NUM]; - int bufdesc_ex; + bool bufdesc_ex; int pause_flag; + u32 quirks; struct napi_struct napi; int csum_flags; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 81b96cf87574..d2955ce24d0b 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -287,15 +287,23 @@ static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep, return entries > 0 ? entries : entries + txq->tx_ring_size; } -static void *swap_buffer(void *bufaddr, int len) +static void swap_buffer(void *bufaddr, int len) { int i; unsigned int *buf = bufaddr; - for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++) - *buf = cpu_to_be32(*buf); + for (i = 0; i < len; i += 4, buf++) + swab32s(buf); +} + +static void swap_buffer2(void *dst_buf, void *src_buf, int len) +{ + int i; + unsigned int *src = src_buf; + unsigned int *dst = dst_buf; - return bufaddr; + for (i = 0; i < len; i += 4, src++, dst++) + *dst = swab32p(src); } static void fec_dump(struct net_device *ndev) @@ -351,8 +359,6 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); struct bufdesc *bdp = txq->cur_tx; struct bufdesc_ex *ebdp; int nr_frags = skb_shinfo(skb)->nr_frags; @@ -388,7 +394,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, } if (fep->bufdesc_ex) { - if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + if (fep->quirks & FEC_QUIRK_HAS_AVB) estatus |= FEC_TX_BD_FTYPE(queue); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; @@ -400,11 +406,11 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); if (((unsigned long) bufaddr) & fep->tx_align || - id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { + fep->quirks & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], bufaddr, frag_len); bufaddr = txq->tx_bounce[index]; - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, frag_len); } @@ -440,8 +446,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); int nr_frags = skb_shinfo(skb)->nr_frags; struct bufdesc *bdp, *last_bdp; void *bufaddr; @@ -480,11 +484,11 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, queue = skb_get_queue_mapping(skb); index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); if (((unsigned long) bufaddr) & fep->tx_align || - id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { + fep->quirks & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], skb->data, buflen); bufaddr = txq->tx_bounce[index]; - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, buflen); } @@ -519,7 +523,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, fep->hwts_tx_en)) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + if (fep->quirks & FEC_QUIRK_HAS_AVB) estatus |= FEC_TX_BD_FTYPE(queue); if (skb->ip_summed == CHECKSUM_PARTIAL) @@ -563,8 +567,6 @@ 
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, int size, bool last_tcp, bool is_last) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); unsigned short queue = skb_get_queue_mapping(skb); unsigned short status; @@ -577,11 +579,11 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); if (((unsigned long) data) & fep->tx_align || - id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { + fep->quirks & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], data, size); data = txq->tx_bounce[index]; - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) swap_buffer(data, size); } @@ -597,7 +599,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, bdp->cbd_bufaddr = addr; if (fep->bufdesc_ex) { - if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + if (fep->quirks & FEC_QUIRK_HAS_AVB) estatus |= FEC_TX_BD_FTYPE(queue); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; @@ -625,8 +627,6 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, struct bufdesc *bdp, int index) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); unsigned short queue = skb_get_queue_mapping(skb); @@ -642,11 +642,11 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; if (((unsigned long)bufaddr) & fep->tx_align || - id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { + fep->quirks & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], skb->data, hdr_len); bufaddr = txq->tx_bounce[index]; - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, hdr_len); dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, @@ -663,7 +663,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, bdp->cbd_datlen = hdr_len; if (fep->bufdesc_ex) { - if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + if (fep->quirks & FEC_QUIRK_HAS_AVB) estatus |= FEC_TX_BD_FTYPE(queue); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; @@ -688,8 +688,6 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, struct tso_t tso; unsigned int index = 0; int ret; - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) { dev_kfree_skb_any(skb); @@ -751,7 +749,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, txq->cur_tx = bdp; /* Trigger transmission start */ - if (!(id_entry->driver_data & FEC_QUIRK_ERR007885) || + if (!(fep->quirks & FEC_QUIRK_ERR007885) || !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || @@ -869,6 +867,7 @@ static void fec_enet_enable_ring(struct net_device *ndev) for (i = 0; i < fep->num_rx_queues; i++) { rxq = fep->rx_queue[i]; writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i)); + writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); /* enable DMA1/2 */ if (i) @@ 
-914,8 +913,6 @@ static void fec_restart(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); u32 val; u32 temp_mac[2]; u32 rcntl = OPT_FRAME_SIZE | 0x04; @@ -925,7 +922,7 @@ fec_restart(struct net_device *ndev) * For i.MX6SX SOC, enet use AXI bus, we use disable MAC * instead of reset MAC itself. */ - if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + if (fep->quirks & FEC_QUIRK_HAS_AVB) { writel(0, fep->hwp + FEC_ECNTRL); } else { writel(1, fep->hwp + FEC_ECNTRL); @@ -936,7 +933,7 @@ fec_restart(struct net_device *ndev) * enet-mac reset will reset mac address registers too, * so need to reconfigure it. */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + if (fep->quirks & FEC_QUIRK_ENET_MAC) { memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); @@ -945,9 +942,6 @@ fec_restart(struct net_device *ndev) /* Clear any outstanding interrupt. */ writel(0xffc00000, fep->hwp + FEC_IEVENT); - /* Set maximum receive buffer size. */ - writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); - fec_enet_bd_init(ndev); fec_enet_enable_ring(ndev); @@ -982,7 +976,7 @@ fec_restart(struct net_device *ndev) * The phy interface and speed need to get configured * differently on enet-mac. */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + if (fep->quirks & FEC_QUIRK_ENET_MAC) { /* Enable flow control and length check */ rcntl |= 0x40000000 | 0x00000020; @@ -1005,7 +999,7 @@ fec_restart(struct net_device *ndev) } } else { #ifdef FEC_MIIGSK_ENR - if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) { + if (fep->quirks & FEC_QUIRK_USE_GASKET) { u32 cfgr; /* disable the gasket and wait */ writel(0, fep->hwp + FEC_MIIGSK_ENR); @@ -1058,7 +1052,7 @@ fec_restart(struct net_device *ndev) writel(0, fep->hwp + FEC_HASH_TABLE_LOW); #endif - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + if (fep->quirks & FEC_QUIRK_ENET_MAC) { /* enable ENET endian swap */ ecntl |= (1 << 8); /* enable ENET store and forward mode */ @@ -1092,8 +1086,6 @@ static void fec_stop(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); /* We cannot expect a graceful transmit stop without link !!! */ @@ -1108,7 +1100,7 @@ fec_stop(struct net_device *ndev) * For i.MX6SX SOC, enet use AXI bus, we use disable MAC * instead of reset MAC itself. 
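
The dominant pattern in the fec_main.c hunks here and below is mechanical: every function used to call platform_get_device_id(fep->pdev) and test id_entry->driver_data on each invocation; the patch caches that value once in the new fep->quirks field (added to struct fec_enet_private above), and the call sites shrink to a plain bitmask test. A reduced sketch of the idiom, with hypothetical names and userspace types standing in for the kernel ones:

#include <stdint.h>
#include <stdio.h>

#define QUIRK_ENET_MAC	(1u << 0)	/* hypothetical bit assignments */
#define QUIRK_HAS_AVB	(1u << 1)

struct fec_priv_sketch {
	uint32_t quirks;		/* snapshot taken once at probe */
};

/* probe: resolve the match-table driver_data a single time */
static void probe_sketch(struct fec_priv_sketch *fep, uint32_t driver_data)
{
	fep->quirks = driver_data;
}

/* hot path: a mask test replaces a per-call table lookup */
static void restart_sketch(const struct fec_priv_sketch *fep)
{
	if (fep->quirks & QUIRK_HAS_AVB)
		puts("disable MAC instead of resetting the whole block");
	else
		puts("full controller reset");
}

int main(void)
{
	struct fec_priv_sketch fep;

	probe_sketch(&fep, QUIRK_ENET_MAC | QUIRK_HAS_AVB);
	restart_sketch(&fep);
	return 0;
}

It also irons out the inconsistency visible above, where some call sites NULL-checked id_entry and others did not.
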
*/ - if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + if (fep->quirks & FEC_QUIRK_HAS_AVB) { writel(0, fep->hwp + FEC_ECNTRL); } else { writel(1, fep->hwp + FEC_ECNTRL); @@ -1118,7 +1110,7 @@ fec_stop(struct net_device *ndev) writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); /* We have to keep ENET enabled to have MII interrupt stay working */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + if (fep->quirks & FEC_QUIRK_ENET_MAC) { writel(2, fep->hwp + FEC_ECNTRL); writel(rmii_mode, fep->hwp + FEC_R_CNTRL); } @@ -1307,7 +1299,7 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff } static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, - struct bufdesc *bdp, u32 length) + struct bufdesc *bdp, u32 length, bool swap) { struct fec_enet_private *fep = netdev_priv(ndev); struct sk_buff *new_skb; @@ -1322,7 +1314,10 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); - memcpy(new_skb->data, (*skb)->data, length); + if (!swap) + memcpy(new_skb->data, (*skb)->data, length); + else + swap_buffer2(new_skb->data, (*skb)->data, length); *skb = new_skb; return true; @@ -1337,8 +1332,6 @@ static int fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); struct fec_enet_priv_rx_q *rxq; struct bufdesc *bdp; unsigned short status; @@ -1352,6 +1345,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) u16 vlan_tag; int index = 0; bool is_copybreak; + bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; #ifdef CONFIG_M532x flush_cache_all(); @@ -1415,7 +1409,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) * include that when passing upstream as it messes up * bridging applications. 
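
The copybreak path above gains a swap argument so that, on SoCs with the FEC_QUIRK_SWAP_FRAME erratum, the payload is byte-swapped while it is being copied (the new swap_buffer2()) rather than memcpy'd and then swapped in place: one pass over the data instead of two. A self-contained sketch of the two variants, assuming (as the driver does) word-aligned buffers and lengths handled in 4-byte steps; __builtin_bswap32 (GCC/clang) stands in for the kernel's swab32 helpers:

#include <stdint.h>
#include <string.h>

/* in-place variant, as on the non-copybreak receive path */
static void swap_buffer_sketch(void *buf, int len)
{
	uint32_t *p = buf;
	int i;

	for (i = 0; i < len; i += 4, p++)
		*p = __builtin_bswap32(*p);
}

/* copy-and-swap variant: one pass does both the memcpy and the swab */
static void swap_buffer2_sketch(void *dst, const void *src, int len)
{
	uint32_t *d = dst;
	const uint32_t *s = src;
	int i;

	for (i = 0; i < len; i += 4, s++, d++)
		*d = __builtin_bswap32(*s);
}

int main(void)
{
	uint32_t src[2] = { 0x11223344, 0x55667788 };
	uint32_t a[2], b[2];

	memcpy(a, src, sizeof(a));
	swap_buffer_sketch(a, sizeof(a));	/* copy, then swap in place */
	swap_buffer2_sketch(b, src, sizeof(b));	/* swap while copying */
	return memcmp(a, b, sizeof(a));		/* 0: both paths agree */
}
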
*/ - is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4); + is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4, + need_swap); if (!is_copybreak) { skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); if (unlikely(!skb_new)) { @@ -1430,7 +1425,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) prefetch(skb->data - NET_IP_ALIGN); skb_put(skb, pkt_len - 4); data = skb->data; - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + if (!is_copybreak && need_swap) swap_buffer(data, pkt_len); /* Extract the enhanced buffer descriptor */ @@ -1581,7 +1576,8 @@ fec_enet_interrupt(int irq, void *dev_id) complete(&fep->mdio_done); } - fec_ptp_check_pps_event(fep); + if (fep->ptp_clock) + fec_ptp_check_pps_event(fep); return ret; } @@ -1864,8 +1860,6 @@ failed_clk_ipg: static int fec_enet_mii_probe(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); struct phy_device *phy_dev = NULL; char mdio_bus_id[MII_BUS_ID_SIZE]; char phy_name[MII_BUS_ID_SIZE + 3]; @@ -1911,7 +1905,7 @@ static int fec_enet_mii_probe(struct net_device *ndev) } /* mask with MAC supported features */ - if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) { + if (fep->quirks & FEC_QUIRK_HAS_GBIT) { phy_dev->supported &= PHY_GBIT_FEATURES; phy_dev->supported &= ~SUPPORTED_1000baseT_Half; #if !defined(CONFIG_M5272) @@ -1939,8 +1933,6 @@ static int fec_enet_mii_init(struct platform_device *pdev) static struct mii_bus *fec0_mii_bus; struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); struct device_node *node; int err = -ENXIO, i; @@ -1960,7 +1952,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) * mdio interface in board design, and need to be configured by * fec0 mii_bus. */ - if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { + if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { /* fec1 uses fec0 mii_bus */ if (mii_cnt && fec0_mii_bus) { fep->mii_bus = fec0_mii_bus; @@ -1981,7 +1973,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) * document. 
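
For context, fec_enet_copybreak() is the standard copybreak optimisation: below a size threshold the frame is copied into a small freshly allocated skb and the original DMA buffer stays on the ring, so no unmap/remap cycle is needed; above it, the mapped buffer itself is handed up the stack and the ring slot is refilled. A condensed userspace model of that control flow (threshold, sizes and helper names are illustrative only):

#include <stdlib.h>
#include <string.h>

#define COPYBREAK	256	/* illustrative threshold */
#define RX_BUF_SIZE	2048	/* FEC_ENET_RX_FRSIZE analogue */

struct rx_slot { unsigned char *dma_buf; };

/* returns the buffer to pass up the stack (error handling elided) */
static unsigned char *rx_copybreak(struct rx_slot *slot, int len)
{
	unsigned char *skb;

	if (len < COPYBREAK) {
		skb = malloc(len);		/* netdev_alloc_skb() */
		if (!skb)
			return NULL;		/* caller drops the frame */
		/* dma_sync_single_for_cpu() would go here */
		memcpy(skb, slot->dma_buf, len);
		return skb;	/* slot->dma_buf stays mapped on the ring */
	}

	/* big frame: detach the mapped buffer and refill the slot */
	skb = slot->dma_buf;
	slot->dma_buf = malloc(RX_BUF_SIZE);
	return skb;
}

int main(void)
{
	unsigned char first[RX_BUF_SIZE] = { 1, 2, 3 };
	struct rx_slot slot = { first };

	free(rx_copybreak(&slot, 64));		/* small: ring keeps buffer */
	return rx_copybreak(&slot, 1500) != first; /* big: buffer detached */
}
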
*/ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) + if (fep->quirks & FEC_QUIRK_ENET_MAC) fep->phy_speed--; fep->phy_speed <<= 1; writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); @@ -2023,7 +2015,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) mii_cnt++; /* save fec0 mii_bus */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) + if (fep->quirks & FEC_QUIRK_ENET_MAC) fec0_mii_bus = fep->mii_bus; return 0; @@ -2292,11 +2284,9 @@ static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) static void fec_enet_itr_coal_set(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); int rx_itr, tx_itr; - if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) return; /* Must be greater than zero to avoid unpredictable behavior */ @@ -2331,10 +2321,8 @@ static int fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) return -EOPNOTSUPP; ec->rx_coalesce_usecs = fep->rx_time_itr; @@ -2350,12 +2338,9 @@ static int fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - unsigned int cycle; - if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) return -EOPNOTSUPP; if (ec->rx_max_coalesced_frames > 255) { @@ -2935,8 +2920,6 @@ static const struct net_device_ops fec_netdev_ops = { static int fec_enet_init(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); struct fec_enet_priv_tx_q *txq; struct fec_enet_priv_rx_q *rxq; struct bufdesc *cbd_base; @@ -3015,11 +2998,11 @@ static int fec_enet_init(struct net_device *ndev) writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); - if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) + if (fep->quirks & FEC_QUIRK_HAS_VLAN) /* enable hw VLAN support */ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; - if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { + if (fep->quirks & FEC_QUIRK_HAS_CSUM) { ndev->gso_max_segs = FEC_MAX_TSO_SEGS; /* enable hw accelerator */ @@ -3028,7 +3011,7 @@ static int fec_enet_init(struct net_device *ndev) fep->csum_flags |= FLAG_RX_CSUM_ENABLED; } - if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + if (fep->quirks & FEC_QUIRK_HAS_AVB) { fep->tx_align = 0; fep->rx_align = 0x3f; } @@ -3128,10 +3111,6 @@ fec_probe(struct platform_device *pdev) int num_tx_qs; int num_rx_qs; - of_id = of_match_device(fec_dt_ids, &pdev->dev); - if (of_id) - pdev->id_entry = of_id->data; - fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); /* Init network device */ @@ -3145,13 +3124,17 @@ fec_probe(struct platform_device *pdev) /* setup board info structure */ fep = netdev_priv(ndev); + of_id = of_match_device(fec_dt_ids, &pdev->dev); + if (of_id) + pdev->id_entry = of_id->data; + fep->quirks = pdev->id_entry->driver_data; + fep->num_rx_queues = num_rx_qs; fep->num_tx_queues = num_tx_qs; #if !defined(CONFIG_M5272) /* default 
enable pause frame auto negotiation */ - if (pdev->id_entry && - (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) + if (fep->quirks & FEC_QUIRK_HAS_GBIT) fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; #endif @@ -3168,8 +3151,6 @@ fec_probe(struct platform_device *pdev) fep->pdev = pdev; fep->dev_id = dev_id++; - fep->bufdesc_ex = 0; - platform_set_drvdata(pdev, ndev); phy_node = of_parse_phandle(np, "phy-handle", 0); @@ -3222,12 +3203,11 @@ fec_probe(struct platform_device *pdev) if (IS_ERR(fep->clk_ref)) fep->clk_ref = NULL; + fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); - fep->bufdesc_ex = - pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; if (IS_ERR(fep->clk_ptp)) { fep->clk_ptp = NULL; - fep->bufdesc_ex = 0; + fep->bufdesc_ex = false; } ret = fec_enet_clk_enable(ndev, true); @@ -3342,12 +3322,11 @@ static int __maybe_unused fec_suspend(struct device *dev) netif_device_detach(ndev); netif_tx_unlock_bh(ndev); fec_stop(ndev); + fec_enet_clk_enable(ndev, false); + pinctrl_pm_select_sleep_state(&fep->pdev->dev); } rtnl_unlock(); - fec_enet_clk_enable(ndev, false); - pinctrl_pm_select_sleep_state(&fep->pdev->dev); - if (fep->reg_phy) regulator_disable(fep->reg_phy); @@ -3366,13 +3345,14 @@ static int __maybe_unused fec_resume(struct device *dev) return ret; } - pinctrl_pm_select_default_state(&fep->pdev->dev); - ret = fec_enet_clk_enable(ndev, true); - if (ret) - goto failed_clk; - rtnl_lock(); if (netif_running(ndev)) { + pinctrl_pm_select_default_state(&fep->pdev->dev); + ret = fec_enet_clk_enable(ndev, true); + if (ret) { + rtnl_unlock(); + goto failed_clk; + } fec_restart(ndev); netif_tx_lock_bh(ndev); netif_device_attach(ndev); diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index 3d4e08be1709..b34214e2df5f 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -341,6 +341,9 @@ static void restart(struct net_device *dev) FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */ } + /* Restore multicast and promiscuous settings */ + set_multicast_list(dev); + /* * Enable interrupts we wish to service. 
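
The one-line fs_enet fix above addresses a classic reset bug: a full MAC restart clears the multicast hash and promiscuous bits, so unless restart() replays the cached filter state, the interface silently stops receiving multicast after any link renegotiation. A compact model of the pattern (names hypothetical; the driver simply calls its existing set_multicast_list()):

#include <stdbool.h>
#include <stdio.h>

struct mac_state {
	bool promisc;
	unsigned long mc_hash[2];	/* hypothetical 64-bit hash filter */
};

/* after a reset the filter registers hold hardware defaults, so they
 * must be re-derived from the cached software state */
static void apply_rx_filters(const struct mac_state *st)
{
	printf("promisc=%d hash=%lx:%lx\n",	/* register writes, really */
	       st->promisc, st->mc_hash[0], st->mc_hash[1]);
}

static void restart_sketch(const struct mac_state *st)
{
	/* ... reset the MAC, reprogram speed/duplex ... */
	apply_rx_filters(st);	/* the set_multicast_list(dev) call added above */
	/* ... re-enable interrupts and DMA ... */
}

int main(void)
{
	struct mac_state st = { false, { 0x8001, 0 } };

	restart_sketch(&st);
	return 0;
}

The identical hunk is applied to the SCC variant of the driver just below.
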
*/ diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c index f30411f0701f..7a184e8816a4 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c @@ -355,6 +355,9 @@ static void restart(struct net_device *dev) if (fep->phydev->duplex) S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); + /* Restore multicast and promiscuous settings */ + set_multicast_list(dev); + S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); } diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 4fdf0aa16978..86dccb26fecc 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -173,10 +173,12 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, static int gfar_init_bds(struct net_device *ndev) { struct gfar_private *priv = netdev_priv(ndev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; struct gfar_priv_tx_q *tx_queue = NULL; struct gfar_priv_rx_q *rx_queue = NULL; struct txbd8 *txbdp; struct rxbd8 *rxbdp; + u32 *rfbptr; int i, j; for (i = 0; i < priv->num_tx_queues; i++) { @@ -201,6 +203,7 @@ static int gfar_init_bds(struct net_device *ndev) txbdp->status |= TXBD_WRAP; } + rfbptr = ®s->rfbptr0; for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; rx_queue->cur_rx = rx_queue->rx_bd_base; @@ -227,6 +230,8 @@ static int gfar_init_bds(struct net_device *ndev) rxbdp++; } + rx_queue->rfbptr = rfbptr; + rfbptr += 2; } return 0; @@ -336,6 +341,20 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv) } } +static void gfar_init_rqprm(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 __iomem *baddr; + int i; + + baddr = ®s->rqprm0; + for (i = 0; i < priv->num_rx_queues; i++) { + gfar_write(baddr, priv->rx_queue[i]->rx_ring_size | + (DEFAULT_RX_LFC_THR << FBTHR_SHIFT)); + baddr++; + } +} + static void gfar_rx_buff_size_config(struct gfar_private *priv) { int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN; @@ -396,6 +415,13 @@ static void gfar_mac_rx_config(struct gfar_private *priv) if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; + /* Clear the LFC bit */ + gfar_write(®s->rctrl, rctrl); + /* Init flow control threshold values */ + gfar_init_rqprm(priv); + gfar_write(®s->ptv, DEFAULT_LFC_PTVVAL); + rctrl |= RCTRL_LFC; + /* Init rctrl based on our settings */ gfar_write(®s->rctrl, rctrl); } @@ -1687,6 +1713,9 @@ static int init_phy(struct net_device *dev) priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); priv->phydev->advertising = priv->phydev->supported; + /* Add support for flow control, but don't advertise it by default */ + priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); + return 0; } @@ -2856,6 +2885,10 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) /* Setup the new bdp */ gfar_new_rxbdp(rx_queue, bdp, newskb); + /* Update Last Free RxBD pointer for LFC */ + if (unlikely(rx_queue->rfbptr && priv->tx_actual_en)) + gfar_write(rx_queue->rfbptr, (u32)bdp); + /* Update to the next pointer */ bdp = next_bd(bdp, base, rx_queue->rx_ring_size); @@ -3370,7 +3403,11 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) if (phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - lcl_adv = mii_advertise_flowctrl(phydev->advertising); + lcl_adv = 0; + if (phydev->advertising & ADVERTISED_Pause) + 
lcl_adv |= ADVERTISE_PAUSE_CAP; + if (phydev->advertising & ADVERTISED_Asym_Pause) + lcl_adv |= ADVERTISE_PAUSE_ASYM; flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (flowctrl & FLOW_CTRL_TX) @@ -3386,6 +3423,9 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) { struct gfar __iomem *regs = priv->gfargrp[0].regs; struct phy_device *phydev = priv->phydev; + struct gfar_priv_rx_q *rx_queue = NULL; + int i; + struct rxbd8 *bdp; if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) return; @@ -3394,6 +3434,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) u32 tempval1 = gfar_read(®s->maccfg1); u32 tempval = gfar_read(®s->maccfg2); u32 ecntrl = gfar_read(®s->ecntrl); + u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW); if (phydev->duplex != priv->oldduplex) { if (!(phydev->duplex)) @@ -3438,6 +3479,26 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); tempval1 |= gfar_get_flowctrl_cfg(priv); + /* Turn last free buffer recording on */ + if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + bdp = rx_queue->cur_rx; + /* skip to previous bd */ + bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1, + rx_queue->rx_bd_base, + rx_queue->rx_ring_size); + + if (rx_queue->rfbptr) + gfar_write(rx_queue->rfbptr, (u32)bdp); + } + + priv->tx_actual_en = 1; + } + + if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) + priv->tx_actual_en = 0; + gfar_write(®s->maccfg1, tempval1); gfar_write(®s->maccfg2, tempval); gfar_write(®s->ecntrl, ecntrl); diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 2805cfbf1765..b581b8823a2a 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -99,6 +99,10 @@ extern const char gfar_driver_version[]; #define GFAR_MAX_FIFO_STARVE 511 #define GFAR_MAX_FIFO_STARVE_OFF 511 +#define FBTHR_SHIFT 24 +#define DEFAULT_RX_LFC_THR 16 +#define DEFAULT_LFC_PTVVAL 4 + #define DEFAULT_RX_BUFFER_SIZE 1536 #define TX_RING_MOD_MASK(size) (size-1) #define RX_RING_MOD_MASK(size) (size-1) @@ -145,9 +149,7 @@ extern const char gfar_driver_version[]; | SUPPORTED_Autoneg \ | SUPPORTED_MII) -#define GFAR_SUPPORTED_GBIT (SUPPORTED_1000baseT_Full \ - | SUPPORTED_Pause \ - | SUPPORTED_Asym_Pause) +#define GFAR_SUPPORTED_GBIT SUPPORTED_1000baseT_Full /* TBI register addresses */ #define MII_TBICON 0x11 @@ -275,6 +277,7 @@ extern const char gfar_driver_version[]; #define RCTRL_TS_ENABLE 0x01000000 #define RCTRL_PAL_MASK 0x001f0000 +#define RCTRL_LFC 0x00004000 #define RCTRL_VLEX 0x00002000 #define RCTRL_FILREN 0x00001000 #define RCTRL_GHTX 0x00000400 @@ -851,7 +854,32 @@ struct gfar { u8 res23c[248]; u32 attr; /* 0x.bf8 - Attributes Register */ u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */ - u8 res24[688]; + u32 rqprm0; /* 0x.c00 - Receive queue parameters register 0 */ + u32 rqprm1; /* 0x.c04 - Receive queue parameters register 1 */ + u32 rqprm2; /* 0x.c08 - Receive queue parameters register 2 */ + u32 rqprm3; /* 0x.c0c - Receive queue parameters register 3 */ + u32 rqprm4; /* 0x.c10 - Receive queue parameters register 4 */ + u32 rqprm5; /* 0x.c14 - Receive queue parameters register 5 */ + u32 rqprm6; /* 0x.c18 - Receive queue parameters register 6 */ + u32 rqprm7; /* 0x.c1c - Receive queue parameters register 7 */ + u8 res24[36]; + u32 rfbptr0; /* 0x.c44 - Last 
free RxBD pointer for ring 0 */ + u8 res24a[4]; + u32 rfbptr1; /* 0x.c4c - Last free RxBD pointer for ring 1 */ + u8 res24b[4]; + u32 rfbptr2; /* 0x.c54 - Last free RxBD pointer for ring 2 */ + u8 res24c[4]; + u32 rfbptr3; /* 0x.c5c - Last free RxBD pointer for ring 3 */ + u8 res24d[4]; + u32 rfbptr4; /* 0x.c64 - Last free RxBD pointer for ring 4 */ + u8 res24e[4]; + u32 rfbptr5; /* 0x.c6c - Last free RxBD pointer for ring 5 */ + u8 res24f[4]; + u32 rfbptr6; /* 0x.c74 - Last free RxBD pointer for ring 6 */ + u8 res24g[4]; + u32 rfbptr7; /* 0x.c7c - Last free RxBD pointer for ring 7 */ + u8 res24h[4]; + u8 res24x[556]; u32 isrg0; /* 0x.eb0 - Interrupt steering group 0 register */ u32 isrg1; /* 0x.eb4 - Interrupt steering group 1 register */ u32 isrg2; /* 0x.eb8 - Interrupt steering group 2 register */ @@ -1011,6 +1039,7 @@ struct gfar_priv_rx_q { /* RX Coalescing values */ unsigned char rxcoalescing; unsigned long rxic; + u32 *rfbptr; }; enum gfar_irqinfo_id { @@ -1101,6 +1130,7 @@ struct gfar_private { unsigned int num_tx_queues; unsigned int num_rx_queues; unsigned int num_grps; + int tx_actual_en; /* Network Statistics */ struct gfar_extra_stats extra_stats; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 76d70708f864..3e1a9c1a67a9 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -579,8 +579,13 @@ static int gfar_spauseparam(struct net_device *dev, u32 tempval; tempval = gfar_read(®s->maccfg1); tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); - if (priv->tx_pause_en) + + priv->tx_actual_en = 0; + if (priv->tx_pause_en) { + priv->tx_actual_en = 1; tempval |= MACCFG1_TX_FLOW; + } + if (priv->rx_pause_en) tempval |= MACCFG1_RX_FLOW; gfar_write(®s->maccfg1, tempval); diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index 76a6e0c77d69..ae6e30d39f0f 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -490,7 +490,8 @@ static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus, eid = hp100_read_id(ioaddr); if (eid == NULL) { /* bad checksum? 
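
Stepping back, the gianfar hunks in this stretch wire up lossless flow control: each RX ring gains a "last free RxBD" MMIO register (rfbptr0..rfbptr7 in the register map above), and both the refill path and the link-state handler write the address of the most recently freed descriptor into it, so the controller can gauge remaining ring headroom before asserting pause frames. A minimal sketch of the publishing step (userspace stand-ins; gfar_write() is the real accessor):

#include <stdint.h>

struct rxbd { uint32_t status; uint32_t bufaddr; };

struct rx_ring {
	struct rxbd *base;
	int size;
	volatile uint32_t *rfbptr;	/* MMIO: last free RxBD pointer */
};

/* After refilling 'bdp', publish its address so the MAC can measure the
 * free headroom left on the ring; skipped unless TX pause is active. */
static void rx_refill_publish(struct rx_ring *ring, struct rxbd *bdp,
			      int tx_flow_on)
{
	if (ring->rfbptr && tx_flow_on)
		*ring->rfbptr = (uint32_t)(uintptr_t)bdp;	/* gfar_write() */
}

int main(void)
{
	struct rxbd bds[8];
	uint32_t rfbptr_reg = 0;	/* stands in for the MMIO register */
	struct rx_ring ring = { bds, 8, &rfbptr_reg };

	rx_refill_publish(&ring, &bds[3], 1);
	return rfbptr_reg != (uint32_t)(uintptr_t)&bds[3];	/* 0 on success */
}

This is also why gfar_spauseparam() and gfar_update_link_state() now track tx_actual_en: the pointer is only maintained while TX flow control is genuinely on.
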
*/ - printk(KERN_WARNING "hp100_probe: bad ID checksum at base port 0x%x\n", ioaddr); + printk(KERN_WARNING "%s: bad ID checksum at base port 0x%x\n", + __func__, ioaddr); goto out2; } @@ -498,7 +499,9 @@ static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus, for (i = uc = 0; i < 7; i++) uc += hp100_inb(LAN_ADDR + i); if (uc != 0xff) { - printk(KERN_WARNING "hp100_probe: bad lan address checksum at port 0x%x)\n", ioaddr); + printk(KERN_WARNING + "%s: bad lan address checksum at port 0x%x)\n", + __func__, ioaddr); err = -EIO; goto out2; } diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 87bd953cc2ee..3f3fba9e4650 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2323,16 +2323,11 @@ static int emac_check_deps(struct emac_instance *dev, static void emac_put_deps(struct emac_instance *dev) { - if (dev->mal_dev) - of_dev_put(dev->mal_dev); - if (dev->zmii_dev) - of_dev_put(dev->zmii_dev); - if (dev->rgmii_dev) - of_dev_put(dev->rgmii_dev); - if (dev->mdio_dev) - of_dev_put(dev->mdio_dev); - if (dev->tah_dev) - of_dev_put(dev->tah_dev); + of_dev_put(dev->mal_dev); + of_dev_put(dev->zmii_dev); + of_dev_put(dev->rgmii_dev); + of_dev_put(dev->mdio_dev); + of_dev_put(dev->tah_dev); } static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action, @@ -2371,8 +2366,7 @@ static int emac_wait_deps(struct emac_instance *dev) bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier); err = emac_check_deps(dev, deps) ? 0 : -ENODEV; for (i = 0; i < EMAC_DEP_COUNT; i++) { - if (deps[i].node) - of_node_put(deps[i].node); + of_node_put(deps[i].node); if (err && deps[i].ofdev) of_dev_put(deps[i].ofdev); } @@ -2383,8 +2377,7 @@ static int emac_wait_deps(struct emac_instance *dev) dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev; dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev; } - if (deps[EMAC_DEP_PREV_IDX].ofdev) - of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev); + of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev); return err; } @@ -3113,8 +3106,7 @@ static void __exit emac_exit(void) /* Destroy EMAC boot list */ for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++) - if (emac_boot_list[i]) - of_node_put(emac_boot_list[i]); + of_node_put(emac_boot_list[i]); } module_init(emac_init); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 5f6aded512f5..862d1989ae1c 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1075,7 +1075,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_HW_CSUM | NETIF_F_SG); - netdev->priv_flags |= IFF_UNICAST_FLT; + /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ + if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || + hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) + netdev->priv_flags |= IFF_UNICAST_FLT; adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); @@ -3133,12 +3136,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, * packets may get corrupted during padding by HW. * To WA this issue, pad all small packets manually. 
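
The e1000 transmit hunk that follows replaces the open-coded minimum-frame padding (skb_pad() plus manual len/tail fix-up) with the eth_skb_pad() helper, which performs the same ETH_ZLEN padding in one call and consumes the skb on failure, letting the caller simply return NETDEV_TX_OK. A userspace model of what the helper guarantees (a sketch of the semantics, not the kernel implementation):

#include <string.h>

#define ETH_ZLEN 60	/* minimum Ethernet frame length, sans FCS */

struct skb_sketch { unsigned char *data; int len; int tailroom; };

/* model of eth_skb_pad(): zero-extend short frames to ETH_ZLEN and
 * update len in one step; nonzero return means the skb was consumed */
static int eth_skb_pad_sketch(struct skb_sketch *skb)
{
	int pad = ETH_ZLEN - skb->len;

	if (pad <= 0)
		return 0;
	if (skb->tailroom < pad)
		return -1;	/* the real helper frees the skb here */
	memset(skb->data + skb->len, 0, pad);
	skb->len = ETH_ZLEN;	/* replaces the manual skb_set_tail_pointer() */
	return 0;
}

int main(void)
{
	unsigned char buf[64] = { 0xde, 0xad };
	struct skb_sketch skb = { buf, 2, sizeof(buf) - 2 };

	return eth_skb_pad_sketch(&skb) || skb.len != ETH_ZLEN;
}

The e1000e and fm10k hunks further down make the equivalent conversions with skb_put_padto() and eth_skb_pad() respectively.
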
*/ - if (skb->len < ETH_ZLEN) { - if (skb_pad(skb, ETH_ZLEN - skb->len)) - return NETDEV_TX_OK; - skb->len = ETH_ZLEN; - skb_set_tail_pointer(skb, ETH_ZLEN); - } + if (eth_skb_pad(skb)) + return NETDEV_TX_OK; mss = skb_shinfo(skb)->gso_size; /* The controller does a simple calculation to diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 247335d2c7ec..88936aa0029d 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3449,15 +3449,12 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 mrqc, rxcsum; + u32 rss_key[10]; int i; - static const u32 rsskey[10] = { - 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0, - 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe - }; - /* Fill out hash function seed */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (i = 0; i < 10; i++) - ew32(RSSRK(i), rsskey[i]); + ew32(RSSRK(i), rss_key[i]); /* Direct all traffic to queue 0 */ for (i = 0; i < 32; i++) @@ -5557,12 +5554,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, /* The minimum packet size with TCTL.PSP set is 17 bytes so * pad skb in order to meet this minimum size requirement */ - if (unlikely(skb->len < 17)) { - if (skb_pad(skb, 17 - skb->len)) - return NETDEV_TX_OK; - skb->len = 17; - skb_set_tail_pointer(skb, 17); - } + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; mss = skb_shinfo(skb)->gso_size; if (mss) { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 2d04464e6aa3..651f53bc7376 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -916,11 +916,15 @@ static u32 fm10k_get_rssrk_size(struct net_device *netdev) return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG; } -static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key) +static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) { struct fm10k_intfc *interface = netdev_priv(netdev); int i, err; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + err = fm10k_get_reta(netdev, indir); if (err || !key) return err; @@ -932,12 +936,16 @@ static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key) } static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir, - const u8 *key) + const u8 *key, const u8 hfunc) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_hw *hw = &interface->hw; int i, err; + /* We do not allow change in unsupported parameters */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + err = fm10k_set_reta(netdev, indir); if (err || !key) return err; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index e645af412e76..91516aed373e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -83,7 +83,7 @@ static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, return true; /* alloc new page for storage */ - page = alloc_page(GFP_ATOMIC | __GFP_COLD); + page = dev_alloc_page(); if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; return false; @@ -578,14 +578,9 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, if (skb_is_nonlinear(skb)) fm10k_pull_tail(rx_ring, rx_desc, skb); - /* if skb_pad returns an error the skb was freed */ - if 
(unlikely(skb->len < 60)) { - int pad_len = 60 - skb->len; - - if (skb_pad(skb, pad_len)) - return true; - __skb_put(skb, pad_len); - } + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; return false; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index a0cb74ab3dc6..4f5892cc32d7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1551,15 +1551,11 @@ void fm10k_down(struct fm10k_intfc *interface) static int fm10k_sw_init(struct fm10k_intfc *interface, const struct pci_device_id *ent) { - static const u32 seed[FM10K_RSSRK_SIZE] = { 0xda565a6d, 0xc20e5b25, - 0x3d256741, 0xb08fa343, - 0xcb2bcad0, 0xb4307bae, - 0xa32dcb77, 0x0cf23080, - 0x3bb7426a, 0xfa01acbe }; const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data]; struct fm10k_hw *hw = &interface->hw; struct pci_dev *pdev = interface->pdev; struct net_device *netdev = interface->netdev; + u32 rss_key[FM10K_RSSRK_SIZE]; unsigned int rss; int err; @@ -1673,8 +1669,8 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, /* initialize vxlan_port list */ INIT_LIST_HEAD(&interface->vxlan_port); - /* initialize RSS key */ - memcpy(interface->rssrk, seed, sizeof(seed)); + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + memcpy(interface->rssrk, rss_key, sizeof(rss_key)); /* Start off interface as being down */ set_bit(__FM10K_DOWN, &interface->state); diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index f1e33f896439..fc50f6461b13 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -87,7 +87,7 @@ #define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */ #endif /* I40E_FCOE */ #define I40E_MAX_AQ_BUF_SIZE 4096 -#define I40E_AQ_LEN 32 +#define I40E_AQ_LEN 128 #define I40E_AQ_WORK_LIMIT 16 #define I40E_MAX_USER_PRIORITY 8 #define I40E_DEFAULT_MSG_ENABLE 4 @@ -146,6 +146,7 @@ enum i40e_state_t { __I40E_DOWN_REQUESTED, __I40E_FD_FLUSH_REQUESTED, __I40E_RESET_FAILED, + __I40E_PORT_TX_SUSPENDED, }; enum i40e_interrupt_policy { @@ -269,7 +270,8 @@ struct i40e_pf { u16 msg_enable; char misc_int_name[IFNAMSIZ + 9]; u16 adminq_work_limit; /* num of admin receive queue desc to process */ - int service_timer_period; + unsigned long service_timer_period; + unsigned long service_timer_previous; struct timer_list service_timer; struct work_struct service_task; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 72f5d25a222f..77f6254a89ac 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -51,7 +51,7 @@ static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) static void i40e_adminq_init_regs(struct i40e_hw *hw) { /* set head and tail registers in our local struct */ - if (hw->mac.type == I40E_MAC_VF) { + if (i40e_is_vf(hw)) { hw->aq.asq.tail = I40E_VF_ATQT1; hw->aq.asq.head = I40E_VF_ATQH1; hw->aq.asq.len = I40E_VF_ATQLEN1; @@ -617,7 +617,8 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) /* pre-emptive resource lock release */ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); - hw->aq.nvm_busy = false; + hw->aq.nvm_release_on_done = false; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; ret_code = i40e_aq_set_hmc_resource_profile(hw, I40E_HMC_PROFILE_DEFAULT, @@ -754,12 +755,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, goto 
asq_send_command_exit; } - if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n"); - status = I40E_ERR_NVM; - goto asq_send_command_exit; - } - details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); if (cmd_details) { *details = *cmd_details; @@ -853,7 +848,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, */ if (!details->async && !details->postpone) { u32 total_delay = 0; - u32 delay_len = 10; do { /* AQ designers suggest use of head for better @@ -861,9 +855,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, */ if (i40e_asq_done(hw)) break; - /* ugh! delay while spin_lock */ - udelay(delay_len); - total_delay += delay_len; + usleep_range(1000, 2000); + total_delay++; } while (total_delay < hw->aq.asq_cmd_timeout); } @@ -903,9 +896,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; } - if (!status && i40e_is_nvm_update_op(desc)) - hw->aq.nvm_busy = true; - asq_send_command_error: mutex_unlock(&hw->aq.asq_mutex); asq_send_command_exit: @@ -958,9 +948,6 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQRX: Queue is empty.\n"); ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; goto clean_arq_element_out; } @@ -982,10 +969,10 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, e->desc = *desc; datalen = le16_to_cpu(desc->datalen); - e->msg_size = min(datalen, e->msg_size); - if (e->msg_buf != NULL && (e->msg_size != 0)) + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf != NULL && (e->msg_len != 0)) memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, - e->msg_size); + e->msg_len); i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, @@ -1021,7 +1008,6 @@ clean_arq_element_out: mutex_unlock(&hw->aq.arq_mutex); if (i40e_is_nvm_update_op(&e->desc)) { - hw->aq.nvm_busy = false; if (hw->aq.nvm_release_on_done) { i40e_release_nvm(hw); hw->aq.nvm_release_on_done = false; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index ba38a89c79d6..564d0b0192f7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -28,6 +28,7 @@ #define _I40E_ADMINQ_H_ #include "i40e_osdep.h" +#include "i40e_status.h" #include "i40e_adminq_cmd.h" #define I40E_ADMINQ_DESC(R, i) \ @@ -76,7 +77,8 @@ struct i40e_asq_cmd_details { /* ARQ event information */ struct i40e_arq_event_info { struct i40e_aq_desc desc; - u16 msg_size; + u16 msg_len; + u16 buf_len; u8 *msg_buf; }; @@ -93,7 +95,6 @@ struct i40e_adminq_info { u16 fw_min_ver; /* firmware minor version */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ - bool nvm_busy; bool nvm_release_on_done; struct mutex asq_mutex; /* Send queue lock */ @@ -108,7 +109,7 @@ struct i40e_adminq_info { * i40e_aq_rc_to_posix - convert errors to user-land codes * aq_rc: AdminQ error code to convert **/ -static inline int i40e_aq_rc_to_posix(u16 aq_rc) +static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) { int aq_to_posix[] = { 0, /* I40E_AQ_RC_OK */ @@ -136,12 +137,18 @@ static inline int i40e_aq_rc_to_posix(u16 aq_rc) -EFBIG, /* I40E_AQ_RC_EFBIG */ }; + /* aq_rc is invalid if AQ timed out */ + if (aq_ret == 
I40E_ERR_ADMIN_QUEUE_TIMEOUT) + return -EAGAIN; + + if (aq_rc >= ARRAY_SIZE(aq_to_posix)) + return -ERANGE; return aq_to_posix[aq_rc]; } /* general information */ #define I40E_AQ_LARGE_BUF 512 -#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */ +#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 15f289f2917f..8835aeeff23e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -33,8 +33,8 @@ * This file needs to comply with the Linux Kernel coding style. */ -#define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR 0x0002 struct i40e_aq_desc { __le16 flags; @@ -66,216 +66,217 @@ struct i40e_aq_desc { */ /* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 -#define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 -#define I40E_AQ_FLAG_LB_SHIFT 9 -#define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 -#define I40E_AQ_FLAG_BUF_SHIFT 12 -#define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 - -#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { - I40E_AQ_RC_OK = 0, /* success */ - I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ - I40E_AQ_RC_ENOENT = 2, /* No such element */ - I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ - I40E_AQ_RC_EINTR = 4, /* operation interrupted */ - I40E_AQ_RC_EIO = 5, /* I/O error */ - I40E_AQ_RC_ENXIO = 6, 
/* No such resource */ - I40E_AQ_RC_E2BIG = 7, /* Arg too long */ - I40E_AQ_RC_EAGAIN = 8, /* Try again */ - I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ - I40E_AQ_RC_EACCES = 10, /* Permission denied */ - I40E_AQ_RC_EFAULT = 11, /* Bad address */ - I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ - I40E_AQ_RC_EEXIST = 13, /* object already exists */ - I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ - I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ - I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ - I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ - I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ - I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */ - I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ - I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ - I40E_AQ_RC_EFBIG = 22, /* File too large */ + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ }; /* Admin Queue command opcodes */ enum i40e_admin_queue_opc { /* aq commands */ - i40e_aqc_opc_get_version = 0x0001, - i40e_aqc_opc_driver_version = 0x0002, - i40e_aqc_opc_queue_shutdown = 0x0003, - i40e_aqc_opc_set_pf_context = 0x0004, + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, /* resource ownership */ - i40e_aqc_opc_request_resource = 0x0008, - i40e_aqc_opc_release_resource = 0x0009, + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, - i40e_aqc_opc_list_func_capabilities = 0x000A, - i40e_aqc_opc_list_dev_capabilities = 0x000B, + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, + i40e_aqc_opc_set_cppm_configuration = 0x0103, + i40e_aqc_opc_set_arp_proxy_entry = 0x0104, + i40e_aqc_opc_set_ns_proxy_entry = 0x0105, /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ - i40e_aqc_opc_mac_address_read = 0x0107, - i40e_aqc_opc_mac_address_write = 0x0108, + i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, /* PXE */ - i40e_aqc_opc_clear_pxe_mode = 0x0110, + i40e_aqc_opc_clear_pxe_mode = 0x0110, /* internal switch commands */ - i40e_aqc_opc_get_switch_config = 0x0200, - 
i40e_aqc_opc_add_statistics = 0x0201, - i40e_aqc_opc_remove_statistics = 0x0202, - i40e_aqc_opc_set_port_parameters = 0x0203, - i40e_aqc_opc_get_switch_resource_alloc = 0x0204, - - i40e_aqc_opc_add_vsi = 0x0210, - i40e_aqc_opc_update_vsi_parameters = 0x0211, - i40e_aqc_opc_get_vsi_parameters = 0x0212, - - i40e_aqc_opc_add_pv = 0x0220, - i40e_aqc_opc_update_pv_parameters = 0x0221, - i40e_aqc_opc_get_pv_parameters = 0x0222, - - i40e_aqc_opc_add_veb = 0x0230, - i40e_aqc_opc_update_veb_parameters = 0x0231, - i40e_aqc_opc_get_veb_parameters = 0x0232, - - i40e_aqc_opc_delete_element = 0x0243, - - i40e_aqc_opc_add_macvlan = 0x0250, - i40e_aqc_opc_remove_macvlan = 0x0251, - i40e_aqc_opc_add_vlan = 0x0252, - i40e_aqc_opc_remove_vlan = 0x0253, - i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, - i40e_aqc_opc_add_tag = 0x0255, - i40e_aqc_opc_remove_tag = 0x0256, - i40e_aqc_opc_add_multicast_etag = 0x0257, - i40e_aqc_opc_remove_multicast_etag = 0x0258, - i40e_aqc_opc_update_tag = 0x0259, - i40e_aqc_opc_add_control_packet_filter = 0x025A, - i40e_aqc_opc_remove_control_packet_filter = 0x025B, - i40e_aqc_opc_add_cloud_filters = 0x025C, - i40e_aqc_opc_remove_cloud_filters = 0x025D, - - i40e_aqc_opc_add_mirror_rule = 0x0260, - i40e_aqc_opc_delete_mirror_rule = 0x0261, + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, /* DCB commands */ - i40e_aqc_opc_dcb_ignore_pfc = 0x0301, - i40e_aqc_opc_dcb_updated = 0x0302, + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, /* TX scheduler */ - i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, - i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, - i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, - i40e_aqc_opc_query_vsi_bw_config = 0x0408, - i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, - i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, - - i40e_aqc_opc_enable_switching_comp_ets = 0x0413, - i40e_aqc_opc_modify_switching_comp_ets = 0x0414, - i40e_aqc_opc_disable_switching_comp_ets = 0x0415, - i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, - i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, - i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, - i40e_aqc_opc_query_port_ets_config = 0x0419, - i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, - i40e_aqc_opc_suspend_port_tx 
= 0x041B, - i40e_aqc_opc_resume_port_tx = 0x041C, - i40e_aqc_opc_configure_partition_bw = 0x041D, + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, /* phy commands*/ - i40e_aqc_opc_get_phy_abilities = 0x0600, - i40e_aqc_opc_set_phy_config = 0x0601, - i40e_aqc_opc_set_mac_config = 0x0603, - i40e_aqc_opc_set_link_restart_an = 0x0605, - i40e_aqc_opc_get_link_status = 0x0607, - i40e_aqc_opc_set_phy_int_mask = 0x0613, - i40e_aqc_opc_get_local_advt_reg = 0x0614, - i40e_aqc_opc_set_local_advt_reg = 0x0615, - i40e_aqc_opc_get_partner_advt = 0x0616, - i40e_aqc_opc_set_lb_modes = 0x0618, - i40e_aqc_opc_get_phy_wol_caps = 0x0621, - i40e_aqc_opc_set_phy_debug = 0x0622, - i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, /* NVM commands */ - i40e_aqc_opc_nvm_read = 0x0701, - i40e_aqc_opc_nvm_erase = 0x0702, - i40e_aqc_opc_nvm_update = 0x0703, - i40e_aqc_opc_nvm_config_read = 0x0704, - i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, /* virtualization commands */ - i40e_aqc_opc_send_msg_to_pf = 0x0801, - i40e_aqc_opc_send_msg_to_vf = 0x0802, - i40e_aqc_opc_send_msg_to_peer = 0x0803, + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, /* alternate structure */ - i40e_aqc_opc_alternate_write = 0x0900, - i40e_aqc_opc_alternate_write_indirect = 0x0901, - i40e_aqc_opc_alternate_read = 0x0902, - i40e_aqc_opc_alternate_read_indirect = 0x0903, - i40e_aqc_opc_alternate_write_done = 0x0904, - i40e_aqc_opc_alternate_set_mode = 0x0905, - i40e_aqc_opc_alternate_clear_port = 0x0906, + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + 
i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, /* LLDP commands */ - i40e_aqc_opc_lldp_get_mib = 0x0A00, - i40e_aqc_opc_lldp_update_mib = 0x0A01, - i40e_aqc_opc_lldp_add_tlv = 0x0A02, - i40e_aqc_opc_lldp_update_tlv = 0x0A03, - i40e_aqc_opc_lldp_delete_tlv = 0x0A04, - i40e_aqc_opc_lldp_stop = 0x0A05, - i40e_aqc_opc_lldp_start = 0x0A06, + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, + i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07, /* Tunnel commands */ - i40e_aqc_opc_add_udp_tunnel = 0x0B00, - i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_tunnel_key_structure = 0x0B10, /* Async Events */ - i40e_aqc_opc_event_lan_overflow = 0x1001, + i40e_aqc_opc_event_lan_overflow = 0x1001, /* OEM commands */ - i40e_aqc_opc_oem_parameter_change = 0xFE00, - i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, - i40e_aqc_opc_debug_read_reg = 0xFF03, - i40e_aqc_opc_debug_write_reg = 0xFF04, - i40e_aqc_opc_debug_modify_reg = 0xFF07, - i40e_aqc_opc_debug_dump_internals = 0xFF08, - i40e_aqc_opc_debug_modify_internals = 0xFF09, + i40e_aqc_opc_debug_get_deviceid = 0xFF00, + i40e_aqc_opc_debug_set_mode = 0xFF01, + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, + i40e_aqc_opc_debug_modify_internals = 0xFF09, }; /* command structures and indirect data structures */ @@ -302,7 +303,7 @@ enum i40e_admin_queue_opc { /* This macro is used extensively to ensure that command structures are 16 * bytes in length as they have to map to the raw array of that size. 
*/ -#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) /* internal (0x00XX) commands */ @@ -320,22 +321,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); /* Send driver version (indirect 0x0002) */ struct i40e_aqc_driver_version { - u8 driver_major_ver; - u8 driver_minor_ver; - u8 driver_build_ver; - u8 driver_subbuild_ver; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; + u8 driver_major_ver; + u8 driver_minor_ver; + u8 driver_build_ver; + u8 driver_subbuild_ver; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); /* Queue Shutdown (direct 0x0003) */ struct i40e_aqc_queue_shutdown { - __le32 driver_unloading; -#define I40E_AQ_DRIVER_UNLOADING 0x1 - u8 reserved[12]; + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); @@ -351,19 +352,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); /* Request resource ownership (direct 0x0008) * Release resource ownership (direct 0x0009) */ -#define I40E_AQ_RESOURCE_NVM 1 -#define I40E_AQ_RESOURCE_SDP 2 -#define I40E_AQ_RESOURCE_ACCESS_READ 1 -#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 -#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 -#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 +#define I40E_AQ_RESOURCE_NVM 1 +#define I40E_AQ_RESOURCE_SDP 2 +#define I40E_AQ_RESOURCE_ACCESS_READ 1 +#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 +#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 +#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 struct i40e_aqc_request_resource { - __le16 resource_id; - __le16 access_type; - __le32 timeout; - __le32 resource_number; - u8 reserved[4]; + __le16 resource_id; + __le16 access_type; + __le32 timeout; + __le32 resource_number; + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); @@ -373,7 +374,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); */ struct i40e_aqc_list_capabilites { u8 command_flags; -#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 +#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 u8 pf_index; u8 reserved[2]; __le32 count; @@ -384,123 +385,123 @@ struct i40e_aqc_list_capabilites { I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); struct i40e_aqc_list_capabilities_element_resp { - __le16 id; - u8 major_rev; - u8 minor_rev; - __le32 number; - __le32 logical_id; - __le32 phys_id; - u8 reserved[16]; + __le16 id; + u8 major_rev; + u8 minor_rev; + __le32 number; + __le32 logical_id; + __le32 phys_id; + u8 reserved[16]; }; /* list of caps */ -#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 -#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 -#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 -#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 -#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 -#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 -#define I40E_AQ_CAP_ID_SRIOV 0x0012 -#define I40E_AQ_CAP_ID_VF 0x0013 -#define I40E_AQ_CAP_ID_VMDQ 0x0014 -#define I40E_AQ_CAP_ID_8021QBG 0x0015 -#define I40E_AQ_CAP_ID_8021QBR 0x0016 -#define I40E_AQ_CAP_ID_VSI 0x0017 -#define I40E_AQ_CAP_ID_DCB 0x0018 -#define I40E_AQ_CAP_ID_FCOE 0x0021 -#define I40E_AQ_CAP_ID_RSS 0x0040 -#define I40E_AQ_CAP_ID_RXQ 0x0041 -#define I40E_AQ_CAP_ID_TXQ 0x0042 -#define I40E_AQ_CAP_ID_MSIX 0x0043 -#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 -#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 -#define I40E_AQ_CAP_ID_1588 0x0046 -#define I40E_AQ_CAP_ID_IWARP 0x0051 -#define I40E_AQ_CAP_ID_LED 0x0061 -#define I40E_AQ_CAP_ID_SDP 0x0062 -#define I40E_AQ_CAP_ID_MDIO 0x0063 -#define 
I40E_AQ_CAP_ID_FLEX10 0x00F1 -#define I40E_AQ_CAP_ID_CEM 0x00F2 +#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 +#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 +#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 +#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 +#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 +#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_SRIOV 0x0012 +#define I40E_AQ_CAP_ID_VF 0x0013 +#define I40E_AQ_CAP_ID_VMDQ 0x0014 +#define I40E_AQ_CAP_ID_8021QBG 0x0015 +#define I40E_AQ_CAP_ID_8021QBR 0x0016 +#define I40E_AQ_CAP_ID_VSI 0x0017 +#define I40E_AQ_CAP_ID_DCB 0x0018 +#define I40E_AQ_CAP_ID_FCOE 0x0021 +#define I40E_AQ_CAP_ID_RSS 0x0040 +#define I40E_AQ_CAP_ID_RXQ 0x0041 +#define I40E_AQ_CAP_ID_TXQ 0x0042 +#define I40E_AQ_CAP_ID_MSIX 0x0043 +#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 +#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 +#define I40E_AQ_CAP_ID_1588 0x0046 +#define I40E_AQ_CAP_ID_IWARP 0x0051 +#define I40E_AQ_CAP_ID_LED 0x0061 +#define I40E_AQ_CAP_ID_SDP 0x0062 +#define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_FLEX10 0x00F1 +#define I40E_AQ_CAP_ID_CEM 0x00F2 /* Set CPPM Configuration (direct 0x0103) */ struct i40e_aqc_cppm_configuration { - __le16 command_flags; -#define I40E_AQ_CPPM_EN_LTRC 0x0800 -#define I40E_AQ_CPPM_EN_DMCTH 0x1000 -#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 -#define I40E_AQ_CPPM_EN_HPTC 0x4000 -#define I40E_AQ_CPPM_EN_DMARC 0x8000 - __le16 ttlx; - __le32 dmacr; - __le16 dmcth; - u8 hptc; - u8 reserved; - __le32 pfltrc; + __le16 command_flags; +#define I40E_AQ_CPPM_EN_LTRC 0x0800 +#define I40E_AQ_CPPM_EN_DMCTH 0x1000 +#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 +#define I40E_AQ_CPPM_EN_HPTC 0x4000 +#define I40E_AQ_CPPM_EN_DMARC 0x8000 + __le16 ttlx; + __le32 dmacr; + __le16 dmcth; + u8 hptc; + u8 reserved; + __le32 pfltrc; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { - __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0008 -#define I40E_AQ_ARP_UNSUP_CTL 0x0010 -#define I40E_AQ_ARP_ENA 0x0020 -#define I40E_AQ_ARP_ADD_IPV4 0x0040 -#define I40E_AQ_ARP_DEL_IPV4 0x0080 - __le16 table_id; - __le32 pfpm_proxyfc; - __le32 ip_addr; - u8 mac_addr[6]; + __le16 command_flags; +#define I40E_AQ_ARP_INIT_IPV4 0x0008 +#define I40E_AQ_ARP_UNSUP_CTL 0x0010 +#define I40E_AQ_ARP_ENA 0x0020 +#define I40E_AQ_ARP_ADD_IPV4 0x0040 +#define I40E_AQ_ARP_DEL_IPV4 0x0080 + __le16 table_id; + __le32 pfpm_proxyfc; + __le32 ip_addr; + u8 mac_addr[6]; }; /* Set NS Proxy Table Entry Command (indirect 0x0105) */ struct i40e_aqc_ns_proxy_data { - __le16 table_idx_mac_addr_0; - __le16 table_idx_mac_addr_1; - __le16 table_idx_ipv6_0; - __le16 table_idx_ipv6_1; - __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0100 -#define I40E_AQ_NS_PROXY_DEL_0 0x0200 -#define I40E_AQ_NS_PROXY_ADD_1 0x0400 -#define I40E_AQ_NS_PROXY_DEL_1 0x0800 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 - u8 mac_addr_0[6]; - u8 mac_addr_1[6]; - u8 local_mac_addr[6]; - u8 ipv6_addr_0[16]; /* Warning! 
spec specifies BE byte order */ - u8 ipv6_addr_1[16]; + __le16 table_idx_mac_addr_0; + __le16 table_idx_mac_addr_1; + __le16 table_idx_ipv6_0; + __le16 table_idx_ipv6_1; + __le16 control; +#define I40E_AQ_NS_PROXY_ADD_0 0x0100 +#define I40E_AQ_NS_PROXY_DEL_0 0x0200 +#define I40E_AQ_NS_PROXY_ADD_1 0x0400 +#define I40E_AQ_NS_PROXY_DEL_1 0x0800 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 + u8 mac_addr_0[6]; + u8 mac_addr_1[6]; + u8 local_mac_addr[6]; + u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ + u8 ipv6_addr_1[16]; }; /* Manage LAA Command (0x0106) - obsolete */ struct i40e_aqc_mng_laa { __le16 command_flags; -#define I40E_AQ_LAA_FLAG_WR 0x8000 - u8 reserved[2]; - __le32 sal; - __le16 sah; - u8 reserved2[6]; +#define I40E_AQ_LAA_FLAG_WR 0x8000 + u8 reserved[2]; + __le32 sal; + __le16 sah; + u8 reserved2[6]; }; /* Manage MAC Address Read Command (indirect 0x0107) */ struct i40e_aqc_mac_address_read { __le16 command_flags; -#define I40E_AQC_LAN_ADDR_VALID 0x10 -#define I40E_AQC_SAN_ADDR_VALID 0x20 -#define I40E_AQC_PORT_ADDR_VALID 0x40 -#define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; +#define I40E_AQC_LAN_ADDR_VALID 0x10 +#define I40E_AQC_SAN_ADDR_VALID 0x20 +#define I40E_AQC_PORT_ADDR_VALID 0x40 +#define I40E_AQC_WOL_ADDR_VALID 0x80 +#define I40E_AQC_ADDR_VALID_MASK 0xf0 + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); @@ -516,14 +517,14 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); /* Manage MAC Address Write Command (0x0108) */ struct i40e_aqc_mac_address_write { - __le16 command_flags; -#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 -#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 -#define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 - __le16 mac_sah; - __le32 mac_sal; - u8 reserved[8]; + __le16 command_flags; +#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 +#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 +#define I40E_AQC_WRITE_TYPE_PORT 0x8000 +#define I40E_AQC_WRITE_TYPE_MASK 0xc000 + __le16 mac_sah; + __le32 mac_sal; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); @@ -544,10 +545,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); * command */ struct i40e_aqc_switch_seid { - __le16 seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; + __le16 seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); @@ -556,34 +557,34 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_switch_config_header_resp { - __le16 num_reported; - __le16 num_total; - u8 reserved[12]; + __le16 num_reported; + __le16 num_total; + u8 reserved[12]; }; struct i40e_aqc_switch_config_element_resp { - u8 element_type; -#define I40E_AQ_SW_ELEM_TYPE_MAC 1 -#define I40E_AQ_SW_ELEM_TYPE_PF 2 -#define I40E_AQ_SW_ELEM_TYPE_VF 3 -#define I40E_AQ_SW_ELEM_TYPE_EMP 4 -#define I40E_AQ_SW_ELEM_TYPE_BMC 5 -#define I40E_AQ_SW_ELEM_TYPE_PV 16 -#define I40E_AQ_SW_ELEM_TYPE_VEB 17 -#define I40E_AQ_SW_ELEM_TYPE_PA 18 -#define I40E_AQ_SW_ELEM_TYPE_VSI 19 - u8 revision; -#define I40E_AQ_SW_ELEM_REV_1 1 - __le16 seid; - __le16 
uplink_seid; - __le16 downlink_seid; - u8 reserved[3]; - u8 connection_type; -#define I40E_AQ_CONN_TYPE_REGULAR 0x1 -#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_CONN_TYPE_CASCADED 0x3 - __le16 scheduler_id; - __le16 element_info; + u8 element_type; +#define I40E_AQ_SW_ELEM_TYPE_MAC 1 +#define I40E_AQ_SW_ELEM_TYPE_PF 2 +#define I40E_AQ_SW_ELEM_TYPE_VF 3 +#define I40E_AQ_SW_ELEM_TYPE_EMP 4 +#define I40E_AQ_SW_ELEM_TYPE_BMC 5 +#define I40E_AQ_SW_ELEM_TYPE_PV 16 +#define I40E_AQ_SW_ELEM_TYPE_VEB 17 +#define I40E_AQ_SW_ELEM_TYPE_PA 18 +#define I40E_AQ_SW_ELEM_TYPE_VSI 19 + u8 revision; +#define I40E_AQ_SW_ELEM_REV_1 1 + __le16 seid; + __le16 uplink_seid; + __le16 downlink_seid; + u8 reserved[3]; + u8 connection_type; +#define I40E_AQ_CONN_TYPE_REGULAR 0x1 +#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_CONN_TYPE_CASCADED 0x3 + __le16 scheduler_id; + __le16 element_info; }; /* Get Switch Configuration (indirect 0x0200) @@ -591,73 +592,73 @@ struct i40e_aqc_switch_config_element_resp { * the first in the array is the header, remainder are elements */ struct i40e_aqc_get_switch_config_resp { - struct i40e_aqc_get_switch_config_header_resp header; - struct i40e_aqc_switch_config_element_resp element[1]; + struct i40e_aqc_get_switch_config_header_resp header; + struct i40e_aqc_switch_config_element_resp element[1]; }; /* Add Statistics (direct 0x0201) * Remove Statistics (direct 0x0202) */ struct i40e_aqc_add_remove_statistics { - __le16 seid; - __le16 vlan; - __le16 stat_index; - u8 reserved[10]; + __le16 seid; + __le16 vlan; + __le16 stat_index; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); /* Set Port Parameters command (direct 0x0203) */ struct i40e_aqc_set_port_parameters { - __le16 command_flags; -#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 -#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ -#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 - __le16 bad_frame_vsi; - __le16 default_seid; /* reserved for command */ - u8 reserved[10]; + __le16 command_flags; +#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 +#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! 
*/ +#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 + __le16 bad_frame_vsi; + __le16 default_seid; /* reserved for command */ + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); /* Get Switch Resource Allocation (indirect 0x0204) */ struct i40e_aqc_get_switch_resource_alloc { - u8 num_entries; /* reserved for command */ - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; + u8 num_entries; /* reserved for command */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); /* expect an array of these structs in the response buffer */ struct i40e_aqc_switch_resource_alloc_element_resp { - u8 resource_type; -#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 -#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 -#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 -#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 -#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 -#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 -#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 -#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 -#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 -#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 -#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA -#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB -#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC -#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD -#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF -#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 -#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 -#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 -#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 - u8 reserved1; - __le16 guaranteed; - __le16 total; - __le16 used; - __le16 total_unalloced; - u8 reserved2[6]; + u8 resource_type; +#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 +#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 +#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 +#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 +#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 +#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 +#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 +#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 +#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 +#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 +#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA +#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB +#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC +#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD +#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF +#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 +#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 +#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 +#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 + u8 reserved1; + __le16 guaranteed; + __le16 total; + __le16 used; + __le16 total_unalloced; + u8 reserved2[6]; }; /* Add VSI (indirect 0x0210) @@ -671,24 +672,24 @@ struct i40e_aqc_switch_resource_alloc_element_resp { * uses the same completion and data structure as Add VSI */ struct i40e_aqc_add_get_update_vsi { - __le16 uplink_seid; - u8 connection_type; -#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 -#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 - u8 reserved1; - u8 vf_id; - u8 reserved2; - __le16 vsi_flags; -#define I40E_AQ_VSI_TYPE_SHIFT 0x0 -#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) -#define I40E_AQ_VSI_TYPE_VF 0x0 -#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 -#define I40E_AQ_VSI_TYPE_PF 0x2 -#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 -#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 - __le32 addr_high; - __le32 addr_low; + __le16 uplink_seid; + u8 connection_type; +#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 +#define 
I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 + u8 reserved1; + u8 vf_id; + u8 reserved2; + __le16 vsi_flags; +#define I40E_AQ_VSI_TYPE_SHIFT 0x0 +#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) +#define I40E_AQ_VSI_TYPE_VF 0x0 +#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 +#define I40E_AQ_VSI_TYPE_PF 0x2 +#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 +#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); @@ -706,121 +707,121 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); struct i40e_aqc_vsi_properties_data { /* first 96 byte are written by SW */ - __le16 valid_sections; -#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 -#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 -#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 -#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 -#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 -#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 -#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 -#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 -#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 -#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 /* switch section */ - __le16 switch_id; /* 12bit id combined with flags below */ -#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 -#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) -#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 -#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 -#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 - u8 sw_reserved[2]; + __le16 switch_id; /* 12bit id combined with flags below */ +#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; /* security section */ - u8 sec_flags; -#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 - u8 sec_reserved; + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; /* VLAN section */ - __le16 pvid; /* VLANS include priority bits */ - __le16 fcoe_pvid; - u8 port_vlan_flags; -#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 -#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ - I40E_AQ_VSI_PVLAN_MODE_SHIFT) -#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 -#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 -#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 -#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 -#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 -#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ - I40E_AQ_VSI_PVLAN_EMOD_SHIFT) -#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 -#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 -#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 -#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 - u8 pvlan_reserved[3]; + __le16 pvid; /* VLANS include priority bits */ + __le16 
fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; /* ingress egress up sections */ - __le32 ingress_table; /* bitmap, 3 bits per up */ -#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 -#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 -#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 -#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 -#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 -#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 -#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 -#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 -#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) - __le32 egress_table; /* same defines as for ingress table */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 +#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ /* cascaded PV section */ - __le16 cas_pv_tag; - u8 cas_pv_flags; -#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ - I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) -#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 -#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 -#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 -#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 -#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 - u8 cas_pv_reserved; + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + 
I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; /* queue mapping section */ - __le16 mapping_flags; -#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 -#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 - __le16 queue_mapping[16]; -#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 -#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) - __le16 tc_mapping[8]; -#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 -#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ - I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) -#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 -#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ - I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ - u8 queueing_opt_flags; -#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 -#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 - u8 queueing_opt_reserved[3]; + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 + u8 queueing_opt_reserved[3]; /* scheduler section */ - u8 up_enable_bits; - u8 sched_reserved; + u8 up_enable_bits; + u8 sched_reserved; /* outer up section */ - __le32 outer_up_table; /* same structure and defines as ingress table */ - u8 cmd_reserved[8]; + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + u8 cmd_reserved[8]; /* last 32 bytes are written by FW */ - __le16 qs_handle[8]; + __le16 qs_handle[8]; #define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF - __le16 stat_counter_idx; - __le16 sched_id; - u8 resp_reserved[12]; + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; }; I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); @@ -830,26 +831,26 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); * (IS_CTRL_PORT only works on add PV) */ struct i40e_aqc_add_update_pv { - __le16 command_flags; -#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 -#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 - __le16 uplink_seid; - __le16 connected_seid; - u8 reserved[10]; + __le16 command_flags; +#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 +#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 + __le16 uplink_seid; + __le16 connected_seid; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); struct i40e_aqc_add_update_pv_completion { /* reserved for update; for add also encodes error if rc == ENOSPC */ - __le16 pv_seid; -#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 -#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 - u8 reserved[14]; + __le16 pv_seid; +#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 +#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 +#define 
I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); @@ -859,48 +860,48 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); */ struct i40e_aqc_get_pv_params_completion { - __le16 seid; - __le16 default_stag; - __le16 pv_flags; /* same flags as add_pv */ -#define I40E_AQC_GET_PV_PV_TYPE 0x1 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 - u8 reserved[8]; - __le16 default_port_seid; + __le16 seid; + __le16 default_stag; + __le16 pv_flags; /* same flags as add_pv */ +#define I40E_AQC_GET_PV_PV_TYPE 0x1 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 + u8 reserved[8]; + __le16 default_port_seid; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); /* Add VEB (direct 0x0230) */ struct i40e_aqc_add_veb { - __le16 uplink_seid; - __le16 downlink_seid; - __le16 veb_flags; -#define I40E_AQC_ADD_VEB_FLOATING 0x1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ + __le16 uplink_seid; + __le16 downlink_seid; + __le16 veb_flags; +#define I40E_AQC_ADD_VEB_FLOATING 0x1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) -#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 -#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 - u8 enable_tcs; - u8 reserved[9]; +#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 +#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 + u8 enable_tcs; + u8 reserved[9]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); struct i40e_aqc_add_veb_completion { - u8 reserved[6]; - __le16 switch_seid; + u8 reserved[6]; + __le16 switch_seid; /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ - __le16 veb_seid; -#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 -#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 - __le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; + __le16 veb_seid; +#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 +#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); @@ -909,13 +910,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_veb_parameters_completion { - __le16 seid; - __le16 switch_id; - __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ - __le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; - u8 reserved[4]; + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); @@ -928,37 +929,37 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); /* used for the command for most vlan commands */ struct i40e_aqc_macvlan { - __le16 num_addresses; - __le16 seid[3]; -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 num_addresses; + __le16 seid[3]; +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 +#define 
I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) -#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 - __le32 addr_high; - __le32 addr_low; +#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); /* indirect data for command and response */ struct i40e_aqc_add_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - __le16 flags; -#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 -#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 -#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 -#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 - __le16 queue_number; -#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ + u8 mac_addr[6]; + __le16 vlan_tag; + __le16 flags; +#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 +#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 +#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 +#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 + __le16 queue_number; +#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) /* response section */ - u8 match_method; -#define I40E_AQC_MM_PERFECT_MATCH 0x01 -#define I40E_AQC_MM_HASH_MATCH 0x02 -#define I40E_AQC_MM_ERR_NO_RES 0xFF - u8 reserved1[3]; + u8 match_method; +#define I40E_AQC_MM_PERFECT_MATCH 0x01 +#define I40E_AQC_MM_HASH_MATCH 0x02 +#define I40E_AQC_MM_ERR_NO_RES 0xFF + u8 reserved1[3]; }; struct i40e_aqc_add_remove_macvlan_completion { @@ -978,19 +979,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); */ struct i40e_aqc_remove_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - u8 flags; -#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 -#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 -#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 -#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 - u8 reserved[3]; + u8 mac_addr[6]; + __le16 vlan_tag; + u8 flags; +#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 +#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 +#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 +#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 + u8 reserved[3]; /* reply section */ - u8 error_code; -#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF - u8 reply_reserved[3]; + u8 error_code; +#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF + u8 reply_reserved[3]; }; /* Add VLAN (indirect 0x0252) @@ -998,59 +999,58 @@ struct i40e_aqc_remove_macvlan_element_data { * use the generic i40e_aqc_macvlan for the command */ struct i40e_aqc_add_remove_vlan_element_data { - __le16 vlan_tag; - u8 vlan_flags; + __le16 vlan_tag; + u8 vlan_flags; /* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_LOCAL 0x1 -#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 -#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \ - I40E_AQC_ADD_PVLAN_TYPE_SHIFT) -#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 -#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 -#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 -#define I40E_AQC_VLAN_PTYPE_SHIFT 3 -#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) -#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 -#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 -#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 -#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 +#define I40E_AQC_ADD_VLAN_LOCAL 0x1 +#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 +#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) +#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 +#define 
I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 +#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 +#define I40E_AQC_VLAN_PTYPE_SHIFT 3 +#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) +#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 +#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 +#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 +#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 /* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_ALL 0x1 - u8 reserved; - u8 result; +#define I40E_AQC_REMOVE_VLAN_ALL 0x1 + u8 reserved; + u8 result; /* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 -#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE -#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF +#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 +#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE +#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF /* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF - u8 reserved1[3]; +#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF + u8 reserved1[3]; }; struct i40e_aqc_add_remove_vlan_completion { - u8 reserved[4]; - __le16 vlans_used; - __le16 vlans_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[4]; + __le16 vlans_used; + __le16 vlans_free; + __le32 addr_high; + __le32 addr_low; }; /* Set VSI Promiscuous Modes (direct 0x0254) */ struct i40e_aqc_set_vsi_promiscuous_modes { - __le16 promiscuous_flags; - __le16 valid_flags; + __le16 promiscuous_flags; + __le16 valid_flags; /* flags used for both fields above */ -#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 -#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 -#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 -#define I40E_AQC_SET_VSI_DEFAULT 0x08 -#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 - __le16 seid; -#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF - __le16 vlan_tag; -#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 - u8 reserved[8]; +#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 +#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 +#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 +#define I40E_AQC_SET_VSI_DEFAULT 0x08 +#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 + __le16 seid; +#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF + __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); @@ -1059,23 +1059,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); * Uses generic i40e_aqc_add_remove_tag_completion for completion */ struct i40e_aqc_add_tag { - __le16 flags; -#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 - __le16 seid; -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 flags; +#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 + __le16 seid; +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - __le16 queue_number; - u8 reserved[8]; + __le16 tag; + __le16 queue_number; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); struct i40e_aqc_add_remove_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); @@ -1084,12 +1084,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); * Uses generic i40e_aqc_add_remove_tag_completion for completion */ struct i40e_aqc_remove_tag { - __le16 seid; -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 -#define 
I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 seid; +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - u8 reserved[12]; + __le16 tag; + u8 reserved[12]; }; /* Add multicast E-Tag (direct 0x0257) @@ -1097,22 +1097,22 @@ struct i40e_aqc_remove_tag { * and no external data */ struct i40e_aqc_add_remove_mcast_etag { - __le16 pv_seid; - __le16 etag; - u8 num_unicast_etags; - u8 reserved[3]; - __le32 addr_high; /* address of array of 2-byte s-tags */ - __le32 addr_low; + __le16 pv_seid; + __le16 etag; + u8 num_unicast_etags; + u8 reserved[3]; + __le32 addr_high; /* address of array of 2-byte s-tags */ + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); struct i40e_aqc_add_remove_mcast_etag_completion { - u8 reserved[4]; - __le16 mcast_etags_used; - __le16 mcast_etags_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[4]; + __le16 mcast_etags_used; + __le16 mcast_etags_free; + __le32 addr_high; + __le32 addr_low; }; @@ -1120,21 +1120,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); /* Update S/E-Tag (direct 0x0259) */ struct i40e_aqc_update_tag { - __le16 seid; -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 seid; +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) - __le16 old_tag; - __le16 new_tag; - u8 reserved[10]; + __le16 old_tag; + __le16 new_tag; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); struct i40e_aqc_update_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); @@ -1145,30 +1145,30 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); * and the generic direct completion structure */ struct i40e_aqc_add_remove_control_packet_filter { - u8 mac[6]; - __le16 etype; - __le16 flags; -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 - __le16 seid; -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ + u8 mac[6]; + __le16 etype; + __le16 flags; +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 + __le16 seid; +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) - __le16 queue; - u8 reserved[2]; + __le16 queue; + u8 reserved[2]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); struct i40e_aqc_add_remove_control_packet_filter_completion { - __le16 mac_etype_used; - __le16 etype_used; - __le16 mac_etype_free; - __le16 etype_free; - u8 reserved[8]; + __le16 mac_etype_used; + __le16 etype_used; + __le16 mac_etype_free; + __le16 etype_free; + u8 reserved[8]; }; 
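A note on the pattern above: all of the SEID fields in these macvlan, tag, and control-packet-filter commands follow the same shift/mask idiom (a 10-bit SEID number at bit 0 plus a VALID flag at bit 15). The following is a rough user-space sketch, with hypothetical EX_* names standing in for the I40E_AQC_* defines, of how such a field is encoded and decoded; cpu_to_le16()/le16_to_cpu() conversions for the on-wire __le16 values are omitted for brevity:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins mirroring the *_CMD_SEID_NUM_SHIFT/_MASK/_VALID
 * idiom used throughout this header; these are not the i40e names. */
#define EX_CMD_SEID_NUM_SHIFT	0
#define EX_CMD_SEID_NUM_MASK	(0x3FF << EX_CMD_SEID_NUM_SHIFT)
#define EX_CMD_SEID_VALID	0x8000

static uint16_t ex_encode_seid(uint16_t seid)
{
	/* Keep only the 10-bit SEID number and mark the field valid. */
	return ((seid << EX_CMD_SEID_NUM_SHIFT) & EX_CMD_SEID_NUM_MASK) |
	       EX_CMD_SEID_VALID;
}

static uint16_t ex_decode_seid(uint16_t field)
{
	return (field & EX_CMD_SEID_NUM_MASK) >> EX_CMD_SEID_NUM_SHIFT;
}

int main(void)
{
	uint16_t field = ex_encode_seid(0x155);

	printf("field=0x%04x seid=0x%03x\n", field, ex_decode_seid(field));
	return 0;
}

Masking after the shift keeps an out-of-range SEID from spilling into the VALID bit, which is why every _SHIFT define in this header is paired with a _MASK.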
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); @@ -1179,23 +1179,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); * and the generic indirect completion structure */ struct i40e_aqc_add_remove_cloud_filters { - u8 num_filters; - u8 reserved; - __le16 seid; -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ + u8 num_filters; + u8 reserved; + __le16 seid; +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); struct i40e_aqc_add_remove_cloud_filters_element_data { - u8 outer_mac[6]; - u8 inner_mac[6]; - __le16 inner_vlan; + u8 outer_mac[6]; + u8 inner_mac[6]; + __le16 inner_vlan; union { struct { u8 reserved[12]; @@ -1205,49 +1205,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { u8 data[16]; } v6; } ipaddr; - __le16 flags; -#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ + __le16 flags; +#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ I40E_AQC_ADD_CLOUD_FILTER_SHIFT) /* 0x0000 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 /* 0x0002 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 /* 0x0005 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 /* 0x0007 reserved */ /* 0x0008 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B -#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C - -#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 -#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 -#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 - -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 - - __le32 tenant_id; - u8 reserved[4]; - __le16 queue_number; -#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ - I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) - u8 reserved2[14]; +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C + +#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 +#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 +#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 + + __le32 tenant_id; + u8 reserved[4]; + 
__le16 queue_number; +#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) + u8 reserved2[14]; /* response section */ - u8 allocation_result; -#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 -#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF - u8 response_reserved[7]; + u8 allocation_result; +#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 +#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF + u8 response_reserved[7]; }; struct i40e_aqc_remove_cloud_filters_completion { @@ -1269,14 +1269,14 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); struct i40e_aqc_add_delete_mirror_rule { __le16 seid; __le16 rule_type; -#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 -#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ +#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 +#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ I40E_AQC_MIRROR_RULE_TYPE_SHIFT) -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 -#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 +#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 __le16 num_entries; __le16 destination; /* VSI for add, rule id for delete */ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ @@ -1286,12 +1286,12 @@ struct i40e_aqc_add_delete_mirror_rule { I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); struct i40e_aqc_add_delete_mirror_rule_completion { - u8 reserved[2]; - __le16 rule_id; /* only used on add */ - __le16 mirror_rules_used; - __le16 mirror_rules_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[2]; + __le16 rule_id; /* only used on add */ + __le16 mirror_rules_used; + __le16 mirror_rules_free; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); @@ -1302,11 +1302,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); * the command and response use the same descriptor structure */ struct i40e_aqc_pfc_ignore { - u8 tc_bitmap; - u8 command_flags; /* unused on response */ -#define I40E_AQC_PFC_IGNORE_SET 0x80 -#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 - u8 reserved[14]; + u8 tc_bitmap; + u8 command_flags; /* unused on response */ +#define I40E_AQC_PFC_IGNORE_SET 0x80 +#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); @@ -1321,10 +1321,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); * this generic struct to pass the SEID in param0 */ struct i40e_aqc_tx_sched_ind { - __le16 vsi_seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; + __le16 vsi_seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); @@ -1336,12 +1336,12 @@ struct i40e_aqc_qs_handles_resp { /* Configure VSI BW limits (direct 0x0400) */ struct i40e_aqc_configure_vsi_bw_limit { - __le16 vsi_seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_credit; /* 0-3, limit = 2^max */ - u8 reserved2[7]; + __le16 vsi_seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_credit; /* 0-3, limit = 2^max */ + u8 reserved2[7]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); @@ -1350,58 +1350,58 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); * responds with 
i40e_aqc_qs_handles_resp */ struct i40e_aqc_configure_vsi_ets_sla_bw_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; + __le16 tc_bw_max[2]; + u8 reserved1[28]; }; /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) * responds with i40e_aqc_qs_handles_resp */ struct i40e_aqc_configure_vsi_tc_bw_data { - u8 tc_valid_bits; - u8 reserved[3]; - u8 tc_bw_credits[8]; - u8 reserved1[4]; - __le16 qs_handles[8]; + u8 tc_valid_bits; + u8 reserved[3]; + u8 tc_bw_credits[8]; + u8 reserved1[4]; + __le16 qs_handles[8]; }; /* Query vsi bw configuration (indirect 0x0408) */ struct i40e_aqc_query_vsi_bw_config_resp { - u8 tc_valid_bits; - u8 tc_suspended_bits; - u8 reserved[14]; - __le16 qs_handles[8]; - u8 reserved1[4]; - __le16 port_bw_limit; - u8 reserved2[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved3[23]; + u8 tc_valid_bits; + u8 tc_suspended_bits; + u8 reserved[14]; + __le16 qs_handles[8]; + u8 reserved1[4]; + __le16 port_bw_limit; + u8 reserved2[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved3[23]; }; /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ struct i40e_aqc_query_vsi_ets_sla_config_resp { - u8 tc_valid_bits; - u8 reserved[3]; - u8 share_credits[8]; - __le16 credits[8]; + u8 tc_valid_bits; + u8 reserved[3]; + u8 share_credits[8]; + __le16 credits[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; + __le16 tc_bw_max[2]; }; /* Configure Switching Component Bandwidth Limit (direct 0x0410) */ struct i40e_aqc_configure_switching_comp_bw_limit { - __le16 seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved2[7]; + __le16 seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved2[7]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); @@ -1411,75 +1411,75 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); * Disable Physical Port ETS (indirect 0x0415) */ struct i40e_aqc_configure_switching_comp_ets_data { - u8 reserved[4]; - u8 tc_valid_bits; - u8 seepage; -#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 - u8 tc_strict_priority_flags; - u8 reserved1[17]; - u8 tc_bw_share_credits[8]; - u8 reserved2[96]; + u8 reserved[4]; + u8 tc_valid_bits; + u8 seepage; +#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 + u8 tc_strict_priority_flags; + u8 reserved1[17]; + u8 tc_bw_share_credits[8]; + u8 reserved2[96]; }; /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credit[8]; + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credit[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; + __le16 tc_bw_max[2]; + u8 reserved1[28]; }; /* Configure Switching Component Bandwidth Allocation per Tc * (indirect 0x0417) */ struct i40e_aqc_configure_switching_comp_bw_config_data { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits; /* bool */ - u8 tc_bw_share_credits[8]; - u8 reserved1[20]; + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits; /* bool */ + u8 tc_bw_share_credits[8]; + u8 reserved1[20]; }; /* Query Switching 
Component Configuration (indirect 0x0418) */ struct i40e_aqc_query_switching_comp_ets_config_resp { - u8 tc_valid_bits; - u8 reserved[35]; - __le16 port_bw_limit; - u8 reserved1[2]; - u8 tc_bw_max; /* 0-3, limit = 2^max */ - u8 reserved2[23]; + u8 tc_valid_bits; + u8 reserved[35]; + __le16 port_bw_limit; + u8 reserved1[2]; + u8 tc_bw_max; /* 0-3, limit = 2^max */ + u8 reserved2[23]; }; /* Query PhysicalPort ETS Configuration (indirect 0x0419) */ struct i40e_aqc_query_port_ets_config_resp { - u8 reserved[4]; - u8 tc_valid_bits; - u8 reserved1; - u8 tc_strict_priority_bits; - u8 reserved2; - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; + u8 reserved[4]; + u8 tc_valid_bits; + u8 reserved1; + u8 tc_strict_priority_bits; + u8 reserved2; + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved3[32]; + __le16 tc_bw_max[2]; + u8 reserved3[32]; }; /* Query Switching Component Bandwidth Allocation per Traffic Type * (indirect 0x041A) */ struct i40e_aqc_query_switching_comp_bw_config_resp { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits_enable; /* bool */ - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits_enable; /* bool */ + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; + __le16 tc_bw_max[2]; }; /* Suspend/resume port TX traffic @@ -1490,37 +1490,37 @@ struct i40e_aqc_query_switching_comp_bw_config_resp { * (indirect 0x041D) */ struct i40e_aqc_configure_partition_bw_data { - __le16 pf_valid_bits; - u8 min_bw[16]; /* guaranteed bandwidth */ - u8 max_bw[16]; /* bandwidth limit */ + __le16 pf_valid_bits; + u8 min_bw[16]; /* guaranteed bandwidth */ + u8 max_bw[16]; /* bandwidth limit */ }; /* Get and set the active HMC resource profile and status. 
* (direct 0x0500) and (direct 0x0501) */ struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); enum i40e_aq_hmc_profile { /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, }; -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ -#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 -#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 +#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 +#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 enum i40e_aq_phy_type { I40E_PHY_TYPE_SGMII = 0x0, @@ -1578,147 +1578,147 @@ struct i40e_aqc_module_desc { }; struct i40e_aq_get_phy_abilities_resp { - __le32 phy_type; /* bitmap using the above enum for offsets */ - u8 link_speed; /* bitmap using the above enum bit patterns */ - u8 abilities; -#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 -#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 -#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 -#define I40E_AQ_PHY_LINK_ENABLED 0x08 -#define I40E_AQ_PHY_AN_ENABLED 0x10 -#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 - __le16 eee_capability; -#define I40E_AQ_EEE_100BASE_TX 0x0002 -#define I40E_AQ_EEE_1000BASE_T 0x0004 -#define I40E_AQ_EEE_10GBASE_T 0x0008 -#define I40E_AQ_EEE_1000BASE_KX 0x0010 -#define I40E_AQ_EEE_10GBASE_KX4 0x0020 -#define I40E_AQ_EEE_10GBASE_KR 0x0040 - __le32 eeer_val; - u8 d3_lpan; -#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 - u8 reserved[3]; - u8 phy_id[4]; - u8 module_type[3]; - u8 qualified_module_count; -#define I40E_AQ_PHY_MAX_QMS 16 - struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; + __le32 phy_type; /* bitmap using the above enum for offsets */ + u8 link_speed; /* bitmap using the above enum bit patterns */ + u8 abilities; +#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 +#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 +#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 +#define I40E_AQ_PHY_LINK_ENABLED 0x08 +#define I40E_AQ_PHY_AN_ENABLED 0x10 +#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 + __le16 eee_capability; +#define I40E_AQ_EEE_100BASE_TX 0x0002 +#define I40E_AQ_EEE_1000BASE_T 0x0004 +#define I40E_AQ_EEE_10GBASE_T 0x0008 +#define I40E_AQ_EEE_1000BASE_KX 0x0010 +#define I40E_AQ_EEE_10GBASE_KX4 0x0020 +#define I40E_AQ_EEE_10GBASE_KR 0x0040 + __le32 eeer_val; + u8 d3_lpan; +#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 + u8 reserved[3]; + u8 phy_id[4]; + u8 module_type[3]; + u8 qualified_module_count; +#define I40E_AQ_PHY_MAX_QMS 16 + struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; }; /* Set PHY Config (direct 0x0601) */ struct i40e_aq_set_phy_config { /* same bits as above in all */ - __le32 phy_type; - u8 link_speed; - u8 abilities; + __le32 phy_type; + u8 link_speed; + u8 abilities; /* bits 0-2 use the values from get_phy_abilities_resp */ #define I40E_AQ_PHY_ENABLE_LINK 0x08 #define I40E_AQ_PHY_ENABLE_AN 0x10 #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 - __le16 eee_capability; - __le32 eeer; - u8 low_power_ctrl; - u8 reserved[3]; + __le16 eee_capability; + 
__le32 eeer; + u8 low_power_ctrl; + u8 reserved[3]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); /* Set MAC Config command data structure (direct 0x0603) */ struct i40e_aq_set_mac_config { - __le16 max_frame_size; - u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 -#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 - u8 tx_timer_priority; /* bitmap */ - __le16 tx_timer_value; - __le16 fc_refresh_threshold; - u8 reserved[8]; + __le16 max_frame_size; + u8 params; +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 + u8 tx_timer_priority; /* bitmap */ + __le16 tx_timer_value; + __le16 fc_refresh_threshold; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); /* Restart Auto-Negotiation (direct 0x605) */ struct i40e_aqc_set_link_restart_an { - u8 command; -#define I40E_AQ_PHY_RESTART_AN 0x02 -#define I40E_AQ_PHY_LINK_ENABLE 0x04 - u8 reserved[15]; + u8 command; +#define I40E_AQ_PHY_RESTART_AN 0x02 +#define I40E_AQ_PHY_LINK_ENABLE 0x04 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); /* Get Link Status cmd & response data structure (direct 0x0607) */ struct i40e_aqc_get_link_status { - __le16 command_flags; /* only field set on command */ -#define I40E_AQ_LSE_MASK 0x3 -#define I40E_AQ_LSE_NOP 0x0 -#define I40E_AQ_LSE_DISABLE 0x2 -#define I40E_AQ_LSE_ENABLE 0x3 + __le16 command_flags; /* only field set on command */ +#define I40E_AQ_LSE_MASK 0x3 +#define I40E_AQ_LSE_NOP 0x0 +#define I40E_AQ_LSE_DISABLE 0x2 +#define I40E_AQ_LSE_ENABLE 0x3 /* only response uses this flag */ -#define I40E_AQ_LSE_IS_ENABLED 0x1 - u8 phy_type; /* i40e_aq_phy_type */ - u8 link_speed; /* i40e_aq_link_speed */ - u8 link_info; -#define I40E_AQ_LINK_UP 0x01 -#define I40E_AQ_LINK_FAULT 0x02 -#define I40E_AQ_LINK_FAULT_TX 0x04 -#define I40E_AQ_LINK_FAULT_RX 0x08 -#define I40E_AQ_LINK_FAULT_REMOTE 0x10 -#define I40E_AQ_MEDIA_AVAILABLE 0x40 -#define I40E_AQ_SIGNAL_DETECT 0x80 - u8 an_info; -#define I40E_AQ_AN_COMPLETED 0x01 -#define I40E_AQ_LP_AN_ABILITY 0x02 -#define I40E_AQ_PD_FAULT 0x04 -#define I40E_AQ_FEC_EN 0x08 -#define I40E_AQ_PHY_LOW_POWER 0x10 -#define I40E_AQ_LINK_PAUSE_TX 0x20 -#define I40E_AQ_LINK_PAUSE_RX 0x40 -#define I40E_AQ_QUALIFIED_MODULE 0x80 - u8 ext_info; -#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 -#define 
I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 -#define I40E_AQ_LINK_TX_SHIFT 0x02 -#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) -#define I40E_AQ_LINK_TX_ACTIVE 0x00 -#define I40E_AQ_LINK_TX_DRAINED 0x01 -#define I40E_AQ_LINK_TX_FLUSHED 0x03 -#define I40E_AQ_LINK_FORCED_40G 0x10 - u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ - __le16 max_frame_size; - u8 config; -#define I40E_AQ_CONFIG_CRC_ENA 0x04 -#define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 reserved[5]; +#define I40E_AQ_LSE_IS_ENABLED 0x1 + u8 phy_type; /* i40e_aq_phy_type */ + u8 link_speed; /* i40e_aq_link_speed */ + u8 link_info; +#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_FAULT 0x02 +#define I40E_AQ_LINK_FAULT_TX 0x04 +#define I40E_AQ_LINK_FAULT_RX 0x08 +#define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_MEDIA_AVAILABLE 0x40 +#define I40E_AQ_SIGNAL_DETECT 0x80 + u8 an_info; +#define I40E_AQ_AN_COMPLETED 0x01 +#define I40E_AQ_LP_AN_ABILITY 0x02 +#define I40E_AQ_PD_FAULT 0x04 +#define I40E_AQ_FEC_EN 0x08 +#define I40E_AQ_PHY_LOW_POWER 0x10 +#define I40E_AQ_LINK_PAUSE_TX 0x20 +#define I40E_AQ_LINK_PAUSE_RX 0x40 +#define I40E_AQ_QUALIFIED_MODULE 0x80 + u8 ext_info; +#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 +#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 +#define I40E_AQ_LINK_TX_SHIFT 0x02 +#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) +#define I40E_AQ_LINK_TX_ACTIVE 0x00 +#define I40E_AQ_LINK_TX_DRAINED 0x01 +#define I40E_AQ_LINK_TX_FLUSHED 0x03 +#define I40E_AQ_LINK_FORCED_40G 0x10 + u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ + __le16 max_frame_size; + u8 config; +#define I40E_AQ_CONFIG_CRC_ENA 0x04 +#define I40E_AQ_CONFIG_PACING_MASK 0x78 + u8 reserved[5]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); /* Set event mask command (direct 0x613) */ struct i40e_aqc_set_phy_int_mask { - u8 reserved[8]; - __le16 event_mask; -#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 -#define I40E_AQ_EVENT_MEDIA_NA 0x0004 -#define I40E_AQ_EVENT_LINK_FAULT 0x0008 -#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 -#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 -#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 -#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 -#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 -#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 - u8 reserved1[6]; + u8 reserved[8]; + __le16 event_mask; +#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 +#define I40E_AQ_EVENT_MEDIA_NA 0x0004 +#define I40E_AQ_EVENT_LINK_FAULT 0x0008 +#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 +#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 +#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 +#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 +#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 +#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 + u8 reserved1[6]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); @@ -1728,27 +1728,27 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); * Get Link Partner AN advt register (direct 0x0616) */ struct i40e_aqc_an_advt_reg { - __le32 local_an_reg0; - __le16 local_an_reg1; - u8 reserved[10]; + __le32 local_an_reg0; + __le16 local_an_reg1; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); /* Set Loopback mode (0x0618) */ struct i40e_aqc_set_lb_mode { - __le16 lb_mode; -#define I40E_AQ_LB_PHY_LOCAL 0x01 -#define I40E_AQ_LB_PHY_REMOTE 0x02 -#define I40E_AQ_LB_MAC_LOCAL 0x04 - u8 reserved[14]; + __le16 lb_mode; +#define I40E_AQ_LB_PHY_LOCAL 0x01 +#define I40E_AQ_LB_PHY_REMOTE 0x02 +#define I40E_AQ_LB_MAC_LOCAL 0x04 + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); /* Set PHY Debug command (0x0622) 
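 * An illustrative use of the flag macros defined in the struct below (a
 * sketch, not driver source): requesting a hard reset of the external PHY
 * would set
 *	command_flags = I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD <<
 *			I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT;
 * which lands inside I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK.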
*/ struct i40e_aqc_set_phy_debug { - u8 command_flags; + u8 command_flags; #define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ @@ -1757,15 +1757,15 @@ struct i40e_aqc_set_phy_debug { #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 - u8 reserved[15]; + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); enum i40e_aq_phy_reg_type { - I40E_AQC_PHY_REG_INTERNAL = 0x1, - I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, - I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 + I40E_AQC_PHY_REG_INTERNAL = 0x1, + I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, + I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 }; /* NVM Read command (indirect 0x0701) @@ -1773,40 +1773,40 @@ enum i40e_aq_phy_reg_type { * NVM Update commands (indirect 0x0703) */ struct i40e_aqc_nvm_update { - u8 command_flags; -#define I40E_AQ_NVM_LAST_CMD 0x01 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 - u8 module_pointer; - __le16 length; - __le32 offset; - __le32 addr_high; - __le32 addr_low; + u8 command_flags; +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 + u8 module_pointer; + __le16 length; + __le32 offset; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); /* NVM Config Read (indirect 0x0704) */ struct i40e_aqc_nvm_config_read { - __le16 cmd_flags; + __le16 cmd_flags; #define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 #define ANVM_READ_SINGLE_FEATURE 0 #define ANVM_READ_MULTIPLE_FEATURES 1 - __le16 element_count; - __le16 element_id; /* Feature/field ID */ - u8 reserved[2]; - __le32 address_high; - __le32 address_low; + __le16 element_count; + __le16 element_id; /* Feature/field ID */ + u8 reserved[2]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); /* NVM Config Write (indirect 0x0705) */ struct i40e_aqc_nvm_config_write { - __le16 cmd_flags; - __le16 element_count; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; + __le16 cmd_flags; + __le16 element_count; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); @@ -1831,10 +1831,10 @@ struct i40e_aqc_nvm_config_data_immediate_field { * Send to Peer PF command (indirect 0x0803) */ struct i40e_aqc_pf_vf_message { - __le32 id; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); @@ -1870,22 +1870,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); * uses i40e_aq_desc */ struct i40e_aqc_alternate_write_done { - __le16 cmd_flags; + __le16 cmd_flags; #define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 #define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 #define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 #define I40E_AQ_ALTERNATE_RESET_NEEDED 2 - u8 reserved[14]; + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); /* Set OEM mode (direct 0x0905) */ struct i40e_aqc_alternate_set_mode { - __le32 mode; + __le32 mode; #define I40E_AQ_ALTERNATE_MODE_NONE 0 #define I40E_AQ_ALTERNATE_MODE_OEM 1 - u8 reserved[12]; + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); @@ -1896,33 +1896,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); /* Lan Queue Overflow Event (direct, 0x1001) */ struct i40e_aqc_lan_overflow { - __le32 prtdcb_rupto; - __le32 otx_ctl; - u8 reserved[8]; + __le32 prtdcb_rupto; + 
__le32 otx_ctl; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); /* Get LLDP MIB (indirect 0x0A00) */ struct i40e_aqc_lldp_get_mib { - u8 type; - u8 reserved1; -#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 -#define I40E_AQ_LLDP_MIB_LOCAL 0x0 -#define I40E_AQ_LLDP_MIB_REMOTE 0x1 -#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC -#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 -#define I40E_AQ_LLDP_TX_SHIFT 0x4 -#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) + u8 type; + u8 reserved1; +#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 +#define I40E_AQ_LLDP_MIB_LOCAL 0x0 +#define I40E_AQ_LLDP_MIB_REMOTE 0x1 +#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC +#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 +#define I40E_AQ_LLDP_TX_SHIFT 0x4 +#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) /* TX pause flags use I40E_AQ_LINK_TX_* above */ - __le16 local_len; - __le16 remote_len; - u8 reserved2[2]; - __le32 addr_high; - __le32 addr_low; + __le16 local_len; + __le16 remote_len; + u8 reserved2[2]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); @@ -1931,12 +1931,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); * also used for the event (with type in the command field) */ struct i40e_aqc_lldp_update_mib { - u8 command; -#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 -#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; + u8 command; +#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 +#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); @@ -1945,35 +1945,35 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); * Delete LLDP TLV (indirect 0x0A04) */ struct i40e_aqc_lldp_add_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved1[1]; - __le16 len; - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved1[1]; + __le16 len; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); /* Update LLDP TLV (indirect 0x0A03) */ struct i40e_aqc_lldp_update_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved; - __le16 old_len; - __le16 new_offset; - __le16 new_len; - __le32 addr_high; - __le32 addr_low; + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved; + __le16 old_len; + __le16 new_offset; + __le16 new_len; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); /* Stop LLDP (direct 0x0A05) */ struct i40e_aqc_lldp_stop { - u8 command; -#define I40E_AQ_LLDP_AGENT_STOP 0x0 -#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 - u8 reserved[15]; + u8 command; +#define I40E_AQ_LLDP_AGENT_STOP 0x0 +#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); @@ -1981,57 +1981,97 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); /* Start LLDP (direct 0x0A06) */ struct i40e_aqc_lldp_start { - u8 command; -#define I40E_AQ_LLDP_AGENT_START 0x1 - u8 reserved[15]; + u8 command; +#define I40E_AQ_LLDP_AGENT_START 0x1 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); -/* Apply 
MIB changes (0x0A07) - * uses the generic struc as it contains no data +/* Get CEE DCBX Oper Config (0x0A07) + * uses the generic descriptor struct + * returns below as indirect response */ +#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0 +#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT) +#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3 +#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT) +#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8 +#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) +#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0 +#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT) +#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3 +#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) +#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 +#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) +struct i40e_aqc_get_cee_dcb_cfg_v1_resp { + u8 reserved1; + u8 oper_num_tc; + u8 oper_prio_tc[4]; + u8 reserved2; + u8 oper_tc_bw[8]; + u8 oper_pfc_en; + u8 reserved3; + __le16 oper_app_prio; + u8 reserved4; + __le16 tlv_status; +}; + +I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp); + +struct i40e_aqc_get_cee_dcb_cfg_resp { + u8 oper_num_tc; + u8 oper_prio_tc[4]; + u8 oper_tc_bw[8]; + u8 oper_pfc_en; + __le16 oper_app_prio; + __le32 tlv_status; + u8 reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); + /* Add Udp Tunnel command and completion (direct 0x0B00) */ struct i40e_aqc_add_udp_tunnel { - __le16 udp_port; - u8 reserved0[3]; - u8 protocol_type; + __le16 udp_port; + u8 reserved0[3]; + u8 protocol_type; #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 - u8 reserved1[10]; + u8 reserved1[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); struct i40e_aqc_add_udp_tunnel_completion { - __le16 udp_port; - u8 filter_entry_index; - u8 multiple_pfs; -#define I40E_AQC_SINGLE_PF 0x0 -#define I40E_AQC_MULTIPLE_PFS 0x1 - u8 total_filters; - u8 reserved[11]; + __le16 udp_port; + u8 filter_entry_index; + u8 multiple_pfs; +#define I40E_AQC_SINGLE_PF 0x0 +#define I40E_AQC_MULTIPLE_PFS 0x1 + u8 total_filters; + u8 reserved[11]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); /* remove UDP Tunnel command (0x0B01) */ struct i40e_aqc_remove_udp_tunnel { - u8 reserved[2]; - u8 index; /* 0 to 15 */ - u8 reserved2[13]; + u8 reserved[2]; + u8 index; /* 0 to 15 */ + u8 reserved2[13]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); struct i40e_aqc_del_udp_tunnel_completion { - __le16 udp_port; - u8 index; /* 0 to 15 */ - u8 multiple_pfs; - u8 total_filters_used; - u8 reserved1[11]; + __le16 udp_port; + u8 index; /* 0 to 15 */ + u8 multiple_pfs; + u8 total_filters_used; + u8 reserved1[11]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); @@ -2044,11 +2084,11 @@ struct i40e_aqc_tunnel_key_structure { u8 key1_len; /* 0 to 15 */ u8 key2_len; /* 0 to 15 */ u8 flags; -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 /* response flags */ -#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 -#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 +#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 u8 network_key_index; #define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 #define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 @@ 
-2061,21 +2101,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); /* OEM mode commands (direct 0xFE0x) */ struct i40e_aqc_oem_param_change { - __le32 param_type; -#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 -#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 -#define I40E_AQ_OEM_PARAM_MAC 2 - __le32 param_value1; - u8 param_value2[8]; + __le32 param_type; +#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 +#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 +#define I40E_AQ_OEM_PARAM_MAC 2 + __le32 param_value1; + u8 param_value2[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); struct i40e_aqc_oem_state_change { - __le32 state; -#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 -#define I40E_AQ_OEM_STATE_LINK_UP 0x1 - u8 reserved[12]; + __le32 state; +#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 +#define I40E_AQ_OEM_STATE_LINK_UP 0x1 + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); @@ -2087,18 +2127,18 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); /* set test more (0xFF01, internal) */ struct i40e_acq_set_test_mode { - u8 mode; -#define I40E_AQ_TEST_PARTIAL 0 -#define I40E_AQ_TEST_FULL 1 -#define I40E_AQ_TEST_NVM 2 - u8 reserved[3]; - u8 command; -#define I40E_AQ_TEST_OPEN 0 -#define I40E_AQ_TEST_CLOSE 1 -#define I40E_AQ_TEST_INC 2 - u8 reserved2[3]; - __le32 address_high; - __le32 address_low; + u8 mode; +#define I40E_AQ_TEST_PARTIAL 0 +#define I40E_AQ_TEST_FULL 1 +#define I40E_AQ_TEST_NVM 2 + u8 reserved[3]; + u8 command; +#define I40E_AQ_TEST_OPEN 0 +#define I40E_AQ_TEST_CLOSE 1 +#define I40E_AQ_TEST_INC 2 + u8 reserved2[3]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); @@ -2151,21 +2191,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); #define I40E_AQ_CLUSTER_ID_ALTRAM 11 struct i40e_aqc_debug_dump_internals { - u8 cluster_id; - u8 table_id; - __le16 data_size; - __le32 idx; - __le32 address_high; - __le32 address_low; + u8 cluster_id; + u8 table_id; + __le16 data_size; + __le32 idx; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); struct i40e_aqc_debug_modify_internals { - u8 cluster_id; - u8 cluster_specific_params[7]; - __le32 address_high; - __le32 address_low; + u8 cluster_id; + u8 cluster_specific_params[7]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 30056b25d94e..3d741ee99a2c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -50,6 +50,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_A: case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: + case I40E_DEV_ID_10G_BASE_T: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_VF: @@ -549,7 +550,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { i40e_status i40e_init_shared_code(struct i40e_hw *hw) { i40e_status status = 0; - u32 reg; + u32 port, ari, func_rid; i40e_set_mac_type(hw); @@ -562,18 +563,17 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw) hw->phy.get_link_info = true; - /* Determine port number */ - reg = rd32(hw, I40E_PFGEN_PORTNUM); - reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >> - I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT); - hw->port = (u8)reg; - - /* Determine the PF number based on the PCI fn */ - reg = rd32(hw, I40E_GLPCI_CAPSUP); - if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK) - hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func); + /* 
Determine port number and PF number */ + port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) + >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; + hw->port = (u8)port; + ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> + I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; + func_rid = rd32(hw, I40E_PF_FUNC_RID); + if (ari) + hw->pf_id = (u8)(func_rid & 0xff); else - hw->pf_id = (u8)hw->bus.func; + hw->pf_id = (u8)(func_rid & 0x7); status = i40e_init_nvm(hw); return status; @@ -790,7 +790,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) } #define I40E_PF_RESET_WAIT_COUNT_A0 200 -#define I40E_PF_RESET_WAIT_COUNT 100 +#define I40E_PF_RESET_WAIT_COUNT 110 /** * i40e_pf_reset - Reset the PF * @hw: pointer to the hardware structure @@ -1420,6 +1420,33 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse) } /** + * i40e_aq_set_phy_int_mask + * @hw: pointer to the hw struct + * @mask: interrupt mask to be set + * @cmd_details: pointer to command details structure or NULL + * + * Set link interrupt mask. + **/ +i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, + u16 mask, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_phy_int_mask *cmd = + (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_int_mask); + + cmd->event_mask = cpu_to_le16(mask); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** * i40e_aq_add_vsi * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct @@ -2632,6 +2659,34 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, } /** + * i40e_aq_get_cee_dcb_config + * @hw: pointer to the hw struct + * @buff: response buffer that stores CEE operational configuration + * @buff_size: size of the buffer passed + * @cmd_details: pointer to command details structure or NULL + * + * Get CEE DCBX mode operational configuration from firmware + **/ +i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, + cmd_details); + + return status; +} + +/** * i40e_aq_add_udp_tunnel * @hw: pointer to the hw struct + @udp_port: the UDP port to add @@ -3189,6 +3244,26 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, } /** + * i40e_aq_resume_port_tx + * @hw: pointer to the hardware structure + * @cmd_details: pointer to command details structure or NULL + * + * Resume port's Tx traffic + **/ +i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** * i40e_set_pci_config_data - store PCI bus info * @hw: pointer to hardware structure * @link_status: the link status word from PCI config space diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 036570d76176..3ce43588592d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++
b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -59,7 +59,7 @@ i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { - struct i40e_ieee_ets_config *etscfg; + struct i40e_dcb_ets_config *etscfg; u8 *buf = tlv->tlvinfo; u16 offset = 0; u8 priority; @@ -407,6 +407,166 @@ free_mem: } /** + * i40e_cee_to_dcb_v1_config + * @cee_cfg: pointer to CEE v1 response configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE v1 configuration from firmware to DCB configuration + **/ +static void i40e_cee_to_dcb_v1_config( + struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg, + struct i40e_dcbx_config *dcbcfg) +{ + u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status); + u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); + u8 i, tc, err, sync, oper; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + for (i = 0; i < 4; i++) { + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i*2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prioritytable[i] = + cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + + status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> + I40E_AQC_CEE_APP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 
1 : 0; + /* Add APPs if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { + /* CEE operating configuration supports FCoE/iSCSI/FIP only */ + dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; + + /* FCoE APP */ + dcbcfg->app[0].priority = + (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> + I40E_AQC_CEE_APP_FCOE_SHIFT; + dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + /* iSCSI APP */ + dcbcfg->app[1].priority = + (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> + I40E_AQC_CEE_APP_ISCSI_SHIFT; + dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; + + /* FIP APP */ + dcbcfg->app[2].priority = + (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> + I40E_AQC_CEE_APP_FIP_SHIFT; + dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; + } +} + +/** + * i40e_cee_to_dcb_config + * @cee_cfg: pointer to CEE configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE configuration from firmware to DCB configuration + **/ +static void i40e_cee_to_dcb_config( + struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg, + struct i40e_dcbx_config *dcbcfg) +{ + u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status); + u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); + u8 i, tc, err, sync, oper; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + for (i = 0; i < 4; i++) { + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i*2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prioritytable[i] = + cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + + status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> + I40E_AQC_CEE_APP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 
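+	/* Worked example (assuming I40E_TLV_STATUS_OPER/SYNC/ERR are
+	 * 0x1/0x2/0x4, as defined in i40e_dcb.h but not shown in this
+	 * hunk): tlv_status = 0x00000300 gives status = 0x3, hence
+	 * err = 0, sync = 1, oper = 1, and the APP table below is
+	 * filled in. */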
1 : 0; + /* Add APPs if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { + /* CEE operating configuration supports FCoE/iSCSI/FIP only */ + dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; + + /* FCoE APP */ + dcbcfg->app[0].priority = + (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> + I40E_AQC_CEE_APP_FCOE_SHIFT; + dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + /* iSCSI APP */ + dcbcfg->app[1].priority = + (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> + I40E_AQC_CEE_APP_ISCSI_SHIFT; + dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; + + /* FIP APP */ + dcbcfg->app[2].priority = + (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> + I40E_AQC_CEE_APP_FIP_SHIFT; + dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; + } +} + +/** * i40e_get_dcb_config * @hw: pointer to the hw struct * @@ -415,7 +575,44 @@ free_mem: i40e_status i40e_get_dcb_config(struct i40e_hw *hw) { i40e_status ret = 0; + struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; + struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; + + /* If Firmware version < v4.33 IEEE only */ + if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || + (hw->aq.fw_maj_ver < 4)) + goto ieee; + + /* If Firmware version == v4.33 use old CEE struct */ + if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) { + ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg, + sizeof(cee_v1_cfg), NULL); + if (!ret) { + /* CEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + i40e_cee_to_dcb_v1_config(&cee_v1_cfg, + &hw->local_dcbx_config); + } + } else { + ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg, + sizeof(cee_cfg), NULL); + if (!ret) { + /* CEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + i40e_cee_to_dcb_config(&cee_cfg, + &hw->local_dcbx_config); + } + } + + /* CEE mode not enabled try querying IEEE data */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + goto ieee; + else + goto out; +ieee: + /* IEEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; /* Get Local DCB Config */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, &hw->local_dcbx_config); @@ -426,6 +623,10 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, &hw->remote_dcbx_config); + /* Don't treat ENOENT as an error for Remote MIBs */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + ret = 0; + out: return ret; } @@ -439,10 +640,27 @@ out: i40e_status i40e_init_dcb(struct i40e_hw *hw) { i40e_status ret = 0; + struct i40e_lldp_variables lldp_cfg; + u8 adminstatus = 0; if (!hw->func_caps.dcb) return ret; + /* Read LLDP NVM area */ + ret = i40e_read_lldp_cfg(hw, &lldp_cfg); + if (ret) + return ret; + + /* Get the LLDP AdminStatus for the current port */ + adminstatus = lldp_cfg.adminstatus >> (hw->port * 4); + adminstatus &= 0xF; + + /* LLDP agent disabled */ + if (!adminstatus) { + hw->dcbx_status = I40E_DCBX_STATUS_DISABLED; + return ret; + } + /* Get DCBX status */ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); if (ret) @@ -454,6 +672,8 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw) case I40E_DCBX_STATUS_IN_PROGRESS: /* Get current DCBX configuration */ ret = i40e_get_dcb_config(hw); + if (ret) + return ret; break; case I40E_DCBX_STATUS_DISABLED: return ret; @@ -470,3 +690,33 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw) return ret; } + +/** + * 
i40e_read_lldp_cfg - read LLDP Configuration data from NVM + * @hw: pointer to the HW structure + * @lldp_cfg: pointer to hold lldp configuration variables + * + * Reads the LLDP configuration data from NVM + **/ +i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg) +{ + i40e_status ret = 0; + u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR); + + if (!lldp_cfg) + return I40E_ERR_PARAM; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret) + goto err_lldp_cfg; + + ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset, + sizeof(struct i40e_lldp_variables), + (u8 *)lldp_cfg, + true, NULL); + i40e_release_nvm(hw); + +err_lldp_cfg: + return ret; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 34cf1c30c7ff..e137e3fac8ee 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -65,6 +65,11 @@ #define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT) #define I40E_IEEE_ETS_PRIO_1_SHIFT 4 #define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT) +#define I40E_CEE_PGID_PRIO_0_SHIFT 0 +#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT) +#define I40E_CEE_PGID_PRIO_1_SHIFT 4 +#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT) +#define I40E_CEE_PGID_STRICT 15 /* Defines for IEEE TSA types */ #define I40E_IEEE_TSA_STRICT 0 diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 00bc0cdb3a03..183dcb63ce98 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -207,7 +207,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi) * VSI **/ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, - struct i40e_ieee_app_priority_table *app) + struct i40e_dcb_app_priority_table *app) { struct net_device *dev = vsi->netdev; struct dcb_app sapp; @@ -229,7 +229,7 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, * Delete given APP from all the VSIs for given PF **/ static void i40e_dcbnl_del_app(struct i40e_pf *pf, - struct i40e_ieee_app_priority_table *app) + struct i40e_dcb_app_priority_table *app) { int v, err; for (v = 0; v < pf->num_alloc_vsi; v++) { @@ -252,7 +252,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf, * Find given APP in the DCB configuration **/ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, - struct i40e_ieee_app_priority_table *app) + struct i40e_dcb_app_priority_table *app) { int i; @@ -277,7 +277,7 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, void i40e_dcbnl_flush_apps(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) { - struct i40e_ieee_app_priority_table app; + struct i40e_dcb_app_priority_table app; struct i40e_dcbx_config *dcbxcfg; struct i40e_hw *hw = &pf->hw; int i; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 7067f4b9159c..433a55886ad2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -773,7 +773,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, { struct i40e_tx_desc *txd; union i40e_rx_desc *rxd; - struct i40e_ring ring; + struct i40e_ring *ring; struct i40e_vsi *vsi; int i; @@ -792,29 +792,32 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, vsi_seid); return; } - if (is_rx_ring) - ring = 
*vsi->rx_rings[ring_id]; - else - ring = *vsi->tx_rings[ring_id]; + + ring = kmemdup(is_rx_ring + ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id], + sizeof(*ring), GFP_KERNEL); + if (!ring) + return; + if (cnt == 2) { dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", vsi_seid, is_rx_ring ? "rx" : "tx", ring_id); - for (i = 0; i < ring.count; i++) { + for (i = 0; i < ring->count; i++) { if (!is_rx_ring) { - txd = I40E_TX_DESC(&ring, i); + txd = I40E_TX_DESC(ring, i); dev_info(&pf->pdev->dev, " d[%03i] = 0x%016llx 0x%016llx\n", i, txd->buffer_addr, txd->cmd_type_offset_bsz); } else if (sizeof(union i40e_rx_desc) == sizeof(union i40e_16byte_rx_desc)) { - rxd = I40E_RX_DESC(&ring, i); + rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, " d[%03i] = 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, rxd->read.hdr_addr); } else { - rxd = I40E_RX_DESC(&ring, i); + rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, @@ -823,26 +826,26 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, } } } else if (cnt == 3) { - if (desc_n >= ring.count || desc_n < 0) { + if (desc_n >= ring->count || desc_n < 0) { dev_info(&pf->pdev->dev, "descriptor %d not found\n", desc_n); return; } if (!is_rx_ring) { - txd = I40E_TX_DESC(&ring, desc_n); + txd = I40E_TX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, txd->buffer_addr, txd->cmd_type_offset_bsz); } else if (sizeof(union i40e_rx_desc) == sizeof(union i40e_16byte_rx_desc)) { - rxd = I40E_RX_DESC(&ring, desc_n); + rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, rxd->read.pkt_addr, rxd->read.hdr_addr); } else { - rxd = I40E_RX_DESC(&ring, desc_n); + rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, @@ -852,6 +855,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, } else { dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n"); } + kfree(ring); } /** @@ -895,90 +899,6 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf, } /** - * i40e_dbg_dump_stats - handles dump stats write into command datum - * @pf: the i40e_pf created in command write - * @stats: the stats structure to be dumped - **/ -static void i40e_dbg_dump_stats(struct i40e_pf *pf, - struct i40e_hw_port_stats *stats) -{ - int i; - - dev_info(&pf->pdev->dev, " stats:\n"); - dev_info(&pf->pdev->dev, - " crc_errors = \t\t%lld \tillegal_bytes = \t%lld \terror_bytes = \t\t%lld\n", - stats->crc_errors, stats->illegal_bytes, stats->error_bytes); - dev_info(&pf->pdev->dev, - " mac_local_faults = \t%lld \tmac_remote_faults = \t%lld \trx_length_errors = \t%lld\n", - stats->mac_local_faults, stats->mac_remote_faults, - stats->rx_length_errors); - dev_info(&pf->pdev->dev, - " link_xon_rx = \t\t%lld \tlink_xoff_rx = \t\t%lld \tlink_xon_tx = \t\t%lld\n", - stats->link_xon_rx, stats->link_xoff_rx, stats->link_xon_tx); - dev_info(&pf->pdev->dev, - " link_xoff_tx = \t\t%lld \trx_size_64 = \t\t%lld \trx_size_127 = \t\t%lld\n", - stats->link_xoff_tx, stats->rx_size_64, stats->rx_size_127); - dev_info(&pf->pdev->dev, - " rx_size_255 = \t\t%lld \trx_size_511 = \t\t%lld \trx_size_1023 = \t\t%lld\n", - stats->rx_size_255, stats->rx_size_511, 
stats->rx_size_1023); - dev_info(&pf->pdev->dev, - " rx_size_big = \t\t%lld \trx_undersize = \t\t%lld \trx_jabber = \t\t%lld\n", - stats->rx_size_big, stats->rx_undersize, stats->rx_jabber); - dev_info(&pf->pdev->dev, - " rx_fragments = \t\t%lld \trx_oversize = \t\t%lld \ttx_size_64 = \t\t%lld\n", - stats->rx_fragments, stats->rx_oversize, stats->tx_size_64); - dev_info(&pf->pdev->dev, - " tx_size_127 = \t\t%lld \ttx_size_255 = \t\t%lld \ttx_size_511 = \t\t%lld\n", - stats->tx_size_127, stats->tx_size_255, stats->tx_size_511); - dev_info(&pf->pdev->dev, - " tx_size_1023 = \t\t%lld \ttx_size_big = \t\t%lld \tmac_short_packet_dropped = \t%lld\n", - stats->tx_size_1023, stats->tx_size_big, - stats->mac_short_packet_dropped); - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xon_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xon_rx[i], - i+1, stats->priority_xon_rx[i+1], - i+2, stats->priority_xon_rx[i+2], - i+3, stats->priority_xon_rx[i+3]); - } - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xoff_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xoff_rx[i], - i+1, stats->priority_xoff_rx[i+1], - i+2, stats->priority_xoff_rx[i+2], - i+3, stats->priority_xoff_rx[i+3]); - } - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xon_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xon_tx[i], - i+1, stats->priority_xon_tx[i+1], - i+2, stats->priority_xon_tx[i+2], - i+3, stats->priority_xon_rx[i+3]); - } - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xoff_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xoff_tx[i], - i+1, stats->priority_xoff_tx[i+1], - i+2, stats->priority_xoff_tx[i+2], - i+3, stats->priority_xoff_tx[i+3]); - } - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xon_2_xoff[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xon_2_xoff[i], - i+1, stats->priority_xon_2_xoff[i+1], - i+2, stats->priority_xon_2_xoff[i+2], - i+3, stats->priority_xon_2_xoff[i+3]); - } - - i40e_dbg_dump_eth_stats(pf, &stats->eth); -} - -/** * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb * @pf: the i40e_pf created in command write * @seid: the seid the user put in @@ -1342,11 +1262,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); dev_info(&pf->pdev->dev, "dump desc aq\n"); } - } else if (strncmp(&cmd_buf[5], "stats", 5) == 0) { - dev_info(&pf->pdev->dev, "pf stats:\n"); - i40e_dbg_dump_stats(pf, &pf->stats); - dev_info(&pf->pdev->dev, "pf stats_offsets:\n"); - i40e_dbg_dump_stats(pf, &pf->stats_offsets); } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) { dev_info(&pf->pdev->dev, "core reset count: %d\n", pf->corer_count); @@ -1402,6 +1317,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, bw_data = NULL; dev_info(&pf->pdev->dev, + "port dcbx_mode=%d\n", cfg->dcbx_mode); + dev_info(&pf->pdev->dev, "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n", cfg->etscfg.willing, cfg->etscfg.cbs, cfg->etscfg.maxtcs); @@ -1464,8 +1381,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else { dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n"); - dev_info(&pf->pdev->dev, "dump switch, dump vsi [seid] or\n"); - dev_info(&pf->pdev->dev, "dump 
stats\n"); + dev_info(&pf->pdev->dev, "dump switch\n"); + dev_info(&pf->pdev->dev, "dump vsi [seid]\n"); dev_info(&pf->pdev->dev, "dump reset stats\n"); dev_info(&pf->pdev->dev, "dump port\n"); dev_info(&pf->pdev->dev, @@ -1580,7 +1497,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, if (!desc) goto command_write_done; cnt = sscanf(&cmd_buf[11], - "%hx %hx %hx %hx %x %x %x %x %x %x", + "%hi %hi %hi %hi %i %i %i %i %i %i", &desc->flags, &desc->opcode, &desc->datalen, &desc->retval, &desc->cookie_high, &desc->cookie_low, @@ -1628,7 +1545,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, if (!desc) goto command_write_done; cnt = sscanf(&cmd_buf[20], - "%hx %hx %hx %hx %x %x %x %x %x %x %hd", + "%hi %hi %hi %hi %i %i %i %i %i %i %hi", &desc->flags, &desc->opcode, &desc->datalen, &desc->retval, &desc->cookie_high, &desc->cookie_low, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 1dda467ae1ac..951e8767fc50 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -40,8 +40,9 @@ struct i40e_stats { .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ .stat_offset = offsetof(_type, _stat) \ } + #define I40E_NETDEV_STAT(_net_stat) \ - I40E_STAT(struct net_device_stats, #_net_stat, _net_stat) + I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) #define I40E_PF_STAT(_name, _stat) \ I40E_STAT(struct i40e_pf, _name, _stat) #define I40E_VSI_STAT(_name, _stat) \ @@ -264,6 +265,14 @@ static int i40e_get_settings(struct net_device *netdev, ecmd->supported = SUPPORTED_10000baseKR_Full; ecmd->advertising = ADVERTISED_10000baseKR_Full; break; + case I40E_DEV_ID_10G_BASE_T: + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; + ecmd->advertising = ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full; + break; default: /* all the rest are 10G/1G */ ecmd->supported = SUPPORTED_10000baseT_Full | @@ -322,9 +331,13 @@ static int i40e_get_settings(struct net_device *netdev, case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_10GBASE_T: ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full; + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full; + ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full; break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: @@ -335,14 +348,22 @@ static int i40e_get_settings(struct net_device *netdev, case I40E_PHY_TYPE_1000BASE_KX: case I40E_PHY_TYPE_1000BASE_T: ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full; + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full; + ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full; break; case I40E_PHY_TYPE_100BASE_TX: ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full; ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | ADVERTISED_100baseT_Full; break; case I40E_PHY_TYPE_SGMII: @@ -426,6 +447,9 @@ no_valid_phy_type: case I40E_LINK_SPEED_1GB: ethtool_cmd_speed_set(ecmd, SPEED_1000); break; + case I40E_LINK_SPEED_100MB: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; default: break; } @@ -528,7 +552,7 @@ 
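/* The first hunk below fixes an operator bug when clearing the autoneg
 * ability bit: OR-ing with the complement sets nearly every bit, while
 * AND-ing clears just the intended one. A standalone sketch (not driver
 * source; I40E_AQ_PHY_ENABLE_AN is 0x10 per the header above):
 *
 *	u8 ab = abilities.abilities;
 *	ab |= ~I40E_AQ_PHY_ENABLE_AN;	// old code: sets all bits other than AN
 *	ab &= ~I40E_AQ_PHY_ENABLE_AN;	// fixed: clears only the AN bit
 */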
static int i40e_set_settings(struct net_device *netdev, } /* If autoneg is currently enabled */ if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { - config.abilities = abilities.abilities | + config.abilities = abilities.abilities & ~I40E_AQ_PHY_ENABLE_AN; change = true; } @@ -621,11 +645,19 @@ static void i40e_get_pauseparam(struct net_device *netdev, struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; + struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; pause->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? AUTONEG_ENABLE : AUTONEG_DISABLE); + /* PFC enabled so report LFC as off */ + if (dcbx_cfg->pfc.pfcenable) { + pause->rx_pause = 0; + pause->tx_pause = 0; + return; + } + if (hw->fc.current_mode == I40E_FC_RX_PAUSE) { pause->rx_pause = 1; } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) { @@ -649,6 +681,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, struct i40e_vsi *vsi = np->vsi; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; + struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; i40e_status status; u8 aq_failures; @@ -670,8 +703,9 @@ static int i40e_set_pauseparam(struct net_device *netdev, netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n"); } - if (hw->fc.current_mode == I40E_FC_PFC) { - netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n"); + if (dcbx_cfg->pfc.pfcenable) { + netdev_info(netdev, + "Priority flow control enabled. Cannot set link flow control.\n"); return -EOPNOTSUPP; } @@ -788,7 +822,7 @@ static int i40e_get_eeprom(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; struct i40e_pf *pf = np->vsi->back; - int ret_val = 0, len; + int ret_val = 0, len, offset; u8 *eeprom_buff; u16 i, sectors; bool last; @@ -801,19 +835,21 @@ static int i40e_get_eeprom(struct net_device *netdev, /* check for NVMUpdate access method */ magic = hw->vendor_id | (hw->device_id << 16); if (eeprom->magic && eeprom->magic != magic) { + struct i40e_nvm_access *cmd; int errno; /* make sure it is the right magic for NVMUpdate */ if ((eeprom->magic >> 16) != hw->device_id) return -EINVAL; - ret_val = i40e_nvmupd_command(hw, - (struct i40e_nvm_access *)eeprom, - bytes, &errno); + cmd = (struct i40e_nvm_access *)eeprom; + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); if (ret_val) dev_info(&pf->pdev->dev, - "NVMUpdate read failed err=%d status=0x%x\n", - ret_val, hw->aq.asq_last_status); + "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", + ret_val, hw->aq.asq_last_status, errno, + (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), + cmd->offset, cmd->data_size); return errno; } @@ -842,20 +878,29 @@ static int i40e_get_eeprom(struct net_device *netdev, len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i); last = true; } - ret_val = i40e_aq_read_nvm(hw, 0x0, - eeprom->offset + (I40E_NVM_SECTOR_SIZE * i), - len, + offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i), + ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len, (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i), last, NULL); - if (ret_val) { + if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) { + dev_info(&pf->pdev->dev, + "read NVM failed, invalid offset 0x%x\n", + offset); + break; + } else if (ret_val && + hw->aq.asq_last_status == 
I40E_AQ_RC_EACCES) { dev_info(&pf->pdev->dev, - "read NVM failed err=%d status=0x%x\n", - ret_val, hw->aq.asq_last_status); - goto release_nvm; + "read NVM failed, access, offset 0x%x\n", + offset); + break; + } else if (ret_val) { + dev_info(&pf->pdev->dev, + "read NVM failed offset %d err=%d status=0x%x\n", + offset, ret_val, hw->aq.asq_last_status); + break; } } -release_nvm: i40e_release_nvm(hw); memcpy(bytes, (u8 *)eeprom_buff, eeprom->len); free_buff: @@ -883,6 +928,7 @@ static int i40e_set_eeprom(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; struct i40e_pf *pf = np->vsi->back; + struct i40e_nvm_access *cmd; int ret_val = 0; int errno; u32 magic; @@ -900,12 +946,14 @@ static int i40e_set_eeprom(struct net_device *netdev, test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) return -EBUSY; - ret_val = i40e_nvmupd_command(hw, (struct i40e_nvm_access *)eeprom, - bytes, &errno); - if (ret_val) + cmd = (struct i40e_nvm_access *)eeprom; + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); + if (ret_val && hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) dev_info(&pf->pdev->dev, - "NVMUpdate write failed err=%d status=0x%x\n", - ret_val, hw->aq.asq_last_status); + "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", + ret_val, hw->aq.asq_last_status, errno, + (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), + cmd->offset, cmd->data_size); return errno; } @@ -1292,6 +1340,10 @@ static int i40e_get_ts_info(struct net_device *dev, { struct i40e_pf *pf = i40e_netdev_to_pf(dev); + /* only report HW timestamping if PTP is enabled */ + if (!(pf->flags & I40E_FLAG_PTP)) + return ethtool_op_get_ts_info(dev, info); + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | @@ -1355,6 +1407,9 @@ static int i40e_eeprom_test(struct net_device *netdev, u64 *data) netif_info(pf, hw, netdev, "eeprom test\n"); *data = i40e_diag_eeprom_test(&pf->hw); + /* forcibly clear the NVM Update state machine */ + pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT; + return *data; } @@ -1367,7 +1422,10 @@ static int i40e_intr_test(struct net_device *netdev, u64 *data) netif_info(pf, hw, netdev, "interrupt test\n"); wr32(&pf->hw, I40E_PFINT_DYN_CTL0, (I40E_PFINT_DYN_CTL0_INTENA_MASK | - I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); + I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); usleep_range(1000, 2000); *data = (swc_old == pf->sw_int_count); @@ -1551,13 +1609,10 @@ static int i40e_set_coalesce(struct net_device *netdev, vsi->rx_itr_setting = ec->rx_coalesce_usecs; } else if (ec->rx_coalesce_usecs == 0) { vsi->rx_itr_setting = ec->rx_coalesce_usecs; - i40e_irq_dynamic_disable(vsi, vector); if (ec->use_adaptive_rx_coalesce) - netif_info(pf, drv, netdev, - "Rx-secs=0, need to disable adaptive-Rx for a complete disable\n"); + netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); } else { - netif_info(pf, drv, netdev, - "Invalid value, Rx-usecs range is 0, 8-8160\n"); + netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); return -EINVAL; } @@ -1566,13 +1621,11 @@ static int i40e_set_coalesce(struct net_device *netdev, vsi->tx_itr_setting = ec->tx_coalesce_usecs; } else if (ec->tx_coalesce_usecs == 0) { vsi->tx_itr_setting = ec->tx_coalesce_usecs; - i40e_irq_dynamic_disable(vsi, vector); if
(ec->use_adaptive_tx_coalesce) - netif_info(pf, drv, netdev, - "Tx-secs=0, need to disable adaptive-Tx for a complete disable\n"); + netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); } else { netif_info(pf, drv, netdev, - "Invalid value, Tx-usecs range is 0, 8-8160\n"); + "Invalid value, tx-usecs range is 0-8160\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 5d01db1d789b..a8b8bd95108d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -343,7 +343,7 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) **/ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf) { - struct i40e_ieee_app_priority_table app; + struct i40e_dcb_app_priority_table app; struct i40e_hw *hw = &pf->hw; u8 enabled_tc = 0; u8 tc, i; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ed5f1c15fb0f..0a7ea4c5f9d3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -38,8 +38,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 0 -#define DRV_VERSION_BUILD 11 +#define DRV_VERSION_MINOR 2 +#define DRV_VERSION_BUILD 2 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -74,6 +74,7 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, /* required last entry */ {0, } }; @@ -812,7 +813,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ u32 tx_restart, tx_busy; + struct i40e_ring *p; u32 rx_page, rx_buf; + u64 bytes, packets; + unsigned int start; u64 rx_p, rx_b; u64 tx_p, tx_b; u16 q; @@ -836,10 +840,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rx_buf = 0; rcu_read_lock(); for (q = 0; q < vsi->num_queue_pairs; q++) { - struct i40e_ring *p; - u64 bytes, packets; - unsigned int start; - /* locate Tx ring */ p = ACCESS_ONCE(vsi->tx_rings[q]); @@ -2382,6 +2382,35 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) } /** + * i40e_config_xps_tx_ring - Configure XPS for a Tx ring + * @ring: The Tx ring to configure + * + * This enables/disables XPS for a given Tx descriptor ring + * based on the TCs enabled for the VSI that ring belongs to. 
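+ *
+ * A sketch of the resulting behaviour (illustrative, mirroring the code
+ * below): with a single TC the queue is pinned once to its vector's CPUs,
+ *	netif_set_xps_queue(netdev, &ring->q_vector->affinity_mask,
+ *			    ring->queue_index);
+ * while with multiple TCs an empty cpumask is programmed so the stack
+ * falls back to TC-based queue selection.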
+ **/ +static void i40e_config_xps_tx_ring(struct i40e_ring *ring) +{ + struct i40e_vsi *vsi = ring->vsi; + cpumask_var_t mask; + + if (ring->q_vector && ring->netdev) { + /* Single TC mode enable XPS */ + if (vsi->tc_config.numtc <= 1 && + !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) { + netif_set_xps_queue(ring->netdev, + &ring->q_vector->affinity_mask, + ring->queue_index); + } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + /* Disable XPS to allow selection based on TC */ + bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); + netif_set_xps_queue(ring->netdev, mask, + ring->queue_index); + free_cpumask_var(mask); + } + } +} + +/** * i40e_configure_tx_ring - Configure a transmit ring context and rest * @ring: The Tx ring to configure * @@ -2404,13 +2433,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) ring->atr_sample_rate = 0; } - /* initialize XPS */ - if (ring->q_vector && ring->netdev && - vsi->tc_config.numtc <= 1 && - !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) - netif_set_xps_queue(ring->netdev, - &ring->q_vector->affinity_mask, - ring->queue_index); + /* configure XPS */ + i40e_config_xps_tx_ring(ring); /* clear the context structure first */ memset(&tx_ctx, 0, sizeof(tx_ctx)); @@ -2462,10 +2486,14 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) } /* Now associate this queue with this PCI function */ - if (vsi->type == I40E_VSI_VMDQ2) + if (vsi->type == I40E_VSI_VMDQ2) { qtx_ctl = I40E_QTX_CTL_VM_QUEUE; - else + qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & + I40E_QTX_CTL_VFVM_INDX_MASK; + } else { qtx_ctl = I40E_QTX_CTL_PF_QUEUE; + } + qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); @@ -3440,7 +3468,7 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) break; - udelay(10); + usleep_range(10, 20); } if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) return -ETIMEDOUT; @@ -3466,7 +3494,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) /* warn the TX unit of coming changes */ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); if (!enable) - udelay(10); + usleep_range(10, 20); for (j = 0; j < 50; j++) { tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); @@ -3488,6 +3516,9 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) } wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); + /* No waiting for the Tx queue to disable */ + if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) + continue; /* wait for the change to finish */ ret = i40e_pf_txq_wait(pf, pf_q, enable); @@ -3526,7 +3557,7 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) break; - udelay(10); + usleep_range(10, 20); } if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) return -ETIMEDOUT; @@ -3855,6 +3886,15 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi) if (test_bit(__I40E_DOWN, &vsi->state)) return; + /* No need to disable FCoE VSI when Tx suspended */ + if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && + vsi->type == I40E_VSI_FCOE) { + dev_dbg(&vsi->back->pdev->dev, + "%s: VSI seid %d skipping FCoE VSI disable\n", + __func__, vsi->seid); + return; + } + set_bit(__I40E_NEEDS_RESTART, &vsi->state); if (vsi->netdev && netif_running(vsi->netdev)) { vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); @@ -3907,6 +3947,57 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) } } +#ifdef CONFIG_I40E_DCB +/** + * 
i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled + * @vsi: the VSI being configured + * + * This function waits for the given VSI's Tx queues to be disabled. + **/ +static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int i, pf_q, ret; + + pf_q = vsi->base_queue; + for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { + /* Check and wait for the disable status of the queue */ + ret = i40e_pf_txq_wait(pf, pf_q, false); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: VSI seid %d Tx ring %d disable timeout\n", + __func__, vsi->seid, pf_q); + return ret; + } + } + + return 0; +} + +/** + * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled + * @pf: the PF + * + * This function waits for the Tx queues to be in disabled state for all the + * VSIs that are managed by this PF. + **/ +static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) +{ + int v, ret = 0; + + for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { + /* No need to wait for FCoE VSI queues */ + if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { + ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]); + if (ret) + break; + } + } + + return ret; +} + +#endif /** * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config * @dcbcfg: the corresponding DCBx configuration structure @@ -4378,6 +4469,31 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf) } /** + * i40e_resume_port_tx - Resume port Tx + * @pf: PF struct + * + * Resume a port's Tx and issue a PF reset in case of failure to + * resume. + **/ +static int i40e_resume_port_tx(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + int ret; + + ret = i40e_aq_resume_port_tx(hw, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "AQ command Resume Port Tx failed = %d\n", + pf->hw.aq.asq_last_status); + /* Schedule PF reset to recover */ + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); + } + + return ret; +} + +/** * i40e_init_pf_dcb - Initialize DCB configuration * @pf: PF being configured * @@ -4413,6 +4529,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) /* Enable DCB tagging only when more than one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; + dev_dbg(&pf->pdev->dev, + "DCBX offload is supported for this PF.\n"); } } else { dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n", @@ -4449,6 +4567,9 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) case I40E_LINK_SPEED_1GB: strlcpy(speed, "1000 Mbps", SPEED_SIZE); break; + case I40E_LINK_SPEED_100MB: + strncpy(speed, "100 Mbps", SPEED_SIZE); + break; default: break; } @@ -4479,12 +4600,8 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) static int i40e_up_complete(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - u8 set_fc_aq_fail = 0; int err; - /* force flow control off */ - i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); - if (pf->flags & I40E_FLAG_MSIX_ENABLED) i40e_vsi_configure_msix(vsi); else @@ -4753,9 +4870,11 @@ int i40e_vsi_open(struct i40e_vsi *vsi) goto err_set_queues; } else if (vsi->type == I40E_VSI_FDIR) { - snprintf(int_name, sizeof(int_name) - 1, "%s-fdir", - dev_driver_string(&pf->pdev->dev)); + snprintf(int_name, sizeof(int_name) - 1, "%s-%s-fdir", + dev_driver_string(&pf->pdev->dev), + dev_name(&pf->pdev->dev)); err = i40e_vsi_request_irq(vsi, int_name); + } else { err = -EINVAL; goto err_setup_rx; @@ -4995,6 +5114,8 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf, 
dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); } + dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__, + need_reconfig); return need_reconfig; } @@ -5022,11 +5143,16 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Ignore if event is not for Nearest Bridge */ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); + dev_dbg(&pf->pdev->dev, + "%s: LLDP event mib bridge type 0x%x\n", __func__, type); if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) return ret; /* Check MIB Type and return if event for Remote MIB update */ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; + dev_dbg(&pf->pdev->dev, + "%s: LLDP event mib type %s\n", __func__, + type ? "remote" : "local"); if (type == I40E_AQ_LLDP_MIB_REMOTE) { /* Update the remote cached instance and return */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, @@ -5035,12 +5161,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, goto exit; } - /* Convert/store the DCBX data from LLDPDU temporarily */ memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg)); - ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg); + /* Store the old configuration */ + tmp_dcbx_cfg = *dcbx_cfg; + + /* Get updated DCBX data from firmware */ + ret = i40e_get_dcb_config(&pf->hw); if (ret) { - /* Error in LLDPDU parsing return */ - dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n"); + dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n"); goto exit; } @@ -5050,12 +5178,9 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, goto exit; } - need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg); + need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg); - i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg); - - /* Overwrite the new configuration */ - *dcbx_cfg = tmp_dcbx_cfg; + i40e_dcbnl_flush_apps(pf, dcbx_cfg); if (!need_reconfig) goto exit; @@ -5066,13 +5191,24 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, else pf->flags &= ~I40E_FLAG_DCB_ENABLED; + set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); /* Reconfiguration needed quiesce all VSIs */ i40e_pf_quiesce_all_vsi(pf); /* Changes in configuration update VEB/VSI */ i40e_dcb_reconfigure(pf); - i40e_pf_unquiesce_all_vsi(pf); + ret = i40e_resume_port_tx(pf); + + clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); + /* In case of error no point in resuming VSIs */ + if (ret) + goto exit; + + /* Wait for the PF's Tx queues to be disabled */ + ret = i40e_pf_wait_txq_disabled(pf); + if (!ret) + i40e_pf_unquiesce_all_vsi(pf); exit: return ret; } @@ -5212,6 +5348,9 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) int flush_wait_retry = 50; int reg; + if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) + return; + if (time_after(jiffies, pf->fd_flush_timestamp + (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) { set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); @@ -5273,6 +5412,9 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) if (test_bit(__I40E_DOWN, &pf->state)) return; + if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) + return; + if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) && (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) && (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count)) @@ -5310,8 +5452,6 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) break; case I40E_VSI_SRIOV: - break; - case I40E_VSI_VMDQ2: case I40E_VSI_CTRL: case I40E_VSI_MIRROR: @@ -5353,14 +5493,21 @@ static void 
i40e_veb_link_event(struct i40e_veb *veb, bool link_up) static void i40e_link_event(struct i40e_pf *pf) { bool new_link, old_link; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + + /* set this to force the get_link_status call to refresh state */ + pf->hw.phy.get_link_info = true; - new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP); old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); + new_link = i40e_get_link_status(&pf->hw); - if (new_link == old_link) + if (new_link == old_link && + (test_bit(__I40E_DOWN, &vsi->state) || + new_link == netif_carrier_ok(vsi->netdev))) return; - if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) - i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link); + + if (!test_bit(__I40E_DOWN, &vsi->state)) + i40e_print_link_message(vsi, new_link); /* Notify the base of the switch tree connected to * the link. Floating VEBs are not notified. @@ -5368,7 +5515,7 @@ static void i40e_link_event(struct i40e_pf *pf) if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); else - i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link); + i40e_vsi_link_event(vsi, new_link); if (pf->vf) i40e_vc_notify_link_state(pf); @@ -5418,11 +5565,17 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, (I40E_PFINT_DYN_CTL0_INTENA_MASK | - I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); + I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); } else { u16 vec = vsi->base_vector - 1; u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK); for (i = 0; i < vsi->num_q_vectors; i++, vec++) wr32(&vsi->back->hw, I40E_PFINT_DYN_CTLN(vec), val); @@ -5433,7 +5586,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf) } /** - * i40e_watchdog_subtask - Check and bring link up + * i40e_watchdog_subtask - periodic checks not using event driven response * @pf: board private structure **/ static void i40e_watchdog_subtask(struct i40e_pf *pf) @@ -5445,6 +5598,15 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) test_bit(__I40E_CONFIG_BUSY, &pf->state)) return; + /* make sure we don't do these things too often */ + if (time_before(jiffies, (pf->service_timer_previous + + pf->service_timer_period))) + return; + pf->service_timer_previous = jiffies; + + i40e_check_hang_subtask(pf); + i40e_link_event(pf); + /* Update the stats for active netdevs so the network stack * can look at updated numbers whenever it cares to */ @@ -5525,33 +5687,20 @@ static void i40e_handle_link_event(struct i40e_pf *pf, memcpy(&pf->hw.phy.link_info_old, hw_link_info, sizeof(pf->hw.phy.link_info_old)); + /* Do a new status request to re-enable LSE reporting + * and load new status information into the hw struct + * This completely ignores any state information + * in the ARQ event info, instead choosing to always + * issue the AQ update link status command. 
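Stepping back to the reworked i40e_link_event() above: it now forces a fresh get_link_status query and returns early only when both the cached link state and the netdev carrier state already agree. A reduced sketch of that debounce rule, with identifiers simplified for illustration (the real handler additionally notifies the VEB/VSI tree and any VFs):

/* Illustrative reduction of the early-return test in i40e_link_event().
 * Falling through resynchronizes carrier state even when old == new,
 * covering the case where the netdev's carrier drifted out of sync.
 */
static void example_link_event(struct net_device *netdev, bool vsi_down,
			       bool old_link, bool new_link)
{
	if (new_link == old_link &&
	    (vsi_down || new_link == netif_carrier_ok(netdev)))
		return;	/* nothing changed and carrier already correct */

	if (new_link)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}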
+ */ + i40e_link_event(pf); + /* check for unqualified module, if link is down */ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && (!(status->link_info & I40E_AQ_LINK_UP))) dev_err(&pf->pdev->dev, "The driver failed to link because an unqualified module was detected.\n"); - - /* update link status */ - hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type; - hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed; - hw_link_info->link_info = status->link_info; - hw_link_info->an_info = status->an_info; - hw_link_info->ext_info = status->ext_info; - hw_link_info->lse_enable = - le16_to_cpu(status->command_flags) & - I40E_AQ_LSE_ENABLE; - - /* process the event */ - i40e_link_event(pf); - - /* Do a new status request to re-enable LSE reporting - * and load new status information into the hw struct, - * then see if the status changed while processing the - * initial event. - */ - i40e_update_link_info(&pf->hw, true); - i40e_link_event(pf); } /** @@ -5607,13 +5756,12 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) if (oldval != val) wr32(&pf->hw, pf->hw.aq.asq.len, val); - event.msg_size = I40E_MAX_AQ_BUF_SIZE; - event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + event.buf_len = I40E_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) return; do { - event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */ ret = i40e_clean_arq_element(hw, &event, &pending); if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) break; @@ -5634,7 +5782,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) le32_to_cpu(event.desc.cookie_high), le32_to_cpu(event.desc.cookie_low), event.msg_buf, - event.msg_size); + event.msg_len); break; case i40e_aqc_opc_lldp_update_mib: dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); @@ -5740,6 +5888,9 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (ret) goto end_reconstitute; + /* Enable LB mode for the main VSI now that it is on a VEB */ + i40e_enable_pf_switch_lb(pf); + /* create the remaining VSIs attached to this VEB */ for (v = 0; v < pf->num_alloc_vsi; v++) { if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) @@ -5967,6 +6118,7 @@ static void i40e_send_version(struct i40e_pf *pf) static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) { struct i40e_hw *hw = &pf->hw; + u8 set_fc_aq_fail = 0; i40e_status ret; u32 v; @@ -6038,6 +6190,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) if (ret) goto end_core_reset; + /* driver is only interested in link up/down and module qualification + * reports from firmware + */ + ret = i40e_aq_set_phy_int_mask(&pf->hw, + I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + if (ret) + dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret); + + /* make sure our flow control settings are restored */ + ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); + if (ret) + dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret); + /* Rebuild the VSIs and VEBs that existed before reset. * They are still in our local switch element arrays, so only * need to rebuild the switch model in the HW. 
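A note on the admin-queue hunk above: struct i40e_arq_event_info's single msg_size field is split into buf_len (buffer capacity, set once by the caller) and msg_len (actual payload length, written back per event), which is why the per-iteration "reinit each time" line disappears. A condensed sketch of the consumer loop under the new scheme, assuming the usual hw/pending locals:

struct i40e_arq_event_info event;
u16 pending;
i40e_status ret;

event.buf_len = I40E_MAX_AQ_BUF_SIZE;	/* capacity: set once */
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
	return;

do {
	/* i40e_clean_arq_element() fills event.msg_len with this
	 * event's payload size; buf_len is left untouched
	 */
	ret = i40e_clean_arq_element(hw, &event, &pending);
	if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
		break;
	/* ... dispatch on the opcode, bounded by event.msg_len ... */
} while (pending);

kfree(event.msg_buf);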
@@ -6092,6 +6258,13 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } } + msleep(75); + ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (ret) { + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + } + /* reinit the misc interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) ret = i40e_setup_misc_vector(pf); @@ -6149,12 +6322,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) if (reg & I40E_GL_MDET_TX_VALID_MASK) { u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> I40E_GL_MDET_TX_PF_NUM_SHIFT; - u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> + u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> I40E_GL_MDET_TX_VF_NUM_SHIFT; - u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) >> + u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT; - u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> - I40E_GL_MDET_TX_QUEUE_SHIFT; + u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> + I40E_GL_MDET_TX_QUEUE_SHIFT) - + pf->hw.func_caps.base_queue; if (netif_msg_tx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n", event, queue, pf_num, vf_num); @@ -6165,10 +6339,11 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) if (reg & I40E_GL_MDET_RX_VALID_MASK) { u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> I40E_GL_MDET_RX_FUNCTION_SHIFT; - u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) >> + u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> I40E_GL_MDET_RX_EVENT_SHIFT; - u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> - I40E_GL_MDET_RX_QUEUE_SHIFT; + u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> + I40E_GL_MDET_RX_QUEUE_SHIFT) - + pf->hw.func_caps.base_queue; if (netif_msg_rx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", event, queue, func); @@ -6298,7 +6473,6 @@ static void i40e_service_task(struct work_struct *work) i40e_vc_process_vflr_event(pf); i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); - i40e_check_hang_subtask(pf); i40e_sync_filters_subtask(pf); #ifdef CONFIG_I40E_VXLAN i40e_sync_vxlan_filters_subtask(pf); @@ -6676,6 +6850,7 @@ static int i40e_init_msix(struct i40e_pf *pf) { i40e_status err = 0; struct i40e_hw *hw = &pf->hw; + int other_vecs = 0; int v_budget, i; int vec; @@ -6701,10 +6876,10 @@ static int i40e_init_msix(struct i40e_pf *pf) */ pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size); pf->num_vmdq_msix = pf->num_vmdq_qps; - v_budget = 1 + pf->num_lan_msix; - v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix); + other_vecs = 1; + other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix); if (pf->flags & I40E_FLAG_FD_SB_ENABLED) - v_budget++; + other_vecs++; #ifdef I40E_FCOE if (pf->flags & I40E_FLAG_FCOE_ENABLED) { @@ -6714,7 +6889,9 @@ static int i40e_init_msix(struct i40e_pf *pf) #endif /* Scale down if necessary, and the rings will share vectors */ - v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors); + pf->num_lan_msix = min_t(int, pf->num_lan_msix, + (hw->func_caps.num_msix_vectors - other_vecs)); + v_budget = pf->num_lan_msix + other_vecs; pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); @@ -6964,20 +7141,16 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) **/ static int i40e_config_rss(struct i40e_pf *pf) { - /* Set of random keys generated using kernel random number generator */ - static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687, - 
0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377, - 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d, - 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be}; + u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1]; struct i40e_hw *hw = &pf->hw; u32 lut = 0; int i, j; u64 hena; u32 reg_val; - /* Fill out hash function seed */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_PFQF_HKEY(i), seed[i]); + wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]); /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | @@ -7341,7 +7514,7 @@ static void i40e_del_vxlan_port(struct net_device *netdev, #endif static int i40e_get_phys_port_id(struct net_device *netdev, - struct netdev_phys_port_id *ppid) + struct netdev_phys_item_id *ppid) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; @@ -7356,18 +7529,18 @@ static int i40e_get_phys_port_id(struct net_device *netdev, return 0; } -#ifdef HAVE_FDB_OPS -#ifdef USE_CONST_DEV_UC_CHAR +/** + * i40e_ndo_fdb_add - add an entry to the hardware database + * @ndm: the input from the stack + * @tb: pointer to array of nladdr (unused) + * @dev: the net device pointer + * @addr: the MAC address entry being added + * @flags: instructions from stack about fdb operation + */ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, - u16 flags) -#else -static int i40e_ndo_fdb_add(struct ndmsg *ndm, - struct net_device *dev, - unsigned char *addr, + const unsigned char *addr, u16 vid, u16 flags) -#endif { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_pf *pf = np->vsi->back; @@ -7398,55 +7571,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, return err; } -#ifndef USE_DEFAULT_FDB_DEL_DUMP -#ifdef USE_CONST_DEV_UC_CHAR -static int i40e_ndo_fdb_del(struct ndmsg *ndm, - struct net_device *dev, - const unsigned char *addr) -#else -static int i40e_ndo_fdb_del(struct ndmsg *ndm, - struct net_device *dev, - unsigned char *addr) -#endif -{ - struct i40e_netdev_priv *np = netdev_priv(dev); - struct i40e_pf *pf = np->vsi->back; - int err = -EOPNOTSUPP; - - if (ndm->ndm_state & NUD_PERMANENT) { - netdev_info(dev, "FDB only supports static addresses\n"); - return -EINVAL; - } - - if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { - if (is_unicast_ether_addr(addr)) - err = dev_uc_del(dev, addr); - else if (is_multicast_ether_addr(addr)) - err = dev_mc_del(dev, addr); - else - err = -EINVAL; - } - - return err; -} - -static int i40e_ndo_fdb_dump(struct sk_buff *skb, - struct netlink_callback *cb, - struct net_device *dev, - struct net_device *filter_dev, - int idx) -{ - struct i40e_netdev_priv *np = netdev_priv(dev); - struct i40e_pf *pf = np->vsi->back; - - if (pf->flags & I40E_FLAG_SRIOV_ENABLED) - idx = ndo_dflt_fdb_dump(skb, cb, dev, filter_dev, idx); - - return idx; -} - -#endif /* USE_DEFAULT_FDB_DEL_DUMP */ -#endif /* HAVE_FDB_OPS */ static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, @@ -7480,13 +7604,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_del_vxlan_port = i40e_del_vxlan_port, #endif .ndo_get_phys_port_id = i40e_get_phys_port_id, -#ifdef HAVE_FDB_OPS .ndo_fdb_add = i40e_ndo_fdb_add, -#ifndef USE_DEFAULT_FDB_DEL_DUMP - .ndo_fdb_del = i40e_ndo_fdb_del, - .ndo_fdb_dump = i40e_ndo_fdb_dump, -#endif -#endif }; /** @@ -7682,6 +7800,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.uplink_seid = vsi->uplink_seid; 
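Regarding the i40e_config_rss() change above: netdev_rss_key_fill() copies from a single boot-time random key shared by all devices, replacing the hard-coded seed table. A sketch of the general pattern for a hypothetical device; example_hw, example_wr32() and EXAMPLE_RSS_KEY_REG() stand in for the real accessors and are not kernel APIs:

#include <linux/kernel.h>
#include <linux/netdevice.h>

static void example_program_rss_key(struct example_hw *hw)
{
	u32 key[13];	/* 52 bytes, matching I40E_PFQF_HKEY_MAX_INDEX + 1 */
	int i;

	/* same system-wide random key on every call, generated once */
	netdev_rss_key_fill(key, sizeof(key));
	for (i = 0; i < ARRAY_SIZE(key); i++)
		example_wr32(hw, EXAMPLE_RSS_KEY_REG(i), key[i]);
}

The design point is that the key is unpredictable per boot but stable across interfaces, so flows hash consistently no matter which port they land on.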
ctxt.connection_type = 0x1; /* regular data port */ ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; @@ -7931,8 +8053,8 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) vsi->num_q_vectors, vsi->idx); if (vsi->base_vector < 0) { dev_info(&pf->pdev->dev, - "failed to get queue tracking for VSI %d, err=%d\n", - vsi->seid, vsi->base_vector); + "failed to get tracking for %d vectors for VSI %d, err=%d\n", + vsi->num_q_vectors, vsi->seid, vsi->base_vector); i40e_vsi_free_q_vectors(vsi); ret = -ENOENT; goto vector_setup_out; @@ -7968,8 +8090,9 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); if (ret < 0) { - dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", - vsi->seid, ret); + dev_info(&pf->pdev->dev, + "failed to get tracking for %d queues for VSI %d err=%d\n", + vsi->alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } vsi->base_queue = ret; @@ -8066,7 +8189,15 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, vsi->tc_config.enabled_tc); - + if (veb) { + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { + dev_info(&vsi->back->pdev->dev, + "%s: New VSI creation error, uplink seid of LAN VSI expected.\n", + __func__); + return NULL; + } + i40e_enable_pf_switch_lb(pf); + } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) veb = pf->veb[i]; @@ -8098,8 +8229,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); if (ret < 0) { - dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", - vsi->seid, ret); + dev_info(&pf->pdev->dev, + "failed to get tracking for %d queues for VSI %d err=%d\n", + vsi->alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } vsi->base_queue = ret; @@ -8206,6 +8338,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb) veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); veb->bw_max_quanta = ets_data.tc_bw_max; veb->is_abs_credits = bw_data.absolute_credits_enable; + veb->enabled_tc = ets_data.tc_valid_bits; tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { @@ -8719,6 +8852,14 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? true : false); + /* fill in link information and enable LSE reporting */ + i40e_update_link_info(&pf->hw, true); + i40e_link_event(pf); + + /* Initialize user-specific link properties */ + pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & + I40E_AQ_AN_COMPLETED) ? 
true : false); + i40e_ptp_init(pf); return ret; @@ -8987,6 +9128,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.func = PCI_FUNC(pdev->devfn); pf->instance = pfs_found; + if (debug != -1) { + pf->msg_enable = pf->hw.debug_mask; + pf->msg_enable = debug; + } + /* do a special CORER for clearing PXE mode once at init */ if (hw->revision_id == 0 && (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { @@ -9012,9 +9158,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; + snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1, - "%s-pf%d:misc", - dev_driver_string(&pf->pdev->dev), pf->hw.pf_id); + "%s-%s:misc", + dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); err = i40e_init_shared_code(hw); if (err) { @@ -9158,6 +9305,22 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } + /* driver is only interested in link up/down and module qualification + * reports from firmware + */ + err = i40e_aq_set_phy_int_mask(&pf->hw, + I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + if (err) + dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); + + msleep(75); + err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (err) { + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + } + /* The main driver is (mostly) up and happy. We need to set this state * before setting up the misc vector or we get a race and the vector * ends up disabled forever. diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 25c4f9a3011f..3e70f2e45a47 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -61,7 +61,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw) } else { /* Blank programming mode */ nvm->blank_nvm_mode = true; ret_code = I40E_ERR_NVM_BLANK_MODE; - hw_dbg(hw, "NVM init error: unsupported blank mode.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n"); } return ret_code; @@ -80,46 +80,45 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, { i40e_status ret_code = 0; u64 gtime, timeout; - u64 time = 0; + u64 time_left = 0; if (hw->nvm.blank_nvm_mode) goto i40e_i40e_acquire_nvm_exit; ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, - 0, &time, NULL); + 0, &time_left, NULL); /* Reading the Global Device Timer */ gtime = rd32(hw, I40E_GLVFGEN_TIMER); /* Store the timeout */ - hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime; + hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime; - if (ret_code) { - /* Set the polling timeout */ - if (time > I40E_MAX_NVM_TIMEOUT) - timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) - + gtime; - else - timeout = hw->nvm.hw_semaphore_timeout; + if (ret_code) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n", + access, time_left, ret_code, hw->aq.asq_last_status); + + if (ret_code && time_left) { /* Poll until the current NVM owner timeouts */ - while (gtime < timeout) { + timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime; + while ((gtime < timeout) && time_left) { usleep_range(10000, 20000); + gtime = rd32(hw, I40E_GLVFGEN_TIMER); ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, - access, 0, &time, + 
access, 0, &time_left, NULL); if (!ret_code) { hw->nvm.hw_semaphore_timeout = - I40E_MS_TO_GTIME(time) + gtime; + I40E_MS_TO_GTIME(time_left) + gtime; break; } - gtime = rd32(hw, I40E_GLVFGEN_TIMER); } if (ret_code) { hw->nvm.hw_semaphore_timeout = 0; - hw->nvm.hw_semaphore_wait = - I40E_MS_TO_GTIME(time) + gtime; - hw_dbg(hw, "NVM acquire timed out, wait %llu ms before trying again.\n", - time); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n", + time_left, ret_code, hw->aq.asq_last_status); } } @@ -160,7 +159,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) udelay(5); } if (ret_code == I40E_ERR_TIMEOUT) - hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n"); + i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set"); return ret_code; } @@ -179,7 +178,9 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u32 sr_reg; if (offset >= hw->nvm.sr_size) { - hw_dbg(hw, "NVM read error: Offset beyond Shadow RAM limit.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: offset %d beyond Shadow RAM limit %d\n", + offset, hw->nvm.sr_size); ret_code = I40E_ERR_PARAM; goto read_nvm_exit; } @@ -202,8 +203,9 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, } } if (ret_code) - hw_dbg(hw, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n", - offset); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: Couldn't access Shadow RAM address: 0x%x\n", + offset); read_nvm_exit: return ret_code; @@ -263,14 +265,20 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, * Firmware will check the module-based model. */ if ((offset + words) > hw->nvm.sr_size) - hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: offset %d beyond Shadow RAM limit %d\n", + (offset + words), hw->nvm.sr_size); else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) /* We can write only up to 4KB (one sector), in one AQ write */ - hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write fail error: tried to write %d words, limit is %d.\n", + words, I40E_SR_SECTOR_SIZE_IN_WORDS); else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) /* A single write cannot spread over two sectors */ - hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", + offset, words); else ret_code = i40e_aq_update_nvm(hw, module_pointer, 2 * offset, /*bytes*/ @@ -438,6 +446,22 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val) return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); } +static char *i40e_nvm_update_state_str[] = { + "I40E_NVMUPD_INVALID", + "I40E_NVMUPD_READ_CON", + "I40E_NVMUPD_READ_SNT", + "I40E_NVMUPD_READ_LCB", + "I40E_NVMUPD_READ_SA", + "I40E_NVMUPD_WRITE_ERA", + "I40E_NVMUPD_WRITE_CON", + "I40E_NVMUPD_WRITE_SNT", + "I40E_NVMUPD_WRITE_LCB", + "I40E_NVMUPD_WRITE_SA", + "I40E_NVMUPD_CSUM_CON", + "I40E_NVMUPD_CSUM_SA", + "I40E_NVMUPD_CSUM_LCB", +}; + /** * i40e_nvmupd_command - Process an NVM update command * @hw: pointer to hardware structure @@ -471,6 +495,8 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, default: /* invalid state, should never happen */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: no such state %d\n", 
hw->nvmupd_state); status = I40E_NOT_SUPPORTED; *errno = -ESRCH; break; @@ -501,7 +527,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_READ_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); i40e_release_nvm(hw); @@ -511,17 +538,22 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_READ_SNT: status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); - hw->nvmupd_state = I40E_NVMUPD_STATE_READING; + if (status) + i40e_release_nvm(hw); + else + hw->nvmupd_state = I40E_NVMUPD_STATE_READING; } break; case I40E_NVMUPD_WRITE_ERA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_erase(hw, cmd, errno); if (status) @@ -534,7 +566,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_WRITE_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); if (status) @@ -547,22 +580,28 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_WRITE_SNT: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); - hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + if (status) + i40e_release_nvm(hw); + else + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; } break; case I40E_NVMUPD_CSUM_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_update_nvm_checksum(hw); if (status) { *errno = hw->aq.asq_last_status ? 
- i40e_aq_rc_to_posix(hw->aq.asq_last_status) : + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : -EIO; i40e_release_nvm(hw); } else { @@ -572,6 +611,9 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, break; default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in init state\n", + i40e_nvm_update_state_str[upd_cmd]); status = I40E_ERR_NVM; *errno = -ESRCH; break; @@ -611,6 +653,9 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, break; default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in reading state.\n", + i40e_nvm_update_state_str[upd_cmd]); status = I40E_NOT_SUPPORTED; *errno = -ESRCH; break; @@ -644,33 +689,38 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, case I40E_NVMUPD_WRITE_LCB: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); - if (!status) { + if (!status) hw->aq.nvm_release_on_done = true; - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - } + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; break; case I40E_NVMUPD_CSUM_CON: status = i40e_update_nvm_checksum(hw); - if (status) + if (status) { *errno = hw->aq.asq_last_status ? - i40e_aq_rc_to_posix(hw->aq.asq_last_status) : + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : -EIO; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } break; case I40E_NVMUPD_CSUM_LCB: status = i40e_update_nvm_checksum(hw); - if (status) { + if (status) *errno = hw->aq.asq_last_status ? - i40e_aq_rc_to_posix(hw->aq.asq_last_status) : + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : -EIO; - } else { + else hw->aq.nvm_release_on_done = true; - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - } + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; break; default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in writing state.\n", + i40e_nvm_update_state_str[upd_cmd]); status = I40E_NOT_SUPPORTED; *errno = -ESRCH; break; @@ -702,8 +752,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, /* limits on data size */ if ((cmd->data_size < 1) || (cmd->data_size > I40E_NVMUPD_MAX_DATA)) { - hw_dbg(hw, "i40e_nvmupd_validate_command data_size %d\n", - cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command data_size %d\n", + cmd->data_size); *errno = -EFAULT; return I40E_NVMUPD_INVALID; } @@ -755,12 +806,16 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, } break; } + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n", + i40e_nvm_update_state_str[upd_cmd], + hw->nvmupd_state, + hw->aq.nvm_release_on_done); if (upd_cmd == I40E_NVMUPD_INVALID) { *errno = -EFAULT; - hw_dbg(hw, - "i40e_nvmupd_validate_command returns %d errno: %d\n", - upd_cmd, *errno); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command returns %d errno %d\n", + upd_cmd, *errno); } return upd_cmd; } @@ -785,14 +840,18 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); - hw_dbg(hw, "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, bytes, last, NULL); - hw_dbg(hw, "i40e_nvmupd_nvm_read status %d\n", status); - if (status) - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", + module, 
cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_read status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } return status; } @@ -816,13 +875,17 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); - hw_dbg(hw, "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, last, NULL); - hw_dbg(hw, "i40e_nvmupd_nvm_erase status %d\n", status); - if (status) - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_erase status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } return status; } @@ -847,13 +910,18 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); - hw_dbg(hw, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); + status = i40e_aq_update_nvm(hw, module, cmd->offset, (u16)cmd->data_size, bytes, last, NULL); - hw_dbg(hw, "i40e_nvmupd_nvm_write status %d\n", status); - if (status) - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_write status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } return status; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 0988b5c1fe87..2fb4306597e8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -84,6 +84,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, bool atomic_reset); +i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, @@ -173,6 +175,9 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, u16 udp_port, u8 protocol_index, u8 *filter_index, @@ -228,6 +233,10 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +i40e_status 
i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg); /* i40e_common */ i40e_status i40e_init_shared_code(struct i40e_hw *hw); i40e_status i40e_pf_reset(struct i40e_hw *hw); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 537b6216971d..6d1ec926aa37 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -382,11 +382,17 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) incval = I40E_PTP_1GB_INCVAL; break; case I40E_LINK_SPEED_100MB: - dev_warn(&pf->pdev->dev, - "%s: 1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n", - __func__); + { + static int warn_once; + + if (!warn_once) { + dev_warn(&pf->pdev->dev, + "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n"); + warn_once++; + } incval = 0; break; + } case I40E_LINK_SPEED_40GB: default: incval = I40E_PTP_40GB_INCVAL; @@ -418,6 +424,9 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr) { struct hwtstamp_config *config = &pf->tstamp_config; + if (!(pf->flags & I40E_FLAG_PTP)) + return -EOPNOTSUPP; + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? -EFAULT : 0; } @@ -438,22 +447,12 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, struct hwtstamp_config *config) { struct i40e_hw *hw = &pf->hw; - u32 pf_id, tsyntype, regval; + u32 tsyntype, regval; /* Reserved for future extensions. */ if (config->flags) return -EINVAL; - /* Confirm that 1588 is supported on this PF. */ - pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >> - I40E_PRTTSYN_CTL0_PF_ID_SHIFT; - if (hw->pf_id != pf_id) { - dev_err(&pf->pdev->dev, - "PF %d attempted to control timestamp mode on port %d, which is owned by PF %d\n", - hw->pf_id, hw->port, pf_id); - return -EPERM; - } - switch (config->tx_type) { case HWTSTAMP_TX_OFF: pf->ptp_tx = false; @@ -556,6 +555,9 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr) struct hwtstamp_config config; int err; + if (!(pf->flags & I40E_FLAG_PTP)) + return -EOPNOTSUPP; + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; @@ -625,8 +627,22 @@ void i40e_ptp_init(struct i40e_pf *pf) { struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; struct i40e_hw *hw = &pf->hw; + u32 pf_id; long err; + /* Only one PF is assigned to control 1588 logic per port. 
Do not + * enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID + */ + pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >> + I40E_PRTTSYN_CTL0_PF_ID_SHIFT; + if (hw->pf_id != pf_id) { + pf->flags &= ~I40E_FLAG_PTP; + dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n", + __func__, + netdev->name); + return; + } + /* we have to initialize the lock first, since we can't control * when the user will enter the PHC device entry points */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 3195d82e4942..04b441460bbd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2399,12 +2399,8 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* hardware can't handle really short frames, hardware padding works * beyond this point */ - if (unlikely(skb->len < I40E_MIN_TX_LEN)) { - if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len)) - return NETDEV_TX_OK; - skb->len = I40E_MIN_TX_LEN; - skb_set_tail_pointer(skb, I40E_MIN_TX_LEN); - } + if (skb_put_padto(skb, I40E_MIN_TX_LEN)) + return NETDEV_TX_OK; return i40e_xmit_frame_ring(skb, tx_ring); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index d7a625a6a14f..e60d3accb2e2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -30,10 +30,7 @@ /* Interrupt Throttling and Rate Limiting Goodies */ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ -#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */ -#define I40E_MAX_IRATE 0x03F -#define I40E_MIN_IRATE 0x001 -#define I40E_IRATE_USEC_RESOLUTION 4 +#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_ITR_100K 0x0005 #define I40E_ITR_20K 0x0019 #define I40E_ITR_8K 0x003E diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index ce04d9093db6..c1f2eb963357 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -43,6 +43,7 @@ #define I40E_DEV_ID_QSFP_A 0x1583 #define I40E_DEV_ID_QSFP_B 0x1584 #define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 @@ -260,8 +261,7 @@ enum i40e_aq_resource_access_type { }; struct i40e_nvm_info { - u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */ - u64 hw_semaphore_wait; /* - || - */ + u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ u32 timeout; /* [ms] */ u16 sr_size; /* Shadow RAM size in words */ bool blank_nvm_mode; /* is NVM empty (no FW present)*/ @@ -380,9 +380,18 @@ struct i40e_fc_info { #define I40E_MAX_USER_PRIORITY 8 #define I40E_DCBX_MAX_APPS 32 #define I40E_LLDPDU_SIZE 1500 - -/* IEEE 802.1Qaz ETS Configuration data */ -struct i40e_ieee_ets_config { +#define I40E_TLV_STATUS_OPER 0x1 +#define I40E_TLV_STATUS_SYNC 0x2 +#define I40E_TLV_STATUS_ERR 0x4 +#define I40E_CEE_OPER_MAX_APPS 3 +#define I40E_APP_PROTOID_FCOE 0x8906 +#define I40E_APP_PROTOID_ISCSI 0x0cbc +#define I40E_APP_PROTOID_FIP 0x8914 +#define I40E_APP_SEL_ETHTYPE 0x1 +#define I40E_APP_SEL_TCPIP 0x2 + +/* CEE or IEEE 802.1Qaz ETS Configuration data */ +struct i40e_dcb_ets_config { u8 willing; u8 cbs; u8 maxtcs; @@ -391,34 +400,30 @@ struct i40e_ieee_ets_config { u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; }; -/* IEEE 802.1Qaz ETS Recommendation data */ -struct 
i40e_ieee_ets_recommend { - u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; - u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; - u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* IEEE 802.1Qaz PFC Configuration data */ -struct i40e_ieee_pfc_config { +/* CEE or IEEE 802.1Qaz PFC Configuration data */ +struct i40e_dcb_pfc_config { u8 willing; u8 mbc; u8 pfccap; u8 pfcenable; }; -/* IEEE 802.1Qaz Application Priority data */ -struct i40e_ieee_app_priority_table { +/* CEE or IEEE 802.1Qaz Application Priority data */ +struct i40e_dcb_app_priority_table { u8 priority; u8 selector; u16 protocolid; }; struct i40e_dcbx_config { + u8 dcbx_mode; +#define I40E_DCBX_MODE_CEE 0x1 +#define I40E_DCBX_MODE_IEEE 0x2 u32 numapps; - struct i40e_ieee_ets_config etscfg; - struct i40e_ieee_ets_recommend etsrec; - struct i40e_ieee_pfc_config pfc; - struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS]; + struct i40e_dcb_ets_config etscfg; + struct i40e_dcb_ets_config etsrec; + struct i40e_dcb_pfc_config pfc; + struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS]; }; /* Port hardware description */ @@ -476,6 +481,11 @@ struct i40e_hw { u32 debug_mask; }; +static inline bool i40e_is_vf(struct i40e_hw *hw) +{ + return hw->mac.type == I40E_MAC_VF; +} + struct i40e_driver_version { u8 major_version; u8 minor_version; @@ -1371,6 +1381,18 @@ enum i40e_reset_type { I40E_RESET_EMPR = 3, }; +/* IEEE 802.1AB LLDP Agent Variables from NVM */ +#define I40E_NVM_LLDP_CFG_PTR 0xD +struct i40e_lldp_variables { + u16 length; + u16 adminstatus; + u16 msgfasttx; + u16 msgtxinterval; + u16 txparams; + u16 timers; + u16 crc8; +}; + /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 70951d2edcad..61dd1b187624 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -79,6 +79,7 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_VIRTCHNL_OP_GET_STATS, I40E_VIRTCHNL_OP_FCOE, + I40E_VIRTCHNL_OP_CONFIG_RSS, /* PF sends status change events to vfs using * the following op. 
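On the type rename above (i40e_ieee_app_priority_table becoming i40e_dcb_app_priority_table): the same structures now carry configuration for both CEE and IEEE DCBX modes, selected by the new dcbx_mode field. A slightly simplified sketch of how a consumer such as i40e_get_fcoe_tc_map() walks the application table, assuming a populated struct i40e_dcbx_config *dcbcfg:

u8 enabled_tc = 0;
u8 tc;
u32 i;

for (i = 0; i < dcbcfg->numapps; i++) {
	struct i40e_dcb_app_priority_table *app = &dcbcfg->app[i];

	/* FCoE is identified by the EtherType selector + 0x8906 */
	if (app->selector == I40E_APP_SEL_ETHTYPE &&
	    app->protocolid == I40E_APP_PROTOID_FCOE) {
		/* map user priority to its traffic class via ETS */
		tc = dcbcfg->etscfg.prioritytable[app->priority];
		enabled_tc |= (1 << tc);
	}
}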
*/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 4eeed267e4b7..5bae89550657 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -674,7 +674,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) * that the requested op was completed * successfully */ - udelay(10); + usleep_range(10, 20); reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { rsd = true; @@ -707,7 +707,6 @@ complete_reset: wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); i40e_flush(hw); } -#ifdef CONFIG_PCI_IOV /** * i40e_enable_pf_switch_lb @@ -715,7 +714,7 @@ complete_reset: * * enable switch loop back or die - no point in a return value **/ -static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) +void i40e_enable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; @@ -742,7 +741,6 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) __func__, vsi->back->hw.aq.asq_last_status); } } -#endif /** * i40e_disable_pf_switch_lb @@ -1869,6 +1867,12 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) return 0; + /* re-enable vflr interrupt cause */ + reg = rd32(hw, I40E_PFINT_ICR0_ENA); + reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, reg); + i40e_flush(hw); + clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; @@ -1885,12 +1889,6 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) } } - /* re-enable vflr interrupt cause */ - reg = rd32(hw, I40E_PFINT_ICR0_ENA); - reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; - wr32(hw, I40E_PFINT_ICR0_ENA, reg); - i40e_flush(hw); - return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 0adc61e1052d..9452f5247cff 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -126,5 +126,6 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); +void i40e_enable_pf_switch_lb(struct i40e_pf *pf); #endif /* _I40E_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index f206be917842..c1d25f8c1abc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -49,7 +49,7 @@ static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) static void i40e_adminq_init_regs(struct i40e_hw *hw) { /* set head and tail registers in our local struct */ - if (hw->mac.type == I40E_MAC_VF) { + if (i40e_is_vf(hw)) { hw->aq.asq.tail = I40E_VF_ATQT1; hw->aq.asq.head = I40E_VF_ATQH1; hw->aq.asq.len = I40E_VF_ATQLEN1; @@ -801,7 +801,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, */ if (!details->async && !details->postpone) { u32 total_delay = 0; - u32 delay_len = 10; do { /* AQ designers suggest use of head for better @@ -809,9 +808,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, */ if (i40evf_asq_done(hw)) break; - /* ugh! 
delay while spin_lock */ - udelay(delay_len); - total_delay += delay_len; + usleep_range(1000, 2000); + total_delay++; } while (total_delay < hw->aq.asq_cmd_timeout); } @@ -838,9 +836,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; } - if (i40e_is_nvm_update_op(desc)) - hw->aq.nvm_busy = true; - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer writeback:\n"); i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, @@ -907,9 +902,6 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQRX: Queue is empty.\n"); ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; goto clean_arq_element_out; } @@ -931,13 +923,10 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, e->desc = *desc; datalen = le16_to_cpu(desc->datalen); - e->msg_size = min(datalen, e->msg_size); - if (e->msg_buf != NULL && (e->msg_size != 0)) + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf != NULL && (e->msg_len != 0)) memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, - e->msg_size); - - if (i40e_is_nvm_update_op(&e->desc)) - hw->aq.nvm_busy = false; + e->msg_len); i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index 91a5c5bd80f3..6c31bf22c2c3 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -28,6 +28,7 @@ #define _I40E_ADMINQ_H_ #include "i40e_osdep.h" +#include "i40e_status.h" #include "i40e_adminq_cmd.h" #define I40E_ADMINQ_DESC(R, i) \ @@ -76,7 +77,8 @@ struct i40e_asq_cmd_details { /* ARQ event information */ struct i40e_arq_event_info { struct i40e_aq_desc desc; - u16 msg_size; + u16 msg_len; + u16 buf_len; u8 *msg_buf; }; @@ -93,7 +95,6 @@ struct i40e_adminq_info { u16 fw_min_ver; /* firmware minor version */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ - bool nvm_busy; bool nvm_release_on_done; struct mutex asq_mutex; /* Send queue lock */ @@ -108,7 +109,7 @@ struct i40e_adminq_info { * i40e_aq_rc_to_posix - convert errors to user-land codes * aq_rc: AdminQ error code to convert **/ -static inline int i40e_aq_rc_to_posix(u16 aq_rc) +static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) { int aq_to_posix[] = { 0, /* I40E_AQ_RC_OK */ @@ -136,12 +137,18 @@ static inline int i40e_aq_rc_to_posix(u16 aq_rc) -EFBIG, /* I40E_AQ_RC_EFBIG */ }; + /* aq_rc is invalid if AQ timed out */ + if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) + return -EAGAIN; + + if (aq_rc >= ARRAY_SIZE(aq_to_posix)) + return -ERANGE; return aq_to_posix[aq_rc]; } /* general information */ #define I40E_AQ_LARGE_BUF 512 -#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */ +#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */ void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index e656ea7a7920..ff1b16370da9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -33,8 +33,8 @@ * This file needs to comply with the Linux Kernel coding style. 
*/ -#define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR 0x0002 #define I40E_FW_API_VERSION_A0_MINOR 0x0000 struct i40e_aq_desc { @@ -67,216 +67,216 @@ struct i40e_aq_desc { */ /* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 -#define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 -#define I40E_AQ_FLAG_LB_SHIFT 9 -#define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 -#define I40E_AQ_FLAG_BUF_SHIFT 12 -#define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 - -#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { - I40E_AQ_RC_OK = 0, /* success */ - I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ - I40E_AQ_RC_ENOENT = 2, /* No such element */ - I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ - I40E_AQ_RC_EINTR = 4, /* operation interrupted */ - I40E_AQ_RC_EIO = 5, /* I/O error */ - I40E_AQ_RC_ENXIO = 6, /* No such resource */ - I40E_AQ_RC_E2BIG = 7, /* Arg too long */ - I40E_AQ_RC_EAGAIN = 8, /* Try again */ - I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ - I40E_AQ_RC_EACCES = 10, /* Permission denied */ - I40E_AQ_RC_EFAULT = 11, /* Bad address */ - I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ - I40E_AQ_RC_EEXIST = 13, /* object already exists */ - I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ - I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ - I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ - I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ - I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ - I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev 
cmd error */ - I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ - I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ - I40E_AQ_RC_EFBIG = 22, /* File too large */ + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ }; /* Admin Queue command opcodes */ enum i40e_admin_queue_opc { /* aq commands */ - i40e_aqc_opc_get_version = 0x0001, - i40e_aqc_opc_driver_version = 0x0002, - i40e_aqc_opc_queue_shutdown = 0x0003, - i40e_aqc_opc_set_pf_context = 0x0004, + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, /* resource ownership */ - i40e_aqc_opc_request_resource = 0x0008, - i40e_aqc_opc_release_resource = 0x0009, + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, - i40e_aqc_opc_list_func_capabilities = 0x000A, - i40e_aqc_opc_list_dev_capabilities = 0x000B, + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, + i40e_aqc_opc_set_cppm_configuration = 0x0103, + i40e_aqc_opc_set_arp_proxy_entry = 0x0104, + i40e_aqc_opc_set_ns_proxy_entry = 0x0105, /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ - i40e_aqc_opc_mac_address_read = 0x0107, - i40e_aqc_opc_mac_address_write = 0x0108, + i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, /* PXE */ - i40e_aqc_opc_clear_pxe_mode = 0x0110, + i40e_aqc_opc_clear_pxe_mode = 0x0110, /* internal switch commands */ - i40e_aqc_opc_get_switch_config = 0x0200, - i40e_aqc_opc_add_statistics = 0x0201, - i40e_aqc_opc_remove_statistics = 0x0202, - i40e_aqc_opc_set_port_parameters = 0x0203, - i40e_aqc_opc_get_switch_resource_alloc = 0x0204, - - i40e_aqc_opc_add_vsi = 0x0210, - i40e_aqc_opc_update_vsi_parameters = 0x0211, - i40e_aqc_opc_get_vsi_parameters = 0x0212, - - i40e_aqc_opc_add_pv = 0x0220, - i40e_aqc_opc_update_pv_parameters = 0x0221, - i40e_aqc_opc_get_pv_parameters = 0x0222, - - i40e_aqc_opc_add_veb = 0x0230, - i40e_aqc_opc_update_veb_parameters = 0x0231, - i40e_aqc_opc_get_veb_parameters = 0x0232, - - i40e_aqc_opc_delete_element = 0x0243, - - i40e_aqc_opc_add_macvlan = 0x0250, - i40e_aqc_opc_remove_macvlan = 0x0251, - 
i40e_aqc_opc_add_vlan = 0x0252, - i40e_aqc_opc_remove_vlan = 0x0253, - i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, - i40e_aqc_opc_add_tag = 0x0255, - i40e_aqc_opc_remove_tag = 0x0256, - i40e_aqc_opc_add_multicast_etag = 0x0257, - i40e_aqc_opc_remove_multicast_etag = 0x0258, - i40e_aqc_opc_update_tag = 0x0259, - i40e_aqc_opc_add_control_packet_filter = 0x025A, - i40e_aqc_opc_remove_control_packet_filter = 0x025B, - i40e_aqc_opc_add_cloud_filters = 0x025C, - i40e_aqc_opc_remove_cloud_filters = 0x025D, - - i40e_aqc_opc_add_mirror_rule = 0x0260, - i40e_aqc_opc_delete_mirror_rule = 0x0261, + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, /* DCB commands */ - i40e_aqc_opc_dcb_ignore_pfc = 0x0301, - i40e_aqc_opc_dcb_updated = 0x0302, + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, /* TX scheduler */ - i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, - i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, - i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, - i40e_aqc_opc_query_vsi_bw_config = 0x0408, - i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, - i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, - - i40e_aqc_opc_enable_switching_comp_ets = 0x0413, - i40e_aqc_opc_modify_switching_comp_ets = 0x0414, - i40e_aqc_opc_disable_switching_comp_ets = 0x0415, - i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, - i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, - i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, - i40e_aqc_opc_query_port_ets_config = 0x0419, - i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, - i40e_aqc_opc_suspend_port_tx = 0x041B, - i40e_aqc_opc_resume_port_tx = 0x041C, - i40e_aqc_opc_configure_partition_bw = 0x041D, + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 
0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, /* phy commands*/ - i40e_aqc_opc_get_phy_abilities = 0x0600, - i40e_aqc_opc_set_phy_config = 0x0601, - i40e_aqc_opc_set_mac_config = 0x0603, - i40e_aqc_opc_set_link_restart_an = 0x0605, - i40e_aqc_opc_get_link_status = 0x0607, - i40e_aqc_opc_set_phy_int_mask = 0x0613, - i40e_aqc_opc_get_local_advt_reg = 0x0614, - i40e_aqc_opc_set_local_advt_reg = 0x0615, - i40e_aqc_opc_get_partner_advt = 0x0616, - i40e_aqc_opc_set_lb_modes = 0x0618, - i40e_aqc_opc_get_phy_wol_caps = 0x0621, - i40e_aqc_opc_set_phy_debug = 0x0622, - i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, /* NVM commands */ - i40e_aqc_opc_nvm_read = 0x0701, - i40e_aqc_opc_nvm_erase = 0x0702, - i40e_aqc_opc_nvm_update = 0x0703, - i40e_aqc_opc_nvm_config_read = 0x0704, - i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, /* virtualization commands */ - i40e_aqc_opc_send_msg_to_pf = 0x0801, - i40e_aqc_opc_send_msg_to_vf = 0x0802, - i40e_aqc_opc_send_msg_to_peer = 0x0803, + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, /* alternate structure */ - i40e_aqc_opc_alternate_write = 0x0900, - i40e_aqc_opc_alternate_write_indirect = 0x0901, - i40e_aqc_opc_alternate_read = 0x0902, - i40e_aqc_opc_alternate_read_indirect = 0x0903, - i40e_aqc_opc_alternate_write_done = 0x0904, - i40e_aqc_opc_alternate_set_mode = 0x0905, - i40e_aqc_opc_alternate_clear_port = 0x0906, + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, /* LLDP commands */ - i40e_aqc_opc_lldp_get_mib = 0x0A00, - i40e_aqc_opc_lldp_update_mib = 0x0A01, - i40e_aqc_opc_lldp_add_tlv = 0x0A02, - i40e_aqc_opc_lldp_update_tlv = 0x0A03, - i40e_aqc_opc_lldp_delete_tlv = 0x0A04, - i40e_aqc_opc_lldp_stop = 0x0A05, - i40e_aqc_opc_lldp_start = 0x0A06, + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, /* Tunnel commands */ - i40e_aqc_opc_add_udp_tunnel 
= 0x0B00, - i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_tunnel_key_structure = 0x0B10, /* Async Events */ - i40e_aqc_opc_event_lan_overflow = 0x1001, + i40e_aqc_opc_event_lan_overflow = 0x1001, /* OEM commands */ - i40e_aqc_opc_oem_parameter_change = 0xFE00, - i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, - i40e_aqc_opc_debug_read_reg = 0xFF03, - i40e_aqc_opc_debug_write_reg = 0xFF04, - i40e_aqc_opc_debug_modify_reg = 0xFF07, - i40e_aqc_opc_debug_dump_internals = 0xFF08, - i40e_aqc_opc_debug_modify_internals = 0xFF09, + i40e_aqc_opc_debug_get_deviceid = 0xFF00, + i40e_aqc_opc_debug_set_mode = 0xFF01, + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, + i40e_aqc_opc_debug_modify_internals = 0xFF09, }; /* command structures and indirect data structures */ @@ -303,7 +303,7 @@ enum i40e_admin_queue_opc { /* This macro is used extensively to ensure that command structures are 16 * bytes in length as they have to map to the raw array of that size. */ -#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) /* internal (0x00XX) commands */ @@ -321,22 +321,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); /* Send driver version (indirect 0x0002) */ struct i40e_aqc_driver_version { - u8 driver_major_ver; - u8 driver_minor_ver; - u8 driver_build_ver; - u8 driver_subbuild_ver; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; + u8 driver_major_ver; + u8 driver_minor_ver; + u8 driver_build_ver; + u8 driver_subbuild_ver; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); /* Queue Shutdown (direct 0x0003) */ struct i40e_aqc_queue_shutdown { - __le32 driver_unloading; -#define I40E_AQ_DRIVER_UNLOADING 0x1 - u8 reserved[12]; + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); @@ -352,19 +352,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); /* Request resource ownership (direct 0x0008) * Release resource ownership (direct 0x0009) */ -#define I40E_AQ_RESOURCE_NVM 1 -#define I40E_AQ_RESOURCE_SDP 2 -#define I40E_AQ_RESOURCE_ACCESS_READ 1 -#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 -#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 -#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 +#define I40E_AQ_RESOURCE_NVM 1 +#define I40E_AQ_RESOURCE_SDP 2 +#define I40E_AQ_RESOURCE_ACCESS_READ 1 +#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 +#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 +#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 struct i40e_aqc_request_resource { - __le16 resource_id; - __le16 access_type; - __le32 timeout; - __le32 resource_number; - u8 reserved[4]; + __le16 resource_id; + __le16 access_type; + __le32 timeout; + __le32 resource_number; + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); @@ -374,7 +374,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); */ struct i40e_aqc_list_capabilites { u8 command_flags; -#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 +#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 u8 pf_index; u8 
reserved[2]; __le32 count; @@ -385,123 +385,123 @@ struct i40e_aqc_list_capabilites { I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); struct i40e_aqc_list_capabilities_element_resp { - __le16 id; - u8 major_rev; - u8 minor_rev; - __le32 number; - __le32 logical_id; - __le32 phys_id; - u8 reserved[16]; + __le16 id; + u8 major_rev; + u8 minor_rev; + __le32 number; + __le32 logical_id; + __le32 phys_id; + u8 reserved[16]; }; /* list of caps */ -#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 -#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 -#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 -#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 -#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 -#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 -#define I40E_AQ_CAP_ID_SRIOV 0x0012 -#define I40E_AQ_CAP_ID_VF 0x0013 -#define I40E_AQ_CAP_ID_VMDQ 0x0014 -#define I40E_AQ_CAP_ID_8021QBG 0x0015 -#define I40E_AQ_CAP_ID_8021QBR 0x0016 -#define I40E_AQ_CAP_ID_VSI 0x0017 -#define I40E_AQ_CAP_ID_DCB 0x0018 -#define I40E_AQ_CAP_ID_FCOE 0x0021 -#define I40E_AQ_CAP_ID_RSS 0x0040 -#define I40E_AQ_CAP_ID_RXQ 0x0041 -#define I40E_AQ_CAP_ID_TXQ 0x0042 -#define I40E_AQ_CAP_ID_MSIX 0x0043 -#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 -#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 -#define I40E_AQ_CAP_ID_1588 0x0046 -#define I40E_AQ_CAP_ID_IWARP 0x0051 -#define I40E_AQ_CAP_ID_LED 0x0061 -#define I40E_AQ_CAP_ID_SDP 0x0062 -#define I40E_AQ_CAP_ID_MDIO 0x0063 -#define I40E_AQ_CAP_ID_FLEX10 0x00F1 -#define I40E_AQ_CAP_ID_CEM 0x00F2 +#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 +#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 +#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 +#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 +#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 +#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_SRIOV 0x0012 +#define I40E_AQ_CAP_ID_VF 0x0013 +#define I40E_AQ_CAP_ID_VMDQ 0x0014 +#define I40E_AQ_CAP_ID_8021QBG 0x0015 +#define I40E_AQ_CAP_ID_8021QBR 0x0016 +#define I40E_AQ_CAP_ID_VSI 0x0017 +#define I40E_AQ_CAP_ID_DCB 0x0018 +#define I40E_AQ_CAP_ID_FCOE 0x0021 +#define I40E_AQ_CAP_ID_RSS 0x0040 +#define I40E_AQ_CAP_ID_RXQ 0x0041 +#define I40E_AQ_CAP_ID_TXQ 0x0042 +#define I40E_AQ_CAP_ID_MSIX 0x0043 +#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 +#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 +#define I40E_AQ_CAP_ID_1588 0x0046 +#define I40E_AQ_CAP_ID_IWARP 0x0051 +#define I40E_AQ_CAP_ID_LED 0x0061 +#define I40E_AQ_CAP_ID_SDP 0x0062 +#define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_FLEX10 0x00F1 +#define I40E_AQ_CAP_ID_CEM 0x00F2 /* Set CPPM Configuration (direct 0x0103) */ struct i40e_aqc_cppm_configuration { - __le16 command_flags; -#define I40E_AQ_CPPM_EN_LTRC 0x0800 -#define I40E_AQ_CPPM_EN_DMCTH 0x1000 -#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 -#define I40E_AQ_CPPM_EN_HPTC 0x4000 -#define I40E_AQ_CPPM_EN_DMARC 0x8000 - __le16 ttlx; - __le32 dmacr; - __le16 dmcth; - u8 hptc; - u8 reserved; - __le32 pfltrc; + __le16 command_flags; +#define I40E_AQ_CPPM_EN_LTRC 0x0800 +#define I40E_AQ_CPPM_EN_DMCTH 0x1000 +#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 +#define I40E_AQ_CPPM_EN_HPTC 0x4000 +#define I40E_AQ_CPPM_EN_DMARC 0x8000 + __le16 ttlx; + __le32 dmacr; + __le16 dmcth; + u8 hptc; + u8 reserved; + __le32 pfltrc; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { - __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0008 -#define I40E_AQ_ARP_UNSUP_CTL 0x0010 -#define I40E_AQ_ARP_ENA 0x0020 -#define I40E_AQ_ARP_ADD_IPV4 0x0040 -#define I40E_AQ_ARP_DEL_IPV4 0x0080 - __le16 
table_id; - __le32 pfpm_proxyfc; - __le32 ip_addr; - u8 mac_addr[6]; + __le16 command_flags; +#define I40E_AQ_ARP_INIT_IPV4 0x0008 +#define I40E_AQ_ARP_UNSUP_CTL 0x0010 +#define I40E_AQ_ARP_ENA 0x0020 +#define I40E_AQ_ARP_ADD_IPV4 0x0040 +#define I40E_AQ_ARP_DEL_IPV4 0x0080 + __le16 table_id; + __le32 pfpm_proxyfc; + __le32 ip_addr; + u8 mac_addr[6]; }; /* Set NS Proxy Table Entry Command (indirect 0x0105) */ struct i40e_aqc_ns_proxy_data { - __le16 table_idx_mac_addr_0; - __le16 table_idx_mac_addr_1; - __le16 table_idx_ipv6_0; - __le16 table_idx_ipv6_1; - __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0100 -#define I40E_AQ_NS_PROXY_DEL_0 0x0200 -#define I40E_AQ_NS_PROXY_ADD_1 0x0400 -#define I40E_AQ_NS_PROXY_DEL_1 0x0800 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 - u8 mac_addr_0[6]; - u8 mac_addr_1[6]; - u8 local_mac_addr[6]; - u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ - u8 ipv6_addr_1[16]; + __le16 table_idx_mac_addr_0; + __le16 table_idx_mac_addr_1; + __le16 table_idx_ipv6_0; + __le16 table_idx_ipv6_1; + __le16 control; +#define I40E_AQ_NS_PROXY_ADD_0 0x0100 +#define I40E_AQ_NS_PROXY_DEL_0 0x0200 +#define I40E_AQ_NS_PROXY_ADD_1 0x0400 +#define I40E_AQ_NS_PROXY_DEL_1 0x0800 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 + u8 mac_addr_0[6]; + u8 mac_addr_1[6]; + u8 local_mac_addr[6]; + u8 ipv6_addr_0[16]; /* Warning! 
spec specifies BE byte order */ + u8 ipv6_addr_1[16]; }; /* Manage LAA Command (0x0106) - obsolete */ struct i40e_aqc_mng_laa { __le16 command_flags; -#define I40E_AQ_LAA_FLAG_WR 0x8000 - u8 reserved[2]; - __le32 sal; - __le16 sah; - u8 reserved2[6]; +#define I40E_AQ_LAA_FLAG_WR 0x8000 + u8 reserved[2]; + __le32 sal; + __le16 sah; + u8 reserved2[6]; }; /* Manage MAC Address Read Command (indirect 0x0107) */ struct i40e_aqc_mac_address_read { __le16 command_flags; -#define I40E_AQC_LAN_ADDR_VALID 0x10 -#define I40E_AQC_SAN_ADDR_VALID 0x20 -#define I40E_AQC_PORT_ADDR_VALID 0x40 -#define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; +#define I40E_AQC_LAN_ADDR_VALID 0x10 +#define I40E_AQC_SAN_ADDR_VALID 0x20 +#define I40E_AQC_PORT_ADDR_VALID 0x40 +#define I40E_AQC_WOL_ADDR_VALID 0x80 +#define I40E_AQC_ADDR_VALID_MASK 0xf0 + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); @@ -517,14 +517,14 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); /* Manage MAC Address Write Command (0x0108) */ struct i40e_aqc_mac_address_write { - __le16 command_flags; -#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 -#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 -#define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 - __le16 mac_sah; - __le32 mac_sal; - u8 reserved[8]; + __le16 command_flags; +#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 +#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 +#define I40E_AQC_WRITE_TYPE_PORT 0x8000 +#define I40E_AQC_WRITE_TYPE_MASK 0xc000 + __le16 mac_sah; + __le32 mac_sal; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); @@ -545,10 +545,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); * command */ struct i40e_aqc_switch_seid { - __le16 seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; + __le16 seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); @@ -557,34 +557,34 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_switch_config_header_resp { - __le16 num_reported; - __le16 num_total; - u8 reserved[12]; + __le16 num_reported; + __le16 num_total; + u8 reserved[12]; }; struct i40e_aqc_switch_config_element_resp { - u8 element_type; -#define I40E_AQ_SW_ELEM_TYPE_MAC 1 -#define I40E_AQ_SW_ELEM_TYPE_PF 2 -#define I40E_AQ_SW_ELEM_TYPE_VF 3 -#define I40E_AQ_SW_ELEM_TYPE_EMP 4 -#define I40E_AQ_SW_ELEM_TYPE_BMC 5 -#define I40E_AQ_SW_ELEM_TYPE_PV 16 -#define I40E_AQ_SW_ELEM_TYPE_VEB 17 -#define I40E_AQ_SW_ELEM_TYPE_PA 18 -#define I40E_AQ_SW_ELEM_TYPE_VSI 19 - u8 revision; -#define I40E_AQ_SW_ELEM_REV_1 1 - __le16 seid; - __le16 uplink_seid; - __le16 downlink_seid; - u8 reserved[3]; - u8 connection_type; -#define I40E_AQ_CONN_TYPE_REGULAR 0x1 -#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_CONN_TYPE_CASCADED 0x3 - __le16 scheduler_id; - __le16 element_info; + u8 element_type; +#define I40E_AQ_SW_ELEM_TYPE_MAC 1 +#define I40E_AQ_SW_ELEM_TYPE_PF 2 +#define I40E_AQ_SW_ELEM_TYPE_VF 3 +#define I40E_AQ_SW_ELEM_TYPE_EMP 4 +#define I40E_AQ_SW_ELEM_TYPE_BMC 5 +#define I40E_AQ_SW_ELEM_TYPE_PV 16 +#define I40E_AQ_SW_ELEM_TYPE_VEB 17 +#define I40E_AQ_SW_ELEM_TYPE_PA 18 +#define I40E_AQ_SW_ELEM_TYPE_VSI 19 + u8 revision; +#define I40E_AQ_SW_ELEM_REV_1 1 + __le16 seid; + __le16 uplink_seid; + __le16 downlink_seid; + u8 reserved[3]; + u8 connection_type; +#define 
I40E_AQ_CONN_TYPE_REGULAR 0x1 +#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_CONN_TYPE_CASCADED 0x3 + __le16 scheduler_id; + __le16 element_info; }; /* Get Switch Configuration (indirect 0x0200) @@ -592,73 +592,73 @@ struct i40e_aqc_switch_config_element_resp { * the first in the array is the header, remainder are elements */ struct i40e_aqc_get_switch_config_resp { - struct i40e_aqc_get_switch_config_header_resp header; - struct i40e_aqc_switch_config_element_resp element[1]; + struct i40e_aqc_get_switch_config_header_resp header; + struct i40e_aqc_switch_config_element_resp element[1]; }; /* Add Statistics (direct 0x0201) * Remove Statistics (direct 0x0202) */ struct i40e_aqc_add_remove_statistics { - __le16 seid; - __le16 vlan; - __le16 stat_index; - u8 reserved[10]; + __le16 seid; + __le16 vlan; + __le16 stat_index; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); /* Set Port Parameters command (direct 0x0203) */ struct i40e_aqc_set_port_parameters { - __le16 command_flags; -#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 -#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ -#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 - __le16 bad_frame_vsi; - __le16 default_seid; /* reserved for command */ - u8 reserved[10]; + __le16 command_flags; +#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 +#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ +#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 + __le16 bad_frame_vsi; + __le16 default_seid; /* reserved for command */ + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); /* Get Switch Resource Allocation (indirect 0x0204) */ struct i40e_aqc_get_switch_resource_alloc { - u8 num_entries; /* reserved for command */ - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; + u8 num_entries; /* reserved for command */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); /* expect an array of these structs in the response buffer */ struct i40e_aqc_switch_resource_alloc_element_resp { - u8 resource_type; -#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 -#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 -#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 -#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 -#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 -#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 -#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 -#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 -#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 -#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 -#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA -#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB -#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC -#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD -#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF -#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 -#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 -#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 -#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 - u8 reserved1; - __le16 guaranteed; - __le16 total; - __le16 used; - __le16 total_unalloced; - u8 reserved2[6]; + u8 resource_type; +#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 +#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 +#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 +#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 +#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 +#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 +#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 +#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 +#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 +#define 
I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 +#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA +#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB +#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC +#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD +#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF +#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 +#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 +#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 +#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 + u8 reserved1; + __le16 guaranteed; + __le16 total; + __le16 used; + __le16 total_unalloced; + u8 reserved2[6]; }; /* Add VSI (indirect 0x0210) @@ -672,24 +672,24 @@ struct i40e_aqc_switch_resource_alloc_element_resp { * uses the same completion and data structure as Add VSI */ struct i40e_aqc_add_get_update_vsi { - __le16 uplink_seid; - u8 connection_type; -#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 -#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 - u8 reserved1; - u8 vf_id; - u8 reserved2; - __le16 vsi_flags; -#define I40E_AQ_VSI_TYPE_SHIFT 0x0 -#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) -#define I40E_AQ_VSI_TYPE_VF 0x0 -#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 -#define I40E_AQ_VSI_TYPE_PF 0x2 -#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 -#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 - __le32 addr_high; - __le32 addr_low; + __le16 uplink_seid; + u8 connection_type; +#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 +#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 + u8 reserved1; + u8 vf_id; + u8 reserved2; + __le16 vsi_flags; +#define I40E_AQ_VSI_TYPE_SHIFT 0x0 +#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) +#define I40E_AQ_VSI_TYPE_VF 0x0 +#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 +#define I40E_AQ_VSI_TYPE_PF 0x2 +#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 +#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); @@ -707,121 +707,121 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); struct i40e_aqc_vsi_properties_data { /* first 96 byte are written by SW */ - __le16 valid_sections; -#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 -#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 -#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 -#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 -#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 -#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 -#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 -#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 -#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 -#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 /* switch section */ - __le16 switch_id; /* 12bit id combined with flags below */ -#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 -#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) -#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 -#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 -#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 - u8 sw_reserved[2]; + __le16 switch_id; /* 12bit id combined with flags below */ +#define 
I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; /* security section */ - u8 sec_flags; -#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 - u8 sec_reserved; + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; /* VLAN section */ - __le16 pvid; /* VLANS include priority bits */ - __le16 fcoe_pvid; - u8 port_vlan_flags; -#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 -#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ - I40E_AQ_VSI_PVLAN_MODE_SHIFT) -#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 -#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 -#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 -#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 -#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 -#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ - I40E_AQ_VSI_PVLAN_EMOD_SHIFT) -#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 -#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 -#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 -#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 - u8 pvlan_reserved[3]; + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; /* ingress egress up sections */ - __le32 ingress_table; /* bitmap, 3 bits per up */ -#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 -#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 -#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 -#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 -#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 -#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 -#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 -#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 -#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) - __le32 egress_table; /* same defines as for ingress table */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) 
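The UP0/UP1 shift and mask macros above (continued for UP2-UP7 below) encode the ingress/egress tables as one 3-bit traffic class per user priority, with UPn at bit offset 3*n. A minimal sketch of packing such a table, assuming a caller-supplied up2tc[] map; the helper name build_vsi_up_table() is hypothetical, not from the driver:

/* Pack a user-priority -> traffic-class map into the 3-bits-per-UP
 * layout of ingress_table/egress_table: UPn lives at shift 3*n,
 * matching I40E_AQ_VSI_UP_TABLE_UPn_SHIFT (0, 3, ..., 21). */
static u32 build_vsi_up_table(const u8 up2tc[8])
{
	u32 table = 0;
	int up;

	for (up = 0; up < 8; up++)
		table |= (u32)(up2tc[up] & 0x7) << (up * 3);
	return table;
}
/* the field is little-endian on the wire, e.g.:
 *	info->ingress_table = cpu_to_le32(build_vsi_up_table(up2tc));
 */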
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 +#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ /* cascaded PV section */ - __le16 cas_pv_tag; - u8 cas_pv_flags; -#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ - I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) -#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 -#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 -#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 -#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 -#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 - u8 cas_pv_reserved; + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; /* queue mapping section */ - __le16 mapping_flags; -#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 -#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 - __le16 queue_mapping[16]; -#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 -#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) - __le16 tc_mapping[8]; -#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 -#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ - I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) -#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 -#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ - I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ - u8 queueing_opt_flags; -#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 -#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 - u8 queueing_opt_reserved[3]; + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 + u8 queueing_opt_reserved[3]; /* scheduler section */ - u8 up_enable_bits; - u8 sched_reserved; + u8 up_enable_bits; + u8 sched_reserved; /* outer up section */ - __le32 outer_up_table; /* same structure and defines as ingress table */ - u8 cmd_reserved[8]; + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + u8 cmd_reserved[8]; /* last 32 bytes are written by FW */ - __le16 qs_handle[8]; + __le16 qs_handle[8]; #define 
I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF - __le16 stat_counter_idx; - __le16 sched_id; - u8 resp_reserved[12]; + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; }; I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); @@ -831,26 +831,26 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); * (IS_CTRL_PORT only works on add PV) */ struct i40e_aqc_add_update_pv { - __le16 command_flags; -#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 -#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 - __le16 uplink_seid; - __le16 connected_seid; - u8 reserved[10]; + __le16 command_flags; +#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 +#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 + __le16 uplink_seid; + __le16 connected_seid; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); struct i40e_aqc_add_update_pv_completion { /* reserved for update; for add also encodes error if rc == ENOSPC */ - __le16 pv_seid; -#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 -#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 - u8 reserved[14]; + __le16 pv_seid; +#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 +#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); @@ -860,48 +860,48 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); */ struct i40e_aqc_get_pv_params_completion { - __le16 seid; - __le16 default_stag; - __le16 pv_flags; /* same flags as add_pv */ -#define I40E_AQC_GET_PV_PV_TYPE 0x1 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 - u8 reserved[8]; - __le16 default_port_seid; + __le16 seid; + __le16 default_stag; + __le16 pv_flags; /* same flags as add_pv */ +#define I40E_AQC_GET_PV_PV_TYPE 0x1 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 + u8 reserved[8]; + __le16 default_port_seid; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); /* Add VEB (direct 0x0230) */ struct i40e_aqc_add_veb { - __le16 uplink_seid; - __le16 downlink_seid; - __le16 veb_flags; -#define I40E_AQC_ADD_VEB_FLOATING 0x1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ + __le16 uplink_seid; + __le16 downlink_seid; + __le16 veb_flags; +#define I40E_AQC_ADD_VEB_FLOATING 0x1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) -#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 -#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 - u8 enable_tcs; - u8 reserved[9]; +#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 +#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 + u8 enable_tcs; + u8 reserved[9]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); struct i40e_aqc_add_veb_completion { - u8 reserved[6]; - __le16 switch_seid; + u8 reserved[6]; + __le16 switch_seid; /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ - __le16 veb_seid; -#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 -#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 - 
__le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; + __le16 veb_seid; +#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 +#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); @@ -910,13 +910,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_veb_parameters_completion { - __le16 seid; - __le16 switch_id; - __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ - __le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; - u8 reserved[4]; + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); @@ -929,37 +929,37 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); /* used for the command for most vlan commands */ struct i40e_aqc_macvlan { - __le16 num_addresses; - __le16 seid[3]; -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 num_addresses; + __le16 seid[3]; +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) -#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 - __le32 addr_high; - __le32 addr_low; +#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); /* indirect data for command and response */ struct i40e_aqc_add_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - __le16 flags; -#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 -#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 -#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 -#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 - __le16 queue_number; -#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ + u8 mac_addr[6]; + __le16 vlan_tag; + __le16 flags; +#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 +#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 +#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 +#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 + __le16 queue_number; +#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) /* response section */ - u8 match_method; -#define I40E_AQC_MM_PERFECT_MATCH 0x01 -#define I40E_AQC_MM_HASH_MATCH 0x02 -#define I40E_AQC_MM_ERR_NO_RES 0xFF - u8 reserved1[3]; + u8 match_method; +#define I40E_AQC_MM_PERFECT_MATCH 0x01 +#define I40E_AQC_MM_HASH_MATCH 0x02 +#define I40E_AQC_MM_ERR_NO_RES 0xFF + u8 reserved1[3]; }; struct i40e_aqc_add_remove_macvlan_completion { @@ -979,19 +979,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); */ struct i40e_aqc_remove_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - u8 flags; -#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 -#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 -#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 -#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 - u8 reserved[3]; + u8 mac_addr[6]; + __le16 vlan_tag; + u8 flags; +#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 +#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 +#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 +#define 
I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 + u8 reserved[3]; /* reply section */ - u8 error_code; -#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF - u8 reply_reserved[3]; + u8 error_code; +#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF + u8 reply_reserved[3]; }; /* Add VLAN (indirect 0x0252) @@ -999,59 +999,58 @@ struct i40e_aqc_remove_macvlan_element_data { * use the generic i40e_aqc_macvlan for the command */ struct i40e_aqc_add_remove_vlan_element_data { - __le16 vlan_tag; - u8 vlan_flags; + __le16 vlan_tag; + u8 vlan_flags; /* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_LOCAL 0x1 -#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 -#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \ - I40E_AQC_ADD_PVLAN_TYPE_SHIFT) -#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 -#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 -#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 -#define I40E_AQC_VLAN_PTYPE_SHIFT 3 -#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) -#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 -#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 -#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 -#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 +#define I40E_AQC_ADD_VLAN_LOCAL 0x1 +#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 +#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) +#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 +#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 +#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 +#define I40E_AQC_VLAN_PTYPE_SHIFT 3 +#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) +#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 +#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 +#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 +#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 /* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_ALL 0x1 - u8 reserved; - u8 result; +#define I40E_AQC_REMOVE_VLAN_ALL 0x1 + u8 reserved; + u8 result; /* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 -#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE -#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF +#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 +#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE +#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF /* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF - u8 reserved1[3]; +#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF + u8 reserved1[3]; }; struct i40e_aqc_add_remove_vlan_completion { - u8 reserved[4]; - __le16 vlans_used; - __le16 vlans_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[4]; + __le16 vlans_used; + __le16 vlans_free; + __le32 addr_high; + __le32 addr_low; }; /* Set VSI Promiscuous Modes (direct 0x0254) */ struct i40e_aqc_set_vsi_promiscuous_modes { - __le16 promiscuous_flags; - __le16 valid_flags; + __le16 promiscuous_flags; + __le16 valid_flags; /* flags used for both fields above */ -#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 -#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 -#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 -#define I40E_AQC_SET_VSI_DEFAULT 0x08 -#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 - __le16 seid; -#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF - __le16 vlan_tag; -#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 - u8 reserved[8]; +#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 +#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 +#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 +#define I40E_AQC_SET_VSI_DEFAULT 0x08 +#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 
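The promiscuous_flags/valid_flags pair in i40e_aqc_set_vsi_promiscuous_modes behaves as a value/mask set: valid_flags names the mode bits the command touches, promiscuous_flags carries their new values, and unnamed modes are left as they were. A hedged sketch of filling the descriptor this way (vsi_seid is a placeholder; this follows from the shared flag definitions, not a verbatim driver excerpt):

struct i40e_aqc_set_vsi_promiscuous_modes cmd = {};

/* touch only the unicast and multicast promiscuous bits... */
cmd.valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST |
			      I40E_AQC_SET_VSI_PROMISC_MULTICAST);
/* ...turning unicast promiscuous on and multicast promiscuous off */
cmd.promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
cmd.seid = cpu_to_le16(vsi_seid & I40E_AQC_VSI_PROM_CMD_SEID_MASK);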
+ __le16 seid; +#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF + __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); @@ -1060,23 +1059,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); * Uses generic i40e_aqc_add_remove_tag_completion for completion */ struct i40e_aqc_add_tag { - __le16 flags; -#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 - __le16 seid; -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 flags; +#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 + __le16 seid; +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - __le16 queue_number; - u8 reserved[8]; + __le16 tag; + __le16 queue_number; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); struct i40e_aqc_add_remove_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); @@ -1085,12 +1084,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); * Uses generic i40e_aqc_add_remove_tag_completion for completion */ struct i40e_aqc_remove_tag { - __le16 seid; -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 seid; +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - u8 reserved[12]; + __le16 tag; + u8 reserved[12]; }; /* Add multicast E-Tag (direct 0x0257) @@ -1098,22 +1097,22 @@ struct i40e_aqc_remove_tag { * and no external data */ struct i40e_aqc_add_remove_mcast_etag { - __le16 pv_seid; - __le16 etag; - u8 num_unicast_etags; - u8 reserved[3]; - __le32 addr_high; /* address of array of 2-byte s-tags */ - __le32 addr_low; + __le16 pv_seid; + __le16 etag; + u8 num_unicast_etags; + u8 reserved[3]; + __le32 addr_high; /* address of array of 2-byte s-tags */ + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); struct i40e_aqc_add_remove_mcast_etag_completion { - u8 reserved[4]; - __le16 mcast_etags_used; - __le16 mcast_etags_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[4]; + __le16 mcast_etags_used; + __le16 mcast_etags_free; + __le32 addr_high; + __le32 addr_low; }; @@ -1121,21 +1120,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); /* Update S/E-Tag (direct 0x0259) */ struct i40e_aqc_update_tag { - __le16 seid; -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 seid; +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) - __le16 old_tag; - __le16 new_tag; - u8 reserved[10]; + __le16 old_tag; + __le16 new_tag; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); struct i40e_aqc_update_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); @@ -1146,30 +1145,30 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); * and the generic direct completion structure */ struct i40e_aqc_add_remove_control_packet_filter { - u8 mac[6]; - __le16 etype; - __le16 flags; -#define 
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 - __le16 seid; -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ + u8 mac[6]; + __le16 etype; + __le16 flags; +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 + __le16 seid; +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) - __le16 queue; - u8 reserved[2]; + __le16 queue; + u8 reserved[2]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); struct i40e_aqc_add_remove_control_packet_filter_completion { - __le16 mac_etype_used; - __le16 etype_used; - __le16 mac_etype_free; - __le16 etype_free; - u8 reserved[8]; + __le16 mac_etype_used; + __le16 etype_used; + __le16 mac_etype_free; + __le16 etype_free; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); @@ -1180,23 +1179,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); * and the generic indirect completion structure */ struct i40e_aqc_add_remove_cloud_filters { - u8 num_filters; - u8 reserved; - __le16 seid; -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ + u8 num_filters; + u8 reserved; + __le16 seid; +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); struct i40e_aqc_add_remove_cloud_filters_element_data { - u8 outer_mac[6]; - u8 inner_mac[6]; - __le16 inner_vlan; + u8 outer_mac[6]; + u8 inner_mac[6]; + __le16 inner_vlan; union { struct { u8 reserved[12]; @@ -1206,52 +1205,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { u8 data[16]; } v6; } ipaddr; - __le16 flags; -#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ + __le16 flags; +#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ I40E_AQC_ADD_CLOUD_FILTER_SHIFT) -#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007 /* 0x0000 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 /* 0x0002 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 /* 0x0005 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 /* 0x0007 reserved */ /* 0x0008 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A -#define 
I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B -#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C - -#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 -#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 -#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 - -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 - - __le32 tenant_id; - u8 reserved[4]; - __le16 queue_number; -#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ - I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) - u8 reserved2[14]; +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C + +#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 +#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 +#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 + + __le32 tenant_id; + u8 reserved[4]; + __le16 queue_number; +#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) + u8 reserved2[14]; /* response section */ - u8 allocation_result; -#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 -#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF - u8 response_reserved[7]; + u8 allocation_result; +#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 +#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF + u8 response_reserved[7]; }; struct i40e_aqc_remove_cloud_filters_completion { @@ -1273,14 +1269,14 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); struct i40e_aqc_add_delete_mirror_rule { __le16 seid; __le16 rule_type; -#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 -#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ +#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 +#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ I40E_AQC_MIRROR_RULE_TYPE_SHIFT) -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 -#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 +#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 __le16 num_entries; __le16 destination; /* VSI for add, rule id for delete */ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ @@ -1290,12 +1286,12 @@ struct i40e_aqc_add_delete_mirror_rule { I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); struct i40e_aqc_add_delete_mirror_rule_completion { - u8 reserved[2]; - __le16 rule_id; /* only used on add */ - __le16 mirror_rules_used; - __le16 mirror_rules_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[2]; + __le16 rule_id; /* only used on add */ + __le16 mirror_rules_used; + __le16 mirror_rules_free; + __le32 addr_high; + __le32 
addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); @@ -1306,11 +1302,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); * the command and response use the same descriptor structure */ struct i40e_aqc_pfc_ignore { - u8 tc_bitmap; - u8 command_flags; /* unused on response */ -#define I40E_AQC_PFC_IGNORE_SET 0x80 -#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 - u8 reserved[14]; + u8 tc_bitmap; + u8 command_flags; /* unused on response */ +#define I40E_AQC_PFC_IGNORE_SET 0x80 +#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); @@ -1325,10 +1321,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); * this generic struct to pass the SEID in param0 */ struct i40e_aqc_tx_sched_ind { - __le16 vsi_seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; + __le16 vsi_seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); @@ -1340,12 +1336,12 @@ struct i40e_aqc_qs_handles_resp { /* Configure VSI BW limits (direct 0x0400) */ struct i40e_aqc_configure_vsi_bw_limit { - __le16 vsi_seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_credit; /* 0-3, limit = 2^max */ - u8 reserved2[7]; + __le16 vsi_seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_credit; /* 0-3, limit = 2^max */ + u8 reserved2[7]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); @@ -1354,58 +1350,58 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); * responds with i40e_aqc_qs_handles_resp */ struct i40e_aqc_configure_vsi_ets_sla_bw_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credits[8]; /* FW writes back QS handles here */ + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credits[8]; /* FW writes back QS handles here */ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; + __le16 tc_bw_max[2]; + u8 reserved1[28]; }; /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) * responds with i40e_aqc_qs_handles_resp */ struct i40e_aqc_configure_vsi_tc_bw_data { - u8 tc_valid_bits; - u8 reserved[3]; - u8 tc_bw_credits[8]; - u8 reserved1[4]; - __le16 qs_handles[8]; + u8 tc_valid_bits; + u8 reserved[3]; + u8 tc_bw_credits[8]; + u8 reserved1[4]; + __le16 qs_handles[8]; }; /* Query vsi bw configuration (indirect 0x0408) */ struct i40e_aqc_query_vsi_bw_config_resp { - u8 tc_valid_bits; - u8 tc_suspended_bits; - u8 reserved[14]; - __le16 qs_handles[8]; - u8 reserved1[4]; - __le16 port_bw_limit; - u8 reserved2[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved3[23]; + u8 tc_valid_bits; + u8 tc_suspended_bits; + u8 reserved[14]; + __le16 qs_handles[8]; + u8 reserved1[4]; + __le16 port_bw_limit; + u8 reserved2[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved3[23]; }; /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ struct i40e_aqc_query_vsi_ets_sla_config_resp { - u8 tc_valid_bits; - u8 reserved[3]; - u8 share_credits[8]; - __le16 credits[8]; + u8 tc_valid_bits; + u8 reserved[3]; + u8 share_credits[8]; + __le16 credits[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; + __le16 tc_bw_max[2]; }; /* Configure Switching Component Bandwidth Limit (direct 0x0410) */ struct i40e_aqc_configure_switching_comp_bw_limit { - __le16 seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved2[7]; + __le16 seid; + u8 reserved[2]; + __le16 credit; + u8 
reserved1[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved2[7]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); @@ -1415,75 +1411,75 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); * Disable Physical Port ETS (indirect 0x0415) */ struct i40e_aqc_configure_switching_comp_ets_data { - u8 reserved[4]; - u8 tc_valid_bits; - u8 seepage; -#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 - u8 tc_strict_priority_flags; - u8 reserved1[17]; - u8 tc_bw_share_credits[8]; - u8 reserved2[96]; + u8 reserved[4]; + u8 tc_valid_bits; + u8 seepage; +#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 + u8 tc_strict_priority_flags; + u8 reserved1[17]; + u8 tc_bw_share_credits[8]; + u8 reserved2[96]; }; /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credit[8]; + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credit[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; + __le16 tc_bw_max[2]; + u8 reserved1[28]; }; /* Configure Switching Component Bandwidth Allocation per Tc * (indirect 0x0417) */ struct i40e_aqc_configure_switching_comp_bw_config_data { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits; /* bool */ - u8 tc_bw_share_credits[8]; - u8 reserved1[20]; + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits; /* bool */ + u8 tc_bw_share_credits[8]; + u8 reserved1[20]; }; /* Query Switching Component Configuration (indirect 0x0418) */ struct i40e_aqc_query_switching_comp_ets_config_resp { - u8 tc_valid_bits; - u8 reserved[35]; - __le16 port_bw_limit; - u8 reserved1[2]; - u8 tc_bw_max; /* 0-3, limit = 2^max */ - u8 reserved2[23]; + u8 tc_valid_bits; + u8 reserved[35]; + __le16 port_bw_limit; + u8 reserved1[2]; + u8 tc_bw_max; /* 0-3, limit = 2^max */ + u8 reserved2[23]; }; /* Query PhysicalPort ETS Configuration (indirect 0x0419) */ struct i40e_aqc_query_port_ets_config_resp { - u8 reserved[4]; - u8 tc_valid_bits; - u8 reserved1; - u8 tc_strict_priority_bits; - u8 reserved2; - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; + u8 reserved[4]; + u8 tc_valid_bits; + u8 reserved1; + u8 tc_strict_priority_bits; + u8 reserved2; + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved3[32]; + __le16 tc_bw_max[2]; + u8 reserved3[32]; }; /* Query Switching Component Bandwidth Allocation per Traffic Type * (indirect 0x041A) */ struct i40e_aqc_query_switching_comp_bw_config_resp { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits_enable; /* bool */ - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits_enable; /* bool */ + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; + __le16 tc_bw_max[2]; }; /* Suspend/resume port TX traffic @@ -1494,37 +1490,37 @@ struct i40e_aqc_query_switching_comp_bw_config_resp { * (indirect 0x041D) */ struct i40e_aqc_configure_partition_bw_data { - __le16 pf_valid_bits; - u8 min_bw[16]; /* guaranteed bandwidth */ - u8 max_bw[16]; /* bandwidth limit */ + __le16 pf_valid_bits; + u8 min_bw[16]; /* guaranteed bandwidth */ + u8 max_bw[16]; /* bandwidth limit */ }; /* Get and set the active HMC resource profile and status. 
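 * pm_profile takes a value from enum i40e_aq_hmc_profile below; the
 * same 16-byte payload is used by both the get and set forms.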
* (direct 0x0500) and (direct 0x0501) */ struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); enum i40e_aq_hmc_profile { /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, }; -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ -#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 -#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 +#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 +#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 enum i40e_aq_phy_type { I40E_PHY_TYPE_SGMII = 0x0, @@ -1582,147 +1578,147 @@ struct i40e_aqc_module_desc { }; struct i40e_aq_get_phy_abilities_resp { - __le32 phy_type; /* bitmap using the above enum for offsets */ - u8 link_speed; /* bitmap using the above enum bit patterns */ - u8 abilities; -#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 -#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 -#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 -#define I40E_AQ_PHY_LINK_ENABLED 0x08 -#define I40E_AQ_PHY_AN_ENABLED 0x10 -#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 - __le16 eee_capability; -#define I40E_AQ_EEE_100BASE_TX 0x0002 -#define I40E_AQ_EEE_1000BASE_T 0x0004 -#define I40E_AQ_EEE_10GBASE_T 0x0008 -#define I40E_AQ_EEE_1000BASE_KX 0x0010 -#define I40E_AQ_EEE_10GBASE_KX4 0x0020 -#define I40E_AQ_EEE_10GBASE_KR 0x0040 - __le32 eeer_val; - u8 d3_lpan; -#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 - u8 reserved[3]; - u8 phy_id[4]; - u8 module_type[3]; - u8 qualified_module_count; -#define I40E_AQ_PHY_MAX_QMS 16 - struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; + __le32 phy_type; /* bitmap using the above enum for offsets */ + u8 link_speed; /* bitmap using the above enum bit patterns */ + u8 abilities; +#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 +#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 +#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 +#define I40E_AQ_PHY_LINK_ENABLED 0x08 +#define I40E_AQ_PHY_AN_ENABLED 0x10 +#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 + __le16 eee_capability; +#define I40E_AQ_EEE_100BASE_TX 0x0002 +#define I40E_AQ_EEE_1000BASE_T 0x0004 +#define I40E_AQ_EEE_10GBASE_T 0x0008 +#define I40E_AQ_EEE_1000BASE_KX 0x0010 +#define I40E_AQ_EEE_10GBASE_KX4 0x0020 +#define I40E_AQ_EEE_10GBASE_KR 0x0040 + __le32 eeer_val; + u8 d3_lpan; +#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 + u8 reserved[3]; + u8 phy_id[4]; + u8 module_type[3]; + u8 qualified_module_count; +#define I40E_AQ_PHY_MAX_QMS 16 + struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; }; /* Set PHY Config (direct 0x0601) */ struct i40e_aq_set_phy_config { /* same bits as above in all */ - __le32 phy_type; - u8 link_speed; - u8 abilities; + __le32 phy_type; + u8 link_speed; + u8 abilities; /* bits 0-2 use the values from get_phy_abilities_resp */ #define I40E_AQ_PHY_ENABLE_LINK 0x08 #define I40E_AQ_PHY_ENABLE_AN 0x10 #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 - __le16 eee_capability; - __le32 eeer; - u8 low_power_ctrl; - u8 reserved[3]; + __le16 eee_capability; + 
__le32 eeer; + u8 low_power_ctrl; + u8 reserved[3]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); /* Set MAC Config command data structure (direct 0x0603) */ struct i40e_aq_set_mac_config { - __le16 max_frame_size; - u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 -#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 - u8 tx_timer_priority; /* bitmap */ - __le16 tx_timer_value; - __le16 fc_refresh_threshold; - u8 reserved[8]; + __le16 max_frame_size; + u8 params; +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 + u8 tx_timer_priority; /* bitmap */ + __le16 tx_timer_value; + __le16 fc_refresh_threshold; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); /* Restart Auto-Negotiation (direct 0x605) */ struct i40e_aqc_set_link_restart_an { - u8 command; -#define I40E_AQ_PHY_RESTART_AN 0x02 -#define I40E_AQ_PHY_LINK_ENABLE 0x04 - u8 reserved[15]; + u8 command; +#define I40E_AQ_PHY_RESTART_AN 0x02 +#define I40E_AQ_PHY_LINK_ENABLE 0x04 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); /* Get Link Status cmd & response data structure (direct 0x0607) */ struct i40e_aqc_get_link_status { - __le16 command_flags; /* only field set on command */ -#define I40E_AQ_LSE_MASK 0x3 -#define I40E_AQ_LSE_NOP 0x0 -#define I40E_AQ_LSE_DISABLE 0x2 -#define I40E_AQ_LSE_ENABLE 0x3 + __le16 command_flags; /* only field set on command */ +#define I40E_AQ_LSE_MASK 0x3 +#define I40E_AQ_LSE_NOP 0x0 +#define I40E_AQ_LSE_DISABLE 0x2 +#define I40E_AQ_LSE_ENABLE 0x3 /* only response uses this flag */ -#define I40E_AQ_LSE_IS_ENABLED 0x1 - u8 phy_type; /* i40e_aq_phy_type */ - u8 link_speed; /* i40e_aq_link_speed */ - u8 link_info; -#define I40E_AQ_LINK_UP 0x01 -#define I40E_AQ_LINK_FAULT 0x02 -#define I40E_AQ_LINK_FAULT_TX 0x04 -#define I40E_AQ_LINK_FAULT_RX 0x08 -#define I40E_AQ_LINK_FAULT_REMOTE 0x10 -#define I40E_AQ_MEDIA_AVAILABLE 0x40 -#define I40E_AQ_SIGNAL_DETECT 0x80 - u8 an_info; -#define I40E_AQ_AN_COMPLETED 0x01 -#define I40E_AQ_LP_AN_ABILITY 0x02 -#define I40E_AQ_PD_FAULT 0x04 -#define I40E_AQ_FEC_EN 0x08 -#define I40E_AQ_PHY_LOW_POWER 0x10 -#define I40E_AQ_LINK_PAUSE_TX 0x20 -#define I40E_AQ_LINK_PAUSE_RX 0x40 -#define I40E_AQ_QUALIFIED_MODULE 0x80 - u8 ext_info; -#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 -#define 
I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 -#define I40E_AQ_LINK_TX_SHIFT 0x02 -#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) -#define I40E_AQ_LINK_TX_ACTIVE 0x00 -#define I40E_AQ_LINK_TX_DRAINED 0x01 -#define I40E_AQ_LINK_TX_FLUSHED 0x03 -#define I40E_AQ_LINK_FORCED_40G 0x10 - u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ - __le16 max_frame_size; - u8 config; -#define I40E_AQ_CONFIG_CRC_ENA 0x04 -#define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 reserved[5]; +#define I40E_AQ_LSE_IS_ENABLED 0x1 + u8 phy_type; /* i40e_aq_phy_type */ + u8 link_speed; /* i40e_aq_link_speed */ + u8 link_info; +#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_FAULT 0x02 +#define I40E_AQ_LINK_FAULT_TX 0x04 +#define I40E_AQ_LINK_FAULT_RX 0x08 +#define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_MEDIA_AVAILABLE 0x40 +#define I40E_AQ_SIGNAL_DETECT 0x80 + u8 an_info; +#define I40E_AQ_AN_COMPLETED 0x01 +#define I40E_AQ_LP_AN_ABILITY 0x02 +#define I40E_AQ_PD_FAULT 0x04 +#define I40E_AQ_FEC_EN 0x08 +#define I40E_AQ_PHY_LOW_POWER 0x10 +#define I40E_AQ_LINK_PAUSE_TX 0x20 +#define I40E_AQ_LINK_PAUSE_RX 0x40 +#define I40E_AQ_QUALIFIED_MODULE 0x80 + u8 ext_info; +#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 +#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 +#define I40E_AQ_LINK_TX_SHIFT 0x02 +#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) +#define I40E_AQ_LINK_TX_ACTIVE 0x00 +#define I40E_AQ_LINK_TX_DRAINED 0x01 +#define I40E_AQ_LINK_TX_FLUSHED 0x03 +#define I40E_AQ_LINK_FORCED_40G 0x10 + u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ + __le16 max_frame_size; + u8 config; +#define I40E_AQ_CONFIG_CRC_ENA 0x04 +#define I40E_AQ_CONFIG_PACING_MASK 0x78 + u8 reserved[5]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); /* Set event mask command (direct 0x613) */ struct i40e_aqc_set_phy_int_mask { - u8 reserved[8]; - __le16 event_mask; -#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 -#define I40E_AQ_EVENT_MEDIA_NA 0x0004 -#define I40E_AQ_EVENT_LINK_FAULT 0x0008 -#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 -#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 -#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 -#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 -#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 -#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 - u8 reserved1[6]; + u8 reserved[8]; + __le16 event_mask; +#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 +#define I40E_AQ_EVENT_MEDIA_NA 0x0004 +#define I40E_AQ_EVENT_LINK_FAULT 0x0008 +#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 +#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 +#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 +#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 +#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 +#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 + u8 reserved1[6]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); @@ -1732,27 +1728,27 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); * Get Link Partner AN advt register (direct 0x0616) */ struct i40e_aqc_an_advt_reg { - __le32 local_an_reg0; - __le16 local_an_reg1; - u8 reserved[10]; + __le32 local_an_reg0; + __le16 local_an_reg1; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); /* Set Loopback mode (0x0618) */ struct i40e_aqc_set_lb_mode { - __le16 lb_mode; -#define I40E_AQ_LB_PHY_LOCAL 0x01 -#define I40E_AQ_LB_PHY_REMOTE 0x02 -#define I40E_AQ_LB_MAC_LOCAL 0x04 - u8 reserved[14]; + __le16 lb_mode; +#define I40E_AQ_LB_PHY_LOCAL 0x01 +#define I40E_AQ_LB_PHY_REMOTE 0x02 +#define I40E_AQ_LB_MAC_LOCAL 0x04 + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); /* Set PHY Debug command (0x0622) 
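 * command_flags selects an internal or external PHY reset and can
 * disable firmware link management; see the flag defines below.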
*/ struct i40e_aqc_set_phy_debug { - u8 command_flags; + u8 command_flags; #define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ @@ -1761,15 +1757,15 @@ struct i40e_aqc_set_phy_debug { #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 - u8 reserved[15]; + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); enum i40e_aq_phy_reg_type { - I40E_AQC_PHY_REG_INTERNAL = 0x1, - I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, - I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 + I40E_AQC_PHY_REG_INTERNAL = 0x1, + I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, + I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 }; /* NVM Read command (indirect 0x0701) @@ -1777,40 +1773,40 @@ enum i40e_aq_phy_reg_type { * NVM Update commands (indirect 0x0703) */ struct i40e_aqc_nvm_update { - u8 command_flags; -#define I40E_AQ_NVM_LAST_CMD 0x01 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 - u8 module_pointer; - __le16 length; - __le32 offset; - __le32 addr_high; - __le32 addr_low; + u8 command_flags; +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 + u8 module_pointer; + __le16 length; + __le32 offset; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); /* NVM Config Read (indirect 0x0704) */ struct i40e_aqc_nvm_config_read { - __le16 cmd_flags; + __le16 cmd_flags; #define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 #define ANVM_READ_SINGLE_FEATURE 0 #define ANVM_READ_MULTIPLE_FEATURES 1 - __le16 element_count; - __le16 element_id; /* Feature/field ID */ - u8 reserved[2]; - __le32 address_high; - __le32 address_low; + __le16 element_count; + __le16 element_id; /* Feature/field ID */ + u8 reserved[2]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); /* NVM Config Write (indirect 0x0705) */ struct i40e_aqc_nvm_config_write { - __le16 cmd_flags; - __le16 element_count; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; + __le16 cmd_flags; + __le16 element_count; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); @@ -1835,10 +1831,10 @@ struct i40e_aqc_nvm_config_data_immediate_field { * Send to Peer PF command (indirect 0x0803) */ struct i40e_aqc_pf_vf_message { - __le32 id; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); @@ -1874,22 +1870,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); * uses i40e_aq_desc */ struct i40e_aqc_alternate_write_done { - __le16 cmd_flags; + __le16 cmd_flags; #define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 #define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 #define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 #define I40E_AQ_ALTERNATE_RESET_NEEDED 2 - u8 reserved[14]; + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); /* Set OEM mode (direct 0x0905) */ struct i40e_aqc_alternate_set_mode { - __le32 mode; + __le32 mode; #define I40E_AQ_ALTERNATE_MODE_NONE 0 #define I40E_AQ_ALTERNATE_MODE_OEM 1 - u8 reserved[12]; + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); @@ -1900,33 +1896,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); /* Lan Queue Overflow Event (direct, 0x1001) */ struct i40e_aqc_lan_overflow { - __le32 prtdcb_rupto; - __le32 otx_ctl; - u8 reserved[8]; + __le32 prtdcb_rupto; + 
__le32 otx_ctl; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); /* Get LLDP MIB (indirect 0x0A00) */ struct i40e_aqc_lldp_get_mib { - u8 type; - u8 reserved1; -#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 -#define I40E_AQ_LLDP_MIB_LOCAL 0x0 -#define I40E_AQ_LLDP_MIB_REMOTE 0x1 -#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC -#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 -#define I40E_AQ_LLDP_TX_SHIFT 0x4 -#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) + u8 type; + u8 reserved1; +#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 +#define I40E_AQ_LLDP_MIB_LOCAL 0x0 +#define I40E_AQ_LLDP_MIB_REMOTE 0x1 +#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC +#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 +#define I40E_AQ_LLDP_TX_SHIFT 0x4 +#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) /* TX pause flags use I40E_AQ_LINK_TX_* above */ - __le16 local_len; - __le16 remote_len; - u8 reserved2[2]; - __le32 addr_high; - __le32 addr_low; + __le16 local_len; + __le16 remote_len; + u8 reserved2[2]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); @@ -1935,12 +1931,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); * also used for the event (with type in the command field) */ struct i40e_aqc_lldp_update_mib { - u8 command; -#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 -#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; + u8 command; +#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 +#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); @@ -1949,35 +1945,35 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); * Delete LLDP TLV (indirect 0x0A04) */ struct i40e_aqc_lldp_add_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved1[1]; - __le16 len; - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved1[1]; + __le16 len; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); /* Update LLDP TLV (indirect 0x0A03) */ struct i40e_aqc_lldp_update_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved; - __le16 old_len; - __le16 new_offset; - __le16 new_len; - __le32 addr_high; - __le32 addr_low; + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved; + __le16 old_len; + __le16 new_offset; + __le16 new_len; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); /* Stop LLDP (direct 0x0A05) */ struct i40e_aqc_lldp_stop { - u8 command; -#define I40E_AQ_LLDP_AGENT_STOP 0x0 -#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 - u8 reserved[15]; + u8 command; +#define I40E_AQ_LLDP_AGENT_STOP 0x0 +#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); @@ -1985,9 +1981,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); /* Start LLDP (direct 0x0A06) */ struct i40e_aqc_lldp_start { - u8 command; -#define I40E_AQ_LLDP_AGENT_START 0x1 - u8 reserved[15]; + u8 command; +#define I40E_AQ_LLDP_AGENT_START 0x1 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); @@ -1998,13 
+1994,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); /* Add Udp Tunnel command and completion (direct 0x0B00) */ struct i40e_aqc_add_udp_tunnel { - __le16 udp_port; - u8 reserved0[3]; - u8 protocol_type; + __le16 udp_port; + u8 reserved0[3]; + u8 protocol_type; #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 - u8 reserved1[10]; + u8 reserved1[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); @@ -2013,8 +2009,8 @@ struct i40e_aqc_add_udp_tunnel_completion { __le16 udp_port; u8 filter_entry_index; u8 multiple_pfs; -#define I40E_AQC_SINGLE_PF 0x0 -#define I40E_AQC_MULTIPLE_PFS 0x1 +#define I40E_AQC_SINGLE_PF 0x0 +#define I40E_AQC_MULTIPLE_PFS 0x1 u8 total_filters; u8 reserved[11]; }; @@ -2023,23 +2019,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); /* remove UDP Tunnel command (0x0B01) */ struct i40e_aqc_remove_udp_tunnel { - u8 reserved[2]; - u8 index; /* 0 to 15 */ - u8 pf_filters; - u8 total_filters; - u8 reserved2[11]; + u8 reserved[2]; + u8 index; /* 0 to 15 */ + u8 reserved2[13]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); struct i40e_aqc_del_udp_tunnel_completion { - __le16 udp_port; - u8 index; /* 0 to 15 */ - u8 multiple_pfs; - u8 total_filters_used; - u8 reserved; - u8 tunnels_free; - u8 reserved1[9]; + __le16 udp_port; + u8 index; /* 0 to 15 */ + u8 multiple_pfs; + u8 total_filters_used; + u8 reserved1[11]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); @@ -2068,11 +2060,11 @@ struct i40e_aqc_tunnel_key_structure { u8 key1_len; /* 0 to 15 */ u8 key2_len; /* 0 to 15 */ u8 flags; -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 /* response flags */ -#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 -#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 +#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 u8 network_key_index; #define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 #define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 @@ -2085,21 +2077,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); /* OEM mode commands (direct 0xFE0x) */ struct i40e_aqc_oem_param_change { - __le32 param_type; -#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 -#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 -#define I40E_AQ_OEM_PARAM_MAC 2 - __le32 param_value1; - u8 param_value2[8]; + __le32 param_type; +#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 +#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 +#define I40E_AQ_OEM_PARAM_MAC 2 + __le32 param_value1; + u8 param_value2[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); struct i40e_aqc_oem_state_change { - __le32 state; -#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 -#define I40E_AQ_OEM_STATE_LINK_UP 0x1 - u8 reserved[12]; + __le32 state; +#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 +#define I40E_AQ_OEM_STATE_LINK_UP 0x1 + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); @@ -2111,18 +2103,18 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); /* set test mode (0xFF01, internal) */ struct i40e_acq_set_test_mode { - u8 mode; -#define I40E_AQ_TEST_PARTIAL 0 -#define I40E_AQ_TEST_FULL 1 -#define I40E_AQ_TEST_NVM 2 - u8 reserved[3]; - u8 command; -#define I40E_AQ_TEST_OPEN 0 -#define I40E_AQ_TEST_CLOSE 1 -#define I40E_AQ_TEST_INC 2 - u8 reserved2[3]; - __le32 address_high; - __le32 address_low; + u8 mode; +#define I40E_AQ_TEST_PARTIAL 0 +#define I40E_AQ_TEST_FULL 1 
+#define I40E_AQ_TEST_NVM 2 + u8 reserved[3]; + u8 command; +#define I40E_AQ_TEST_OPEN 0 +#define I40E_AQ_TEST_CLOSE 1 +#define I40E_AQ_TEST_INC 2 + u8 reserved2[3]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); @@ -2175,21 +2167,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); #define I40E_AQ_CLUSTER_ID_ALTRAM 11 struct i40e_aqc_debug_dump_internals { - u8 cluster_id; - u8 table_id; - __le16 data_size; - __le32 idx; - __le32 address_high; - __le32 address_low; + u8 cluster_id; + u8 table_id; + __le16 data_size; + __le32 idx; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); struct i40e_aqc_debug_modify_internals { - u8 cluster_id; - u8 cluster_specific_params[7]; - __le32 address_high; - __le32 address_low; + u8 cluster_id; + u8 cluster_specific_params[7]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 952560551964..28c40c57d4f5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -50,6 +50,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_A: case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: + case I40E_DEV_ID_10G_BASE_T: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_VF: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index f6dcf9dd9290..c7f29626eada 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -30,10 +30,7 @@ /* Interrupt Throttling and Rate Limiting Goodies */ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ -#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */ -#define I40E_MAX_IRATE 0x03F -#define I40E_MIN_IRATE 0x001 -#define I40E_IRATE_USEC_RESOLUTION 4 +#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_ITR_100K 0x0005 #define I40E_ITR_20K 0x0019 #define I40E_ITR_8K 0x003E diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 15376436cead..68aec11f6523 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -43,6 +43,7 @@ #define I40E_DEV_ID_QSFP_A 0x1583 #define I40E_DEV_ID_QSFP_B 0x1584 #define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 @@ -259,8 +260,7 @@ enum i40e_aq_resource_access_type { }; struct i40e_nvm_info { - u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */ - u64 hw_semaphore_wait; /* - || - */ + u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ u32 timeout; /* [ms] */ u16 sr_size; /* Shadow RAM size in words */ bool blank_nvm_mode; /* is NVM empty (no FW present)*/ @@ -475,6 +475,11 @@ struct i40e_hw { u32 debug_mask; }; +static inline bool i40e_is_vf(struct i40e_hw *hw) +{ + return hw->mac.type == I40E_MAC_VF; +} + struct i40e_driver_version { u8 major_version; u8 minor_version; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index cd18d5689006..e0c8208138f4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -79,6 +79,7 @@ enum i40e_virtchnl_ops { 
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_VIRTCHNL_OP_GET_STATS, I40E_VIRTCHNL_OP_FCOE, + I40E_VIRTCHNL_OP_CONFIG_RSS, /* PF sends status change events to vfs using * the following op. */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 30ef519d4b91..981224743c73 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -191,6 +191,7 @@ struct i40evf_adapter { struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; struct list_head vlan_filter_list; char misc_vector_name[IFNAMSIZ + 9]; + int num_active_queues; /* TX */ struct i40e_ring *tx_rings[I40E_MAX_VSI_QP]; @@ -243,7 +244,7 @@ struct i40evf_adapter { struct i40e_hw hw; /* defined in i40e_type.h */ enum i40evf_state_t state; - volatile unsigned long crit_section; + unsigned long crit_section; struct work_struct watchdog_task; bool netdev_registered; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index efee6b290c0f..69b97bac182c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -58,8 +58,8 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { #define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats) #define I40EVF_QUEUE_STATS_LEN(_dev) \ - (((struct i40evf_adapter *) \ - netdev_priv(_dev))->vsi_res->num_queue_pairs \ + (((struct i40evf_adapter *)\ + netdev_priv(_dev))->num_active_queues \ * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64))) #define I40EVF_STATS_LEN(_dev) \ (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) @@ -121,11 +121,11 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev, p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset; data[i] = *(u64 *)p; } - for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) { + for (j = 0; j < adapter->num_active_queues; j++) { data[i++] = adapter->tx_rings[j]->stats.packets; data[i++] = adapter->tx_rings[j]->stats.bytes; } - for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) { + for (j = 0; j < adapter->num_active_queues; j++) { data[i++] = adapter->rx_rings[j]->stats.packets; data[i++] = adapter->rx_rings[j]->stats.bytes; } @@ -151,13 +151,13 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i); p += ETH_GSTRING_LEN; } - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); @@ -175,6 +175,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) static u32 i40evf_get_msglevel(struct net_device *netdev) { struct i40evf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; } @@ -189,6 +190,7 @@ static u32 i40evf_get_msglevel(struct net_device *netdev) static void i40evf_set_msglevel(struct net_device *netdev, u32 data) { struct i40evf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; } @@ -219,7 +221,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev, * but the number of rings is not reported. 
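 * Ring counts are exposed through the channels interface instead
 * (see i40evf_get_channels).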
**/ static void i40evf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring) { struct i40evf_adapter *adapter = netdev_priv(netdev); @@ -280,7 +282,7 @@ static int i40evf_set_ringparam(struct net_device *netdev, * this functionality. **/ static int i40evf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_vsi *vsi = &adapter->vsi; @@ -308,7 +310,7 @@ static int i40evf_get_coalesce(struct net_device *netdev, * Change current coalescing settings. **/ static int i40evf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; @@ -430,7 +432,7 @@ static int i40evf_get_rxnfc(struct net_device *netdev, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = adapter->vsi_res->num_queue_pairs; + cmd->data = adapter->num_active_queues; ret = 0; break; case ETHTOOL_GRXFH: @@ -598,12 +600,12 @@ static void i40evf_get_channels(struct net_device *netdev, struct i40evf_adapter *adapter = netdev_priv(netdev); /* Report maximum channels */ - ch->max_combined = adapter->vsi_res->num_queue_pairs; + ch->max_combined = adapter->num_active_queues; ch->max_other = NONQ_VECS; ch->other_count = NONQ_VECS; - ch->combined_count = adapter->vsi_res->num_queue_pairs; + ch->combined_count = adapter->num_active_queues; } /** @@ -621,17 +623,23 @@ static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev) * i40evf_get_rxfh - get the rx flow hash indirection table * @netdev: network interface device structure * @indir: indirection table - * @key: hash key (will be %NULL until get_rxfh_key_size is implemented) + * @key: hash key * * Reads the indirection table directly from the hardware. Always returns 0. **/ -static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; u32 hlut_val; int i, j; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); indir[j++] = hlut_val & 0xff; @@ -646,19 +654,26 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) * i40evf_set_rxfh - set the rx flow hash indirection table * @netdev: network interface device structure * @indir: indirection table - * @key: hash key (will be %NULL until get_rxfh_key_size is implemented) + * @key: hash key * * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. 
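 * Only the Toeplitz hash (ETH_RSS_HASH_TOP) is supported and the hash
 * key cannot be changed through this interface; requests for either
 * fail with -EOPNOTSUPP.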
**/ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key) + const u8 *key, const u8 hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; u32 hlut_val; int i, j; + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { hlut_val = indir[j++]; hlut_val |= indir[j++] << 8; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index c51bc7a33bc5..cabaf599f562 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.0.5" +#define DRV_VERSION "1.0.6" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; @@ -185,6 +185,7 @@ static void i40evf_tx_timeout(struct net_device *netdev) static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; + wr32(hw, I40E_VFINT_DYN_CTL01, 0); /* read flush */ @@ -200,6 +201,7 @@ static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter) static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; + wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK); @@ -226,7 +228,6 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter) } /* read flush */ rd32(hw, I40E_VFGEN_RSTAT); - } /** @@ -253,8 +254,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) * @adapter: board private structure * @mask: bitmap of vectors to trigger **/ -static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, - u32 mask) +static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) { struct i40e_hw *hw = &adapter->hw; int i; @@ -397,8 +397,8 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) int q_vectors; int v_start = 0; int rxr_idx = 0, txr_idx = 0; - int rxr_remaining = adapter->vsi_res->num_queue_pairs; - int txr_remaining = adapter->vsi_res->num_queue_pairs; + int rxr_remaining = adapter->num_active_queues; + int txr_remaining = adapter->num_active_queues; int i, j; int rqpv, tqpv; int err = 0; @@ -551,6 +551,7 @@ static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) { int i; int q_vectors; + q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (i = 0; i < q_vectors; i++) { @@ -584,7 +585,8 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + + for (i = 0; i < adapter->num_active_queues; i++) adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i); } @@ -629,7 +631,7 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) rx_buf_len = ALIGN(max_frame, 1024); } - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i); adapter->rx_rings[i]->rx_buf_len = rx_buf_len; } @@ -667,9 +669,9 @@ i40evf_vlan_filter 
*i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) struct i40evf_vlan_filter *f; f = i40evf_find_vlan(adapter, vlan); - if (NULL == f) { + if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (NULL == f) + if (!f) return NULL; f->vlan = vlan; @@ -705,7 +707,7 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) * @vid: VLAN tag **/ static int i40evf_vlan_rx_add_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) { struct i40evf_adapter *adapter = netdev_priv(netdev); @@ -720,7 +722,7 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev, * @vid: VLAN tag **/ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) { struct i40evf_adapter *adapter = netdev_priv(netdev); @@ -772,9 +774,9 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, udelay(1); f = i40evf_find_filter(adapter, macaddr); - if (NULL == f) { + if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (NULL == f) { + if (!f) { clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); return NULL; @@ -881,6 +883,7 @@ static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { struct napi_struct *napi; + q_vector = adapter->q_vector[q_idx]; napi = &q_vector->napi; napi_enable(napi); @@ -918,8 +921,9 @@ static void i40evf_configure(struct i40evf_adapter *adapter) i40evf_configure_rx(adapter); adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = adapter->rx_rings[i]; + i40evf_alloc_rx_buffers(ring, ring->count); ring->next_to_use = ring->count - 1; writel(ring->next_to_use, ring->tail); @@ -950,7 +954,7 @@ static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + for (i = 0; i < adapter->num_active_queues; i++) i40evf_clean_rx_ring(adapter->rx_rings[i]); } @@ -962,7 +966,7 @@ static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + for (i = 0; i < adapter->num_active_queues; i++) i40evf_clean_tx_ring(adapter->tx_rings[i]); } @@ -1064,7 +1068,7 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter) if (!adapter->vsi_res) return; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { if (adapter->tx_rings[i]) kfree_rcu(adapter->tx_rings[i], rcu); adapter->tx_rings[i] = NULL; @@ -1084,11 +1088,11 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *tx_ring; struct i40e_ring *rx_ring; - tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); + tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL); if (!tx_ring) goto err_out; @@ -1130,7 +1134,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) err = -EIO; goto out; } - pairs = adapter->vsi_res->num_queue_pairs; + pairs = adapter->num_active_queues; /* It's easy to be greedy for MSI-X vectors, but it really * doesn't do us much good if we have a lot more vectors @@ -1172,14 +1176,14 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) num_q_vectors = 
adapter->num_msix_vectors - NONQ_VECS; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); + q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); if (!q_vector) goto err_out; q_vector->adapter = adapter; q_vector->vsi = &adapter->vsi; q_vector->v_idx = q_idx; netif_napi_add(adapter->netdev, &q_vector->napi, - i40evf_napi_poll, NAPI_POLL_WEIGHT); + i40evf_napi_poll, NAPI_POLL_WEIGHT); adapter->q_vector[q_idx] = q_vector; } @@ -1210,7 +1214,7 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) int napi_vectors; num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; - napi_vectors = adapter->vsi_res->num_queue_pairs; + napi_vectors = adapter->num_active_queues; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { struct i40e_q_vector *q_vector = adapter->q_vector[q_idx]; @@ -1265,8 +1269,8 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) } dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", - (adapter->vsi_res->num_queue_pairs > 1) ? "Enabled" : - "Disabled", adapter->vsi_res->num_queue_pairs); + (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", + adapter->num_active_queues); return 0; err_alloc_queues: @@ -1284,6 +1288,7 @@ err_set_interrupt: static void i40evf_watchdog_timer(unsigned long data) { struct i40evf_adapter *adapter = (struct i40evf_adapter *)data; + schedule_work(&adapter->watchdog_task); /* timer will be rescheduled in watchdog task */ } @@ -1295,8 +1300,8 @@ static void i40evf_watchdog_timer(unsigned long data) static void i40evf_watchdog_task(struct work_struct *work) { struct i40evf_adapter *adapter = container_of(work, - struct i40evf_adapter, - watchdog_task); + struct i40evf_adapter, + watchdog_task); struct i40e_hw *hw = &adapter->hw; uint32_t rstat_val; @@ -1334,7 +1339,7 @@ static void i40evf_watchdog_task(struct work_struct *work) /* check for reset */ rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; + I40E_VFGEN_RSTAT_VFR_STATE_MASK; if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && (rstat_val != I40E_VFR_VFACTIVE) && (rstat_val != I40E_VFR_COMPLETED)) { @@ -1425,7 +1430,7 @@ static int next_queue(struct i40evf_adapter *adapter, int j) { j += 1; - return j >= adapter->vsi_res->num_queue_pairs ? 0 : j; + return j >= adapter->num_active_queues ? 0 : j; } /** @@ -1434,23 +1439,23 @@ static int next_queue(struct i40evf_adapter *adapter, int j) **/ static void i40evf_configure_rss(struct i40evf_adapter *adapter) { + u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1]; struct i40e_hw *hw = &adapter->hw; u32 lut = 0; int i, j; u64 hena; - /* Set of random keys generated using kernel random number generator */ - static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = { - 0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127, - 0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0, - 0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e, - 0x4954b126 }; + /* No RSS for single queue. 
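+	 * Clearing both I40E_VFQF_HENA registers disables all hash types,
+	 * so every packet is delivered to the single active queue.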
*/ + if (adapter->num_active_queues == 1) { + wr32(hw, I40E_VFQF_HENA(0), 0); + wr32(hw, I40E_VFQF_HENA(1), 0); + return; + } /* Hash type is configured by the PF - we just supply the key */ - - /* Fill out hash function seed */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_VFQF_HKEY(i), seed[i]); + wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]); /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ hena = I40E_DEFAULT_RSS_HENA; @@ -1458,7 +1463,7 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter) wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); /* Populate the LUT with max no. of queues in round robin fashion */ - j = adapter->vsi_res->num_queue_pairs; + j = adapter->num_active_queues; for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { j = next_queue(adapter, j); lut = j; @@ -1494,7 +1499,7 @@ static void i40evf_reset_task(struct work_struct *work) while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) - udelay(500); + usleep_range(500, 1000); if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { dev_info(&adapter->pdev->dev, "Requesting reset from PF\n"); @@ -1508,8 +1513,7 @@ static void i40evf_reset_task(struct work_struct *work) if ((rstat_val != I40E_VFR_VFACTIVE) && (rstat_val != I40E_VFR_COMPLETED)) break; - else - msleep(I40EVF_RESET_WAIT_MS); + msleep(I40EVF_RESET_WAIT_MS); } if (i == I40EVF_RESET_WAIT_COUNT) { adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; @@ -1523,8 +1527,7 @@ static void i40evf_reset_task(struct work_struct *work) if ((rstat_val == I40E_VFR_VFACTIVE) || (rstat_val == I40E_VFR_COMPLETED)) break; - else - msleep(I40EVF_RESET_WAIT_MS); + msleep(I40EVF_RESET_WAIT_MS); } if (i == I40EVF_RESET_WAIT_COUNT) { struct i40evf_mac_filter *f, *ftmp; @@ -1575,12 +1578,12 @@ continue_reset: /* kill and reinit the admin queue */ if (i40evf_shutdown_adminq(hw)) dev_warn(&adapter->pdev->dev, - "%s: Failed to destroy the Admin Queue resources\n", - __func__); + "%s: Failed to destroy the Admin Queue resources\n", + __func__); err = i40evf_init_adminq(hw); if (err) dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n", - __func__, err); + __func__, err); adapter->aq_pending = 0; adapter->aq_required = 0; @@ -1632,8 +1635,8 @@ static void i40evf_adminq_task(struct work_struct *work) if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) return; - event.msg_size = I40EVF_MAX_AQ_BUF_SIZE; - event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) return; @@ -1645,13 +1648,9 @@ static void i40evf_adminq_task(struct work_struct *work) i40evf_virtchnl_completion(adapter, v_msg->v_opcode, v_msg->v_retval, event.msg_buf, - event.msg_size); - if (pending != 0) { - dev_info(&adapter->pdev->dev, - "%s: ARQ: Pending events %d\n", - __func__, pending); + event.msg_len); + if (pending != 0) memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); - } } while (pending); /* check for error indications */ @@ -1705,10 +1704,9 @@ static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + for (i = 0; i < adapter->num_active_queues; i++) if (adapter->tx_rings[i]->desc) i40evf_free_tx_resources(adapter->tx_rings[i]); - } /** @@ -1725,7 +1723,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) { int i, err = 0; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < 
adapter->num_active_queues; i++) { adapter->tx_rings[i]->count = adapter->tx_desc_count; err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]); if (!err) @@ -1753,7 +1751,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) { int i, err = 0; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i]->count = adapter->rx_desc_count; err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]); if (!err) @@ -1776,7 +1774,7 @@ static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + for (i = 0; i < adapter->num_active_queues; i++) if (adapter->rx_rings[i]->desc) i40evf_free_rx_resources(adapter->rx_rings[i]); } @@ -1980,7 +1978,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) if ((rstat == I40E_VFR_VFACTIVE) || (rstat == I40E_VFR_COMPLETED)) return 0; - udelay(10); + usleep_range(10, 20); } return -EBUSY; } @@ -2022,7 +2020,7 @@ static void i40evf_init_task(struct work_struct *work) err = i40evf_check_reset_complete(hw); if (err) { dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", - err); + err); goto err; } hw->aq.num_arq_entries = I40EVF_AQ_LEN; @@ -2047,6 +2045,8 @@ static void i40evf_init_task(struct work_struct *work) case __I40EVF_INIT_VERSION_CHECK: if (!i40evf_asq_done(hw)) { dev_err(&pdev->dev, "Admin queue command never completed\n"); + i40evf_shutdown_adminq(hw); + adapter->state = __I40EVF_STARTUP; goto err; } @@ -2054,7 +2054,7 @@ static void i40evf_init_task(struct work_struct *work) err = i40evf_verify_api_ver(adapter); if (err) { dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n", - err); + err); if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { dev_info(&pdev->dev, "Resending request\n"); err = i40evf_send_api_ver(adapter); @@ -2080,8 +2080,11 @@ static void i40evf_init_task(struct work_struct *work) goto err; } err = i40evf_get_vf_config(adapter); - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - goto restart; + if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { + dev_info(&pdev->dev, "Resending VF config request\n"); + err = i40evf_send_vf_config_msg(adapter); + goto err; + } if (err) { dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); @@ -2138,7 +2141,7 @@ static void i40evf_init_task(struct work_struct *work) ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (NULL == f) + if (!f) goto err_sw_init; ether_addr_copy(f->macaddr, adapter->hw.mac.addr); @@ -2152,6 +2155,9 @@ static void i40evf_init_task(struct work_struct *work) adapter->watchdog_timer.data = (unsigned long)adapter; mod_timer(&adapter->watchdog_timer, jiffies + 1); + adapter->num_active_queues = min_t(int, + adapter->vsi_res->num_queue_pairs, + (int)(num_online_cpus())); adapter->tx_desc_count = I40EVF_DEFAULT_TXD; adapter->rx_desc_count = I40EVF_DEFAULT_RXD; err = i40evf_init_interrupt_scheme(adapter); @@ -2500,8 +2506,9 @@ static struct pci_driver i40evf_driver = { static int __init i40evf_init_module(void) { int ret; + pr_info("i40evf: %s - version %s\n", i40evf_driver_string, - i40evf_driver_version); + i40evf_driver_version); pr_info("%s\n", i40evf_copyright); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 66d12f5b4ca8..5fde5a7f4591 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -89,27 
+89,37 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) struct i40e_virtchnl_version_info *pf_vvi; struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; + enum i40e_virtchnl_ops op; i40e_status err; - event.msg_size = I40EVF_MAX_AQ_BUF_SIZE; - event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) { err = -ENOMEM; goto out; } - err = i40evf_clean_arq_element(hw, &event, NULL); - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - goto out_alloc; + while (1) { + err = i40evf_clean_arq_element(hw, &event, NULL); + /* When the AQ is empty, i40evf_clean_arq_element will return + * nonzero and this loop will terminate. + */ + if (err) + goto out_alloc; + op = + (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + if (op == I40E_VIRTCHNL_OP_VERSION) + break; + } + err = (i40e_status)le32_to_cpu(event.desc.cookie_low); if (err) goto out_alloc; - if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) != - I40E_VIRTCHNL_OP_VERSION) { + if (op != I40E_VIRTCHNL_OP_VERSION) { dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n", - le32_to_cpu(event.desc.cookie_high)); + op); err = -EIO; goto out_alloc; } @@ -153,42 +163,34 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; - u16 len; + enum i40e_virtchnl_ops op; i40e_status err; + u16 len; len = sizeof(struct i40e_virtchnl_vf_resource) + I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource); - event.msg_size = len; - event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + event.buf_len = len; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) { err = -ENOMEM; goto out; } - err = i40evf_clean_arq_element(hw, &event, NULL); - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - goto out_alloc; - - err = (i40e_status)le32_to_cpu(event.desc.cookie_low); - if (err) { - dev_err(&adapter->pdev->dev, - "%s: Error returned from PF, %d, %d\n", __func__, - le32_to_cpu(event.desc.cookie_high), - le32_to_cpu(event.desc.cookie_low)); - err = -EIO; - goto out_alloc; + while (1) { + /* When the AQ is empty, i40evf_clean_arq_element will return + * nonzero and this loop will terminate. 
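This drain-until-opcode pattern now appears in both i40evf_verify_api_ver() and i40evf_get_vf_config(): keep popping admin-queue events, discard anything unrelated, and stop once the opcode being waited for arrives or the queue runs dry. A minimal sketch of the shared idea, in driver context; the helper name is illustrative and not part of this patch:

/* Sketch: drain the VF admin receive queue until 'wanted' shows up.
 * i40evf_clean_arq_element() returns nonzero once the queue is empty,
 * which bounds the loop; unrelated events are simply discarded.
 */
static i40e_status i40evf_wait_for_op(struct i40e_hw *hw,
                                      struct i40e_arq_event_info *event,
                                      enum i40e_virtchnl_ops wanted)
{
        enum i40e_virtchnl_ops op;
        i40e_status err;

        for (;;) {
                err = i40evf_clean_arq_element(hw, event, NULL);
                if (err)
                        return err;     /* queue empty: caller may retry */
                op = (enum i40e_virtchnl_ops)
                     le32_to_cpu(event->desc.cookie_high);
                if (op == wanted)
                        return 0;
        }
}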
+ */ + err = i40evf_clean_arq_element(hw, &event, NULL); + if (err) + goto out_alloc; + op = + (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES) + break; } - if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) != - I40E_VIRTCHNL_OP_GET_VF_RESOURCES) { - dev_err(&adapter->pdev->dev, - "%s: Invalid response from PF, %d, %d\n", __func__, - le32_to_cpu(event.desc.cookie_high), - le32_to_cpu(event.desc.cookie_low)); - err = -EIO; - goto out_alloc; - } - memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len)); + err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); i40e_vf_parse_hw_config(hw, adapter->vf_res); out_alloc: @@ -207,7 +209,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) { struct i40e_virtchnl_vsi_queue_config_info *vqci; struct i40e_virtchnl_queue_pair_info *vqpi; - int pairs = adapter->vsi_res->num_queue_pairs; + int pairs = adapter->num_active_queues; int i, len; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { @@ -273,7 +275,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) } adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; - vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1; + vqs.tx_queues = (1 << adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES; adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; @@ -299,7 +301,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) } adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; - vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1; + vqs.tx_queues = (1 << adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES; adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; @@ -393,7 +395,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) (count * sizeof(struct i40e_virtchnl_ether_addr)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", - __func__); + __func__); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); @@ -454,7 +456,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) (count * sizeof(struct i40e_virtchnl_ether_addr)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", - __func__); + __func__); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); @@ -516,7 +518,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) (count * sizeof(u16)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", - __func__); + __func__); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); @@ -576,7 +578,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) (count * sizeof(u16)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", - __func__); + __func__); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); @@ -635,6 +637,7 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) void 
i40evf_request_stats(struct i40evf_adapter *adapter) { struct i40e_virtchnl_queue_select vqs; + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* no error message, this isn't crucial */ return; @@ -709,19 +712,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, "%s: Unknown event %d from pf\n", __func__, vpe->event); break; - } return; } - if (v_opcode != adapter->current_op) { - dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n", - __func__, adapter->current_op, v_opcode); - /* We're probably completely screwed at this point, but clear - * the current op and try to carry on.... - */ - adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; - return; - } + if (v_opcode != adapter->current_op) + dev_info(&adapter->pdev->dev, "Pending op is %d, received %d\n", + adapter->current_op, v_opcode); if (v_retval) { dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n", __func__, v_retval, v_opcode); @@ -773,8 +769,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS); break; default: - dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n", - __func__, v_opcode); + dev_info(&adapter->pdev->dev, "Received unexpected message %d from PF\n", + v_opcode); break; } /* switch v_opcode */ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 02cfd3b14762..d5673eb90c54 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2842,11 +2842,16 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev) return IGB_RETA_SIZE; } -static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) { struct igb_adapter *adapter = netdev_priv(netdev); int i; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; for (i = 0; i < IGB_RETA_SIZE; i++) indir[i] = adapter->rss_indir_tbl[i]; @@ -2889,13 +2894,20 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter) } static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key) + const u8 *key, const u8 hfunc) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int i; u32 num_queues; + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + num_queues = adapter->rss_queues; switch (hw->mac.type) { diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a21b14495ebd..f04ad13f7159 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1012,7 +1012,8 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) /* igb_get_stats64() might access the rings on this vector, * we must wait a grace period before freeing it. 
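The grace-period requirement here is the classic RCU unpublish-then-free pattern: readers such as igb_get_stats64() traverse the vector under rcu_read_lock(), so the writer may only hand the object to kfree_rcu() after unpublishing the pointer. A generic sketch of that lifecycle; the struct and function names are made up for illustration:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct stats_node {
        u64 packets;
        struct rcu_head rcu;    /* storage kfree_rcu() uses to defer the free */
};

/* Readers dereference the published pointer inside an RCU read section. */
static u64 read_packets(struct stats_node __rcu **slot)
{
        struct stats_node *n;
        u64 val = 0;

        rcu_read_lock();
        n = rcu_dereference(*slot);
        if (n)
                val = n->packets;
        rcu_read_unlock();
        return val;
}

/* Writer unpublishes first, then lets RCU free the object once no
 * reader can still be traversing it.
 */
static void retire_node(struct stats_node __rcu **slot)
{
        struct stats_node *n = rcu_dereference_protected(*slot, 1);

        RCU_INIT_POINTER(*slot, NULL);
        if (n)
                kfree_rcu(n, rcu);
}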
*/ - kfree_rcu(q_vector, rcu); + if (q_vector) + kfree_rcu(q_vector, rcu); } /** @@ -1792,8 +1793,10 @@ void igb_down(struct igb_adapter *adapter) adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; for (i = 0; i < adapter->num_q_vectors; i++) { - napi_synchronize(&(adapter->q_vector[i]->napi)); - napi_disable(&(adapter->q_vector[i]->napi)); + if (adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } } @@ -3372,14 +3375,11 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 mrqc, rxcsum; u32 j, num_rx_queues; - static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741, - 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE, - 0xA32DCB77, 0x0CF23080, 0x3BB7426A, - 0xFA01ACBE }; + u32 rss_key[10]; - /* Fill out hash function seeds */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (j = 0; j < 10; j++) - wr32(E1000_RSSRK(j), rsskey[j]); + wr32(E1000_RSSRK(j), rss_key[j]); num_rx_queues = adapter->rss_queues; @@ -3717,7 +3717,8 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_tx_queues; i++) - igb_free_tx_resources(adapter->tx_ring[i]); + if (adapter->tx_ring[i]) + igb_free_tx_resources(adapter->tx_ring[i]); } void igb_unmap_and_free_tx_resource(struct igb_ring *ring, @@ -3782,7 +3783,8 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_tx_queues; i++) - igb_clean_tx_ring(adapter->tx_ring[i]); + if (adapter->tx_ring[i]) + igb_clean_tx_ring(adapter->tx_ring[i]); } /** @@ -3819,7 +3821,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_rx_queues; i++) - igb_free_rx_resources(adapter->rx_ring[i]); + if (adapter->rx_ring[i]) + igb_free_rx_resources(adapter->rx_ring[i]); } /** @@ -3874,7 +3877,8 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_rx_queues; i++) - igb_clean_rx_ring(adapter->rx_ring[i]); + if (adapter->rx_ring[i]) + igb_clean_rx_ring(adapter->rx_ring[i]); } /** @@ -5087,12 +5091,8 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, /* The minimum packet size with TCTL.PSP set is 17 so pad the skb * in order to meet this minimum size requirement. 
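skb_put_padto(), used in the hunk that follows, condenses the old pad/len/tail-pointer dance into one call that zero-fills the frame tail and frees the skb if the pad allocation fails, which is why the error path can simply return NETDEV_TX_OK. A sketch of the resulting transmit-path shape; the function name is illustrative:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Sketch: enforce the 17-byte minimum required when TCTL.PSP is set.
 * On failure skb_put_padto() has already freed the skb, so the caller
 * only checks the return value and bails out.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (skb_put_padto(skb, 17))
                return NETDEV_TX_OK;    /* skb already freed on failure */

        /* ... hand the now-padded skb to the descriptor ring ... */
        return NETDEV_TX_OK;
}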
*/ - if (unlikely(skb->len < 17)) { - if (skb_pad(skb, 17 - skb->len)) - return NETDEV_TX_OK; - skb->len = 17; - skb_set_tail_pointer(skb, 17); - } + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); } @@ -6537,6 +6537,9 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, if (unlikely(page_to_nid(page) != numa_node_id())) return false; + if (unlikely(page->pfmemalloc)) + return false; + #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ if (unlikely(page_count(page) != 1)) @@ -6603,7 +6606,8 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); /* we can reuse buffer as-is, just make sure it is local */ - if (likely(page_to_nid(page) == numa_node_id())) + if (likely((page_to_nid(page) == numa_node_id()) && + !page->pfmemalloc)) return true; /* this page cannot be reused so discard it */ @@ -6842,14 +6846,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring, if (skb_is_nonlinear(skb)) igb_pull_tail(rx_ring, rx_desc, skb); - /* if skb_pad returns an error the skb was freed */ - if (unlikely(skb->len < 60)) { - int pad_len = 60 - skb->len; - - if (skb_pad(skb, pad_len)) - return true; - __skb_put(skb, pad_len); - } + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; return false; } @@ -6984,7 +6983,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, return true; /* alloc new page for storage */ - page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL); + page = dev_alloc_page(); if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; return false; @@ -7400,6 +7399,8 @@ static int igb_resume(struct device *dev) pci_restore_state(pdev); pci_save_state(pdev); + if (!pci_device_is_present(pdev)) + return -ENODEV; err = pci_enable_device_mem(pdev); if (err) { dev_err(&pdev->dev, diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index be2989e60009..35e6fa643c7e 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o + ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 5032a602d5c9..b6137be43920 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -300,16 +300,17 @@ enum ixgbe_ring_f_enum { RING_F_ARRAY_SIZE /* must be last in enum set */ }; -#define IXGBE_MAX_RSS_INDICES 16 -#define IXGBE_MAX_VMDQ_INDICES 64 -#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ -#define IXGBE_MAX_FCOE_INDICES 8 -#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define IXGBE_MAX_L2A_QUEUES 4 -#define IXGBE_BAD_L2A_QUEUE 3 -#define IXGBE_MAX_MACVLANS 31 -#define IXGBE_MAX_DCBMACVLANS 8 +#define IXGBE_MAX_RSS_INDICES 16 +#define IXGBE_MAX_RSS_INDICES_X550 64 +#define IXGBE_MAX_VMDQ_INDICES 64 +#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ +#define IXGBE_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define 
MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define IXGBE_MAX_L2A_QUEUES 4 +#define IXGBE_BAD_L2A_QUEUE 3 +#define IXGBE_MAX_MACVLANS 31 +#define IXGBE_MAX_DCBMACVLANS 8 struct ixgbe_ring_feature { u16 limit; /* upper limit on feature indices */ @@ -553,11 +554,6 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; } -static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value) -{ - writel(value, ring->tail); -} - #define IXGBE_RX_DESC(R, i) \ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) #define IXGBE_TX_DESC(R, i) \ @@ -769,6 +765,21 @@ struct ixgbe_adapter { unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ }; +static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) +{ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + return IXGBE_MAX_RSS_INDICES; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + return IXGBE_MAX_RSS_INDICES_X550; + default: + return 0; + } +} + struct ixgbe_fdir_filter { struct hlist_node fdir_node; union ixgbe_atr_input filter; @@ -804,11 +815,15 @@ enum ixgbe_boards { board_82598, board_82599, board_X540, + board_X550, + board_X550EM_x, }; extern struct ixgbe_info ixgbe_82598_info; extern struct ixgbe_info ixgbe_82599_info; extern struct ixgbe_info ixgbe_X540_info; +extern struct ixgbe_info ixgbe_X550_info; +extern struct ixgbe_info ixgbe_X550EM_x_info; #ifdef CONFIG_IXGBE_DCB extern const struct dcbnl_rtnl_ops dcbnl_ops; #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index b5f484bf3fda..9c66babd4edd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1625,7 +1625,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw) * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum * @hw: pointer to hardware structure **/ -u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) { u16 i; u16 j; @@ -1636,7 +1636,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) /* Include 0x0-0x3F in the checksum */ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { - if (hw->eeprom.ops.read(hw, i, &word) != 0) { + if (hw->eeprom.ops.read(hw, i, &word)) { hw_dbg(hw, "EEPROM read failed\n"); break; } @@ -1645,24 +1645,35 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) /* Include all data from pointers except for the fw pointer */ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { - hw->eeprom.ops.read(hw, i, &pointer); + if (hw->eeprom.ops.read(hw, i, &pointer)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + + /* If the pointer seems invalid */ + if (pointer == 0xFFFF || pointer == 0) + continue; + + if (hw->eeprom.ops.read(hw, pointer, &length)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } - /* Make sure the pointer seems valid */ - if (pointer != 0xFFFF && pointer != 0) { - hw->eeprom.ops.read(hw, pointer, &length); + if (length == 0xFFFF || length == 0) + continue; - if (length != 0xFFFF && length != 0) { - for (j = pointer+1; j <= pointer+length; j++) { - hw->eeprom.ops.read(hw, j, &word); - checksum += word; - } + for (j = pointer + 1; j <= pointer + length; j++) { + if (hw->eeprom.ops.read(hw, j, &word)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; } + checksum += word; } } checksum = 
(u16)IXGBE_EEPROM_SUM - checksum; - return checksum; + return (s32)checksum; } /** @@ -1686,26 +1697,33 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } - if (status == 0) { - checksum = hw->eeprom.ops.calc_checksum(hw); + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; - hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + checksum = (u16)(status & 0xffff); - /* - * Verify read checksum from EEPROM is the same as - * calculated checksum - */ - if (read_checksum != checksum) - status = IXGBE_ERR_EEPROM_CHECKSUM; - - /* If the user cares, return the calculated checksum */ - if (checksum_val) - *checksum_val = checksum; - } else { + status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + if (status) { hw_dbg(hw, "EEPROM read failed\n"); + return status; } + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) + status = IXGBE_ERR_EEPROM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + return status; } @@ -1724,15 +1742,19 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); - - if (status == 0) { - checksum = hw->eeprom.ops.calc_checksum(hw); - status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, - checksum); - } else { + if (status) { hw_dbg(hw, "EEPROM read failed\n"); + return status; } + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum); + return status; } @@ -2469,7 +2491,7 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) * Acquires the SWFW semaphore through the GSSR register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) { u32 gssr = 0; u32 swmask = mask; @@ -2514,7 +2536,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) * Releases the SWFW semaphore through the GSSR register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) { u32 gssr; u32 swmask = mask; @@ -2799,6 +2821,8 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; break; @@ -3192,17 +3216,27 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, *link_up = false; } - if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_10G_82599) - *speed = IXGBE_LINK_SPEED_10GB_FULL; - else if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_1G_82599) + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + if ((hw->mac.type >= ixgbe_mac_X550) && + (links_reg & IXGBE_LINKS_SPEED_NON_STD)) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_LINKS_SPEED_1G_82599: *speed = IXGBE_LINK_SPEED_1GB_FULL; - else if ((links_reg & 
IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_100_82599) - *speed = IXGBE_LINK_SPEED_100_FULL; - else + break; + case IXGBE_LINKS_SPEED_100_82599: + if ((hw->mac.type >= ixgbe_mac_X550) && + (links_reg & IXGBE_LINKS_SPEED_NON_STD)) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + else + *speed = IXGBE_LINK_SPEED_100_FULL; + break; + default: *speed = IXGBE_LINK_SPEED_UNKNOWN; + } return 0; } @@ -3434,23 +3468,34 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) * @buffer: contains the command to write and where the return status will * be placed * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. * * Communicates with the manageability block. On success return 0 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. **/ -static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, - u32 length) +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, + bool return_data) { - u32 hicr, i, bi; + u32 hicr, i, bi, fwsts; u32 hdr_size = sizeof(struct ixgbe_hic_hdr); - u8 buf_len, dword_len; + u16 buf_len, dword_len; - if (length == 0 || length & 0x3 || - length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { - hw_dbg(hw, "Buffer length failure.\n"); + if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + /* Set bit 9 of FWSTS clearing FW reset indication */ + fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); + IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); + /* Check that the host interface is enabled. */ hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if ((hicr & IXGBE_HICR_EN) == 0) { @@ -3458,7 +3503,12 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, return IXGBE_ERR_HOST_INTERFACE_COMMAND; } - /* Calculate length in DWORDs */ + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + hw_dbg(hw, "Buffer length failure, not aligned to dword"); + return IXGBE_ERR_INVALID_ARGUMENT; + } + dword_len = length >> 2; /* @@ -3472,7 +3522,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, /* Setting this bit tells the ARC that a new command is pending. */ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); - for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) { + for (i = 0; i < timeout; i++) { hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_C)) break; @@ -3480,12 +3530,15 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, } /* Check command successful completion. 
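The loop above is the standard bounded poll: each iteration rereads HICR, exits early once firmware clears the command bit, and the check after the loop distinguishes timeout from success. A condensed sketch in driver context; the sleep granularity is an assumption, since the hunk elides the loop body:

/* Sketch: poll the host interface command bit for up to 'timeout'
 * iterations, mirroring the IXGBE_HICR_C loop above. The ~1 ms sleep
 * per step is assumed, not taken from this patch.
 */
static s32 poll_hicr_done(struct ixgbe_hw *hw, u32 timeout)
{
        u32 i;

        for (i = 0; i < timeout; i++) {
                if (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_C))
                        return 0;               /* firmware took the command */
                usleep_range(1000, 2000);       /* assumed interval */
        }
        return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}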
*/ - if (i == IXGBE_HI_COMMAND_TIMEOUT || + if ((timeout != 0 && i == timeout) || (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { hw_dbg(hw, "Command has failed with no status valid.\n"); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + if (!return_data) + return 0; + /* Calculate length in DWORDs */ dword_len = hdr_size >> 2; @@ -3556,7 +3609,9 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, - sizeof(fw_cmd)); + sizeof(fw_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); if (ret_val != 0) continue; @@ -3583,7 +3638,8 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, **/ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) { - u32 gcr_ext, hlreg0; + u32 gcr_ext, hlreg0, i, poll; + u16 value; /* * If double reset is not requested then all transactions should @@ -3600,6 +3656,23 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); + /* wait for a last completion before clearing buffers */ + IXGBE_WRITE_FLUSH(hw); + usleep_range(3000, 6000); + + /* Before proceeding, make sure that the PCIe block does not have + * transactions pending. + */ + poll = ixgbe_pcie_timeout_poll(hw); + for (i = 0; i < poll; i++) { + usleep_range(100, 200); + value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); + if (ixgbe_removed(hw->hw_addr)) + break; + if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) + break; + } + /* initiate cleaning flow for buffers in the PCIe transaction layer */ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 2ae5d4b8fc93..8cfadcb2676e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -64,7 +64,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); -u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, u16 *checksum_val); s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); @@ -84,8 +84,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); void ixgbe_fc_autoneg(struct ixgbe_hw *hw); -s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); -void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask); s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); @@ -110,6 +110,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 ver); +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct 
ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 48f35fc963f8..a507a6fe3624 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -286,6 +286,8 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, bwgid, ptype); case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, bwgid, ptype, prio_tc); default: @@ -302,6 +304,8 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) return ixgbe_dcb_config_pfc_82598(hw, pfc_en); case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); default: break; @@ -357,6 +361,8 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, @@ -385,6 +391,8 @@ void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: ixgbe_dcb_read_rtrup2tc_82599(hw, map); break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index 58a7f5312a96..2707bda37418 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -180,6 +180,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: for (j = 0; j < netdev->addr_len; j++, i++) perm_addr[i] = adapter->hw.mac.san_addr[j]; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 3ce4a258f945..e5be0dd508de 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -342,12 +342,16 @@ static int ixgbe_set_settings(struct net_device *netdev, if (old == advertised) return err; /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + hw->mac.autotry_restart = true; err = hw->mac.ops.setup_link(hw, advertised, true); if (err) { e_info(probe, "setup link failed with code %d\n", err); hw->mac.ops.setup_link(hw, old, true); } + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); } else { /* in this case we currently only support 10Gb/FULL */ u32 speed = ethtool_cmd_speed(ecmd); @@ -507,6 +511,8 @@ static void ixgbe_get_regs(struct net_device *netdev, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); break; @@ -618,6 +624,8 @@ static void ixgbe_get_regs(struct net_device *netdev, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); for (i = 0; i < 8; i++) @@ -1402,6 +1410,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) break; case ixgbe_mac_82599EB: case 
ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: toggle = 0x7FFFF30F; test = reg_test_82599; break; @@ -1640,6 +1650,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); reg_ctl &= ~IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); @@ -1676,6 +1688,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); reg_data |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); @@ -1729,12 +1743,16 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); - /* X540 needs to set the MACC.FLU bit to force link up */ - if (adapter->hw.mac.type == ixgbe_mac_X540) { + /* X540 and X550 needs to set the MACC.FLU bit to force link up */ + switch (adapter->hw.mac.type) { + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); reg_data |= IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); - } else { + break; + default: if (hw->mac.orig_autoc) { reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); @@ -2772,7 +2790,14 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, /* if we changed something we need to update flags */ if (flags2 != adapter->flags2) { struct ixgbe_hw *hw = &adapter->hw; - u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + u32 mrqc; + unsigned int pf_pool = adapter->num_vfs; + + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool)); + else + mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); if ((flags2 & UDP_RSS_FLAGS) && !(adapter->flags2 & UDP_RSS_FLAGS)) @@ -2795,7 +2820,11 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc); + else + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } return 0; @@ -2829,6 +2858,8 @@ static int ixgbe_get_ts_info(struct net_device *dev, struct ixgbe_adapter *adapter = netdev_priv(dev); switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: case ixgbe_mac_X540: case ixgbe_mac_82599EB: info->so_timestamping = @@ -2896,7 +2927,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) max_combined = IXGBE_MAX_FDIR_INDICES; } else { /* support up to 16 queues with RSS */ - max_combined = IXGBE_MAX_RSS_INDICES; + max_combined = ixgbe_max_rss_indices(adapter); } return max_combined; @@ -2944,6 +2975,7 @@ static int ixgbe_set_channels(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); unsigned int count = ch->combined_count; + u8 max_rss_indices = ixgbe_max_rss_indices(adapter); /* verify they are not requesting separate vectors */ if (!count || ch->rx_count || ch->tx_count) @@ -2960,9 +2992,9 @@ static int ixgbe_set_channels(struct net_device *dev, /* update feature limits from largest to 
smallest supported values */ adapter->ring_feature[RING_F_FDIR].limit = count; - /* cap RSS limit at 16 */ - if (count > IXGBE_MAX_RSS_INDICES) - count = IXGBE_MAX_RSS_INDICES; + /* cap RSS limit */ + if (count > max_rss_indices) + count = max_rss_indices; adapter->ring_feature[RING_F_RSS].limit = count; #ifdef IXGBE_FCOE diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index ce40c77381e9..68e1e757ecef 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -126,6 +126,8 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: if (num_tcs > 4) { /* * TCs : TC0/1 TC2/3 TC4-7 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index fec5212d4337..fbd52924ee34 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -42,6 +42,7 @@ #include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> +#include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/if.h> #include <linux/if_vlan.h> @@ -50,6 +51,15 @@ #include <linux/prefetch.h> #include <scsi/fc/fc_fcoe.h> +#ifdef CONFIG_OF +#include <linux/of_net.h> +#endif + +#ifdef CONFIG_SPARC +#include <asm/idprom.h> +#include <asm/prom.h> +#endif + #include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" @@ -65,15 +75,17 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define DRV_VERSION "3.19.1-k" +#define DRV_VERSION "4.0.1-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2014 Intel Corporation."; static const struct ixgbe_info *ixgbe_info_tbl[] = { - [board_82598] = &ixgbe_82598_info, - [board_82599] = &ixgbe_82599_info, - [board_X540] = &ixgbe_X540_info, + [board_82598] = &ixgbe_82598_info, + [board_82599] = &ixgbe_82599_info, + [board_X540] = &ixgbe_X540_info, + [board_X550] = &ixgbe_X550_info, + [board_X550EM_x] = &ixgbe_X550EM_x_info, }; /* ixgbe_pci_tbl - PCI Device ID Table @@ -115,6 +127,9 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, /* required last entry */ {0, } }; @@ -835,6 +850,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; @@ -871,6 +888,8 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask = (qmask & 0xFFFFFFFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); mask = (qmask >> 32); @@ -1412,41 +1431,21 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, skb->ip_summed = CHECKSUM_UNNECESSARY; } -static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) 
-{ - rx_ring->next_to_use = val; - - /* update next to alloc since we have filled the ring */ - rx_ring->next_to_alloc = val; - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - ixgbe_write_tail(rx_ring, val); -} - static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *bi) { struct page *page = bi->page; - dma_addr_t dma = bi->dma; + dma_addr_t dma; /* since we are recycling buffers we should seldom need to alloc */ - if (likely(dma)) + if (likely(page)) return true; /* alloc new page for storage */ - if (likely(!page)) { - page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP, - bi->skb, ixgbe_rx_pg_order(rx_ring)); - if (unlikely(!page)) { - rx_ring->rx_stats.alloc_rx_page_failed++; - return false; - } - bi->page = page; + page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; } /* map page for use */ @@ -1459,13 +1458,13 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, */ if (dma_mapping_error(rx_ring->dev, dma)) { __free_pages(page, ixgbe_rx_pg_order(rx_ring)); - bi->page = NULL; rx_ring->rx_stats.alloc_rx_page_failed++; return false; } bi->dma = dma; + bi->page = page; bi->page_offset = 0; return true; @@ -1509,16 +1508,28 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) i -= rx_ring->count; } - /* clear the hdr_addr for the next_to_use descriptor */ - rx_desc->read.hdr_addr = 0; + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; cleaned_count--; } while (cleaned_count); i += rx_ring->count; - if (rx_ring->next_to_use != i) - ixgbe_release_rx_desc(rx_ring, i); + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } } static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, @@ -1763,14 +1774,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, return false; #endif - /* if skb_pad returns an error the skb was freed */ - if (unlikely(skb->len < 60)) { - int pad_len = 60 - skb->len; - - if (skb_pad(skb, pad_len)) - return true; - __skb_put(skb, pad_len); - } + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; return false; } @@ -1795,9 +1801,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; /* transfer page from old buffer to new buffer */ - new_buff->page = old_buff->page; - new_buff->dma = old_buff->dma; - new_buff->page_offset = old_buff->page_offset; + *new_buff = *old_buff; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, @@ -1806,6 +1810,11 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, DMA_FROM_DEVICE); } +static inline bool ixgbe_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; +} + /** * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on @@ -1841,12 +1850,12 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - /* we can reuse buffer as-is, just make sure it is local */ - if (likely(page_to_nid(page) == numa_node_id())) + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!ixgbe_page_is_reserved(page))) return true; /* this page cannot be reused so discard it */ - put_page(page); + __free_pages(page, ixgbe_rx_pg_order(rx_ring)); return false; } @@ -1854,7 +1863,7 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, rx_buffer->page_offset, size, truesize); /* avoid re-using remote pages */ - if (unlikely(page_to_nid(page) != numa_node_id())) + if (unlikely(ixgbe_page_is_reserved(page))) return false; #if (PAGE_SIZE < 8192) @@ -1864,22 +1873,19 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, /* flip page offset to other buffer */ rx_buffer->page_offset ^= truesize; - - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. - */ - atomic_inc(&page->_count); #else /* move offset up to the next cache line */ rx_buffer->page_offset += truesize; if (rx_buffer->page_offset > last_offset) return false; - - /* bump ref count on page before it is given to the stack */ - get_page(page); #endif + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. 
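Taken together with the ixgbe_page_is_reserved() helper introduced just below, the recycling rule becomes: keep an Rx page only if it is local to this NUMA node, was not handed out from the pfmemalloc emergency reserves, and is solely owned. A condensed sketch of that test; the PAGE_SIZE < 8192 distinction in the real code is folded into a single owner check here:

#include <linux/mm.h>

/* Sketch: decide whether an Rx page may go back on the recycle list,
 * mirroring the ixgbe_page_is_reserved()/page_count() checks in this
 * patch (simplified).
 */
static bool rx_page_reusable(struct page *page)
{
        /* remote pages would keep bouncing between nodes */
        if (page_to_nid(page) != numa_mem_id())
                return false;
        /* reserve-backed pages must be returned, not hoarded */
        if (page->pfmemalloc)
                return false;
        /* only safe to flip buffer halves if we are the sole owner */
        return page_count(page) == 1;
}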
+ */ + atomic_inc(&page->_count); + return true; } @@ -1942,6 +1948,8 @@ dma_sync: rx_buffer->page_offset, ixgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE); + + rx_buffer->skb = NULL; } /* pull page into skb */ @@ -1959,8 +1967,6 @@ dma_sync: } /* clear contents of buffer_info */ - rx_buffer->skb = NULL; - rx_buffer->dma = 0; rx_buffer->page = NULL; return skb; @@ -2155,6 +2161,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: ixgbe_set_ivar(adapter, -1, 1, v_idx); break; default: @@ -2264,6 +2272,8 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: /* * set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt @@ -2467,6 +2477,8 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); @@ -2493,6 +2505,8 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); @@ -2525,6 +2539,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, mask |= IXGBE_EIMS_GPI_SDP0; break; case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask |= IXGBE_EIMS_TS; break; default: @@ -2536,7 +2552,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, case ixgbe_mac_82599EB: mask |= IXGBE_EIMS_GPI_SDP1; mask |= IXGBE_EIMS_GPI_SDP2; + /* fall through */ case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask |= IXGBE_EIMS_ECC; mask |= IXGBE_EIMS_MAILBOX; break; @@ -2544,9 +2563,6 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, break; } - if (adapter->hw.mac.type == ixgbe_mac_X540) - mask |= IXGBE_EIMS_TIMESYNC; - if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) mask |= IXGBE_EIMS_FLOW_DIR; @@ -2592,6 +2608,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; @@ -2811,6 +2829,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ixgbe_check_sfp_event(adapter, eicr); /* Fall through */ case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; @@ -2905,6 +2925,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); @@ -3190,16 +3212,14 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); } -static void ixgbe_setup_mrqc(struct 
ixgbe_adapter *adapter) +static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed) { struct ixgbe_hw *hw = &adapter->hw; - static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, - 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, - 0x6A3E67EA, 0x14364D17, 0x3BED200D}; - u32 mrqc = 0, reta = 0; - u32 rxcsum; + u32 reta = 0; int i, j; + int reta_entries = 128; u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + int indices_multi; /* * Program table for at least 2 queues w/ SR-IOV so that VFs can @@ -3213,16 +3233,69 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) for (i = 0; i < 10; i++) IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); + /* Fill out the redirection table as follows: + * 82598: 128 (8 bit wide) entries containing pair of 4 bit RSS indices + * 82599/X540: 128 (8 bit wide) entries containing 4 bit RSS index + * X550: 512 (8 bit wide) entries containing 6 bit RSS index + */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + indices_multi = 0x11; + else + indices_multi = 0x1; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + reta_entries = 512; + default: + break; + } + /* Fill out redirection table */ - for (i = 0, j = 0; i < 128; i++, j++) { + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + reta = (reta << 8) | (j * indices_multi); + if ((i & 3) == 3) { + if (i < 128) + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); + else + IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), + reta); + } + } +} + +static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter, const u32 *seed) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vfreta = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + unsigned int pf_pool = adapter->num_vfs; + int i, j; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), seed[i]); + + /* Fill out the redirection table */ + for (i = 0, j = 0; i < 64; i++, j++) { if (j == rss_i) j = 0; - /* reta = 4-byte sliding window of - * 0x00..(indices-1)(indices-1)00..etc. 
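The redirection-table writes in ixgbe_setup_reta() rely on the registers being 32 bits wide while each entry is one byte, so four entries are accumulated as a sliding window before each write. A standalone sketch of the packing; write_reta_reg() is a placeholder for the IXGBE_WRITE_REG(RETA/ERETA) calls, and 'multi' is 0x11 on 82598 (paired 4-bit indices) or 1 on later MACs, per the comment in the patch:

void write_reta_reg(u32 idx, u32 val); /* placeholder for register I/O */

/* Sketch: pack 8-bit RSS indirection entries, four per 32-bit register.
 * Every fourth entry flushes the accumulated window; older bytes have
 * already shifted out by then.
 */
static void fill_reta(u32 entries, u32 rss_queues, u32 multi)
{
        u32 i, j, reta = 0;

        for (i = 0, j = 0; i < entries; i++, j++) {
                if (j == rss_queues)
                        j = 0;                  /* wrap over active queues */
                reta = (reta << 8) | (j * multi);
                if ((i & 3) == 3)               /* four entries buffered */
                        write_reta_reg(i >> 2, reta);
        }
}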
*/ - reta = (reta << 8) | (j * 0x11); + vfreta = (vfreta << 8) | j; if ((i & 3) == 3) - IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); + IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), + vfreta); } +} + +static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 mrqc = 0, rss_field = 0, vfmrqc = 0; + u32 rss_key[10]; + u32 rxcsum; /* Disable indicating checksum in descriptor, enables RSS hash */ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); @@ -3255,17 +3328,35 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) } /* Perform hash on these packet types */ - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | - IXGBE_MRQC_RSS_FIELD_IPV4_TCP | - IXGBE_MRQC_RSS_FIELD_IPV6 | - IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 | + IXGBE_MRQC_RSS_FIELD_IPV4_TCP | + IXGBE_MRQC_RSS_FIELD_IPV6 | + IXGBE_MRQC_RSS_FIELD_IPV6_TCP; if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - - IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { + unsigned int pf_pool = adapter->num_vfs; + + /* Enable VF RSS mode */ + mrqc |= IXGBE_MRQC_MULTIPLE_RSS; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + /* Setup RSS through the VF registers */ + ixgbe_setup_vfreta(adapter, rss_key); + vfmrqc = IXGBE_MRQC_RSSEN; + vfmrqc |= rss_field; + IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); + } else { + ixgbe_setup_reta(adapter, rss_key); + mrqc |= rss_field; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + } } /** @@ -3534,6 +3625,8 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: case ixgbe_mac_82598EB: /* * For VMDq support of different descriptor types or @@ -3657,6 +3750,8 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -3691,6 +3786,8 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -3936,8 +4033,8 @@ void ixgbe_set_rx_mode(struct net_device *netdev) * if SR-IOV and VMDQ are disabled - otherwise ensure * that hardware VLAN filters remain enabled. 
*/ - if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | - IXGBE_FLAG_SRIOV_ENABLED))) + if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | + IXGBE_FLAG_SRIOV_ENABLED)) vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); } else { if (netdev->flags & IFF_ALLMULTI) { @@ -4112,6 +4209,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: dv_id = IXGBE_DV_X540(link, tc); break; default: @@ -4170,6 +4269,8 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: dv_id = IXGBE_LOW_DV_X540(tc); break; default: @@ -4308,29 +4409,26 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { - struct ixgbe_rx_buffer *rx_buffer; + struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; - rx_buffer = &rx_ring->rx_buffer_info[i]; if (rx_buffer->skb) { struct sk_buff *skb = rx_buffer->skb; - if (IXGBE_CB(skb)->page_released) { + if (IXGBE_CB(skb)->page_released) dma_unmap_page(dev, IXGBE_CB(skb)->dma, ixgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE); - IXGBE_CB(skb)->page_released = false; - } dev_kfree_skb(skb); + rx_buffer->skb = NULL; } - rx_buffer->skb = NULL; - if (rx_buffer->dma) - dma_unmap_page(dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); - rx_buffer->dma = 0; - if (rx_buffer->page) - __free_pages(rx_buffer->page, - ixgbe_rx_pg_order(rx_ring)); + + if (!rx_buffer->page) + continue; + + dma_unmap_page(dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring)); + rx_buffer->page = NULL; } @@ -4606,6 +4704,8 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: default: IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); @@ -4948,10 +5048,12 @@ void ixgbe_down(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); } - /* Disable the Tx DMA engine on 82599 and X540 */ + /* Disable the Tx DMA engine on 82599 and later MAC */ switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ~IXGBE_DMATXCTL_TE)); @@ -5016,7 +5118,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) hw->subsystem_device_id = pdev->subsystem_device; /* Set common capability flags and settings */ - rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); + rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); adapter->ring_feature[RING_F_RSS].limit = rss; adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; @@ -5071,6 +5173,12 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) if (fwsm & IXGBE_FWSM_TS_ENABLED) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550: +#ifdef CONFIG_IXGBE_DCA + adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; +#endif + break; default: break; } @@ -5086,6 +5194,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #ifdef CONFIG_IXGBE_DCB switch (hw->mac.type) { case ixgbe_mac_X540: + case 
ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; break; @@ -5675,6 +5785,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: pci_wake_from_d3(pdev, !!wufc); break; default: @@ -5806,6 +5918,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); break; @@ -5819,7 +5933,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); if ((hw->mac.type == ixgbe_mac_82599EB) || - (hw->mac.type == ixgbe_mac_X540)) { + (hw->mac.type == ixgbe_mac_X540) || + (hw->mac.type == ixgbe_mac_X550) || + (hw->mac.type == ixgbe_mac_X550EM_x)) { hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); @@ -5842,7 +5958,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); break; case ixgbe_mac_X540: - /* OS2BMC stats are X540 only*/ + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + /* OS2BMC stats are X540 and later */ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); @@ -6110,6 +6228,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) } break; case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: case ixgbe_mac_82599EB: { u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); @@ -6221,6 +6341,10 @@ static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) if (!adapter->num_vfs) return false; + /* resetting the PF is only needed for MAC before X550 */ + if (hw->mac.type >= ixgbe_mac_X550) + return false; + for (i = 0; i < adapter->num_vfs; i++) { for (j = 0; j < q_per_pool; j++) { u32 h, t; @@ -6256,6 +6380,66 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) } } +#ifdef CONFIG_PCI_IOV +static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter, + struct pci_dev *vfdev) +{ + if (!pci_wait_for_pending_transaction(vfdev)) + e_dev_warn("Issuing VFLR with pending transactions\n"); + + e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); + pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); + + msleep(100); +} + +static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *vfdev; + u32 gpc; + int pos; + unsigned short vf_id; + + if (!(netif_carrier_ok(adapter->netdev))) + return; + + gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); + if (gpc) /* If incrementing then no need for the check below */ + return; + /* Check to see if a bad DMA write target from an errant or + * malicious VF has caused a PCIe error. If so then we can + * issue a VFLR to the offending VF(s) and then resume without + * requesting a full slot reset. 
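ixgbe_issue_vf_flr(), added just above, does this through two generic PCI-core helpers rather than the CIAA/CIAD debug-register pokes that the old implementation (removed further down) used. A standalone sketch of a function level reset through the standard PCIe capability:

#include <linux/pci.h>
#include <linux/delay.h>

/* Sketch: request a Function Level Reset on a VF, as the new
 * ixgbe_issue_vf_flr() does. Waiting for pending transactions first
 * avoids resetting mid-DMA; the 100 ms sleep gives the function time
 * to come back.
 */
static void vf_flr(struct pci_dev *vfdev)
{
        if (!pci_wait_for_pending_transaction(vfdev))
                dev_warn(&vfdev->dev,
                         "issuing FLR with pending transactions\n");

        pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL,
                                 PCI_EXP_DEVCTL_BCR_FLR);
        msleep(100);
}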
+ */ + + if (!pdev) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return; + + /* get the device ID for the VF */ + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + + /* check status reg for all VFs owned by this PF */ + vfdev = pci_get_device(pdev->vendor, vf_id, NULL); + while (vfdev) { + if (vfdev->is_virtfn && (vfdev->physfn == pdev)) { + u16 status_reg; + + pci_read_config_word(vfdev, PCI_STATUS, &status_reg); + if (status_reg & PCI_STATUS_REC_MASTER_ABORT) + /* issue VFLR */ + ixgbe_issue_vf_flr(adapter, vfdev); + } + + vfdev = pci_get_device(pdev->vendor, vf_id, vfdev); + } +} + static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) { u32 ssvpc; @@ -6276,6 +6460,17 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) e_warn(drv, "%u Spoofed packets detected\n", ssvpc); } +#else +static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter) +{ +} + +static void +ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter) +{ +} +#endif /* CONFIG_PCI_IOV */ + /** * ixgbe_watchdog_subtask - check and bring link up @@ -6296,6 +6491,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) else ixgbe_watchdog_link_is_down(adapter); + ixgbe_check_for_bad_vf(adapter); ixgbe_spoof_check(adapter); ixgbe_update_stats(adapter); @@ -6407,51 +6603,6 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); } -#ifdef CONFIG_PCI_IOV -static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) -{ - int vf; - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - u32 gpc; - u32 ciaa, ciad; - - gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); - if (gpc) /* If incrementing then no need for the check below */ - return; - /* - * Check to see if a bad DMA write target from an errant or - * malicious VF has caused a PCIe error. If so then we can - * issue a VFLR to the offending VF(s) and then resume without - * requesting a full slot reset. 
- */ - - for (vf = 0; vf < adapter->num_vfs; vf++) { - ciaa = (vf << 16) | 0x80000000; - /* 32 bit read so align, we really want status at offset 6 */ - ciaa |= PCI_COMMAND; - IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); - ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599); - ciaa &= 0x7FFFFFFF; - /* disable debug mode asap after reading data */ - IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); - /* Get the upper 16 bits which will be the PCI status reg */ - ciad >>= 16; - if (ciad & PCI_STATUS_REC_MASTER_ABORT) { - netdev_err(netdev, "VF %d Hung DMA\n", vf); - /* Issue VFLR */ - ciaa = (vf << 16) | 0x80000000; - ciaa |= 0xA8; - IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); - ciad = 0x00008000; /* VFLR */ - IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad); - ciaa &= 0x7FFFFFFF; - IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); - } - } -} - -#endif /** * ixgbe_service_timer - Timer Call-back * @data: pointer to adapter cast into an unsigned long @@ -6460,7 +6611,6 @@ static void ixgbe_service_timer(unsigned long data) { struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; unsigned long next_event_offset; - bool ready = true; /* poll faster when waiting for link */ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) @@ -6468,32 +6618,10 @@ static void ixgbe_service_timer(unsigned long data) else next_event_offset = HZ * 2; -#ifdef CONFIG_PCI_IOV - /* - * don't bother with SR-IOV VF DMA hang check if there are - * no VFs or the link is down - */ - if (!adapter->num_vfs || - (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) - goto normal_timer_service; - - /* If we have VFs allocated then we must check for DMA hangs */ - ixgbe_check_for_bad_vf(adapter); - next_event_offset = HZ / 50; - adapter->timer_event_accumulator++; - - if (adapter->timer_event_accumulator >= 100) - adapter->timer_event_accumulator = 0; - else - ready = false; - -normal_timer_service: -#endif /* Reset the timer */ mod_timer(&adapter->service_timer, next_event_offset + jiffies); - if (ready) - ixgbe_service_event_schedule(adapter); + ixgbe_service_event_schedule(adapter); } static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) @@ -6898,8 +7026,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { - /* notify HW of packet */ - ixgbe_write_tail(tx_ring, i); + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); } return; @@ -7197,12 +7329,8 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, * The minimum packet size for olinfo paylen is 17 so pad the skb * in order to meet this minimum size requirement. */ - if (unlikely(skb->len < 17)) { - if (skb_pad(skb, 17 - skb->len)) - return NETDEV_TX_OK; - skb->len = 17; - skb_set_tail_pointer(skb, 17); - } + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; tx_ring = ring ? 
ring : adapter->tx_ring[skb->queue_mapping]; @@ -7646,7 +7774,7 @@ static int ixgbe_set_features(struct net_device *netdev, static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, + const unsigned char *addr, u16 vid, u16 flags) { /* guarantee we can provide a unique filter for the unicast address */ @@ -7655,7 +7783,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return -ENOMEM; } - return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); } static int ixgbe_ndo_bridge_setlink(struct net_device *dev, @@ -7669,6 +7797,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, return -EOPNOTSUPP; br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { __u16 mode; @@ -7677,6 +7807,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; + if (nla_len(attr) < sizeof(mode)) + return -EINVAL; + mode = nla_get_u16(attr); if (mode == BRIDGE_MODE_VEPA) { reg = 0; @@ -7711,7 +7844,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, else mode = BRIDGE_MODE_VEPA; - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); } static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) @@ -7960,6 +8093,29 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, } /** + * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM + * @adapter: Pointer to adapter struct + */ +static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter) +{ +#ifdef CONFIG_OF + struct device_node *dp = pci_device_to_OF_node(adapter->pdev); + struct ixgbe_hw *hw = &adapter->hw; + const unsigned char *addr; + + addr = of_get_mac_address(dp); + if (addr) { + ether_addr_copy(hw->mac.perm_addr, addr); + return; + } +#endif /* CONFIG_OF */ + +#ifdef CONFIG_SPARC + ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr); +#endif /* CONFIG_SPARC */ +} + +/** * ixgbe_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in ixgbe_pci_tbl @@ -7979,6 +8135,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int i, err, pci_using_dac, expected_gts; unsigned int indices = MAX_TX_QUEUES; u8 part_str[IXGBE_PBANUM_LENGTH]; + bool disable_dev = false; #ifdef IXGBE_FCOE u16 device_caps; #endif @@ -8040,7 +8197,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(netdev, &pdev->dev); adapter = netdev_priv(netdev); - pci_set_drvdata(pdev, adapter); adapter->netdev = netdev; adapter->pdev = pdev; @@ -8098,6 +8254,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); break; default: @@ -8161,6 +8319,8 @@ skip_sriov: switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: netdev->features |= NETIF_F_SCTP_CSUM; netdev->hw_features |= NETIF_F_SCTP_CSUM | NETIF_F_NTUPLE; @@ -8223,6 +8383,8 @@ skip_sriov: goto err_sw_init; } + ixgbe_get_platform_mac_addr(adapter); + memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); 
if (!is_valid_ether_addr(netdev->dev_addr)) { @@ -8314,6 +8476,8 @@ skip_sriov: if (err) goto err_register; + pci_set_drvdata(pdev, adapter); + /* power down the optics for 82599 SFP+ fiber */ if (hw->mac.ops.disable_tx_laser) hw->mac.ops.disable_tx_laser(hw); @@ -8369,13 +8533,14 @@ err_sw_init: iounmap(adapter->io_addr); kfree(adapter->mac_table); err_ioremap: + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); err_pci_reg: err_dma: - if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + if (!adapter || disable_dev) pci_disable_device(pdev); return err; } @@ -8392,8 +8557,14 @@ err_dma: static void ixgbe_remove(struct pci_dev *pdev) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; + struct net_device *netdev; + bool disable_dev; + /* if !adapter then we already cleaned up in probe */ + if (!adapter) + return; + + netdev = adapter->netdev; ixgbe_dbg_adapter_exit(adapter); set_bit(__IXGBE_REMOVING, &adapter->state); @@ -8442,11 +8613,12 @@ static void ixgbe_remove(struct pci_dev *pdev) e_dev_info("complete\n"); kfree(adapter->mac_table); + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); - if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + if (disable_dev) pci_disable_device(pdev); } @@ -8514,6 +8686,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, case ixgbe_mac_X540: device_id = IXGBE_X540_VF_DEVICE_ID; break; + case ixgbe_mac_X550: + device_id = IXGBE_DEV_ID_X550_VF; + break; + case ixgbe_mac_X550EM_x: + device_id = IXGBE_DEV_ID_X550EM_X_VF; + break; default: device_id = 0; break; @@ -8533,8 +8711,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, * VFLR. Just clean up the AER in that case. 
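The probe error path, ixgbe_remove() and the AER error handler now all funnel pci_disable_device() through the __IXGBE_DISABLED bit. The point of test_and_set_bit() is that it is atomic and returns the old bit value, so whichever path gets there first disables the device and every later caller sees the bit already set. A sketch of the idiom, assuming the same flag and state word as above:

/* atomic "first caller wins": only the path that flips the bit from
 * 0 to 1 actually disables the PCI device, so teardown paths cannot
 * call pci_disable_device() twice for the same device */
if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
        pci_disable_device(pdev);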
*/ if (vfdev) { - e_dev_err("Issuing VFLR to VF %d\n", vf); - pci_write_config_dword(vfdev, 0xA8, 0x00008000); + ixgbe_issue_vf_flr(adapter, vfdev); /* Free device reference count */ pci_dev_put(vfdev); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index cc8f0128286c..9993a471d668 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -305,6 +305,8 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); break; case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); break; default: @@ -426,6 +428,8 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) struct ixgbe_mbx_info *mbx = &hw->mbx; if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && hw->mac.type != ixgbe_mac_X540) return; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index d47b19f27c35..8a2be444113d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -43,13 +43,195 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); -static bool ixgbe_get_i2c_data(u32 *i2cctl); +static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); /** + * ixgbe_out_i2c_byte_ack - Send I2C byte with ack + * @hw: pointer to the hardware structure + * @byte: byte to send + * + * Returns an error code on error. + **/ +static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) +{ + s32 status; + + status = ixgbe_clock_out_i2c_byte(hw, byte); + if (status) + return status; + return ixgbe_get_i2c_ack(hw); +} + +/** + * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack + * @hw: pointer to the hardware structure + * @byte: pointer to a u8 to receive the byte + * + * Returns an error code on error. + **/ +static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) +{ + s32 status; + + status = ixgbe_clock_in_i2c_byte(hw, byte); + if (status) + return status; + /* ACK */ + return ixgbe_clock_out_i2c_bit(hw, false); +} + +/** + * ixgbe_ones_comp_byte_add - Perform one's complement addition + * @add1: addend 1 + * @add2: addend 2 + * + * Returns one's complement 8-bit sum. + **/ +static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) +{ + u16 sum = add1 + add2; + + sum = (sum & 0xFF) + (sum >> 8); + return sum & 0xFF; +} + +/** + * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. 
+ **/ +s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + int max_retry = 10; + int retry = 0; + u8 csum_byte; + u8 high_bits; + u8 low_bits; + u8 reg_high; + u8 csum; + + reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ + csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); + csum = ~csum; + do { + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + ixgbe_i2c_start(hw); + /* Device Address and write indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr)) + goto fail; + /* Write bits 14:8 */ + if (ixgbe_out_i2c_byte_ack(hw, reg_high)) + goto fail; + /* Write bits 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) + goto fail; + /* Write csum */ + if (ixgbe_out_i2c_byte_ack(hw, csum)) + goto fail; + /* Re-start condition */ + ixgbe_i2c_start(hw); + /* Device Address and read indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr | 1)) + goto fail; + /* Get upper bits */ + if (ixgbe_in_i2c_byte_ack(hw, &high_bits)) + goto fail; + /* Get low bits */ + if (ixgbe_in_i2c_byte_ack(hw, &low_bits)) + goto fail; + /* Get csum */ + if (ixgbe_clock_in_i2c_byte(hw, &csum_byte)) + goto fail; + /* NACK */ + if (ixgbe_clock_out_i2c_bit(hw, false)) + goto fail; + ixgbe_i2c_stop(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + *val = (high_bits << 8) | low_bits; + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + retry++; + if (retry < max_retry) + hw_dbg(hw, "I2C byte read combined error - Retry.\n"); + else + hw_dbg(hw, "I2C byte read combined error.\n"); + } while (retry < max_retry); + + return IXGBE_ERR_I2C; +} + +/** + * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. 
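A worked example may help with the checksum math used by these combined transactions. For the CS4227 register 0x12B0 (defined later in this series), reg_high = ((0x12B0 >> 7) & 0xFE) | 1 = 0x25 and the low register byte is 0xB0; the one's complement fold of 0x25 + 0xB0 gives 0xD5, and the byte put on the wire is the bitwise complement, 0x2A. A standalone sketch (hypothetical host-side code, not part of the driver):

#include <assert.h>
#include <stdint.h>

/* same fold as ixgbe_ones_comp_byte_add(): add, then add the carry
 * back into the low byte (one's complement addition) */
static uint8_t ones_comp_byte_add(uint8_t a, uint8_t b)
{
	uint16_t sum = a + b;

	return (uint8_t)((sum & 0xFF) + (sum >> 8));
}

int main(void)
{
	uint8_t csum;

	/* carry case: 0xFF + 0x02 = 0x101 -> 0x01 + 0x01 = 0x02 */
	assert(ones_comp_byte_add(0xFF, 0x02) == 0x02);

	/* read-combined header for reg 0x12B0: reg_high 0x25, low 0xB0 */
	csum = ones_comp_byte_add(0x25, 0xB0);	/* 0xD5, no carry */
	csum = ~csum;				/* 0x2A goes on the wire */
	assert(csum == 0x2A);
	return 0;
}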
+ **/ +s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + int max_retry = 1; + int retry = 0; + u8 reg_high; + u8 csum; + + reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ + csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); + csum = ixgbe_ones_comp_byte_add(csum, val >> 8); + csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); + csum = ~csum; + do { + ixgbe_i2c_start(hw); + /* Device Address and write indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr)) + goto fail; + /* Write bits 14:8 */ + if (ixgbe_out_i2c_byte_ack(hw, reg_high)) + goto fail; + /* Write bits 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) + goto fail; + /* Write data 15:8 */ + if (ixgbe_out_i2c_byte_ack(hw, val >> 8)) + goto fail; + /* Write data 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF)) + goto fail; + /* Write csum */ + if (ixgbe_out_i2c_byte_ack(hw, csum)) + goto fail; + ixgbe_i2c_stop(hw); + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + hw_dbg(hw, "I2C byte write combined error - Retry.\n"); + else + hw_dbg(hw, "I2C byte write combined error.\n"); + } while (retry < max_retry); + + return IXGBE_ERR_I2C; +} + +/** * ixgbe_identify_phy_generic - Get physical layer module * @hw: pointer to hardware structure * @@ -60,6 +242,15 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) u32 phy_addr; u16 ext_ability = 0; + if (!hw->phy.phy_semaphore_mask) { + hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) & + IXGBE_STATUS_LAN_ID_1; + if (hw->phy.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + } + if (hw->phy.type == ixgbe_phy_unknown) { for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { hw->phy.mdio.prtad = phy_addr; @@ -315,12 +506,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { s32 status; - u16 gssr; - - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - gssr = IXGBE_GSSR_PHY1_SM; - else - gssr = IXGBE_GSSR_PHY0_SM; + u32 gssr = hw->phy.phy_semaphore_mask; if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, @@ -418,7 +604,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { s32 status; - u16 gssr; + u32 gssr; if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) gssr = IXGBE_GSSR_PHY1_SM; else gssr = IXGBE_GSSR_PHY0_SM; @@ -576,6 +762,10 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, *speed |= IXGBE_LINK_SPEED_100_FULL; } + /* Internal PHY does not support 100 Mbps */ + if (hw->mac.type == ixgbe_mac_X550EM_x) + *speed &= ~IXGBE_LINK_SPEED_100_FULL; + return status; } @@ -632,10 +822,12 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, * @hw: pointer to hardware structure * * Restart autonegotiation and PHY and waits for completion. + * This function always returns success; this is necessary since + * it is called via a function pointer that could call other + * functions that could return an error. 
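In other words, the always-zero return exists only to satisfy the shared operations-table signature; the pointer this routine is installed behind (in ixgbe_phy_operations, shown further down in this patch) is:

s32 (*setup_link)(struct ixgbe_hw *);	/* implementations that cannot fail still return 0 */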
**/ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) { - s32 status; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; bool autoneg = false; ixgbe_link_speed speed; @@ -700,8 +892,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg); - - return status; + return 0; } /** @@ -1464,15 +1655,10 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, s32 status; u32 max_retry = 10; u32 retry = 0; - u16 swfw_mask = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; bool nack = true; *data = 0; - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - swfw_mask = IXGBE_GSSR_PHY1_SM; - else - swfw_mask = IXGBE_GSSR_PHY0_SM; - do { if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; @@ -1550,12 +1736,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, s32 status; u32 max_retry = 1; u32 retry = 0; - u16 swfw_mask = 0; - - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - swfw_mask = IXGBE_GSSR_PHY1_SM; - else - swfw_mask = IXGBE_GSSR_PHY0_SM; + u32 swfw_mask = hw->phy.phy_semaphore_mask; if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; @@ -1612,7 +1793,7 @@ fail: **/ static void ixgbe_i2c_start(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); /* Start condition must begin with data and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 1); @@ -1641,7 +1822,7 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw) **/ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); /* Stop condition must begin with data low and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 0); @@ -1699,9 +1880,9 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) } /* Release SDA line (set high) */ - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - i2cctl |= IXGBE_I2C_DATA_OUT; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); return status; @@ -1717,7 +1898,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) { s32 status = 0; u32 i = 0; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); u32 timeout = 10; bool ack = true; @@ -1730,8 +1911,8 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) /* Poll for ACK. 
Note that ACK in I2C spec is * transition from 1 to 0 */ for (i = 0; i < timeout; i++) { - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - ack = ixgbe_get_i2c_data(&i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + ack = ixgbe_get_i2c_data(hw, &i2cctl); udelay(1); if (ack == 0) @@ -1760,15 +1941,15 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) **/ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ udelay(IXGBE_I2C_T_HIGH); - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - *data = ixgbe_get_i2c_data(&i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + *data = ixgbe_get_i2c_data(hw, &i2cctl); ixgbe_lower_i2c_clk(hw, &i2cctl); @@ -1788,7 +1969,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) { s32 status; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); status = ixgbe_set_i2c_data(hw, &i2cctl, data); if (status == 0) { @@ -1824,14 +2005,14 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) u32 i2cctl_r = 0; for (i = 0; i < timeout; i++) { - *i2cctl |= IXGBE_I2C_CLK_OUT; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL rise time (1000ns) */ udelay(IXGBE_I2C_T_RISE); - i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - if (i2cctl_r & IXGBE_I2C_CLK_IN) + i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw)) break; } } @@ -1846,9 +2027,9 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { - *i2cctl &= ~IXGBE_I2C_CLK_OUT; + *i2cctl &= ~IXGBE_I2C_CLK_OUT_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL fall time (300ns) */ @@ -1866,19 +2047,19 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) { if (data) - *i2cctl |= IXGBE_I2C_DATA_OUT; + *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); else - *i2cctl &= ~IXGBE_I2C_DATA_OUT; + *i2cctl &= ~IXGBE_I2C_DATA_OUT_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); /* Verify data was set correctly */ - *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - if (data != ixgbe_get_i2c_data(i2cctl)) { + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (data != ixgbe_get_i2c_data(hw, i2cctl)) { hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); return IXGBE_ERR_I2C; } @@ -1893,9 +2074,9 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) * * Returns the I2C data bit value **/ -static bool ixgbe_get_i2c_data(u32 *i2cctl) +static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) { - if (*i2cctl & IXGBE_I2C_DATA_IN) + if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw)) return true; return false; } @@ -1909,7 +2090,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl) **/ static void 
ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); u32 i; ixgbe_i2c_start(hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 54071ed17e3b..434643881287 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -77,6 +77,11 @@ #define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 #define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 +#define IXGBE_CS4227 0xBE /* CS4227 address */ +#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define IXGBE_CS4227_EDC_MODE_CX1 0x0002 +#define IXGBE_CS4227_EDC_MODE_SR 0x0004 + /* Flow control defines */ #define IXGBE_TAF_SYM_PAUSE 0x400 #define IXGBE_TAF_ASM_PAUSE 0x800 @@ -110,7 +115,6 @@ /* SFP+ SFF-8472 Compliance code */ #define IXGBE_SFF_SFF_8472_UNSUP 0x00 -s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, @@ -157,4 +161,8 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *sff8472_data); s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); +s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val); +s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 val); #endif /* _IXGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 97c85b859536..c76ba90ecc6e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -221,7 +221,8 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; - rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); + rss = min_t(int, ixgbe_max_rss_indices(adapter), + num_online_cpus()); } else { rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus()); } @@ -618,6 +619,27 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) return 0; } +static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, + u32 qde) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + int i; + + for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { + u32 reg; + + /* flush previous write */ + IXGBE_WRITE_FLUSH(hw); + + /* indicate to hardware that we want to set drop enable */ + reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE; + reg |= i << IXGBE_QDE_IDX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); + } +} + static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; @@ -647,15 +669,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); /* force drop enable for all VF Rx queues */ - for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { - /* flush previous write */ - IXGBE_WRITE_FLUSH(hw); - - /* indicate to hardware that we want to set drop enable */ - reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE; - reg |= i << IXGBE_QDE_IDX_SHIFT; - 
IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); - } + ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); /* enable receive for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); @@ -1079,52 +1093,86 @@ int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) return ixgbe_set_vf_mac(adapter, vf, mac); } +static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, + u16 vlan, u8 qos) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err = 0; + + if (adapter->vfinfo[vf].pf_vlan) + err = ixgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, + vf); + if (err) + goto out; + ixgbe_set_vmvir(adapter, vlan, qos, vf); + ixgbe_set_vmolr(hw, vf, false); + if (adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + adapter->vfinfo[vf].vlan_count++; + + /* enable hide vlan on X550 */ + if (hw->mac.type >= ixgbe_mac_X550) + ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE | + IXGBE_QDE_HIDE_VLAN); + + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IXGBE_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + +out: + return err; +} + +static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err; + + err = ixgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, vf); + ixgbe_clear_vmvir(adapter, vf); + ixgbe_set_vmolr(hw, vf, true); + hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); + if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + + return err; +} + int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) { int err = 0; struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) return -EINVAL; if (vlan || qos) { + /* Check if there is already a port VLAN set, if so + * we have to delete the old one first before we + * can set the new one. The usage model had + * previously assumed the user would delete the + * old port VLAN before setting a new one but this + * is not necessarily the case. 
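The practical effect of this rework is that an administrator can now replace a VF's port VLAN in one step. With an illustrative device and VF number, something like

	"ip link set eth0 vf 0 vlan 100 qos 2"
	"ip link set eth0 vf 0 vlan 200"

works without an intervening "vlan 0" delete, because ixgbe_ndo_set_vf_vlan() now tears down the old port VLAN itself before enabling the new one.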
+ */ if (adapter->vfinfo[vf].pf_vlan) - err = ixgbe_set_vf_vlan(adapter, false, - adapter->vfinfo[vf].pf_vlan, - vf); - if (err) - goto out; - err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); + err = ixgbe_disable_port_vlan(adapter, vf); if (err) goto out; - ixgbe_set_vmvir(adapter, vlan, qos, vf); - ixgbe_set_vmolr(hw, vf, false); - if (adapter->vfinfo[vf].spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - adapter->vfinfo[vf].vlan_count++; - adapter->vfinfo[vf].pf_vlan = vlan; - adapter->vfinfo[vf].pf_qos = qos; - dev_info(&adapter->pdev->dev, - "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); - if (test_bit(__IXGBE_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, - "The VF VLAN has been set," - " but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, - "Bring the PF device up before" - " attempting to use the VF device.\n"); - } + err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos); } else { - err = ixgbe_set_vf_vlan(adapter, false, - adapter->vfinfo[vf].pf_vlan, vf); - ixgbe_clear_vmvir(adapter, vf); - ixgbe_set_vmolr(hw, vf, true); - hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); - if (adapter->vfinfo[vf].vlan_count) - adapter->vfinfo[vf].vlan_count--; - adapter->vfinfo[vf].pf_vlan = 0; - adapter->vfinfo[vf].pf_qos = 0; + err = ixgbe_disable_port_vlan(adapter, vf); } + out: return err; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index dfd55d83bc03..d101b25dc4b6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -74,9 +74,22 @@ #define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 #define IXGBE_DEV_ID_X540T1 0x1560 +#define IXGBE_DEV_ID_X550T 0x1563 +#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA +#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB +#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC +#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD +#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE +#define IXGBE_DEV_ID_X550_VF_HV 0x1564 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 + /* VF Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED #define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 /* General Registers */ #define IXGBE_CTRL 0x00000 @@ -84,7 +97,8 @@ #define IXGBE_CTRL_EXT 0x00018 #define IXGBE_ESDP 0x00020 #define IXGBE_EODSDP 0x00028 -#define IXGBE_I2CCTL 0x00028 +#define IXGBE_I2CCTL_BY_MAC(_hw)((((_hw)->mac.type >= ixgbe_mac_X550) ? \ + 0x15F5C : 0x00028)) #define IXGBE_LEDCTL 0x00200 #define IXGBE_FRTIMER 0x00048 #define IXGBE_TCPTIMER 0x0004C @@ -112,10 +126,14 @@ #define IXGBE_VPDDIAG1 0x10208 /* I2CCTL Bit Masks */ -#define IXGBE_I2C_CLK_IN 0x00000001 -#define IXGBE_I2C_CLK_OUT 0x00000002 -#define IXGBE_I2C_DATA_IN 0x00000004 -#define IXGBE_I2C_DATA_OUT 0x00000008 +#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ + 0x00004000 : 0x00000001) +#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ + 0x00000200 : 0x00000002) +#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ + 0x00001000 : 0x00000004) +#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? 
\ + 0x00000400 : 0x00000008) #define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 @@ -290,8 +308,17 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_IMIRVP 0x05AC0 #define IXGBE_VMD_CTL 0x0581C #define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */ #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ +/* Registers for setting up RSS on X550 with SRIOV + * _p - pool number (0..63) + * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA) + */ +#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4)) +#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40)) +#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40)) + /* Flow Director registers */ #define IXGBE_FDIRCTRL 0x0EE00 #define IXGBE_FDIRHKEY 0x0EE68 @@ -725,6 +752,24 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_LDPCECL 0x0E820 #define IXGBE_LDPCECH 0x0E821 +/* MII clause 22/28 definitions */ +#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 + +#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register */ +#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */ + +#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ + +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s F Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ + /* Management */ #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ #define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ @@ -798,6 +843,12 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_PBACLR_82599 0x11068 #define IXGBE_CIAA_82599 0x11088 #define IXGBE_CIAD_82599 0x1108C +#define IXGBE_CIAA_X550 0x11508 +#define IXGBE_CIAD_X550 0x11510 +#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \ + IXGBE_CIAA_X550 : IXGBE_CIAA_82599)) +#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? 
\ + IXGBE_CIAD_X550 : IXGBE_CIAD_82599)) #define IXGBE_PICAUSE 0x110B0 #define IXGBE_PIENA 0x110B8 #define IXGBE_CDQ_MBR_82599 0x110B4 @@ -1120,6 +1171,13 @@ struct ixgbe_thermal_sensor_data { /* MDIO definitions */ +#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 +#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 +#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 +#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ +#define IXGBE_TWINAX_DEV 1 + #define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ #define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ @@ -1129,9 +1187,23 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ + +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ +#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ +#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ + +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */ +#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */ +#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */ /* MII clause 22/28 definitions */ #define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ @@ -1632,6 +1704,7 @@ enum { #define IXGBE_LINKS_TL_FAULT 0x00001000 #define IXGBE_LINKS_SIGNAL 0x00000F00 +#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 #define IXGBE_LINKS_SPEED_82599 0x30000000 #define IXGBE_LINKS_SPEED_10G_82599 0x30000000 #define IXGBE_LINKS_SPEED_1G_82599 0x20000000 @@ -1674,12 +1747,14 @@ enum { #define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ /* SW_FW_SYNC/GSSR definitions */ -#define IXGBE_GSSR_EEP_SM 0x0001 -#define IXGBE_GSSR_PHY0_SM 0x0002 -#define IXGBE_GSSR_PHY1_SM 0x0004 -#define IXGBE_GSSR_MAC_CSR_SM 0x0008 -#define IXGBE_GSSR_FLASH_SM 0x0010 -#define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_EEP_SM 0x0001 +#define IXGBE_GSSR_PHY0_SM 0x0002 +#define IXGBE_GSSR_PHY1_SM 0x0004 +#define IXGBE_GSSR_MAC_CSR_SM 0x0008 +#define IXGBE_GSSR_FLASH_SM 0x0010 +#define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */ +#define IXGBE_GSSR_I2C_MASK 0x1800 /* FW Status register bitmask */ #define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ @@ -1713,27 +1788,32 @@ enum { #define IXGBE_PBANUM_LENGTH 11 /* Checksum and EEPROM pointers */ -#define IXGBE_PBANUM_PTR_GUARD 0xFAFA -#define IXGBE_EEPROM_CHECKSUM 0x3F -#define IXGBE_EEPROM_SUM 0xBABA -#define IXGBE_PCIE_ANALOG_PTR 0x03 -#define 
IXGBE_ATLAS0_CONFIG_PTR 0x04 -#define IXGBE_PHY_PTR 0x04 -#define IXGBE_ATLAS1_CONFIG_PTR 0x05 -#define IXGBE_OPTION_ROM_PTR 0x05 -#define IXGBE_PCIE_GENERAL_PTR 0x06 -#define IXGBE_PCIE_CONFIG0_PTR 0x07 -#define IXGBE_PCIE_CONFIG1_PTR 0x08 -#define IXGBE_CORE0_PTR 0x09 -#define IXGBE_CORE1_PTR 0x0A -#define IXGBE_MAC0_PTR 0x0B -#define IXGBE_MAC1_PTR 0x0C -#define IXGBE_CSR0_CONFIG_PTR 0x0D -#define IXGBE_CSR1_CONFIG_PTR 0x0E -#define IXGBE_FW_PTR 0x0F -#define IXGBE_PBANUM0_PTR 0x15 -#define IXGBE_PBANUM1_PTR 0x16 -#define IXGBE_FREE_SPACE_PTR 0X3E +#define IXGBE_PBANUM_PTR_GUARD 0xFAFA +#define IXGBE_EEPROM_CHECKSUM 0x3F +#define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_PCIE_ANALOG_PTR 0x03 +#define IXGBE_ATLAS0_CONFIG_PTR 0x04 +#define IXGBE_PHY_PTR 0x04 +#define IXGBE_ATLAS1_CONFIG_PTR 0x05 +#define IXGBE_OPTION_ROM_PTR 0x05 +#define IXGBE_PCIE_GENERAL_PTR 0x06 +#define IXGBE_PCIE_CONFIG0_PTR 0x07 +#define IXGBE_PCIE_CONFIG1_PTR 0x08 +#define IXGBE_CORE0_PTR 0x09 +#define IXGBE_CORE1_PTR 0x0A +#define IXGBE_MAC0_PTR 0x0B +#define IXGBE_MAC1_PTR 0x0C +#define IXGBE_CSR0_CONFIG_PTR 0x0D +#define IXGBE_CSR1_CONFIG_PTR 0x0E +#define IXGBE_PCIE_ANALOG_PTR_X550 0x02 +#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000 +#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24 +#define IXGBE_PCIE_CONFIG_SIZE 0x08 +#define IXGBE_EEPROM_LAST_WORD 0x41 +#define IXGBE_FW_PTR 0x0F +#define IXGBE_PBANUM0_PTR 0x15 +#define IXGBE_PBANUM1_PTR 0x16 +#define IXGBE_FREE_SPACE_PTR 0X3E /* External Thermal Sensor Config */ #define IXGBE_ETS_CFG 0x26 @@ -1994,12 +2074,14 @@ enum { #define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 #define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 +#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 #define IXGBE_MRQC_L3L4TXSWEN 0x00008000 #define IXGBE_FWSM_TS_ENABLED 0x1 /* Queue Drop Enable */ #define IXGBE_QDE_ENABLE 0x00000001 +#define IXGBE_QDE_HIDE_VLAN 0x00000002 #define IXGBE_QDE_IDX_MASK 0x00007F00 #define IXGBE_QDE_IDX_SHIFT 8 #define IXGBE_QDE_WRITE 0x00010000 @@ -2289,18 +2371,32 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_FDIR_DROP_QUEUE 127 /* Manageablility Host Interface defines */ -#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ -#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ -#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ +#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ +#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */ +#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ +#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */ /* CEM Support */ -#define FW_CEM_HDR_LEN 0x4 -#define FW_CEM_CMD_DRIVER_INFO 0xDD -#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 -#define FW_CEM_CMD_RESERVED 0x0 -#define FW_CEM_UNUSED_VER 0x0 -#define FW_CEM_MAX_RETRIES 3 -#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_CEM_HDR_LEN 0x4 +#define FW_CEM_CMD_DRIVER_INFO 0xDD +#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 +#define FW_CEM_CMD_RESERVED 0x0 +#define FW_CEM_UNUSED_VER 0x0 +#define FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_READ_SHADOW_RAM_CMD 0x31 +#define FW_READ_SHADOW_RAM_LEN 0x6 +#define FW_WRITE_SHADOW_RAM_CMD 0x33 +#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ +#define FW_SHADOW_RAM_DUMP_CMD 0x36 +#define 
FW_SHADOW_RAM_DUMP_LEN 0 +#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define FW_NVM_DATA_OFFSET 3 +#define FW_MAX_READ_BUFFER_SIZE 1024 +#define FW_DISABLE_RXEN_CMD 0xDE +#define FW_DISABLE_RXEN_LEN 0x1 /* Host Interface Command Structures */ struct ixgbe_hic_hdr { @@ -2313,6 +2409,25 @@ struct ixgbe_hic_hdr { u8 checksum; }; +struct ixgbe_hic_hdr2_req { + u8 cmd; + u8 buf_lenh; + u8 buf_lenl; + u8 checksum; +}; + +struct ixgbe_hic_hdr2_rsp { + u8 cmd; + u8 buf_lenl; + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 checksum; +}; + +union ixgbe_hic_hdr2 { + struct ixgbe_hic_hdr2_req req; + struct ixgbe_hic_hdr2_rsp rsp; +}; + struct ixgbe_hic_drv_info { struct ixgbe_hic_hdr hdr; u8 port_num; @@ -2324,6 +2439,32 @@ struct ixgbe_hic_drv_info { u16 pad2; /* end spacing to ensure length is mult. of dword2 */ }; +/* These need to be dword aligned */ +struct ixgbe_hic_read_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_write_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_disable_rxen { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { struct { @@ -2437,10 +2578,12 @@ struct ixgbe_adv_tx_context_desc { typedef u32 ixgbe_autoneg_advertised; /* Link speed */ typedef u32 ixgbe_link_speed; -#define IXGBE_LINK_SPEED_UNKNOWN 0 -#define IXGBE_LINK_SPEED_100_FULL 0x0008 -#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 -#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_100_FULL 0x0008 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 +#define IXGBE_LINK_SPEED_5GB_FULL 0x0800 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 #define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ IXGBE_LINK_SPEED_10GB_FULL) #define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ @@ -2588,6 +2731,8 @@ enum ixgbe_mac_type { ixgbe_mac_82598EB, ixgbe_mac_82599EB, ixgbe_mac_X540, + ixgbe_mac_X550, + ixgbe_mac_X550EM_x, ixgbe_num_macs }; @@ -2596,6 +2741,9 @@ enum ixgbe_phy_type { ixgbe_phy_none, ixgbe_phy_tn, ixgbe_phy_aq, + ixgbe_phy_x550em_kr, + ixgbe_phy_x550em_kx4, + ixgbe_phy_x550em_ext_t, ixgbe_phy_cu_unknown, ixgbe_phy_qt, ixgbe_phy_xaui, @@ -2839,7 +2987,7 @@ struct ixgbe_eeprom_operations { s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); s32 (*update_checksum)(struct ixgbe_hw *); - u16 (*calc_checksum)(struct ixgbe_hw *); + s32 (*calc_checksum)(struct ixgbe_hw *); }; struct ixgbe_mac_operations { @@ -2861,8 +3009,8 @@ struct ixgbe_mac_operations { s32 (*disable_rx_buff)(struct ixgbe_hw *); s32 (*enable_rx_buff)(struct ixgbe_hw *); s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); - s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); - void (*release_swfw_sync)(struct ixgbe_hw *, u16); + s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); + void (*release_swfw_sync)(struct ixgbe_hw *, u32); s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); @@ -2908,6 +3056,11 @@ struct ixgbe_mac_operations { s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + + /* DMA Coalescing */ + s32 (*dmac_config)(struct ixgbe_hw 
*hw); + s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); + s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); }; struct ixgbe_phy_operations { @@ -2920,6 +3073,7 @@ struct ixgbe_phy_operations { s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *); s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); s32 (*setup_link)(struct ixgbe_hw *); + s32 (*setup_internal_link)(struct ixgbe_hw *); s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); @@ -2928,6 +3082,8 @@ struct ixgbe_phy_operations { s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); + s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); + s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); s32 (*check_overtemp)(struct ixgbe_hw *); }; @@ -2980,6 +3136,8 @@ struct ixgbe_phy_info { bool sfp_setup_needed; u32 revision; enum ixgbe_media_type media_type; + u8 lan_id; + u32 phy_semaphore_mask; bool reset_disable; ixgbe_autoneg_advertised autoneg_advertised; enum ixgbe_smart_speed smart_speed; @@ -3086,4 +3244,71 @@ struct ixgbe_info { #define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF +#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010)) +#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C)) +#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634)) +#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638)) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00)) +#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00)) +#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520)) +#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? 
(0x5A00) : (0x9A00)) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) + +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) + +#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) +#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) +#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) + +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) + +#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) + +#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) +#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) + +#define IXGBE_KX4_LINK_CNTL_1 0x4C +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31) + +#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 +#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 + +#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0 +#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18 +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ + (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT) +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20 +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ + (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT) +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 +#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 +#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) +#define IXGBE_SB_IOSF_TARGET_KR_PHY 0 +#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1 +#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2 +#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3 + #endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index e88305d5d18d..ba54ff07b438 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -32,6 +32,7 @@ #include "ixgbe.h" #include "ixgbe_phy.h" +#include "ixgbe_x540.h" #define IXGBE_X540_MAX_TX_QUEUES 128 #define IXGBE_X540_MAX_RX_QUEUES 128 @@ -42,17 +43,15 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); -static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); -static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); -static enum ixgbe_media_type 
ixgbe_get_media_type_X540(struct ixgbe_hw *hw) +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) { return ixgbe_media_type_copper; } -static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) +s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; @@ -76,9 +75,8 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) * @speed: new link speed * @autoneg_wait_to_complete: true when waiting for completion is needed **/ -static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); @@ -92,7 +90,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, * and clears all interrupts, perform a PHY reset, and perform a link (MAC) * reset. **/ -static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) { s32 status; u32 ctrl, i; @@ -179,7 +177,7 @@ mac_reset_top: * and the generation start_hw function. * Then performs revision-specific operations, if any. **/ -static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) { s32 ret_val; @@ -197,7 +195,7 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) * Initializes the EEPROM parameters ixgbe_eeprom_info within the * ixgbe_hw struct in order to set up EEPROM access. **/ -static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; u32 eec; @@ -316,7 +314,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, * * @hw: pointer to hardware structure **/ -static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) { u16 i; u16 j; @@ -324,6 +322,8 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) u16 length = 0; u16 pointer = 0; u16 word = 0; + u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM; + u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; /* * Do not use hw->eeprom.ops.read because we do not want to take @@ -332,10 +332,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) */ /* Include 0x0-0x3F in the checksum */ - for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { - if (ixgbe_read_eerd_generic(hw, i, &word) != 0) { + for (i = 0; i < checksum_last_word; i++) { + if (ixgbe_read_eerd_generic(hw, i, &word)) { hw_dbg(hw, "EEPROM read failed\n"); - break; + return IXGBE_ERR_EEPROM; } checksum += word; } @@ -344,11 +344,11 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) * Include all data from pointers 0x3, 0x6-0xE. This excludes the * FW, PHY module, and PCIe Expansion/Option ROM pointers. 
*/ - for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { + for (i = ptr_start; i < IXGBE_FW_PTR; i++) { if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) continue; - if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) { + if (ixgbe_read_eerd_generic(hw, i, &pointer)) { hw_dbg(hw, "EEPROM read failed\n"); break; } @@ -358,8 +358,9 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) pointer >= hw->eeprom.word_size) continue; - if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) { + if (ixgbe_read_eerd_generic(hw, pointer, &length)) { hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; break; } @@ -368,10 +369,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) (pointer + length) >= hw->eeprom.word_size) continue; - for (j = pointer+1; j <= pointer+length; j++) { - if (ixgbe_read_eerd_generic(hw, j, &word) != 0) { + for (j = pointer + 1; j <= pointer + length; j++) { + if (ixgbe_read_eerd_generic(hw, j, &word)) { hw_dbg(hw, "EEPROM read failed\n"); - break; + return IXGBE_ERR_EEPROM; } checksum += word; } @@ -379,7 +380,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) checksum = (u16)IXGBE_EEPROM_SUM - checksum; - return checksum; + return (s32)checksum; } /** @@ -410,23 +411,34 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return IXGBE_ERR_SWFW_SYNC; - checksum = hw->eeprom.ops.calc_checksum(hw); + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; + + checksum = (u16)(status & 0xffff); /* Do not use hw->eeprom.ops.read because we do not want to take * the synchronization semaphores twice here. */ status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + if (status) + goto out; - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + hw_dbg(hw, "Invalid EEPROM checksum"); + status = IXGBE_ERR_EEPROM_CHECKSUM; + } /* If the user cares, return the calculated checksum */ if (checksum_val) *checksum_val = checksum; - /* Verify read and calculated checksums are the same */ - if (read_checksum != checksum) - return IXGBE_ERR_EEPROM_CHECKSUM; +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } @@ -457,15 +469,22 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return IXGBE_ERR_SWFW_SYNC; - checksum = hw->eeprom.ops.calc_checksum(hw); + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; + + checksum = (u16)(status & 0xffff); /* Do not use hw->eeprom.ops.write because we do not want to * take the synchronization semaphores twice here. 
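The contract tying these functions together: calc_checksum() returns (u16)(IXGBE_EEPROM_SUM - sum) packed into a non-negative s32 (negative values signal a read failure), the update path writes that word at offset IXGBE_EEPROM_CHECKSUM, and the validate path recomputes the sum and compares. A compact sketch of the identity (hypothetical standalone code, arbitrary example words):

#include <assert.h>
#include <stdint.h>

#define EEPROM_SUM 0xBABA	/* IXGBE_EEPROM_SUM */

int main(void)
{
	/* example words covered by the checksum */
	uint16_t words[] = { 0x1234, 0xABCD, 0x0042 };
	uint16_t sum = 0, stored;
	unsigned int i;

	for (i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		sum += words[i];	/* 16-bit wraparound, as in the driver */

	stored = (uint16_t)(EEPROM_SUM - sum);	/* what the update path writes */

	/* validate: recomputed sum plus the stored word lands on 0xBABA */
	assert((uint16_t)(sum + stored) == EEPROM_SUM);
	return 0;
}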
*/ status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum); - if (!status) - status = ixgbe_update_flash_X540(hw); + if (status) + goto out; + + status = ixgbe_update_flash_X540(hw); +out: hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } @@ -544,7 +563,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) * Acquires the SWFW semaphore thought the SW_FW_SYNC register for * the specified function (CSR, PHY0, PHY1, NVM, Flash) **/ -static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { u32 swfw_sync; u32 swmask = mask; @@ -612,7 +631,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) * Releases the SWFW semaphore through the SW_FW_SYNC register * for the specified function (CSR, PHY0, PHY1, EVM, Flash) **/ -static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { u32 swfw_sync; u32 swmask = mask; @@ -699,7 +718,7 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) * Devices that implement the version 2 interface: * X540 **/ -static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) { u32 macc_reg; u32 ledctl_reg; @@ -735,7 +754,7 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) * Devices that implement the version 2 interface: * X540 **/ -static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) { u32 macc_reg; u32 ledctl_reg; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h new file mode 100644 index 000000000000..a1468b1f4d8a --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h @@ -0,0 +1,39 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include "ixgbe_type.h" + +s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw); +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw); +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c new file mode 100644 index 000000000000..ffdd1231f419 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -0,0 +1,1432 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ +#include "ixgbe_x540.h" +#include "ixgbe_type.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +/** ixgbe_identify_phy_x550em - Get PHY type based on device id + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) +{ + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_SFP: + /* set up for CS4227 usage */ + hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + if (hw->bus.lan_id) { + esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); + esdp |= IXGBE_ESDP_SDP1_DIR; + } + esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + + return ixgbe_identify_module_generic(hw); + case IXGBE_DEV_ID_X550EM_X_KX4: + hw->phy.type = ixgbe_phy_x550em_kx4; + break; + case IXGBE_DEV_ID_X550EM_X_KR: + hw->phy.type = ixgbe_phy_x550em_kr; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + return ixgbe_identify_phy_generic(hw); + default: + break; + } + return 0; +} + +static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + return IXGBE_NOT_IMPLEMENTED; +} + +static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + return IXGBE_NOT_IMPLEMENTED; +} + +/** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return 0; +} + +/** ixgbe_read_iosf_sb_reg_x550 - Reads a value from a specified register of the + * IOSF device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to read + * @device_type: 3 bit device type + * @data: pointer to data read from the register + **/ +s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data) +{ + u32 i, command, error; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + /* Check every 10 usec to see if the address cycle completed.
+ * The SB IOSF BUSY bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usleep_range(10, 20); + + command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); + if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) + break; + } + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + hw_dbg(hw, "Failed to read, error %x\n", error); + return IXGBE_ERR_PHY; + } + + if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { + hw_dbg(hw, "Read timed out\n"); + return IXGBE_ERR_PHY; + } + + *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); + + return 0; +} + +/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface + * command assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + s32 status; + struct ixgbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = cpu_to_be32(offset * 2); + /* one word */ + buffer.length = cpu_to_be16(sizeof(u16)); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + if (status) + return status; + + *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + FW_NVM_DATA_OFFSET); + + return 0; +} + +/** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. + **/ +s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + struct ixgbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + s32 status; + u32 i; + + /* Take semaphore for the entire operation. 
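The IOSF sideband access above is a classic indirect-register handshake: write an address/target command word, poll a BUSY flag, then check a completion status before trusting the data register. A compact standalone model of that flow (bit positions and helpers are illustrative, not the real IXGBE_SB_IOSF layout):

    #include <stdint.h>
    #include <stdbool.h>

    #define CTRL_BUSY  (1u << 31)   /* illustrative bit layout */
    #define CTRL_ERROR (1u << 29)

    static uint32_t ctrl_reg;       /* stand-in for INDIRECT_CTRL */

    /* Model: the "hardware" completes the cycle instantly. */
    static void write_ctrl(uint32_t v) { ctrl_reg = v & ~CTRL_BUSY; }
    static uint32_t read_ctrl(void) { return ctrl_reg; }

    static bool indirect_cycle_ok(uint32_t command, int timeout)
    {
        write_ctrl(command);                  /* start the address cycle */
        while (timeout--) {
            uint32_t ctrl = read_ctrl();
            if (!(ctrl & CTRL_BUSY))          /* cycle complete */
                return !(ctrl & CTRL_ERROR);  /* then check status */
            /* the driver sleeps 10-20us per iteration (usleep_range) */
        }
        return false;                         /* timed out */
    }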
*/ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status) { + hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); + return status; + } + + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = cpu_to_be32((offset + current_word) * 2); + buffer.length = cpu_to_be16(words_to_read * 2); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, + false); + if (status) { + hw_dbg(hw, "Host interface command failed\n"); + goto out; + } + + for (i = 0; i < words_to_read; i++) { + u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + + 2 * i; + u32 value = IXGBE_READ_REG(hw, reg); + + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** ixgbe_checksum_ptr_x550 - Checksum one pointer region + * @hw: pointer to hardware structure + * @ptr: pointer offset in eeprom + * @size: size of section pointed by ptr, if 0 first word will be used as size + * @csum: address of checksum to update + * + * Returns error status for any failure + **/ +static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, + u16 size, u16 *csum, u16 *buffer, + u32 buffer_size) +{ + u16 buf[256]; + s32 status; + u16 length, bufsz, i, start; + u16 *local_buffer; + + bufsz = sizeof(buf) / sizeof(buf[0]); + + /* Read a chunk at the pointer location */ + if (!buffer) { + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + local_buffer = buf; + } else { + if (buffer_size < ptr) + return IXGBE_ERR_PARAM; + local_buffer = &buffer[ptr]; + } + + if (size) { + start = 0; + length = size; + } else { + start = 1; + length = local_buffer[0]; + + /* Skip pointer section if length is invalid. 
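Each 32-bit FLEX_MNG read in the buffered loop above carries two little-endian 16-bit EEPROM words, which is why the word index advances twice per register read. The unpacking, as a standalone helper:

    #include <stdint.h>

    /* One 32-bit register read yields EEPROM words N (low half) and
     * N+1 (high half). */
    static void unpack_two_words(uint32_t reg, uint16_t out[2])
    {
        out[0] = (uint16_t)(reg & 0xffff);
        out[1] = (uint16_t)(reg >> 16);
    }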
*/ + if (length == 0xFFFF || length == 0 || + (ptr + length) >= hw->eeprom.word_size) + return 0; + } + + if (buffer && ((u32)start + (u32)length > buffer_size)) + return IXGBE_ERR_PARAM; + + for (i = start; length; i++, length--) { + if (i == bufsz && !buffer) { + ptr += bufsz; + i = 0; + if (length < bufsz) + bufsz = length; + + /* Read a chunk at the pointer location */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, + bufsz, buf); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + } + *csum += local_buffer[i]; + } + return 0; +} + +/** ixgbe_calc_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @buffer: pointer to buffer containing calculated checksum + * @buffer_size: size of buffer + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size) +{ + u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; + u16 *local_buffer; + s32 status; + u16 checksum = 0; + u16 pointer, i, size; + + hw->eeprom.ops.init_params(hw); + + if (!buffer) { + /* Read pointer area */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, + IXGBE_EEPROM_LAST_WORD + 1, + eeprom_ptrs); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < IXGBE_EEPROM_LAST_WORD) + return IXGBE_ERR_PARAM; + local_buffer = buffer; + } + + /* For X550 hardware include 0x0-0x41 in the checksum, skip the + * checksum word itself + */ + for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) + if (i != IXGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; + + /* Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. + */ + for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + pointer = local_buffer[i]; + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + switch (i) { + case IXGBE_PCIE_GENERAL_PTR: + size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; + break; + case IXGBE_PCIE_CONFIG0_PTR: + case IXGBE_PCIE_CONFIG1_PTR: + size = IXGBE_PCIE_CONFIG_SIZE; + break; + default: + size = 0; + break; + } + + status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, + buffer, buffer_size); + if (status) + return status; + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + return ixgbe_calc_checksum_X550(hw, NULL, 0); +} + +/** ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. 
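Both the X540 and X550 variants finish with checksum = IXGBE_EEPROM_SUM - sum, so the stored word is whatever makes the covered words plus the checksum total IXGBE_EEPROM_SUM modulo 2^16. A worked standalone check (0xBABA is the value this constant has in ixgbe_type.h, quoted here for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define EEPROM_SUM 0xBABAu   /* IXGBE_EEPROM_SUM, per ixgbe_type.h */

    int main(void)
    {
        uint16_t sum = 0xA0F3;                            /* made-up total */
        uint16_t checksum = (uint16_t)(EEPROM_SUM - sum); /* 0x19C7 stored */

        assert((uint16_t)(sum + checksum) == EEPROM_SUM); /* mod 2^16 */
        return 0;
    }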
+ **/ +s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + s32 status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { + status = ixgbe_read_ee_hostif_data_X550(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = IXGBE_ERR_EEPROM_CHECKSUM; + hw_dbg(hw, "Invalid EEPROM checksum"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word to write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status; + struct ixgbe_hic_write_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = cpu_to_be16(sizeof(u16)); + buffer.data = data; + buffer.address = cpu_to_be32(offset * 2); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + return status; +} + +/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word to write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + hw_dbg(hw, "write ee hostif failed to get semaphore"); + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
+ **/ +s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) +{ + s32 status = 0; + union ixgbe_hic_hdr2 buffer; + + buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; + buffer.req.buf_lenh = 0; + buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; + buffer.req.checksum = FW_DEFAULT_CHECKSUM; + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + return status; +} + +/** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum = 0; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + status = ixgbe_calc_eeprom_checksum_X550(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + checksum); + if (status) + return status; + + status = ixgbe_update_flash_X550(hw); + + return status; +} + +/** ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) to write to the EEPROM + * + * Write 16 bit word(s) to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = 0; + u32 i = 0; + + /* Take semaphore for the entire operation.
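ixgbe_update_eeprom_checksum_X550 thus enforces a strict order: read word 0 to fail fast, recompute the checksum, write the checksum word to shadow RAM, and only then ask firmware to commit shadow RAM to flash. Condensed from the code above (no new names introduced):

    status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, checksum);
    if (status)
            return status;              /* never flash a half-updated image */
    status = ixgbe_update_flash_X550(hw);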
*/ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status) { + hw_dbg(hw, "EEPROM write buffer - semaphore failed\n"); + return status; + } + + for (i = 0; i < words; i++) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, + data[i]); + if (status) { + hw_dbg(hw, "Eeprom buffered write failed\n"); + break; + } + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + return status; +} + +/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers + * @hw: pointer to hardware structure + **/ +void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + /* CS4227 does not support autoneg, so disable the laser control + * functions for SFP+ fiber + */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) { + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + } +} + +/** ixgbe_setup_sfp_modules_X550em - Setup SFP module + * @hw: pointer to hardware structure + */ +s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) +{ + bool setup_linear; + u16 reg_slice, edc_mode; + s32 ret_val; + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_unknown: + return 0; + case ixgbe_sfp_type_not_present: + return IXGBE_ERR_SFP_NOT_PRESENT; + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + setup_linear = true; + break; + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_da_act_lmt_core0: + case ixgbe_sfp_type_da_act_lmt_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + setup_linear = false; + break; + default: + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + ixgbe_init_mac_link_ops_X550em(hw); + hw->phy.ops.reset = NULL; + + /* The CS4227 slice address is the base address + the port-pair reg + * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0. + */ + reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12); + + if (setup_linear) + edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + + /* Configure CS4227 for connection type. 
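The slice arithmetic in setup_sfp is easy to sanity-check: lan_id shifts into bit 12, which is what turns the documented slice 0 address 0x12B0 into 0x22B0 for slice 1. A standalone check (the 0x12B0 base is taken from the comment above, not from the register headers):

    #include <assert.h>

    #define CS4227_SPARE24_LSB 0x12B0   /* base address, per the comment */

    int main(void)
    {
        assert((CS4227_SPARE24_LSB + (0 << 12)) == 0x12B0); /* lan_id 0 */
        assert((CS4227_SPARE24_LSB + (1 << 12)) == 0x22B0); /* lan_id 1 */
        return 0;
    }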
*/ + ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice, + edc_mode); + + if (ret_val) + ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice, + edc_mode); + + return ret_val; +} + +/** ixgbe_get_link_capabilities_x550em - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + **/ +s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + /* SFP */ + if (hw->phy.media_type == ixgbe_media_type_fiber) { + /* CS4227 SFP must not enable auto-negotiation */ + *autoneg = false; + + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + return 0; + } + + /* Link capabilities are based on SFP */ + if (hw->phy.multispeed_fiber) + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + } else { + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + } + return 0; +} + +/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the + * IOSF device + * + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data) +{ + u32 i, command, error; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + /* Write IOSF data register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); + + /* Check every 10 usec to see if the address cycle completed. + * The SB IOSF BUSY bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usleep_range(10, 20); + + command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); + if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) + break; + } + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + hw_dbg(hw, "Failed to write, error %x\n", error); + return IXGBE_ERR_PHY; + } + + if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { + hw_dbg(hw, "Write timed out\n"); + return IXGBE_ERR_PHY; + } + + return 0; +} + +/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. + * @hw: pointer to hardware structure + * @speed: the link speed to force + * + * Configures the integrated KR PHY to use iXFI mode. Used to connect an + * internal and external PHY at a specific speed, without autonegotiation. + **/ +static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +{ + s32 status; + u32 reg_val; + + /* Disable AN and force speed to 10G Serial. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + + /* Select forced link speed for internal PHY. 
*/ + switch (*speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + break; + default: + /* Other link speeds are not supported by internal KR PHY. */ + return IXGBE_ERR_LINK_SETUP; + } + + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Disable training protocol FSM. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Disable Flex from training TXFFE. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Enable override for coefficients. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Toggle port SW reset by AN reset. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY. + * @hw: pointer to hardware structure + * + * Configures the integrated KX4 PHY. + **/ +s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS0 + + hw->bus.lan_id, &reg_val); + if (status) + return status; + + reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 | + IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX); + + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE; + + /* Advertise 10G support.
*/ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4; + + /* Advertise 1G support. */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX; + + /* Restart auto-negotiation. */ + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS0 + + hw->bus.lan_id, reg_val); + + return status; +} + +/** ixgbe_setup_kr_x550em - Configure the KR PHY. + * @hw: pointer to hardware structure + * + * Configures the integrated KR PHY. + **/ +s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC; + reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | + IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); + + /* Advertise 10G support. */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; + + /* Advertise 1G support. */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; + + /* Restart auto-negotiation. */ + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY + * @hw: pointer to hardware structure + * + * Configures the integrated KR PHY to talk to the external PHY. The base + * driver will call this function when it gets notification via interrupt from + * the external PHY. This function forces the internal PHY into iXFI mode at + * the correct speed. + * + * A return of a non-zero value indicates an error, and the base driver should + * not report link up.
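setup_kx4 and setup_kr above share one read-modify-write shape: clear both capability bits, set them back from hw->phy.autoneg_advertised, then kick AN_RESTART. Condensed here for the KR path (the KX4 path is identical with the KX4_LINK_CNTL_1 names):

    reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
                 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
    if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
            reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;  /* 10GBASE-KR */
    if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
            reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;  /* 1000BASE-KX */
    reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;         /* restart AN */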
+ **/ +s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw) +{ + u32 status; + u16 lasi, autoneg_status, speed; + ixgbe_link_speed force_speed; + + /* Verify that the external link status has changed */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &lasi); + if (status) + return status; + + /* If there was no change in link status, we can just exit */ + if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM)) + return 0; + + /* we read this twice back to back to indicate current status */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (status) + return status; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (status) + return status; + + /* If link is not up return an error indicating treat link as down */ + if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) + return IXGBE_ERR_INVALID_LINK_SETTINGS; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + + /* clear everything but the speed and duplex bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; + + switch (speed) { + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + default: + /* Internal PHY does not support anything else */ + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + return ixgbe_setup_ixfi_x550em(hw, &force_speed); +} + +/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
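The interrupt-driven handoff above boils down to: confirm the external PHY's link via the twice-read autoneg status, decode its negotiated speed from the vendor status word, and force the internal KR PHY to match. The decode step as a standalone sketch (bit values are illustrative, not the real MDIO encoding):

    #include <stdint.h>

    enum forced_speed { FORCE_10G, FORCE_1G, FORCE_INVALID };

    /* Mask the vendor status word down to its speed/duplex bits, then
     * map to the only two speeds the iXFI path supports. */
    static enum forced_speed decode_speed(uint16_t status, uint16_t mask,
                                          uint16_t bits_10g, uint16_t bits_1g)
    {
        uint16_t speed = status & mask;

        if (speed == bits_10g)
            return FORCE_10G;
        if (speed == bits_1g)
            return FORCE_1G;
        return FORCE_INVALID;   /* anything else: treat link as down */
    }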
+ **/ +s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + u32 esdp; + + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) { + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + + if (hw->bus.lan_id) { + esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); + esdp |= IXGBE_ESDP_SDP1_DIR; + } + esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + } + + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + + /* Setup function pointers based on detected SFP module and speeds */ + ixgbe_init_mac_link_ops_X550em(hw); + if (phy->sfp_type != ixgbe_sfp_type_unknown) + phy->ops.reset = NULL; + + /* Set functions pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_x550em_kx4: + phy->ops.setup_link = ixgbe_setup_kx4_x550em; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_kr: + phy->ops.setup_link = ixgbe_setup_kr_x550em; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_ext_t: + phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em; + break; + default: + break; + } + return ret_val; +} + +/** ixgbe_get_media_type_X550em - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + * + */ +enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + /* Detect if there is a copper PHY attached. */ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_KX4: + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_X550EM_X_SFP: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } + return media_type; +} + +/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. 
+ ** @hw: pointer to hardware structure + **/ +s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) +{ + u32 status; + u16 reg; + u32 retries = 2; + + do { + /* decrement retries counter and exit if we hit 0 */ + if (retries < 1) { + hw_dbg(hw, "External PHY not yet finished resetting."); + return IXGBE_ERR_PHY; + } + retries--; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + ®); + if (status) + return status; + + /* Verify PHY FW reset has completed */ + } while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1); + + /* Set port to low power mode */ + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + if (status) + return status; + + /* Enable the transmitter */ + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + ®); + if (status) + return status; + + reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE; + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + reg); + if (status) + return status; + + /* Un-stall the PHY FW */ + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + if (status) + return status; + + reg &= ~IXGBE_MDIO_POWER_UP_STALL; + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + return status; +} + +/** ixgbe_reset_hw_X550em - Perform hardware reset + ** @hw: pointer to hardware structure + ** + ** Resets the hardware by resetting the transmit and receive units, masks + ** and clears all interrupts, perform a PHY reset, and perform a link (MAC) + ** reset. + **/ +s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) +{ + ixgbe_link_speed link_speed; + s32 status; + u32 ctrl = 0; + u32 i; + bool link_up = false; + + /* Call adapter stop to disable Tx/Rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status) + return status; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + + /* PHY ops must be identified and initialized prior to reset */ + + /* Identify PHY and related function pointers */ + status = hw->phy.ops.init(hw); + + /* start the external PHY */ + if (hw->phy.type == ixgbe_phy_x550em_ext_t) { + status = ixgbe_init_ext_t_x550em(hw); + if (status) + return status; + } + + /* Setup SFP module if there is one present. */ + if (hw->phy.sfp_setup_needed) { + status = hw->mac.ops.setup_sfp(hw); + hw->phy.sfp_setup_needed = false; + } + + /* Reset PHY */ + if (!hw->phy.reset_disable && hw->phy.ops.reset) + hw->phy.ops.reset(hw); + +mac_reset_top: + /* Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. 
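The reset-type decision described here is small enough to read at a glance: a link reset is the default, but if link is up it could yank the PHY away from manageability firmware, so the softer MAC-only reset is used instead. Condensed from the function body that follows (no new names):

    ctrl = IXGBE_CTRL_LNK_RST;                  /* default: link reset */
    if (!hw->force_full_reset) {
            hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
            if (link_up)
                    ctrl = IXGBE_CTRL_RST;      /* link up: MAC-only reset */
    }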
*/ + ctrl = IXGBE_CTRL_LNK_RST; + + if (!hw->force_full_reset) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up) + ctrl = IXGBE_CTRL_RST; + } + + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear meaning reset is complete */ + for (i = 0; i < 10; i++) { + udelay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "Reset polling failed to complete.\n"); + } + + msleep(50); + + /* Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + return status; +} + +#define X550_COMMON_MAC \ + .init_hw = &ixgbe_init_hw_generic, \ + .start_hw = &ixgbe_start_hw_X540, \ + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, \ + .enable_rx_dma = &ixgbe_enable_rx_dma_generic, \ + .get_mac_addr = &ixgbe_get_mac_addr_generic, \ + .get_device_caps = &ixgbe_get_device_caps_generic, \ + .stop_adapter = &ixgbe_stop_adapter_generic, \ + .get_bus_info = &ixgbe_get_bus_info_generic, \ + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \ + .read_analog_reg8 = NULL, \ + .write_analog_reg8 = NULL, \ + .set_rxpba = &ixgbe_set_rxpba_generic, \ + .check_link = &ixgbe_check_mac_link_generic, \ + .led_on = &ixgbe_led_on_generic, \ + .led_off = &ixgbe_led_off_generic, \ + .blink_led_start = &ixgbe_blink_led_start_X540, \ + .blink_led_stop = &ixgbe_blink_led_stop_X540, \ + .set_rar = &ixgbe_set_rar_generic, \ + .clear_rar = &ixgbe_clear_rar_generic, \ + .set_vmdq = &ixgbe_set_vmdq_generic, \ + .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, \ + .clear_vmdq = &ixgbe_clear_vmdq_generic, \ + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, \ + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, \ + .enable_mc = &ixgbe_enable_mc_generic, \ + .disable_mc = &ixgbe_disable_mc_generic, \ + .clear_vfta = &ixgbe_clear_vfta_generic, \ + .set_vfta = &ixgbe_set_vfta_generic, \ + .fc_enable = &ixgbe_fc_enable_generic, \ + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \ + .init_uta_tables = &ixgbe_init_uta_tables_generic, \ + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \ + .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \ + .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \ + .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ + .get_thermal_sensor_data = NULL, \ + .init_thermal_sensor_thresh = NULL, \ + .prot_autoc_read = &prot_autoc_read_generic, \ + .prot_autoc_write = &prot_autoc_write_generic, \ + +static struct ixgbe_mac_operations mac_ops_X550 = { + X550_COMMON_MAC + .reset_hw = &ixgbe_reset_hw_X540, + .get_media_type = &ixgbe_get_media_type_X540, + .get_san_mac_addr =
&ixgbe_get_san_mac_addr_generic, + .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, + .setup_link = &ixgbe_setup_mac_link_X540, + .set_rxpba = &ixgbe_set_rxpba_generic, + .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, + .setup_sfp = NULL, +}; + +static struct ixgbe_mac_operations mac_ops_X550EM_x = { + X550_COMMON_MAC + .reset_hw = &ixgbe_reset_hw_X550em, + .get_media_type = &ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, + .get_wwn_prefix = NULL, + .setup_link = NULL, /* defined later */ + .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, + .setup_sfp = ixgbe_setup_sfp_modules_X550em, + +}; + +#define X550_COMMON_EEP \ + .read = &ixgbe_read_ee_hostif_X550, \ + .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \ + .write = &ixgbe_write_ee_hostif_X550, \ + .write_buffer = &ixgbe_write_ee_hostif_buffer_X550, \ + .validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \ + .update_checksum = &ixgbe_update_eeprom_checksum_X550, \ + .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \ + +static struct ixgbe_eeprom_operations eeprom_ops_X550 = { + X550_COMMON_EEP + .init_params = &ixgbe_init_eeprom_params_X550, +}; + +static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { + X550_COMMON_EEP + .init_params = &ixgbe_init_eeprom_params_X540, +}; + +#define X550_COMMON_PHY \ + .identify_sfp = &ixgbe_identify_module_generic, \ + .reset = NULL, \ + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, \ + .read_i2c_byte = &ixgbe_read_i2c_byte_generic, \ + .write_i2c_byte = &ixgbe_write_i2c_byte_generic, \ + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \ + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ + .check_overtemp = &ixgbe_tn_check_overtemp, \ + .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, + +static struct ixgbe_phy_operations phy_ops_X550 = { + X550_COMMON_PHY + .init = NULL, + .identify = &ixgbe_identify_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .setup_link = &ixgbe_setup_phy_link_generic, + .read_i2c_combined = &ixgbe_read_i2c_combined_generic, + .write_i2c_combined = &ixgbe_write_i2c_combined_generic, +}; + +static struct ixgbe_phy_operations phy_ops_X550EM_x = { + X550_COMMON_PHY + .init = &ixgbe_init_phy_ops_X550em, + .identify = &ixgbe_identify_phy_x550em, + .read_reg = NULL, /* defined later */ + .write_reg = NULL, /* defined later */ + .setup_link = NULL, /* defined later */ +}; + +struct ixgbe_info ixgbe_X550_info = { + .mac = ixgbe_mac_X550, + .get_invariants = &ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_X550, + .eeprom_ops = &eeprom_ops_X550, + .phy_ops = &phy_ops_X550, + .mbx_ops = &mbx_ops_generic, +}; + +struct ixgbe_info ixgbe_X550EM_x_info = { + .mac = ixgbe_mac_X550EM_x, + .get_invariants = &ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_X550EM_x, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_X550EM_x, + .mbx_ops = &mbx_ops_generic, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 05e4f32d84f7..7412d378b77b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -31,6 +31,8 @@ /* Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED #define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 #define IXGBE_VF_IRQ_CLEAR_MASK 7 #define IXGBE_VF_MAX_TX_QUEUES 8 diff --git 
a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index ba96cb5b886d..8c44ab25f3fa 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -58,8 +58,9 @@ struct ixgbevf_tx_buffer { }; struct ixgbevf_rx_buffer { - struct sk_buff *skb; dma_addr_t dma; + struct page *page; + unsigned int page_offset; }; struct ixgbevf_stats { @@ -79,7 +80,6 @@ struct ixgbevf_tx_queue_stats { }; struct ixgbevf_rx_queue_stats { - u64 non_eop_descs; u64 alloc_rx_page_failed; u64 alloc_rx_buff_failed; u64 csum_err; @@ -92,9 +92,10 @@ struct ixgbevf_ring { void *desc; /* descriptor ring memory */ dma_addr_t dma; /* phys. address of descriptor ring */ unsigned int size; /* length in bytes */ - unsigned int count; /* amount of descriptors */ - unsigned int next_to_use; - unsigned int next_to_clean; + u16 count; /* amount of descriptors */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; union { struct ixgbevf_tx_buffer *tx_buffer_info; @@ -110,12 +111,11 @@ struct ixgbevf_ring { u64 hw_csum_rx_error; u8 __iomem *tail; + struct sk_buff *skb; u16 reg_idx; /* holds the special value that gets the hardware register * offset associated with this ring, which is different * for DCB and RSS modes */ - - u16 rx_buf_len; int queue_index; /* needed for multiqueue queue management */ }; @@ -134,12 +134,10 @@ struct ixgbevf_ring { /* Supported Rx Buffer Sizes */ #define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ -#define IXGBEVF_RXBUFFER_2K 2048 -#define IXGBEVF_RXBUFFER_4K 4096 -#define IXGBEVF_RXBUFFER_8K 8192 -#define IXGBEVF_RXBUFFER_10K 10240 +#define IXGBEVF_RXBUFFER_2048 2048 #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 +#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) @@ -307,6 +305,13 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) ((_eitr) ? 
(1000000000 / ((_eitr) * 256)) : 8) #define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG +/* ixgbevf_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring) { u16 ntc = ring->next_to_clean; @@ -339,8 +344,10 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value) /* board specific private data structure */ struct ixgbevf_adapter { - struct timer_list watchdog_timer; + /* this field must be first, see ixgbevf_process_skb_fields */ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + struct timer_list watchdog_timer; struct work_struct reset_task; struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; @@ -363,7 +370,6 @@ struct ixgbevf_adapter { struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */ u64 hw_csum_rx_error; u64 hw_rx_no_dma_resources; - u64 non_eop_descs; int num_msix_vectors; u32 alloc_rx_page_failed; u32 alloc_rx_buff_failed; @@ -373,7 +379,7 @@ struct ixgbevf_adapter { */ u32 flags; #define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1) -#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1) + #define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2) struct msix_entry *msix_entries; @@ -423,18 +429,17 @@ enum ixbgevf_state_t { __IXGBEVF_WORK_INIT, }; -struct ixgbevf_cb { - struct sk_buff *prev; -}; -#define IXGBE_CB(skb) ((struct ixgbevf_cb *)(skb)->cb) - enum ixgbevf_boards { board_82599_vf, board_X540_vf, + board_X550_vf, + board_X550EM_x_vf, }; extern const struct ixgbevf_info ixgbevf_82599_vf_info; extern const struct ixgbevf_info ixgbevf_X540_vf_info; +extern const struct ixgbevf_info ixgbevf_X550_vf_info; +extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; /* needed by ethtool.c */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 030a219c85e3..62a0d8e0f17d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -66,6 +66,8 @@ static char ixgbevf_copyright[] = static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, [board_X540_vf] = &ixgbevf_X540_vf_info, + [board_X550_vf] = &ixgbevf_X550_vf_info, + [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, }; /* ixgbevf_pci_tbl - PCI Device ID Table @@ -79,6 +81,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { static const struct pci_device_id ixgbevf_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, /* required last entry */ {0, } }; @@ -143,21 +147,6 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) return value; } -static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring, - u32 val) -{ - rx_ring->next_to_use = val; - - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
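The new ixgbevf_test_staterr helper above is what lets the reworked Rx cleanup drop per-descriptor byte swapping: the status word stays __le32 and only the constant mask is converted, which the compiler folds at build time. Typical usage, mirroring how the patch calls it (fragment, not standalone):

    /* True when the descriptor is done and ends a frame; no le32_to_cpu
     * on the hot path, since cpu_to_le32(constant) is folded at compile
     * time on both endiannesses. */
    if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD) &&
        ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
            /* process the completed frame */;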
- */ - wmb(); - ixgbevf_write_tail(rx_ring, val); -} - /** * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors * @adapter: pointer to adapter struct @@ -343,39 +332,12 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, } /** - * ixgbevf_receive_skb - Send a completed packet up the stack - * @q_vector: structure containing interrupt and ring information - * @skb: packet to send up - * @status: hardware indication of status of receive - * @rx_desc: rx descriptor - **/ -static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, - struct sk_buff *skb, u8 status, - union ixgbe_adv_rx_desc *rx_desc) -{ - struct ixgbevf_adapter *adapter = q_vector->adapter; - bool is_vlan = (status & IXGBE_RXD_STAT_VP); - u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); - - if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); - - if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) - napi_gro_receive(&q_vector->napi, skb); - else - netif_rx(skb); -} - -/** * ixgbevf_rx_skb - Helper function to determine proper Rx method * @q_vector: structure containing interrupt and ring information * @skb: packet to send up - * @status: hardware indication of status of receive - * @rx_desc: rx descriptor **/ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, - struct sk_buff *skb, u8 status, - union ixgbe_adv_rx_desc *rx_desc) + struct sk_buff *skb) { #ifdef CONFIG_NET_RX_BUSY_POLL skb_mark_napi_id(skb, &q_vector->napi); @@ -387,17 +349,17 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, } #endif /* CONFIG_NET_RX_BUSY_POLL */ - ixgbevf_receive_skb(q_vector, skb, status, rx_desc); + napi_gro_receive(&q_vector->napi, skb); } -/** - * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum - * @ring: pointer to Rx descriptor ring structure - * @status_err: hardware indication of status of receive +/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed * @skb: skb currently being received and modified - **/ + */ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, - u32 status_err, struct sk_buff *skb) + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { skb_checksum_none_assert(skb); @@ -406,16 +368,16 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, return; /* if IP and error */ - if ((status_err & IXGBE_RXD_STAT_IPCS) && - (status_err & IXGBE_RXDADV_ERR_IPE)) { + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && + ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { ring->rx_stats.csum_err++; return; } - if (!(status_err & IXGBE_RXD_STAT_L4CS)) + if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS)) return; - if (status_err & IXGBE_RXDADV_ERR_TCPE) { + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { ring->rx_stats.csum_err++; return; } @@ -424,52 +386,408 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, skb->ip_summed = CHECKSUM_UNNECESSARY; } +/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the checksum, VLAN, protocol, and other fields within + * the skb.
+ */ +static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + ixgbevf_rx_checksum(rx_ring, rx_desc, skb); + + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { + u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); + unsigned long *active_vlans = netdev_priv(rx_ring->netdev); + + if (test_bit(vid & VLAN_VID_MASK, active_vlans)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +/** + * ixgbevf_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IXGBEVF_RX_DESC(rx_ring, ntc)); + + if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + return false; + + return true; +} + +static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma = bi->dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_page(); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + /** * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on + * @cleaned_count: number of buffers to replace **/ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, - int cleaned_count) + u16 cleaned_count) { union ixgbe_adv_rx_desc *rx_desc; struct ixgbevf_rx_buffer *bi; unsigned int i = rx_ring->next_to_use; - while (cleaned_count--) { - rx_desc = IXGBEVF_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_buffer_info[i]; + /* nothing to do or no valid netdev defined */ + if (!cleaned_count || !rx_ring->netdev) + return; - if (!bi->skb) { - struct sk_buff *skb; + rx_desc = IXGBEVF_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len); - if (!skb) - goto no_buffers; + do { + if (!ixgbevf_alloc_mapped_page(rx_ring, bi)) + break; - bi->skb = skb; + /* Refresh the desc even if pkt_addr didn't change + * because each write-back erases this info. 
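The refill loop above biases the ring index by -count so the wrap test collapses to a single unlikely(!i) check instead of a bounds compare per descriptor. A standalone model of that indexing trick:

    #include <stdint.h>

    /* Bias the ring index negative so "i == 0" is the wrap point. */
    static uint16_t refill(uint16_t next_to_use, uint16_t count, uint16_t n)
    {
        int32_t i = (int32_t)next_to_use - count;   /* in [-count, 0) */

        while (n--) {
            /* touch descriptor (i + count) here */
            i++;
            if (i == 0)        /* reached the end of the ring */
                i -= count;    /* wrap back to descriptor 0 */
        }
        return (uint16_t)(i + count);               /* unbiased index */
    }

For example, refill(254, 256, 4) visits descriptors 254, 255, 0, 1 and returns 2, matching the i += rx_ring->count recovery at the end of the driver loop.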
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); - bi->dma = dma_map_single(rx_ring->dev, skb->data, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->dma)) { - dev_kfree_skb(skb); - bi->skb = NULL; - dev_err(rx_ring->dev, "Rx DMA map failed\n"); - break; - } + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IXGBEVF_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; } - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - i++; - if (i == rx_ring->count) - i = 0; + /* clear the hdr_addr for the next_to_use descriptor */ + rx_desc->read.hdr_addr = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + ixgbevf_write_tail(rx_ring, i); } +} + +/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an ixgbevf specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; -no_buffers: - rx_ring->rx_stats.alloc_rx_buff_failed++; - if (rx_ring->next_to_use != i) - ixgbevf_release_rx_desc(rx_ring, i); + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lowmem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/* ixgbevf_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed.
+ */ +static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* verify that the packet does not have any known errors */ + if (unlikely(ixgbevf_test_staterr(rx_desc, + IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + ixgbevf_pull_tail(rx_ring, skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + */ +static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *old_buff) +{ + struct ixgbevf_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->page = old_buff->page; + new_buff->dma = old_buff->dma; + new_buff->page_offset = old_buff->page_offset; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, + new_buff->page_offset, + IXGBEVF_RX_BUFSZ, + DMA_FROM_DEVICE); +} + +static inline bool ixgbevf_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; +} + +/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. 
+ */ +static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = IXGBEVF_RX_BUFSZ; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif + + if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as is */ + if (likely(!ixgbevf_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(ixgbevf_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ; + +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ)) + return false; + +#endif + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + atomic_inc(&page->_count); + + return true; +} + +static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct ixgbevf_rx_buffer *rx_buffer; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + IXGBEVF_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + IXGBEVF_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* pull page into skb */ + if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ixgbevf_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->dma = 0; + rx_buffer->page = NULL; + + return skb; } static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, @@ -484,78 +802,51 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring *rx_ring, int budget) { - union ixgbe_adv_rx_desc *rx_desc, *next_rxd; - struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; - struct sk_buff *skb; - unsigned int i; - u32 len, staterr; - int 
cleaned_count = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = ixgbevf_desc_unused(rx_ring); + struct sk_buff *skb = rx_ring->skb; - i = rx_ring->next_to_clean; - rx_desc = IXGBEVF_RX_DESC(rx_ring, i); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - rx_buffer_info = &rx_ring->rx_buffer_info[i]; + while (likely(total_rx_packets < budget)) { + union ixgbe_adv_rx_desc *rx_desc; - while (staterr & IXGBE_RXD_STAT_DD) { - if (!budget) - break; - budget--; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { + ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } - rmb(); /* read descriptor and rx_buffer_info after status DD */ - len = le16_to_cpu(rx_desc->wb.upper.length); - skb = rx_buffer_info->skb; - prefetch(skb->data - NET_IP_ALIGN); - rx_buffer_info->skb = NULL; + rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); - if (rx_buffer_info->dma) { - dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_buffer_info->dma = 0; - skb_put(skb, len); - } + if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) + break; - i++; - if (i == rx_ring->count) - i = 0; + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STAT_DD bit is set + */ + rmb(); - next_rxd = IXGBEVF_RX_DESC(rx_ring, i); - prefetch(next_rxd); - cleaned_count++; + /* retrieve a buffer from the ring */ + skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb); - next_buffer = &rx_ring->rx_buffer_info[i]; + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; - if (!(staterr & IXGBE_RXD_STAT_EOP)) { - skb->next = next_buffer->skb; - IXGBE_CB(skb->next)->prev = skb; - rx_ring->rx_stats.non_eop_descs++; - goto next_desc; - } + cleaned_count++; - /* we should not be chaining buffers, if we did drop the skb */ - if (IXGBE_CB(skb)->prev) { - do { - struct sk_buff *this = skb; - skb = IXGBE_CB(skb)->prev; - dev_kfree_skb(this); - } while (skb); - goto next_desc; - } + /* fetch next buffer in frame if non-eop */ + if (ixgbevf_is_non_eop(rx_ring, rx_desc)) + continue; - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { - dev_kfree_skb_irq(skb); - goto next_desc; + /* verify the packet layout is correct */ + if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; } - ixgbevf_rx_checksum(rx_ring, staterr, skb); - /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; - total_rx_packets++; - - skb->protocol = eth_type_trans(skb, rx_ring->netdev); /* Workaround hardware that can't do proper VEPA multicast * source pruning. 
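The reworked clean-up loop above checks descriptor write-back status through ixgbevf_test_staterr() instead of caching a host-endian staterr word across iterations. The helper itself is not part of this hunk (it is presumably defined in ixgbevf.h); a minimal sketch of the test it performs, assuming the advanced Rx descriptor layout used throughout this file:

static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					  const u32 stat_err_bits)
{
	/* Compare in descriptor (little-endian) byte order: the constant
	 * mask is byte-swapped once at compile time rather than swapping
	 * the status word on every check.
	 */
	return rx_desc->wb.upper.status_error &
	       cpu_to_le32(stat_err_bits);
}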
@@ -565,32 +856,23 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, ether_addr_equal(rx_ring->netdev->dev_addr, eth_hdr(skb)->h_source)) { dev_kfree_skb_irq(skb); - goto next_desc; + continue; } - ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc); + /* populate checksum, VLAN, and protocol */ + ixgbevf_process_skb_fields(rx_ring, rx_desc, skb); -next_desc: - rx_desc->wb.upper.status_error = 0; + ixgbevf_rx_skb(q_vector, skb); - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { - ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - rx_buffer_info = &rx_ring->rx_buffer_info[i]; + /* reset skb pointer */ + skb = NULL; - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + /* update budget accounting */ + total_rx_packets++; } - rx_ring->next_to_clean = i; - cleaned_count = ixgbevf_desc_unused(rx_ring); - - if (cleaned_count) - ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; @@ -634,12 +916,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) else per_ring_budget = budget; - adapter->flags |= IXGBE_FLAG_IN_NETPOLL; ixgbevf_for_each_ring(ring, q_vector->rx) clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring, per_ring_budget) < per_ring_budget); - adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; #ifdef CONFIG_NET_RX_BUSY_POLL ixgbevf_qv_unlock_napi(q_vector); @@ -1229,19 +1509,15 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) { - struct ixgbevf_ring *rx_ring; struct ixgbe_hw *hw = &adapter->hw; u32 srrctl; - rx_ring = adapter->rx_ring[index]; - srrctl = IXGBE_SRRCTL_DROP_EN; + srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; - IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); } @@ -1260,40 +1536,6 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); } -static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - int i; - u16 rx_buf_len; - - /* notify the PF of our intent to use this size of frame */ - ixgbevf_rlpml_set_vf(hw, max_frame); - - /* PF will allow an extra 4 bytes past for vlan tagged frames */ - max_frame += VLAN_HLEN; - - /* - * Allocate buffer sizes that fit well into 32K and - * take into account max frame size of 9.5K - */ - if ((hw->mac.type == ixgbe_mac_X540_vf) && - (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) - rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; - else if (max_frame <= IXGBEVF_RXBUFFER_2K) - rx_buf_len = IXGBEVF_RXBUFFER_2K; - else if (max_frame <= IXGBEVF_RXBUFFER_4K) - rx_buf_len = IXGBEVF_RXBUFFER_4K; - else if (max_frame <= IXGBEVF_RXBUFFER_8K) - rx_buf_len = IXGBEVF_RXBUFFER_8K; - else - rx_buf_len = IXGBEVF_RXBUFFER_10K; - - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->rx_buf_len = rx_buf_len; -} - #define IXGBEVF_MAX_RX_DESC_POLL 10 static void ixgbevf_disable_rx_queue(struct 
ixgbevf_adapter *adapter, struct ixgbevf_ring *ring) @@ -1371,12 +1613,13 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, /* reset ntu and ntc to place SW in sync with hardware */ ring->next_to_clean = 0; ring->next_to_use = 0; + ring->next_to_alloc = 0; ixgbevf_configure_srrctl(adapter, reg_idx); - /* prevent DMA from exceeding buffer space available */ - rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; - rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN; + /* allow any size packet since we can handle overflow */ + rxdctl &= ~IXGBE_RXDCTL_RLPML_EN; + rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); @@ -1393,11 +1636,13 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) { int i; + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; ixgbevf_setup_psrtype(adapter); - /* set_rx_buffer_len must be called before ring initialization */ - ixgbevf_set_rx_buffer_len(adapter); + /* notify the PF of our intent to use this size of frame */ + ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ @@ -1702,32 +1947,32 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter) **/ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) { + struct device *dev = rx_ring->dev; unsigned long size; unsigned int i; + /* Free Rx ring sk_buff */ + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* ring already cleared, nothing to do */ if (!rx_ring->rx_buffer_info) return; - /* Free all the Rx ring sk_buffs */ + /* Free all the Rx ring pages */ for (i = 0; i < rx_ring->count; i++) { - struct ixgbevf_rx_buffer *rx_buffer_info; + struct ixgbevf_rx_buffer *rx_buffer; - rx_buffer_info = &rx_ring->rx_buffer_info[i]; - if (rx_buffer_info->dma) { - dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_buffer_info->dma = 0; - } - if (rx_buffer_info->skb) { - struct sk_buff *skb = rx_buffer_info->skb; - rx_buffer_info->skb = NULL; - do { - struct sk_buff *this = skb; - skb = IXGBE_CB(skb)->prev; - dev_kfree_skb(this); - } while (skb); - } + rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_buffer->dma) + dma_unmap_page(dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + rx_buffer->dma = 0; + if (rx_buffer->page) + __free_page(rx_buffer->page); + rx_buffer->page = NULL; } size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; @@ -3274,6 +3519,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; @@ -3282,7 +3528,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; break; default: - if (adapter->hw.mac.type == ixgbe_mac_X540_vf) + if (adapter->hw.mac.type != ixgbe_mac_82599_vf) max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; break; } @@ -3291,17 +3537,35 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) if ((new_mtu < 68) || (max_frame > max_possible_frame)) return -EINVAL; - hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", + hw_dbg(hw, "changing MTU from %d to
%d\n", netdev->mtu, new_mtu); /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; - if (netif_running(netdev)) - ixgbevf_reinit_locked(adapter); + /* notify the PF of our intent to use this size of frame */ + ixgbevf_rlpml_set_vf(hw, max_frame); return 0; } +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void ixgbevf_netpoll(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state)) + return; + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbevf_msix_clean_rings(0, adapter->q_vector[i]); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -3438,6 +3702,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = { #ifdef CONFIG_NET_RX_BUSY_POLL .ndo_busy_poll = ixgbevf_busy_poll_recv, #endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ixgbevf_netpoll, +#endif }; static void ixgbevf_assign_netdev_ops(struct net_device *dev) @@ -3465,6 +3732,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct ixgbe_hw *hw = NULL; const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; int err, pci_using_dac; + bool disable_dev = false; err = pci_enable_device(pdev); if (err) @@ -3499,7 +3767,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(netdev, &pdev->dev); - pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); adapter->netdev = netdev; @@ -3588,16 +3855,28 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_register; + pci_set_drvdata(pdev, netdev); netif_carrier_off(netdev); ixgbevf_init_last_counter_stats(adapter); - /* print the MAC address */ - hw_dbg(hw, "%pM\n", netdev->dev_addr); + /* print the VF info */ + dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); + dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); - hw_dbg(hw, "MAC: %d\n", hw->mac.type); + switch (hw->mac.type) { + case ixgbe_mac_X550_vf: + dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n"); + break; + case ixgbe_mac_X540_vf: + dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); + break; + case ixgbe_mac_82599_vf: + default: + dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); + break; + } - hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); return 0; err_register: @@ -3606,12 +3885,13 @@ err_sw_init: ixgbevf_reset_interrupt_capability(adapter); iounmap(adapter->io_addr); err_ioremap: + disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); err_pci_reg: err_dma: - if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) + if (!adapter || disable_dev) pci_disable_device(pdev); return err; } @@ -3628,7 +3908,13 @@ err_dma: static void ixgbevf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_adapter *adapter; + bool disable_dev; + + if (!netdev) + return; + + adapter = netdev_priv(netdev); set_bit(__IXGBEVF_REMOVING, &adapter->state); @@ -3648,9 +3934,10 @@ static void ixgbevf_remove(struct pci_dev *pdev) 
hw_dbg(&adapter->hw, "Remove complete\n"); + disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); free_netdev(netdev); - if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) + if (disable_dev) pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 9cddd56d02c3..cdb53be7d995 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -617,3 +617,13 @@ const struct ixgbevf_info ixgbevf_X540_vf_info = { .mac = ixgbe_mac_X540_vf, .mac_ops = &ixgbevf_mac_ops, }; + +const struct ixgbevf_info ixgbevf_X550_vf_info = { + .mac = ixgbe_mac_X550_vf, + .mac_ops = &ixgbevf_mac_ops, +}; + +const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = { + .mac = ixgbe_mac_X550EM_x_vf, + .mac_ops = &ixgbevf_mac_ops, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index aa8cc8dc25d1..5b172427f459 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -74,6 +74,8 @@ enum ixgbe_mac_type { ixgbe_mac_unknown = 0, ixgbe_mac_82599_vf, ixgbe_mac_X540_vf, + ixgbe_mac_X550_vf, + ixgbe_mac_X550EM_x_vf, ixgbe_num_macs }; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index b151a949f352..d44560d1d268 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) int tx_index; struct tx_desc *desc; u32 cmd_sts; - struct sk_buff *skb; tx_index = txq->tx_used_desc; desc = &txq->tx_desc_area[tx_index]; @@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) reclaimed++; txq->tx_desc_count--; - skb = NULL; - if (cmd_sts & TX_LAST_DESC) - skb = __skb_dequeue(&txq->tx_skb); + if (!IS_TSO_HEADER(txq, desc->buf_ptr)) + dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, + desc->byte_cnt, DMA_TO_DEVICE); + + if (cmd_sts & TX_ENABLE_INTERRUPT) { + struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); + + if (!WARN_ON(!skb)) + dev_kfree_skb(skb); + } if (cmd_sts & ERROR_SUMMARY) { netdev_info(mp->dev, "tx error\n"); mp->dev->stats.tx_errors++; } - if (!IS_TSO_HEADER(txq, desc->buf_ptr)) - dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, - desc->byte_cnt, DMA_TO_DEVICE); - dev_kfree_skb(skb); } __netif_tx_unlock_bh(nq); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index ade067de1689..ccc3ce2e8c8c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2558,11 +2558,10 @@ static void mvneta_adjust_link(struct net_device *ndev) MVNETA_GMAC_FORCE_LINK_DOWN); mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); mvneta_port_up(pp); - netdev_info(pp->dev, "link up\n"); } else { mvneta_port_down(pp); - netdev_info(pp->dev, "link down\n"); } + phy_print_status(phydev); } } diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index ece83f101526..fdf3e382e464 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -1692,6 +1692,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, { struct mvpp2_prs_entry *pe; int tid_aux, tid; + int ret = 0; pe = mvpp2_prs_vlan_find(priv, tpid, ai); @@ -1723,8 +1724,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, break; } - if (tid <= 
tid_aux) - return -EINVAL; + if (tid <= tid_aux) { + ret = -EINVAL; + goto error; + } memset(pe, 0 , sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); @@ -1756,9 +1759,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, mvpp2_prs_hw_write(priv, pe); +error: kfree(pe); - return 0; + return ret; } /* Get first free double vlan ai number */ @@ -1821,7 +1825,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, unsigned int port_map) { struct mvpp2_prs_entry *pe; - int tid_aux, tid, ai; + int tid_aux, tid, ai, ret = 0; pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); @@ -1838,8 +1842,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, /* Set ai value for new double vlan entry */ ai = mvpp2_prs_double_vlan_ai_free_get(priv); - if (ai < 0) - return ai; + if (ai < 0) { + ret = ai; + goto error; + } /* Get first single/triple vlan tid */ for (tid_aux = MVPP2_PE_FIRST_FREE_TID; @@ -1859,8 +1865,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, break; } - if (tid >= tid_aux) - return -ERANGE; + if (tid >= tid_aux) { + ret = -ERANGE; + goto error; + } memset(pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); @@ -1887,8 +1895,9 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, mvpp2_prs_tcam_port_map_set(pe, port_map); mvpp2_prs_hw_write(priv, pe); +error: kfree(pe); - return 0; + return ret; } /* IPv4 header parsing for fragmentation and L4 offset */ diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index c3b209cd0660..38f7ceee77d2 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -106,6 +106,7 @@ #define SDMA_CMD_ERD (1 << 7) /* Bit definitions of the Port Config Reg */ +#define PCR_DUPLEX_FULL (1 << 15) #define PCR_HS (1 << 12) #define PCR_EN (1 << 7) #define PCR_PM (1 << 0) @@ -113,11 +114,17 @@ /* Bit definitions of the Port Config Extend Reg */ #define PCXR_2BSM (1 << 28) #define PCXR_DSCP_EN (1 << 21) +#define PCXR_RMII_EN (1 << 20) +#define PCXR_AN_SPEED_DIS (1 << 19) +#define PCXR_SPEED_100 (1 << 18) #define PCXR_MFL_1518 (0 << 14) #define PCXR_MFL_1536 (1 << 14) #define PCXR_MFL_2048 (2 << 14) #define PCXR_MFL_64K (3 << 14) +#define PCXR_FLOWCTL_DIS (1 << 12) #define PCXR_FLP (1 << 11) +#define PCXR_AN_FLOWCTL_DIS (1 << 10) +#define PCXR_AN_DUPLEX_DIS (1 << 9) #define PCXR_PRIO_TX_OFF 3 #define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF) @@ -170,7 +177,6 @@ #define LINK_UP (1 << 3) /* Bit definitions for work to be done */ -#define WORK_LINK (1 << 0) #define WORK_TX_DONE (1 << 1) /* @@ -197,6 +203,9 @@ struct tx_desc { struct pxa168_eth_private { int port_num; /* User Ethernet port number */ int phy_addr; + int phy_speed; + int phy_duplex; + phy_interface_t phy_intf; int rx_resource_err; /* Rx ring resource error flag */ @@ -269,11 +278,11 @@ enum hash_table_entry { static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd); static int pxa168_init_hw(struct pxa168_eth_private *pep); +static int pxa168_init_phy(struct net_device *dev); static void eth_port_reset(struct net_device *dev); static void eth_port_start(struct net_device *dev); static int pxa168_eth_open(struct net_device *dev); static int pxa168_eth_stop(struct net_device *dev); -static int 
ethernet_phy_setup(struct net_device *dev); static inline u32 rdl(struct pxa168_eth_private *pep, int offset) { @@ -305,26 +314,6 @@ static void abort_dma(struct pxa168_eth_private *pep) netdev_err(pep->dev, "%s : DMA Stuck\n", __func__); } -static int ethernet_phy_get(struct pxa168_eth_private *pep) -{ - unsigned int reg_data; - - reg_data = rdl(pep, PHY_ADDRESS); - - return (reg_data >> (5 * pep->port_num)) & 0x1f; -} - -static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr) -{ - u32 reg_data; - int addr_shift = 5 * pep->port_num; - - reg_data = rdl(pep, PHY_ADDRESS); - reg_data &= ~(0x1f << addr_shift); - reg_data |= (phy_addr & 0x1f) << addr_shift; - wrl(pep, PHY_ADDRESS, reg_data); -} - static void rxq_refill(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); @@ -655,14 +644,7 @@ static void eth_port_start(struct net_device *dev) struct pxa168_eth_private *pep = netdev_priv(dev); int tx_curr_desc, rx_curr_desc; - /* Perform PHY reset, if there is a PHY. */ - if (pep->phy != NULL) { - struct ethtool_cmd cmd; - - pxa168_get_settings(pep->dev, &cmd); - phy_init_hw(pep->phy); - pxa168_set_settings(pep->dev, &cmd); - } + phy_start(pep->phy); /* Assignment of Tx CTRP of given queue */ tx_curr_desc = pep->tx_curr_desc_q; @@ -717,6 +699,8 @@ static void eth_port_reset(struct net_device *dev) val = rdl(pep, PORT_CONFIG); val &= ~PCR_EN; wrl(pep, PORT_CONFIG, val); + + phy_stop(pep->phy); } /* @@ -884,43 +868,9 @@ static int pxa168_eth_collect_events(struct pxa168_eth_private *pep, } if (icr & ICR_RXBUF) ret = 1; - if (icr & ICR_MII_CH) { - pep->work_todo |= WORK_LINK; - ret = 1; - } return ret; } -static void handle_link_event(struct pxa168_eth_private *pep) -{ - struct net_device *dev = pep->dev; - u32 port_status; - int speed; - int duplex; - int fc; - - port_status = rdl(pep, PORT_STATUS); - if (!(port_status & LINK_UP)) { - if (netif_carrier_ok(dev)) { - netdev_info(dev, "link down\n"); - netif_carrier_off(dev); - txq_reclaim(dev, 1); - } - return; - } - if (port_status & PORT_SPEED_100) - speed = 100; - else - speed = 10; - - duplex = (port_status & FULL_DUPLEX) ? 1 : 0; - fc = (port_status & FLOW_CONTROL_DISABLED) ? 0 : 1; - netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n", - speed, duplex ? "full" : "half", fc ?
"en" : "dis"); - if (!netif_carrier_ok(dev)) - netif_carrier_on(dev); -} - static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; @@ -978,8 +928,11 @@ static int set_port_config_ext(struct pxa168_eth_private *pep) skb_size = PCXR_MFL_64K; /* Extended Port Configuration */ - wrl(pep, - PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */ + wrl(pep, PORT_CONFIG_EXT, + PCXR_AN_SPEED_DIS | /* Disable HW AN */ + PCXR_AN_DUPLEX_DIS | + PCXR_AN_FLOWCTL_DIS | + PCXR_2BSM | /* Two byte prefix aligns IP hdr */ PCXR_DSCP_EN | /* Enable DSCP in IP */ skb_size | PCXR_FLP | /* do not force link pass */ PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */ @@ -987,6 +940,69 @@ static int set_port_config_ext(struct pxa168_eth_private *pep) return 0; } +static void pxa168_eth_adjust_link(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct phy_device *phy = pep->phy; + u32 cfg, cfg_o = rdl(pep, PORT_CONFIG); + u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT); + + cfg = cfg_o & ~PCR_DUPLEX_FULL; + cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN); + + if (phy->interface == PHY_INTERFACE_MODE_RMII) + cfgext |= PCXR_RMII_EN; + if (phy->speed == SPEED_100) + cfgext |= PCXR_SPEED_100; + if (phy->duplex) + cfg |= PCR_DUPLEX_FULL; + if (!phy->pause) + cfgext |= PCXR_FLOWCTL_DIS; + + /* Bail out if nothing has changed */ + if (cfg == cfg_o && cfgext == cfgext_o) + return; + + wrl(pep, PORT_CONFIG, cfg); + wrl(pep, PORT_CONFIG_EXT, cfgext); + + phy_print_status(phy); +} + +static int pxa168_init_phy(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct ethtool_cmd cmd; + int err; + + if (pep->phy) + return 0; + + pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); + if (!pep->phy) + return -ENODEV; + + err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link, + pep->phy_intf); + if (err) + return err; + + err = pxa168_get_settings(dev, &cmd); + if (err) + return err; + + cmd.phy_address = pep->phy_addr; + cmd.speed = pep->phy_speed; + cmd.duplex = pep->phy_duplex; + cmd.advertising = PHY_BASIC_FEATURES; + cmd.autoneg = AUTONEG_ENABLE; + + if (cmd.speed != 0) + cmd.autoneg = AUTONEG_DISABLE; + + return pxa168_set_settings(dev, &cmd); } + static int pxa168_init_hw(struct pxa168_eth_private *pep) { int err = 0; @@ -1133,6 +1149,10 @@ static int pxa168_eth_open(struct net_device *dev) struct pxa168_eth_private *pep = netdev_priv(dev); int err; + err = pxa168_init_phy(dev); + if (err) + return err; + err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev); if (err) { dev_err(&dev->dev, "can't assign irq\n"); @@ -1231,10 +1251,6 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget) struct net_device *dev = pep->dev; int work_done = 0; - if (unlikely(pep->work_todo & WORK_LINK)) { - pep->work_todo &= ~(WORK_LINK); - handle_link_event(pep); - } /* * We call txq_reclaim every time since in NAPI interrupts are disabled * and due to this we miss the TX_DONE interrupt, which is not updated in @@ -1357,77 +1373,6 @@ static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, return -EOPNOTSUPP; } -static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr) -{ - struct mii_bus *bus = pep->smi_bus; - struct phy_device *phydev; - int start; - int num; - int i; - - if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) { - /* Scan entire range */ - start = ethernet_phy_get(pep); - num = 32; - }
else { - /* Use phy addr specific to platform */ - start = phy_addr & 0x1f; - num = 1; - } - phydev = NULL; - for (i = 0; i < num; i++) { - int addr = (start + i) & 0x1f; - if (bus->phy_map[addr] == NULL) - mdiobus_scan(bus, addr); - - if (phydev == NULL) { - phydev = bus->phy_map[addr]; - if (phydev != NULL) - ethernet_phy_set_addr(pep, addr); - } - } - - return phydev; -} - -static void phy_init(struct pxa168_eth_private *pep) -{ - struct phy_device *phy = pep->phy; - - phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII); - - if (pep->pd && pep->pd->speed != 0) { - phy->autoneg = AUTONEG_DISABLE; - phy->advertising = 0; - phy->speed = pep->pd->speed; - phy->duplex = pep->pd->duplex; - } else { - phy->autoneg = AUTONEG_ENABLE; - phy->speed = 0; - phy->duplex = 0; - phy->supported &= PHY_BASIC_FEATURES; - phy->advertising = phy->supported | ADVERTISED_Autoneg; - } - - phy_start_aneg(phy); -} - -static int ethernet_phy_setup(struct net_device *dev) -{ - struct pxa168_eth_private *pep = netdev_priv(dev); - - if (pep->pd && pep->pd->init) - pep->pd->init(); - - pep->phy = phy_scan(pep, pep->phy_addr & 0x1f); - if (pep->phy != NULL) - phy_init(pep); - - update_hash_table_mac_address(pep, NULL, dev->dev_addr); - - return 0; -} - static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct pxa168_eth_private *pep = netdev_priv(dev); @@ -1505,16 +1450,14 @@ static int pxa168_eth_probe(struct platform_device *pdev) pep = netdev_priv(dev); pep->dev = dev; pep->clk = clk; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - err = -ENODEV; - goto err_netdev; - } pep->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pep->base)) { err = -ENOMEM; goto err_netdev; } + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); BUG_ON(!res); dev->irq = res->start; @@ -1552,13 +1495,23 @@ static int pxa168_eth_probe(struct platform_device *pdev) pep->port_num = pep->pd->port_number; pep->phy_addr = pep->pd->phy_addr; + pep->phy_speed = pep->pd->speed; + pep->phy_duplex = pep->pd->duplex; + pep->phy_intf = pep->pd->intf; + + if (pep->pd->init) + pep->pd->init(); } else if (pdev->dev.of_node) { of_property_read_u32(pdev->dev.of_node, "port-id", &pep->port_num); np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); - if (np) - of_property_read_u32(np, "reg", &pep->phy_addr); + if (!np) { + dev_err(&pdev->dev, "missing phy-handle\n"); + return -EINVAL; + } + of_property_read_u32(np, "reg", &pep->phy_addr); + pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); } /* Hardware supports only 3 ports */ @@ -1587,11 +1540,8 @@ static int pxa168_eth_probe(struct platform_device *pdev) if (err) goto err_free_mdio; - pxa168_init_hw(pep); - err = ethernet_phy_setup(dev); - if (err) - goto err_mdiobus; SET_NETDEV_DEV(dev, &pdev->dev); + pxa168_init_hw(pep); err = register_netdev(dev); if (err) goto err_mdiobus; @@ -1621,13 +1571,13 @@ static int pxa168_eth_remove(struct platform_device *pdev) pep->htpr, pep->htpr_dma); pep->htpr = NULL; } + if (pep->phy) + phy_disconnect(pep->phy); if (pep->clk) { clk_disable(pep->clk); clk_put(pep->clk); pep->clk = NULL; } - if (pep->phy != NULL) - phy_detach(pep->phy); iounmap(pep->base); pep->base = NULL; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index bd3366267039..f8ab220bd72c 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1290,14 +1290,6 @@ static void rx_set_checksum(struct sky2_port *sky2) ? 
BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); } -/* - * Fixed initial key as seed to RSS. - */ -static const uint32_t rss_init_key[10] = { - 0x7c3351da, 0x51c5cf4e, 0x44adbdd1, 0xe8d38d18, 0x48897c43, - 0xb1d60e7e, 0x6a3dd760, 0x01a2e453, 0x16f46f13, 0x1a0e7b30 -}; - /* Enable/disable receive hash calculation (RSS) */ static void rx_set_rss(struct net_device *dev, netdev_features_t features) { @@ -1313,9 +1305,12 @@ static void rx_set_rss(struct net_device *dev, netdev_features_t features) /* Program RSS initial values */ if (features & NETIF_F_RXHASH) { + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (i = 0; i < nkeys; i++) sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4), - rss_init_key[i]); + rss_key[i]); /* Need to turn on (undocumented) flag to make hashing work */ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), @@ -1366,7 +1361,9 @@ static void sky2_rx_clean(struct sky2_port *sky2) { unsigned i; - memset(sky2->rx_le, 0, RX_LE_BYTES); + if (sky2->rx_le) + memset(sky2->rx_le, 0, RX_LE_BYTES); + for (i = 0; i < sky2->rx_pending; i++) { struct rx_ring_info *re = sky2->rx_ring + i; diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index b16e1b95566f..5c93d1451c44 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -990,11 +990,11 @@ static struct mlx4_cmd_info cmd_info[] = { { .opcode = MLX4_CMD_CONFIG_DEV, .has_inbox = false, - .has_outbox = false, + .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = mlx4_CMD_EPERM_wrapper + .wrapper = mlx4_CONFIG_DEV_wrapper }, { .opcode = MLX4_CMD_ALLOC_RES, @@ -1338,6 +1338,15 @@ static struct mlx4_cmd_info cmd_info[] = { .verify = NULL, .wrapper = mlx4_QUERY_IF_STAT_wrapper }, + { + .opcode = MLX4_CMD_ACCESS_REG, + .has_inbox = true, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_ACCESS_REG_wrapper, + }, /* Native multicast commands are not available for guests */ { .opcode = MLX4_CMD_QP_ATTACH, @@ -2108,50 +2117,52 @@ err_vhcr: int mlx4_cmd_init(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); + int flags = 0; + + if (!priv->cmd.initialized) { + mutex_init(&priv->cmd.hcr_mutex); + mutex_init(&priv->cmd.slave_cmd_mutex); + sema_init(&priv->cmd.poll_sem, 1); + priv->cmd.use_events = 0; + priv->cmd.toggle = 1; + priv->cmd.initialized = 1; + flags |= MLX4_CMD_CLEANUP_STRUCT; + } - mutex_init(&priv->cmd.hcr_mutex); - mutex_init(&priv->cmd.slave_cmd_mutex); - sema_init(&priv->cmd.poll_sem, 1); - priv->cmd.use_events = 0; - priv->cmd.toggle = 1; - - priv->cmd.hcr = NULL; - priv->mfunc.vhcr = NULL; - - if (!mlx4_is_slave(dev)) { + if (!mlx4_is_slave(dev) && !priv->cmd.hcr) { priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE, MLX4_HCR_SIZE); if (!priv->cmd.hcr) { mlx4_err(dev, "Couldn't map command register\n"); - return -ENOMEM; + goto err; } + flags |= MLX4_CMD_CLEANUP_HCR; } - if (mlx4_is_mfunc(dev)) { + if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) { priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, &priv->mfunc.vhcr_dma, GFP_KERNEL); if (!priv->mfunc.vhcr) - goto err_hcr; + goto err; + + flags |= MLX4_CMD_CLEANUP_VHCR; } - priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, - MLX4_MAILBOX_SIZE, - MLX4_MAILBOX_SIZE, 0); - if (!priv->cmd.pool) - goto err_vhcr; + if (!priv->cmd.pool) { + priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, + MLX4_MAILBOX_SIZE, + 
MLX4_MAILBOX_SIZE, 0); + if (!priv->cmd.pool) + goto err; - return 0; + flags |= MLX4_CMD_CLEANUP_POOL; + } -err_vhcr: - if (mlx4_is_mfunc(dev)) - dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, - priv->mfunc.vhcr, priv->mfunc.vhcr_dma); - priv->mfunc.vhcr = NULL; + return 0; -err_hcr: - if (!mlx4_is_slave(dev)) - iounmap(priv->cmd.hcr); +err: + mlx4_cmd_cleanup(dev, flags); return -ENOMEM; } @@ -2175,18 +2186,28 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev) iounmap(priv->mfunc.comm); } -void mlx4_cmd_cleanup(struct mlx4_dev *dev) +void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask) { struct mlx4_priv *priv = mlx4_priv(dev); - pci_pool_destroy(priv->cmd.pool); + if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) { + pci_pool_destroy(priv->cmd.pool); + priv->cmd.pool = NULL; + } - if (!mlx4_is_slave(dev)) + if (!mlx4_is_slave(dev) && priv->cmd.hcr && + (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) { iounmap(priv->cmd.hcr); - if (mlx4_is_mfunc(dev)) + priv->cmd.hcr = NULL; + } + if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr && + (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) { dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, priv->mfunc.vhcr, priv->mfunc.vhcr_dma); - priv->mfunc.vhcr = NULL; + priv->mfunc.vhcr = NULL; + } + if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT)) + priv->cmd.initialized = 0; } /* diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index 57dda95b67d8..999014413b1a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -35,52 +35,6 @@ #include "mlx4_en.h" -int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_dev *mdev = priv->mdev; - int port_up = 0; - int err = 0; - - if (priv->hwtstamp_config.tx_type == tx_type && - priv->hwtstamp_config.rx_filter == rx_filter) - return 0; - - mutex_lock(&mdev->state_lock); - if (priv->port_up) { - port_up = 1; - mlx4_en_stop_port(dev, 1); - } - - mlx4_en_free_resources(priv); - - en_warn(priv, "Changing Time Stamp configuration\n"); - - priv->hwtstamp_config.tx_type = tx_type; - priv->hwtstamp_config.rx_filter = rx_filter; - - if (rx_filter != HWTSTAMP_FILTER_NONE) - dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; - else - dev->features |= NETIF_F_HW_VLAN_CTAG_RX; - - err = mlx4_en_alloc_resources(priv); - if (err) { - en_err(priv, "Failed reallocating port resources\n"); - goto out; - } - if (port_up) { - err = mlx4_en_start_port(dev); - if (err) - en_err(priv, "Failed starting port\n"); - } - -out: - mutex_unlock(&mdev->state_lock); - netdev_features_change(dev); - return err; -} - /* mlx4_en_read_clock - read raw cycle counter (to be used by time counter) */ static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ae83da9cd18a..90e0f045a6bc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -35,6 +35,7 @@ #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/mlx4/driver.h> +#include <linux/mlx4/device.h> #include <linux/in.h> #include <net/ip.h> @@ -114,7 +115,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = { "tso_packets", "xmit_more", "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", - "rx_csum_good", "rx_csum_none", "tx_chksum_offload", + "rx_csum_good", 
"rx_csum_none", "rx_csum_complete", "tx_chksum_offload", /* packet statistics */ "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3", @@ -374,7 +375,302 @@ static void mlx4_en_get_strings(struct net_device *dev, } } -static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static u32 mlx4_en_autoneg_get(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + u32 autoneg = AUTONEG_DISABLE; + + if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) && + (priv->port_state.flags & MLX4_EN_PORT_ANE)) + autoneg = AUTONEG_ENABLE; + + return autoneg; +} + +static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg) +{ + u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap); + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T) + | MLX4_PROT_MASK(MLX4_1000BASE_T) + | MLX4_PROT_MASK(MLX4_100BASE_TX))) { + return SUPPORTED_TP; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) + | MLX4_PROT_MASK(MLX4_10GBASE_SR) + | MLX4_PROT_MASK(MLX4_56GBASE_SR4) + | MLX4_PROT_MASK(MLX4_40GBASE_CR4) + | MLX4_PROT_MASK(MLX4_40GBASE_SR4) + | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) { + return SUPPORTED_FIBRE; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) + | MLX4_PROT_MASK(MLX4_40GBASE_KR4) + | MLX4_PROT_MASK(MLX4_20GBASE_KR2) + | MLX4_PROT_MASK(MLX4_10GBASE_KR) + | MLX4_PROT_MASK(MLX4_10GBASE_KX4) + | MLX4_PROT_MASK(MLX4_1000BASE_KX))) { + return SUPPORTED_Backplane; + } + return 0; +} + +static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg) +{ + u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper); + + if (!eth_proto) /* link down */ + eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap); + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T) + | MLX4_PROT_MASK(MLX4_1000BASE_T) + | MLX4_PROT_MASK(MLX4_100BASE_TX))) { + return PORT_TP; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR) + | MLX4_PROT_MASK(MLX4_56GBASE_SR4) + | MLX4_PROT_MASK(MLX4_40GBASE_SR4) + | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) { + return PORT_FIBRE; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) + | MLX4_PROT_MASK(MLX4_56GBASE_CR4) + | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) { + return PORT_DA; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) + | MLX4_PROT_MASK(MLX4_40GBASE_KR4) + | MLX4_PROT_MASK(MLX4_20GBASE_KR2) + | MLX4_PROT_MASK(MLX4_10GBASE_KR) + | MLX4_PROT_MASK(MLX4_10GBASE_KX4) + | MLX4_PROT_MASK(MLX4_1000BASE_KX))) { + return PORT_NONE; + } + return PORT_OTHER; +} + +#define MLX4_LINK_MODES_SZ \ + (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8) + +enum ethtool_report { + SUPPORTED = 0, + ADVERTISED = 1, + SPEED = 2 +}; + +/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */ +static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = { + [MLX4_100BASE_TX] = { + SUPPORTED_100baseT_Full, + ADVERTISED_100baseT_Full, + SPEED_100 + }, + + [MLX4_1000BASE_T] = { + SUPPORTED_1000baseT_Full, + ADVERTISED_1000baseT_Full, + SPEED_1000 + }, + [MLX4_1000BASE_CX_SGMII] = { + SUPPORTED_1000baseKX_Full, + ADVERTISED_1000baseKX_Full, + SPEED_1000 + }, + [MLX4_1000BASE_KX] = { + SUPPORTED_1000baseKX_Full, + ADVERTISED_1000baseKX_Full, + SPEED_1000 + }, + + [MLX4_10GBASE_T] = { + SUPPORTED_10000baseT_Full, + ADVERTISED_10000baseT_Full, + SPEED_10000 + }, + [MLX4_10GBASE_CX4] = { + SUPPORTED_10000baseKX4_Full, + ADVERTISED_10000baseKX4_Full, + SPEED_10000 + }, + [MLX4_10GBASE_KX4] = { + SUPPORTED_10000baseKX4_Full, + ADVERTISED_10000baseKX4_Full, + SPEED_10000 + }, + 
[MLX4_10GBASE_KR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + [MLX4_10GBASE_CR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + [MLX4_10GBASE_SR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + + [MLX4_20GBASE_KR2] = { + SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full, + ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full, + SPEED_20000 + }, + + [MLX4_40GBASE_CR4] = { + SUPPORTED_40000baseCR4_Full, + ADVERTISED_40000baseCR4_Full, + SPEED_40000 + }, + [MLX4_40GBASE_KR4] = { + SUPPORTED_40000baseKR4_Full, + ADVERTISED_40000baseKR4_Full, + SPEED_40000 + }, + [MLX4_40GBASE_SR4] = { + SUPPORTED_40000baseSR4_Full, + ADVERTISED_40000baseSR4_Full, + SPEED_40000 + }, + + [MLX4_56GBASE_KR4] = { + SUPPORTED_56000baseKR4_Full, + ADVERTISED_56000baseKR4_Full, + SPEED_56000 + }, + [MLX4_56GBASE_CR4] = { + SUPPORTED_56000baseCR4_Full, + ADVERTISED_56000baseCR4_Full, + SPEED_56000 + }, + [MLX4_56GBASE_SR4] = { + SUPPORTED_56000baseSR4_Full, + ADVERTISED_56000baseSR4_Full, + SPEED_56000 + }, +}; + +static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report) +{ + int i; + u32 link_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (eth_proto & MLX4_PROT_MASK(i)) + link_modes |= ptys2ethtool_map[i][report]; + } + return link_modes; +} + +static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report) +{ + int i; + u32 ptys_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (ptys2ethtool_map[i][report] & link_modes) + ptys_modes |= 1 << i; + } + return ptys_modes; +} + +/* Convert actual speed (SPEED_XXX) to ptys link modes */ +static u32 speed2ptys_link_modes(u32 speed) +{ + int i; + u32 ptys_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (ptys2ethtool_map[i][SPEED] == speed) + ptys_modes |= 1 << i; + } + return ptys_modes; +} + +static int ethtool_get_ptys_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_ptys_reg ptys_reg; + u32 eth_proto; + int ret; + + memset(&ptys_reg, 0, sizeof(ptys_reg)); + ptys_reg.local_port = priv->port; + ptys_reg.proto_mask = MLX4_PTYS_EN; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, + MLX4_ACCESS_REG_QUERY, &ptys_reg); + if (ret) { + en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)", + ret); + return ret; + } + en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n", + ptys_reg.proto_mask); + en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n", + be32_to_cpu(ptys_reg.eth_proto_cap)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n", + be32_to_cpu(ptys_reg.eth_proto_admin)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n", + be32_to_cpu(ptys_reg.eth_proto_oper)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n", + be32_to_cpu(ptys_reg.eth_proto_lp_adv)); + + cmd->supported = 0; + cmd->advertising = 0; + + cmd->supported |= ptys_get_supported_port(&ptys_reg); + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap); + cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED); + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin); + cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED); + + cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0; + + cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ? 
+ ADVERTISED_Asym_Pause : 0; + + cmd->port = ptys_get_active_port(&ptys_reg); + cmd->transceiver = (SUPPORTED_TP & cmd->supported) ? + XCVR_EXTERNAL : XCVR_INTERNAL; + + if (mlx4_en_autoneg_get(dev)) { + cmd->supported |= SUPPORTED_Autoneg; + cmd->advertising |= ADVERTISED_Autoneg; + } + + cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv); + cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED); + + cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ? + ADVERTISED_Autoneg : 0; + + cmd->phy_address = 0; + cmd->mdio_support = 0; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + + return ret; +} + +static void ethtool_get_default_settings(struct net_device *dev, + struct ethtool_cmd *cmd) { struct mlx4_en_priv *priv = netdev_priv(dev); int trans_type; @@ -382,18 +678,7 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->autoneg = AUTONEG_DISABLE; cmd->supported = SUPPORTED_10000baseT_Full; cmd->advertising = ADVERTISED_10000baseT_Full; - - if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) - return -ENOMEM; - - trans_type = priv->port_state.transciver; - if (netif_carrier_ok(dev)) { - ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); - cmd->duplex = DUPLEX_FULL; - } else { - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); - cmd->duplex = DUPLEX_UNKNOWN; - } + trans_type = priv->port_state.transceiver; if (trans_type > 0 && trans_type <= 0xC) { cmd->port = PORT_FIBRE; @@ -409,17 +694,118 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->port = -1; cmd->transceiver = -1; } +} + +static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int ret = -EINVAL; + + if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) + return -ENOMEM; + + en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n", + priv->port_state.flags & MLX4_EN_PORT_ANC, + priv->port_state.flags & MLX4_EN_PORT_ANE); + + if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) + ret = ethtool_get_ptys_settings(dev, cmd); + if (ret) /* ETH PROT CTRL is not supported or PTYS CMD failed */ + ethtool_get_default_settings(dev, cmd); + + if (netif_carrier_ok(dev)) { + ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); + cmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; + } return 0; } +/* Calculate PTYS admin according to ethtool speed (SPEED_XXX) */ +static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed, + __be32 proto_cap) +{ + __be32 proto_admin = 0; + + if (!speed) { /* Speed = 0 ==> Reset Link modes */ + proto_admin = proto_cap; + en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n", + be32_to_cpu(proto_cap)); + } else { + u32 ptys_link_modes = speed2ptys_link_modes(speed); + + proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap; + en_info(priv, "Setting Speed to %d\n", speed); + } + return proto_admin; +} + static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { - if ((cmd->autoneg == AUTONEG_ENABLE) || - (ethtool_cmd_speed(cmd) != SPEED_10000) || - (cmd->duplex != DUPLEX_FULL)) + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_ptys_reg ptys_reg; + __be32 proto_admin; + int ret; + + u32 ptys_adv =
ethtool2ptys_link_modes(cmd->advertising, ADVERTISED); + int speed = ethtool_cmd_speed(cmd); + + en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n", + speed, cmd->advertising, cmd->autoneg, cmd->duplex); + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) || + (cmd->duplex == DUPLEX_HALF)) return -EINVAL; - /* Nothing to change */ + memset(&ptys_reg, 0, sizeof(ptys_reg)); + ptys_reg.local_port = priv->port; + ptys_reg.proto_mask = MLX4_PTYS_EN; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, + MLX4_ACCESS_REG_QUERY, &ptys_reg); + if (ret) { + en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n", + ret); + return 0; + } + + proto_admin = cpu_to_be32(ptys_adv); + if (speed >= 0 && speed != priv->port_state.link_speed) + /* If speed was set then speed decides :-) */ + proto_admin = speed_set_ptys_admin(priv, speed, + ptys_reg.eth_proto_cap); + + proto_admin &= ptys_reg.eth_proto_cap; + + if (proto_admin == ptys_reg.eth_proto_admin) + return 0; /* Nothing to change */ + + if (!proto_admin) { + en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n"); + return -EINVAL; /* nothing to change due to bad input */ + } + + en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n", + be32_to_cpu(proto_admin)); + + ptys_reg.eth_proto_admin = proto_admin; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE, + &ptys_reg); + if (ret) { + en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)", + be32_to_cpu(ptys_reg.eth_proto_admin), ret); + return ret; + } + + en_warn(priv, "Port link mode changed, restarting port...\n"); + mutex_lock(&priv->mdev->state_lock); + if (priv->port_up) { + mlx4_en_stop_port(dev, 1); + if (mlx4_en_start_port(dev)) + en_err(priv, "Failed restarting port %d\n", priv->port); + } + mutex_unlock(&priv->mdev->state_lock); return 0; } @@ -587,7 +973,34 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev) return priv->rx_ring_num; } -static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key) +static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev) +{ + return MLX4_EN_RSS_KEY_SIZE; +} + +static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + /* check if requested function is supported by the device */ + if ((hfunc == ETH_RSS_HASH_TOP && + !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) || + (hfunc == ETH_RSS_HASH_XOR && + !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))) + return -EINVAL; + + priv->rss_hash_fn = hfunc; + if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH)) + en_warn(priv, + "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); + if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH)) + en_warn(priv, + "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); + return 0; +} + +static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, + u8 *hfunc) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_rss_map *rss_map = &priv->rss_map; @@ -596,17 +1009,23 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key) int err = 0; rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num; + rss_rings = 1 << ilog2(rss_rings); while (n--) { + if (!ring_index) + break; ring_index[n] = rss_map->qps[n % rss_rings].qpn - rss_map->base_qpn; } - + if (key) + 
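The speed-driven branch of mlx4_en_set_settings() reduces to one rule: a non-zero requested speed selects every link mode of that speed, the candidate set is intersected with eth_proto_cap, and an empty intersection is rejected; speed 0 resets the advertised set to the full capability mask. A self-contained sketch of that rule, using three made-up link modes rather than the driver's tables:

#include <stdint.h>
#include <stdio.h>

static uint32_t speed_to_modes(uint32_t speed)
{
    static const uint32_t speeds[] = { 10000, 40000, 56000 };
    uint32_t modes = 0;

    for (int i = 0; i < 3; i++)
        if (speeds[i] == speed)
            modes |= 1u << i;
    return modes;
}

static int derive_proto_admin(uint32_t speed, uint32_t proto_cap,
                              uint32_t *proto_admin)
{
    /* speed == 0 means "reset advertised modes to everything we can do" */
    uint32_t admin = speed ? (speed_to_modes(speed) & proto_cap) : proto_cap;

    if (!admin)
        return -1;    /* requested mode unsupported on this port */
    *proto_admin = admin;
    return 0;
}

int main(void)
{
    uint32_t admin;

    if (!derive_proto_admin(40000, 0x3, &admin))  /* cap has bits 0,1 */
        printf("admin=0x%x\n", admin);            /* 0x2 */
    return 0;
}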
memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE); + if (hfunc) + *hfunc = priv->rss_hash_fn; return err; } static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, - const u8 *key) + const u8 *key, const u8 hfunc) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; @@ -619,6 +1038,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, * between rings */ for (i = 0; i < priv->rx_ring_num; i++) { + if (!ring_index) + continue; if (i > 0 && !ring_index[i] && !rss_rings) rss_rings = i; @@ -633,13 +1054,22 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, if (!is_power_of_2(rss_rings)) return -EINVAL; + if (hfunc != ETH_RSS_HASH_NO_CHANGE) { + err = mlx4_en_check_rxfh_func(dev, hfunc); + if (err) + return err; + } + mutex_lock(&mdev->state_lock); if (priv->port_up) { port_up = 1; mlx4_en_stop_port(dev, 1); } - priv->prof->rss_rings = rss_rings; + if (ring_index) + priv->prof->rss_rings = rss_rings; + if (key) + memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE); if (port_up) { err = mlx4_en_start_port(dev); @@ -1309,6 +1739,86 @@ static int mlx4_en_set_tunable(struct net_device *dev, return ret; } +static int mlx4_en_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int ret; + u8 data[4]; + + /* Read first 2 bytes to get Module & REV ID */ + ret = mlx4_get_module_info(mdev->dev, priv->port, + 0/*offset*/, 2/*size*/, data); + if (ret < 2) + return -EIO; + + switch (data[0] /* identifier */) { + case MLX4_MODULE_ID_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case MLX4_MODULE_ID_QSFP_PLUS: + if (data[1] >= 0x3) { /* revision id */ + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + } + break; + case MLX4_MODULE_ID_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + case MLX4_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + default: + return -ENOSYS; + } + + return 0; +} + +static int mlx4_en_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int offset = ee->offset; + int i = 0, ret; + + if (ee->len == 0) + return -EINVAL; + + memset(data, 0, ee->len); + + while (i < ee->len) { + en_dbg(DRV, priv, + "mlx4_get_module_info i(%d) offset(%d) len(%d)\n", + i, offset, ee->len - i); + + ret = mlx4_get_module_info(mdev->dev, priv->port, + offset, ee->len - i, data + i); + + if (!ret) /* Done reading */ + return 0; + + if (ret < 0) { + en_err(priv, + "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", + i, offset, ee->len - i, ret); + return 0; + } + + i += ret; + offset += ret; + } + return 0; +} const struct ethtool_ops mlx4_en_ethtool_ops = { .get_drvinfo = mlx4_en_get_drvinfo, @@ -1332,6 +1842,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { .get_rxnfc = mlx4_en_get_rxnfc, .set_rxnfc = mlx4_en_set_rxnfc, .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, + .get_rxfh_key_size = mlx4_en_get_rxfh_key_size, .get_rxfh = mlx4_en_get_rxfh, .set_rxfh = mlx4_en_set_rxfh, .get_channels = mlx4_en_get_channels, @@ -1341,6 +1852,8 
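mlx4_en_get_rxfh() above first clamps the active ring count to a power of two with 1 << ilog2(rss_rings), then fills each indirection slot with ring n % rss_rings. The same arithmetic in plain C, with ilog2 open-coded since the kernel helper isn't available in user space:

#include <stdint.h>
#include <stdio.h>

static unsigned int ilog2_u32(uint32_t n)   /* floor(log2(n)), n > 0 */
{
    unsigned int l = 0;

    while (n >>= 1)
        l++;
    return l;
}

int main(void)
{
    uint32_t table[8];
    uint32_t rings = 6;                             /* not a power of two */
    uint32_t rss_rings = 1u << ilog2_u32(rings);    /* -> 4 */

    for (int n = 0; n < 8; n++)
        table[n] = n % rss_rings;
    for (int n = 0; n < 8; n++)
        printf("%u ", table[n]);    /* 0 1 2 3 0 1 2 3 */
    printf("\n");
    return 0;
}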
@@ const struct ethtool_ops mlx4_en_ethtool_ops = { .get_priv_flags = mlx4_en_get_priv_flags, .get_tunable = mlx4_en_get_tunable, .set_tunable = mlx4_en_set_tunable, + .get_module_info = mlx4_en_get_module_info, + .get_module_eeprom = mlx4_en_get_module_eeprom }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 2091ae88615d..9f16f754137b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -221,15 +221,12 @@ static void *mlx4_en_add(struct mlx4_dev *dev) { struct mlx4_en_dev *mdev; int i; - int err; printk_once(KERN_INFO "%s", mlx4_en_version); mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); - if (!mdev) { - err = -ENOMEM; + if (!mdev) goto err_free_res; - } if (mlx4_pd_alloc(dev, &mdev->priv_pdn)) goto err_free_dev; @@ -264,8 +261,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev) } /* Build device profile according to supplied module parameters */ - err = mlx4_en_get_profile(mdev); - if (err) { + if (mlx4_en_get_profile(mdev)) { mlx4_err(mdev, "Bad module parameters, aborting\n"); goto err_mr; } @@ -286,10 +282,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev) * Note: we cannot use the shared workqueue because of deadlocks caused * by the rtnl lock */ mdev->workqueue = create_singlethread_workqueue("mlx4_en"); - if (!mdev->workqueue) { - err = -ENOMEM; + if (!mdev->workqueue) goto err_mr; - } /* At this stage all non-port specific tasks are complete: * mark the card state as up */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f3032fec8fce..dccf0e1f86be 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -575,7 +575,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv) struct mlx4_mac_entry *entry; int index = 0; int err = 0; - u64 reg_id; + u64 reg_id = 0; int *qpn = &priv->base_qpn; u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); @@ -1693,7 +1693,7 @@ int mlx4_en_start_port(struct net_device *dev) mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); #ifdef CONFIG_MLX4_EN_VXLAN - if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) + if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) vxlan_get_rx_port(dev); #endif priv->port_up = true; @@ -1843,8 +1843,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) } local_bh_enable(); - while (test_bit(NAPI_STATE_SCHED, &cq->napi.state)) - msleep(1); + napi_synchronize(&cq->napi); mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); mlx4_en_deactivate_cq(priv, cq); @@ -1894,6 +1893,7 @@ static void mlx4_en_clear_stats(struct net_device *dev) priv->rx_ring[i]->packets = 0; priv->rx_ring[i]->csum_ok = 0; priv->rx_ring[i]->csum_none = 0; + priv->rx_ring[i]->csum_complete = 0; } } @@ -2157,7 +2157,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) return -ERANGE; } - if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) { + if (mlx4_en_reset_config(dev, config, dev->features)) { config.tx_type = HWTSTAMP_TX_OFF; config.rx_filter = HWTSTAMP_FILTER_NONE; } @@ -2190,6 +2190,16 @@ static int mlx4_en_set_features(struct net_device *netdev, netdev_features_t features) { struct mlx4_en_priv *priv = netdev_priv(netdev); + int ret = 0; + + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) { + en_info(priv, "Turn %s RX vlan strip offload\n", + (features & NETIF_F_HW_VLAN_CTAG_RX) ? 
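The identifier/revision decode in mlx4_en_get_module_info() boils down to: EEPROM byte 0 names the module family, and for QSFP+ byte 1 chooses between the SFF-8436 and SFF-8636 layouts. A sketch of that decision table; the numeric IDs below are assumptions for the demo, only the switch structure mirrors the patch:

#include <stdint.h>
#include <stdio.h>

#define ID_SFP        0x03    /* assumed identifier values */
#define ID_QSFP       0x0c
#define ID_QSFP_PLUS  0x0d
#define ID_QSFP28     0x11

static const char *module_spec(uint8_t id, uint8_t rev)
{
    switch (id) {
    case ID_QSFP:      return "SFF-8436";
    case ID_QSFP_PLUS: return rev >= 0x3 ? "SFF-8636" : "SFF-8436";
    case ID_QSFP28:    return "SFF-8636";
    case ID_SFP:       return "SFF-8472";
    default:           return "unknown";
    }
}

int main(void)
{
    printf("%s\n", module_spec(ID_QSFP_PLUS, 0x03));    /* SFF-8636 */
    printf("%s\n", module_spec(ID_QSFP_PLUS, 0x01));    /* SFF-8436 */
    return 0;
}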
"ON" : "OFF"); + ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, + features); + if (ret) + return ret; + } if (features & NETIF_F_LOOPBACK) priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); @@ -2249,7 +2259,7 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st #define PORT_ID_BYTE_LEN 8 static int mlx4_en_get_phys_port_id(struct net_device *dev, - struct netdev_phys_port_id *ppid) + struct netdev_phys_item_id *ppid) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_dev *mdev = priv->mdev->dev; @@ -2281,8 +2291,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work) ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); out: - if (ret) + if (ret) { en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); + return; + } + + /* set offloads */ + priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; + priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL; } static void mlx4_en_del_vxlan_offloads(struct work_struct *work) @@ -2290,6 +2308,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) int ret; struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, vxlan_del_task); + /* unset offloads */ + priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); + priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; + priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL; ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 0); @@ -2342,6 +2365,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev, queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); } + +static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev) +{ + return vxlan_gso_check(skb); +} #endif static const struct net_device_ops mlx4_netdev_ops = { @@ -2373,6 +2401,7 @@ static const struct net_device_ops mlx4_netdev_ops = { #ifdef CONFIG_MLX4_EN_VXLAN .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_gso_check = mlx4_en_gso_check, #endif }; @@ -2403,6 +2432,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, +#ifdef CONFIG_MLX4_EN_VXLAN + .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, + .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_gso_check = mlx4_en_gso_check, +#endif }; int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, @@ -2431,6 +2465,21 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv = netdev_priv(dev); memset(priv, 0, sizeof(struct mlx4_en_priv)); + spin_lock_init(&priv->stats_lock); + INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); + INIT_WORK(&priv->watchdog_task, mlx4_en_restart); + INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); + INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); + INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); +#ifdef CONFIG_MLX4_EN_VXLAN + INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); + INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); +#endif +#ifdef CONFIG_RFS_ACCEL + INIT_LIST_HEAD(&priv->filters); + spin_lock_init(&priv->filters_lock); +#endif + priv->dev = dev; priv->mdev = mdev; priv->ddev = &mdev->pdev->dev; @@ -2444,6 +2493,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; priv->tx_ring_num = prof->tx_ring_num; priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; + netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key)); priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, GFP_KERNEL); @@ -2462,16 +2512,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->cqe_size = mdev->dev->caps.cqe_size; priv->mac_index = -1; priv->msg_enable = MLX4_EN_MSG_LEVEL; - spin_lock_init(&priv->stats_lock); - INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); - INIT_WORK(&priv->watchdog_task, mlx4_en_restart); - INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); - INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); - INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); -#ifdef CONFIG_MLX4_EN_VXLAN - INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); - INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); -#endif #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { @@ -2489,6 +2529,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, /* Query for default mac and max mtu */ priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; + if (mdev->dev->caps.rx_checksum_flags_port[priv->port] & + MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP) + priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP; + /* Set default MAC */ dev->addr_len = ETH_ALEN; mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); @@ -2514,11 +2558,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, if (err) goto out; -#ifdef CONFIG_RFS_ACCEL - INIT_LIST_HEAD(&priv->filters); - spin_lock_init(&priv->filters_lock); -#endif - /* Initialize time stamping config */ priv->hwtstamp_config.flags = 0; priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; @@ -2559,7 +2598,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->features = dev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; - dev->hw_features |= NETIF_F_LOOPBACK; + dev->hw_features |= NETIF_F_LOOPBACK | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) @@ -2568,11 +2608,15 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) dev->priv_flags |= IFF_UNICAST_FLT; - if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { - dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; - dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; - dev->features |= NETIF_F_GSO_UDP_TUNNEL; + /* Setting a default hash function value */ + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) { + priv->rss_hash_fn = ETH_RSS_HASH_TOP; + } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) { + priv->rss_hash_fn = ETH_RSS_HASH_XOR; + } else { + en_warn(priv, + "No RSS hash capabilities exposed, using Toeplitz\n"); + priv->rss_hash_fn = ETH_RSS_HASH_TOP; } mdev->pndev[port] = dev; @@ -2633,3 +2677,79 @@ out: return err; } +int mlx4_en_reset_config(struct net_device *dev, + struct hwtstamp_config ts_config, + netdev_features_t features) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int port_up = 0; + int err = 0; + + if (priv->hwtstamp_config.tx_type == ts_config.tx_type && + priv->hwtstamp_config.rx_filter == 
ts_config.rx_filter && + !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) + return 0; /* Nothing to change */ + + if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && + (features & NETIF_F_HW_VLAN_CTAG_RX) && + (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) { + en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n"); + return -EINVAL; + } + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + mlx4_en_free_resources(priv); + + en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n", + ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX)); + + priv->hwtstamp_config.tx_type = ts_config.tx_type; + priv->hwtstamp_config.rx_filter = ts_config.rx_filter; + + if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; + else + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) { + /* RX time-stamping is OFF, update the RX vlan offload + * to the latest wanted state + */ + if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX) + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; + else + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } + + /* RX vlan offload and RX time-stamping can't co-exist ! + * Regardless of the caller's choice, + * Turn Off RX vlan offload in case of time-stamping is ON + */ + if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) { + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) + en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n"); + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } + + err = mlx4_en_alloc_resources(priv); + if (err) { + en_err(priv, "Failed reallocating port resources\n"); + goto out; + } + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) + en_err(priv, "Failed starting port\n"); + } + +out: + mutex_unlock(&mdev->state_lock); + netdev_features_change(dev); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 0a0261d128b9..6cb80072af6c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -91,21 +91,37 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port) * already synchronized, no need in locking */ state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK); switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) { + case MLX4_EN_100M_SPEED: + state->link_speed = SPEED_100; + break; case MLX4_EN_1G_SPEED: - state->link_speed = 1000; + state->link_speed = SPEED_1000; break; case MLX4_EN_10G_SPEED_XAUI: case MLX4_EN_10G_SPEED_XFI: - state->link_speed = 10000; + state->link_speed = SPEED_10000; + break; + case MLX4_EN_20G_SPEED: + state->link_speed = SPEED_20000; break; case MLX4_EN_40G_SPEED: - state->link_speed = 40000; + state->link_speed = SPEED_40000; + break; + case MLX4_EN_56G_SPEED: + state->link_speed = SPEED_56000; break; default: state->link_speed = -1; break; } - state->transciver = qport_context->transceiver; + + state->transceiver = qport_context->transceiver; + + state->flags = 0; /* Reset and recalculate the port flags */ + state->flags |= (qport_context->link_up & MLX4_EN_ANC_MASK) ? + MLX4_EN_PORT_ANC : 0; + state->flags |= (qport_context->autoneg & MLX4_EN_AUTONEG_MASK) ? 
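The feature resolution in mlx4_en_reset_config() encodes one precedence rule: RX time-stamping always wins, so VLAN RX stripping is forced off whenever a non-NONE rx_filter is active, whatever the caller or wanted_features say. Reduced to a pure function:

#include <stdbool.h>
#include <stdio.h>

static bool resolve_ctag_rx(bool wanted_ctag_rx, bool rx_timestamping)
{
    if (rx_timestamping)
        return false;    /* offload and time-stamping can't coexist */
    return wanted_ctag_rx;
}

int main(void)
{
    printf("%d\n", resolve_ctag_rx(true, true));    /* 0: forced off */
    printf("%d\n", resolve_ctag_rx(true, false));   /* 1: honoured   */
    return 0;
}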
+ MLX4_EN_PORT_ANE : 0; out: mlx4_free_cmd_mailbox(mdev->dev, mailbox); @@ -139,11 +155,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) stats->rx_bytes = 0; priv->port_stats.rx_chksum_good = 0; priv->port_stats.rx_chksum_none = 0; + priv->port_stats.rx_chksum_complete = 0; for (i = 0; i < priv->rx_ring_num; i++) { stats->rx_packets += priv->rx_ring[i]->packets; stats->rx_bytes += priv->rx_ring[i]->bytes; priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok; priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none; + priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete; } stats->tx_packets = 0; stats->tx_bytes = 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h index 745090b49d9e..040da4b16b1c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.h +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h @@ -53,22 +53,49 @@ enum { MLX4_MCAST_ENABLE = 2, }; +enum mlx4_link_mode { + MLX4_1000BASE_CX_SGMII = 0, + MLX4_1000BASE_KX = 1, + MLX4_10GBASE_CX4 = 2, + MLX4_10GBASE_KX4 = 3, + MLX4_10GBASE_KR = 4, + MLX4_20GBASE_KR2 = 5, + MLX4_40GBASE_CR4 = 6, + MLX4_40GBASE_KR4 = 7, + MLX4_56GBASE_KR4 = 8, + MLX4_10GBASE_CR = 12, + MLX4_10GBASE_SR = 13, + MLX4_40GBASE_SR4 = 15, + MLX4_56GBASE_CR4 = 17, + MLX4_56GBASE_SR4 = 18, + MLX4_100BASE_TX = 24, + MLX4_1000BASE_T = 25, + MLX4_10GBASE_T = 26, +}; + +#define MLX4_PROT_MASK(link_mode) (1<<link_mode) + enum { - MLX4_EN_1G_SPEED = 0x02, - MLX4_EN_10G_SPEED_XFI = 0x01, + MLX4_EN_100M_SPEED = 0x04, MLX4_EN_10G_SPEED_XAUI = 0x00, + MLX4_EN_10G_SPEED_XFI = 0x01, + MLX4_EN_1G_SPEED = 0x02, + MLX4_EN_20G_SPEED = 0x08, MLX4_EN_40G_SPEED = 0x40, + MLX4_EN_56G_SPEED = 0x20, MLX4_EN_OTHER_SPEED = 0x0f, }; struct mlx4_en_query_port_context { u8 link_up; #define MLX4_EN_LINK_UP_MASK 0x80 - u8 reserved; +#define MLX4_EN_ANC_MASK 0x40 + u8 autoneg; +#define MLX4_EN_AUTONEG_MASK 0x80 __be16 mtu; u8 reserved2; u8 link_speed; -#define MLX4_EN_SPEED_MASK 0x43 +#define MLX4_EN_SPEED_MASK 0x6f u16 reserved3[5]; __be64 mac; u8 transceiver; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 01660c595f5c..4ca396e3168f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -42,6 +42,10 @@ #include <linux/vmalloc.h> #include <linux/irq.h> +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ip6_checksum.h> +#endif + #include "mlx4_en.h" static int mlx4_alloc_pages(struct mlx4_en_priv *priv, @@ -74,7 +78,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv, page_alloc->page_size = PAGE_SIZE << order; page_alloc->page = page; page_alloc->dma = dma; - page_alloc->page_offset = frag_info->frag_align; + page_alloc->page_offset = 0; /* Not doing get_page() for each frag is a big win * on asymetric workloads. Note we can not use atomic_set(). 
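With the widened MLX4_EN_SPEED_MASK (0x43 -> 0x6f), the new 100M (0x04), 20G (0x08) and 56G (0x20) codes survive the masking before the switch in mlx4_en_QUERY_PORT(). A standalone sketch of the decode, using the encodings from en_port.h above:

#include <stdint.h>
#include <stdio.h>

#define SPEED_MASK 0x6f

static int decode_link_speed(uint8_t raw)
{
    switch (raw & SPEED_MASK) {
    case 0x04: return 100;
    case 0x02: return 1000;
    case 0x00:                  /* 10G XAUI */
    case 0x01: return 10000;    /* 10G XFI  */
    case 0x08: return 20000;
    case 0x40: return 40000;
    case 0x20: return 56000;
    default:   return -1;
    }
}

int main(void)
{
    printf("%d\n", decode_link_speed(0x20));    /* 56000 */
    printf("%d\n", decode_link_speed(0x08));    /* 20000 */
    return 0;
}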
*/ @@ -119,7 +123,6 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, out: while (i--) { - frag_info = &priv->frag_info[i]; if (page_alloc[i].page != ring_alloc[i].page) { dma_unmap_page(priv->ddev, page_alloc[i].dma, page_alloc[i].page_size, PCI_DMA_FROMDEVICE); @@ -157,7 +160,7 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv, const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; if (mlx4_alloc_pages(priv, &ring->page_alloc[i], - frag_info, GFP_KERNEL)) + frag_info, GFP_KERNEL | __GFP_COLD)) goto out; } return 0; @@ -269,7 +272,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) if (mlx4_en_prepare_rx_desc(priv, ring, ring->actual_size, - GFP_KERNEL)) { + GFP_KERNEL | __GFP_COLD)) { if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { en_err(priv, "Failed to allocate enough rx buffers\n"); return -ENOMEM; @@ -636,13 +639,94 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, int index = ring->prod & ring->size_mask; while ((u32) (ring->prod - ring->cons) < ring->actual_size) { - if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC)) + if (mlx4_en_prepare_rx_desc(priv, ring, index, + GFP_ATOMIC | __GFP_COLD)) break; ring->prod++; index = ring->prod & ring->size_mask; } } +/* When hardware doesn't strip the vlan, we need to calculate the checksum + * over it and add it to the hardware's checksum calculation + */ +static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum, + struct vlan_hdr *vlanh) +{ + return csum_add(hw_checksum, *(__wsum *)vlanh); +} + +/* Although the stack expects checksum which doesn't include the pseudo + * header, the HW adds it. To address that, we are subtracting the pseudo + * header checksum from the checksum value provided by the HW. + */ +static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, + struct iphdr *iph) +{ + __u16 length_for_csum = 0; + __wsum csum_pseudo_header = 0; + + length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); + csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, + length_for_csum, iph->protocol, 0); + skb->csum = csum_sub(hw_checksum, csum_pseudo_header); +} + +#if IS_ENABLED(CONFIG_IPV6) +/* In IPv6 packets, besides subtracting the pseudo header checksum, + * we also compute/add the IP header checksum which + * is not added by the HW. 
+ */ +static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, + struct ipv6hdr *ipv6h) +{ + __wsum csum_pseudo_hdr = 0; + + if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS) + return -1; + hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8)); + + csum_pseudo_hdr = csum_partial(&ipv6h->saddr, + sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); + csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); + csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr)); + + skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); + skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); + return 0; +} +#endif +static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, + int hwtstamp_rx_filter) +{ + __wsum hw_checksum = 0; + + void *hdr = (u8 *)va + sizeof(struct ethhdr); + + hw_checksum = csum_unfold((__force __sum16)cqe->checksum); + + if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) && + hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) { + /* next protocol non IPv4 or IPv6 */ + if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto + != htons(ETH_P_IP) && + ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto + != htons(ETH_P_IPV6)) + return -1; + hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr); + hdr += sizeof(struct vlan_hdr); + } + + if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) + get_fixed_ipv4_csum(hw_checksum, skb, hdr); +#if IS_ENABLED(CONFIG_IPV6) + else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) + if (get_fixed_ipv6_csum(hw_checksum, skb, hdr)) + return -1; +#endif + return 0; +} + int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -744,73 +828,95 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); if (likely(dev->features & NETIF_F_RXCSUM)) { - if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && - (cqe->checksum == cpu_to_be16(0xffff))) { - ring->csum_ok++; - /* This packet is eligible for GRO if it is: - * - DIX Ethernet (type interpretation) - * - TCP/IP (v4) - * - without IP options - * - not an IP fragment - * - no LLS polling in progress - */ - if (!mlx4_en_cq_busy_polling(cq) && - (dev->features & NETIF_F_GRO)) { - struct sk_buff *gro_skb = napi_get_frags(&cq->napi); - if (!gro_skb) - goto next; - - nr = mlx4_en_complete_rx_desc(priv, - rx_desc, frags, gro_skb, - length); - if (!nr) - goto next; - - skb_shinfo(gro_skb)->nr_frags = nr; - gro_skb->len = length; - gro_skb->data_len = length; - gro_skb->ip_summed = CHECKSUM_UNNECESSARY; + if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | + MLX4_CQE_STATUS_UDP)) { + if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && + cqe->checksum == cpu_to_be16(0xffff)) { + ip_summed = CHECKSUM_UNNECESSARY; + ring->csum_ok++; + } else { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + } + } else { + if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP && + (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | + MLX4_CQE_STATUS_IPV6))) { + ip_summed = CHECKSUM_COMPLETE; + ring->csum_complete++; + } else { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + } + } + } else { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + } - if (l2_tunnel) - gro_skb->csum_level = 1; - if ((cqe->vlan_my_qpn & - cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && - (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { - u16 vid = 
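The fix-ups in get_fixed_ipv4_csum()/get_fixed_ipv6_csum() rest on one's-complement identities: the hardware sum includes the pseudo-header, so rebuilding the pseudo-header sum and subtracting it (csum_sub(x, y) == csum_add(x, ~y)) leaves exactly the checksum the stack expects. A user-space sketch of the round trip, with the csum helpers open-coded and made-up addresses:

#include <stdint.h>
#include <stdio.h>

static uint32_t csum_add(uint32_t a, uint32_t b)
{
    uint64_t s = (uint64_t)a + b;

    return (uint32_t)(s + (s >> 32));    /* fold the end-around carry */
}

static uint32_t csum_sub(uint32_t a, uint32_t b)
{
    return csum_add(a, ~b);    /* a - b in one's-complement arithmetic */
}

/* One's-complement sum over the IPv4 pseudo-header fields */
static uint32_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
                               uint16_t len, uint8_t proto)
{
    return csum_add(csum_add(saddr, daddr),
                    ((uint32_t)proto << 16) | len);
}

int main(void)
{
    uint32_t payload = 0x1234;    /* pretend L4 payload sum */
    uint32_t pseudo = pseudo_hdr_sum(0xc0a80001, 0xc0a80002, 40, 6);
    uint32_t hw = csum_add(payload, pseudo);    /* what the NIC reports */

    printf("0x%x\n", csum_sub(hw, pseudo));     /* 0x1234 recovered */
    return 0;
}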
be16_to_cpu(cqe->sl_vid); + /* This packet is eligible for GRO if it is: + * - DIX Ethernet (type interpretation) + * - TCP/IP (v4) + * - without IP options + * - not an IP fragment + * - no LLS polling in progress + */ + if (!mlx4_en_cq_busy_polling(cq) && + (dev->features & NETIF_F_GRO)) { + struct sk_buff *gro_skb = napi_get_frags(&cq->napi); + if (!gro_skb) + goto next; + + nr = mlx4_en_complete_rx_desc(priv, + rx_desc, frags, gro_skb, + length); + if (!nr) + goto next; + + if (ip_summed == CHECKSUM_COMPLETE) { + void *va = skb_frag_address(skb_shinfo(gro_skb)->frags); + if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + ring->csum_complete--; + } + } - __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); - } + skb_shinfo(gro_skb)->nr_frags = nr; + gro_skb->len = length; + gro_skb->data_len = length; + gro_skb->ip_summed = ip_summed; - if (dev->features & NETIF_F_RXHASH) - skb_set_hash(gro_skb, - be32_to_cpu(cqe->immed_rss_invalid), - PKT_HASH_TYPE_L3); + if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY) + gro_skb->encapsulation = 1; + if ((cqe->vlan_my_qpn & + cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && + (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + u16 vid = be16_to_cpu(cqe->sl_vid); - skb_record_rx_queue(gro_skb, cq->ring); - skb_mark_napi_id(gro_skb, &cq->napi); + __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); + } - if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { - timestamp = mlx4_en_get_cqe_ts(cqe); - mlx4_en_fill_hwtstamps(mdev, - skb_hwtstamps(gro_skb), - timestamp); - } + if (dev->features & NETIF_F_RXHASH) + skb_set_hash(gro_skb, + be32_to_cpu(cqe->immed_rss_invalid), + PKT_HASH_TYPE_L3); - napi_gro_frags(&cq->napi); - goto next; - } + skb_record_rx_queue(gro_skb, cq->ring); + skb_mark_napi_id(gro_skb, &cq->napi); - /* GRO not possible, complete processing here */ - ip_summed = CHECKSUM_UNNECESSARY; - } else { - ip_summed = CHECKSUM_NONE; - ring->csum_none++; + if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { + timestamp = mlx4_en_get_cqe_ts(cqe); + mlx4_en_fill_hwtstamps(mdev, + skb_hwtstamps(gro_skb), + timestamp); } - } else { - ip_summed = CHECKSUM_NONE; - ring->csum_none++; + + napi_gro_frags(&cq->napi); + goto next; } + /* GRO not possible, complete processing here */ skb = mlx4_en_rx_skb(priv, rx_desc, frags, length); if (!skb) { priv->stats.rx_dropped++; @@ -822,6 +928,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud goto next; } + if (ip_summed == CHECKSUM_COMPLETE) { + if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) { + ip_summed = CHECKSUM_NONE; + ring->csum_complete--; + ring->csum_none++; + } + } + skb->ip_summed = ip_summed; skb->protocol = eth_type_trans(skb, dev); skb_record_rx_queue(skb, cq->ring); @@ -879,8 +993,8 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq) struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); struct mlx4_en_priv *priv = netdev_priv(cq->dev); - if (priv->port_up) - napi_schedule(&cq->napi); + if (likely(priv->port_up)) + napi_schedule_irqoff(&cq->napi); else mlx4_en_arm_cq(priv, cq); } @@ -910,20 +1024,18 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) cpu_curr = smp_processor_id(); aff = irq_desc_get_irq_data(cq->irq_desc)->affinity; - if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) { - /* Current cpu is not according to smp_irq_affinity - - * probably affinity changed. 
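Both the GRO and non-GRO paths above share a demotion step: a frame tentatively counted as CHECKSUM_COMPLETE drops back to CHECKSUM_NONE when check_csum() fails, and the per-ring counters are corrected in the same breath. As a pure function:

#include <stdio.h>

enum ip_summed { CHECKSUM_NONE, CHECKSUM_COMPLETE };

struct ring_stats { unsigned long csum_complete, csum_none; };

static enum ip_summed maybe_demote(int check_csum_failed, enum ip_summed cur,
                                   struct ring_stats *ring)
{
    if (cur == CHECKSUM_COMPLETE && check_csum_failed) {
        ring->csum_complete--;    /* undo the tentative count */
        ring->csum_none++;
        return CHECKSUM_NONE;
    }
    return cur;
}

int main(void)
{
    struct ring_stats s = { .csum_complete = 1, .csum_none = 0 };

    maybe_demote(1, CHECKSUM_COMPLETE, &s);
    printf("%lu %lu\n", s.csum_complete, s.csum_none);    /* 0 1 */
    return 0;
}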
need to stop this NAPI - * poll, and restart it on the right CPU - */ - napi_complete(napi); - mlx4_en_arm_cq(priv, cq); - return 0; - } - } else { - /* Done for now */ - napi_complete(napi); - mlx4_en_arm_cq(priv, cq); + if (likely(cpumask_test_cpu(cpu_curr, aff))) + return budget; + + /* Current cpu is not according to smp_irq_affinity - + * probably affinity changed. need to stop this NAPI + * poll, and restart it on the right CPU + */ + done = 0; } + /* Done for now */ + napi_complete_done(napi, done); + mlx4_en_arm_cq(priv, cq); return done; } @@ -946,15 +1058,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) (eff_mtu > buf_size + frag_sizes[i]) ? frag_sizes[i] : eff_mtu - buf_size; priv->frag_info[i].frag_prefix_size = buf_size; - if (!i) { - priv->frag_info[i].frag_align = NET_IP_ALIGN; - priv->frag_info[i].frag_stride = - ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES); - } else { - priv->frag_info[i].frag_align = 0; - priv->frag_info[i].frag_stride = - ALIGN(frag_sizes[i], SMP_CACHE_BYTES); - } + priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i], + SMP_CACHE_BYTES); buf_size += priv->frag_info[i].frag_size; i++; } @@ -967,11 +1072,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) eff_mtu, priv->num_frags); for (i = 0; i < priv->num_frags; i++) { en_err(priv, - " frag:%d - size:%d prefix:%d align:%d stride:%d\n", + " frag:%d - size:%d prefix:%d stride:%d\n", i, priv->frag_info[i].frag_size, priv->frag_info[i].frag_prefix_size, - priv->frag_info[i].frag_align, priv->frag_info[i].frag_stride); } } @@ -1065,9 +1169,6 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) int i, qpn; int err = 0; int good_qps = 0; - static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC, - 0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD, - 0x593D56D9, 0xF3253C06, 0x2ADC1FFC}; en_dbg(DRV, priv, "Configuring rss steering\n"); err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num, @@ -1122,9 +1223,19 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) rss_context->flags = rss_mask; rss_context->hash_fn = MLX4_RSS_HASH_TOP; - for (i = 0; i < 10; i++) - rss_context->rss_key[i] = cpu_to_be32(rsskey[i]); - + if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) { + rss_context->hash_fn = MLX4_RSS_HASH_XOR; + } else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) { + rss_context->hash_fn = MLX4_RSS_HASH_TOP; + memcpy(rss_context->rss_key, priv->rss_key, + MLX4_EN_RSS_KEY_SIZE); + netdev_rss_key_fill(rss_context->rss_key, + MLX4_EN_RSS_KEY_SIZE); + } else { + en_err(priv, "Unknown RSS hash function requested\n"); + err = -EINVAL; + goto indir_err; + } err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context, &rss_map->indir_qp, &rss_map->indir_state); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 49d5afc7cfb8..2d8ee66138e8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -129,11 +129,15 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv) if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) return -ENOMEM; - /* The device supports 1G, 10G and 40G speeds */ - if (priv->port_state.link_speed != 1000 && - priv->port_state.link_speed != 10000 && - priv->port_state.link_speed != 40000) + /* The device supports 100M, 1G, 10G, 20G, 40G and 56G speed */ + if (priv->port_state.link_speed != SPEED_100 && + priv->port_state.link_speed != SPEED_1000 && + priv->port_state.link_speed != SPEED_10000 && + priv->port_state.link_speed != SPEED_20000 
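The rewritten poll exit can be read as a three-way decision: budget exhausted on the right CPU keeps polling (return budget); budget exhausted on the wrong CPU completes with zero credit so NAPI reschedules on the correct one; partial work completes normally. A sketch of just that decision:

#include <stdbool.h>
#include <stdio.h>

/* Returns the value the poll routine reports to the NAPI core. */
static int poll_exit(int done, int budget, bool cpu_matches_affinity)
{
    if (done == budget) {        /* budget exhausted */
        if (cpu_matches_affinity)
            return budget;       /* keep polling on this CPU */
        done = 0;                /* migrate: complete with no credit */
    }
    /* here the driver calls napi_complete_done(napi, done) and rearms */
    return done;
}

int main(void)
{
    printf("%d\n", poll_exit(64, 64, true));     /* 64: stay in poll */
    printf("%d\n", poll_exit(64, 64, false));    /* 0: reschedule    */
    printf("%d\n", poll_exit(10, 64, true));     /* 10: done for now */
    return 0;
}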
&& + priv->port_state.link_speed != SPEED_40000 && + priv->port_state.link_speed != SPEED_56000) return priv->port_state.link_speed; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 34c137878545..d0cecbdd9ba8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -479,8 +479,8 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq) struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); struct mlx4_en_priv *priv = netdev_priv(cq->dev); - if (priv->port_up) - napi_schedule(&cq->napi); + if (likely(priv->port_up)) + napi_schedule_irqoff(&cq->napi); else mlx4_en_arm_cq(priv, cq); } @@ -836,8 +836,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) * whether LSO is used */ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { - tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | - MLX4_WQE_CTRL_TCP_UDP_CSUM); + if (!skb->encapsulation) + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | + MLX4_WQE_CTRL_TCP_UDP_CSUM); + else + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM); ring->tx_csum++; } diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index a49c9d11d8a5..d68b264cee4d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -1026,6 +1026,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev, pr_cont("\n"); } } + synchronize_irq(eq->irq); mlx4_mtt_cleanup(dev, &eq->mtt); for (i = 0; i < npages; ++i) @@ -1122,8 +1123,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) goto err_out_free; } - err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, - dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0); + err = mlx4_bitmap_init(&priv->eq_table.bitmap, + roundup_pow_of_two(dev->caps.num_eqs), + dev->caps.num_eqs - 1, + dev->caps.reserved_eqs, + roundup_pow_of_two(dev->caps.num_eqs) - + dev->caps.num_eqs); if (err) goto err_out_free; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 2e88a235e26b..4251f81a0275 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -139,7 +139,12 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [10] = "TCP/IP offloads/flow-steering for VXLAN support", [11] = "MAD DEMUX (Secure-Host) support", [12] = "Large cache line (>64B) CQE stride support", - [13] = "Large cache line (>64B) EQE stride support" + [13] = "Large cache line (>64B) EQE stride support", + [14] = "Ethernet protocol control support", + [15] = "Ethernet Backplane autoneg support", + [16] = "CONFIG DEV support", + [17] = "Asymmetric EQs support", + [18] = "More than 80 VFs support" }; int i; @@ -174,6 +179,61 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) return err; } +int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 in_modifier; + u8 field; + u16 field16; + int err; + +#define QUERY_FUNC_BUS_OFFSET 0x00 +#define QUERY_FUNC_DEVICE_OFFSET 0x01 +#define QUERY_FUNC_FUNCTION_OFFSET 0x01 +#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET 0x03 +#define QUERY_FUNC_RSVD_EQS_OFFSET 0x04 +#define QUERY_FUNC_MAX_EQ_OFFSET 0x06 +#define QUERY_FUNC_RSVD_UARS_OFFSET 0x0b + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + 
outbox = mailbox->buf; + + in_modifier = slave; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0, + MLX4_CMD_QUERY_FUNC, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET); + func->bus = field & 0xf; + MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET); + func->device = field & 0xf1; + MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET); + func->function = field & 0x7; + MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET); + func->physical_function = field & 0xf; + MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET); + func->rsvd_eqs = field16 & 0xffff; + MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET); + func->max_eq = field16 & 0xffff; + MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET); + func->rsvd_uars = field & 0x0f; + + mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n", + func->bus, func->device, func->function, func->physical_function, + func->max_eq, func->rsvd_eqs, func->rsvd_uars); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -184,6 +244,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, u8 field, port; u32 size, proxy_qp, qkey; int err = 0; + struct mlx4_func func; #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 @@ -228,6 +289,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, #define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 +#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31) if (vhcr->op_modifier == 1) { struct mlx4_active_ports actv_ports = @@ -306,11 +368,24 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, size = dev->caps.num_cqs; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); - size = dev->caps.num_eqs; - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); - - size = dev->caps.reserved_eqs; - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) || + mlx4_QUERY_FUNC(dev, &func, slave)) { + size = vhcr->in_modifier & + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? + dev->caps.num_eqs : + rounddown_pow_of_two(dev->caps.num_eqs); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + size = dev->caps.reserved_eqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + } else { + size = vhcr->in_modifier & + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? 
+ func.max_eq : + rounddown_pow_of_two(func.max_eq); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + size = func.rsvd_eqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + } size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave]; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); @@ -332,7 +407,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, return err; } -int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, +int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, struct mlx4_func_cap *func_cap) { struct mlx4_cmd_mailbox *mailbox; @@ -340,14 +415,17 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, u8 field, op_modifier; u32 size, qkey; int err = 0, quotas = 0; + u32 in_modifier; op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ + in_modifier = op_modifier ? gen_or_port : + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier, + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier, MLX4_CMD_QUERY_FUNC_CAP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) @@ -519,6 +597,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21 #define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22 #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23 +#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET 0x26 #define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b @@ -560,6 +639,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a +#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET 0x7a #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 @@ -571,8 +651,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 +#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 +#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d #define QUERY_DEV_CAP_VXLAN 0x9e #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 @@ -605,7 +687,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); dev_cap->max_mpts = 1 << (field & 0x3f); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); - dev_cap->reserved_eqs = field & 0xf; + dev_cap->reserved_eqs = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); dev_cap->max_eqs = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); @@ -616,6 +698,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->reserved_mrws = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET); dev_cap->max_mtt_seg = 1 << (field & 0x3f); + MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET); + dev_cap->num_sys_eqs = size & 0xfff; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET); 
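The QUERY_FUNC_CAP change negotiates EQ counts per guest capability: a slave that sets the high in_modifier bit receives the exact (possibly non-power-of-2) maximum, while an older slave gets the value rounded down to a power of two. A sketch with rounddown_pow_of_two() open-coded:

#include <stdint.h>
#include <stdio.h>

#define SUPPORTS_NON_POW2_EQS (1u << 31)

static uint32_t rounddown_pow_of_two(uint32_t n)
{
    uint32_t p = 1;

    while (p <= n / 2)
        p <<= 1;
    return p;
}

static uint32_t reported_max_eqs(uint32_t in_modifier, uint32_t max_eq)
{
    return (in_modifier & SUPPORTS_NON_POW2_EQS) ?
        max_eq : rounddown_pow_of_two(max_eq);
}

int main(void)
{
    printf("%u\n", reported_max_eqs(0, 48));                      /* 32 */
    printf("%u\n", reported_max_eqs(SUPPORTS_NON_POW2_EQS, 48));  /* 48 */
    return 0;
}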
dev_cap->max_requester_per_qp = 1 << (field & 0x3f); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); @@ -737,15 +821,22 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); dev_cap->max_rq_desc_sz = size; MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + if (field & (1 << 5)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL; if (field & (1 << 6)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE; if (field & (1 << 7)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; - MLX4_GET(dev_cap->bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); + if (field & 0x20) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV; MLX4_GET(dev_cap->reserved_lkey, outbox, QUERY_DEV_CAP_RSVD_LKEY_OFFSET); + MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); + if (field32 & (1 << 0)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); if (field & 1<<6) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; @@ -770,6 +861,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL; if (field32 & (1 << 20)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM; + if (field32 & (1 << 21)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS; if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { for (i = 1; i <= dev_cap->num_ports; ++i) { @@ -836,8 +929,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) * we can't use any EQs whose doorbell falls on that page, * even if the EQ itself isn't reserved. */ - dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, - dev_cap->reserved_eqs); + if (dev_cap->num_sys_eqs == 0) + dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, + dev_cap->reserved_eqs); + else + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS; mlx4_dbg(dev, "Max ICM size %lld MB\n", (unsigned long long) dev_cap->max_icm_sz >> 20); @@ -847,8 +943,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz); mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz); - mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", - dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz); + mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n", + dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs, + dev_cap->eqc_entry_sz); mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", dev_cap->reserved_mrws, dev_cap->reserved_mtts); mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", @@ -1394,6 +1491,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) +#define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a) #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) #define INIT_HCA_MCAST_OFFSET 0x0c0 @@ -1497,6 +1595,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); MLX4_PUT(inbox, param->eqc_base, 
INIT_HCA_EQC_BASE_OFFSET); MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); + MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET); MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); @@ -1607,6 +1706,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); + MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); @@ -1841,14 +1941,18 @@ int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) struct mlx4_config_dev { __be32 update_flags; - __be32 rsdv1[3]; + __be32 rsvd1[3]; __be16 vxlan_udp_dport; __be16 rsvd2; + __be32 rsvd3[27]; + __be16 rsvd4; + u8 rsvd5; + u8 rx_checksum_val; }; #define MLX4_VXLAN_UDP_DPORT (1 << 0) -static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) +static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) { int err; struct mlx4_cmd_mailbox *mailbox; @@ -1866,6 +1970,77 @@ static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_ return err; } +static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) +{ + int err; + struct mlx4_cmd_mailbox *mailbox; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + if (!err) + memcpy(config_dev, mailbox->buf, sizeof(*config_dev)); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +/* Conversion between the HW values and the actual functionality. + * The value represented by the array index, + * and the functionality determined by the flags. 
+ */ +static const u8 config_dev_csum_flags[] = { + [0] = 0, + [1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP, + [2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP | + MLX4_RX_CSUM_MODE_L4, + [3] = MLX4_RX_CSUM_MODE_L4 | + MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP | + MLX4_RX_CSUM_MODE_MULTI_VLAN +}; + +int mlx4_config_dev_retrieval(struct mlx4_dev *dev, + struct mlx4_config_dev_params *params) +{ + struct mlx4_config_dev config_dev; + int err; + u8 csum_mask; + +#define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7 +#define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0 +#define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4 + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV)) + return -ENOTSUPP; + + err = mlx4_CONFIG_DEV_get(dev, &config_dev); + if (err) + return err; + + csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) & + CONFIG_DEV_RX_CSUM_MODE_MASK; + + if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) + return -EINVAL; + params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask]; + + csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) & + CONFIG_DEV_RX_CSUM_MODE_MASK; + + if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) + return -EINVAL; + params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask]; + + params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval); + int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) { struct mlx4_config_dev config_dev; @@ -1874,7 +2049,7 @@ int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT); config_dev.vxlan_udp_dport = udp_port; - return mlx4_CONFIG_DEV(dev, &config_dev); + return mlx4_CONFIG_DEV_set(dev, &config_dev); } EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); @@ -2144,3 +2319,142 @@ out: mlx4_free_cmd_mailbox(dev, mailbox); return err; } + +/* Access Reg commands */ +enum mlx4_access_reg_masks { + MLX4_ACCESS_REG_STATUS_MASK = 0x7f, + MLX4_ACCESS_REG_METHOD_MASK = 0x7f, + MLX4_ACCESS_REG_LEN_MASK = 0x7ff +}; + +struct mlx4_access_reg { + __be16 constant1; + u8 status; + u8 resrvd1; + __be16 reg_id; + u8 method; + u8 constant2; + __be32 resrvd2[2]; + __be16 len_const; + __be16 resrvd3; +#define MLX4_ACCESS_REG_HEADER_SIZE (20) + u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE]; +} __attribute__((__packed__)); + +/** + * mlx4_ACCESS_REG - Generic access reg command. + * @dev: mlx4_dev. + * @reg_id: register ID to access. + * @method: Access method Read/Write. + * @reg_len: register length to Read/Write in bytes. + * @reg_data: reg_data pointer to Read/Write From/To. + * + * Access ConnectX registers FW command. + * Returns 0 on success and copies outbox mlx4_access_reg data + * field into reg_data or a negative error code. 
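mlx4_config_dev_retrieval() treats rx_checksum_val as two 3-bit fields (port 1 at bit 0, port 2 at bit 4), each an index into config_dev_csum_flags guarded by a bounds check. The extraction in isolation, with a placeholder flags table standing in for the real MLX4_RX_CSUM_MODE_* combinations:

#include <stdint.h>
#include <stdio.h>

#define CSUM_MODE_MASK  0x7
#define PORT1_SHIFT     0
#define PORT2_SHIFT     4

static const uint8_t csum_flags[] = { 0x0, 0x1, 0x3, 0x6 };  /* demo table */

static int decode_port_csum(uint8_t rx_checksum_val, int shift, uint8_t *flags)
{
    uint8_t idx = (rx_checksum_val >> shift) & CSUM_MODE_MASK;

    if (idx >= sizeof(csum_flags) / sizeof(csum_flags[0]))
        return -1;    /* firmware reported an unknown mode */
    *flags = csum_flags[idx];
    return 0;
}

int main(void)
{
    uint8_t f;

    if (!decode_port_csum(0x21, PORT2_SHIFT, &f))    /* port 2 -> idx 2 */
        printf("0x%x\n", f);                         /* 0x3 */
    return 0;
}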
+ */ +static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id, + enum mlx4_access_reg_method method, + u16 reg_len, void *reg_data) +{ + struct mlx4_cmd_mailbox *inbox, *outbox; + struct mlx4_access_reg *inbuf, *outbuf; + int err; + + inbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inbox)) + return PTR_ERR(inbox); + + outbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outbox)) { + mlx4_free_cmd_mailbox(dev, inbox); + return PTR_ERR(outbox); + } + + inbuf = inbox->buf; + outbuf = outbox->buf; + + inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4); + inbuf->constant2 = 0x1; + inbuf->reg_id = cpu_to_be16(reg_id); + inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK; + + reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data))); + inbuf->len_const = + cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) | + ((0x3) << 12)); + + memcpy(inbuf->reg_data, reg_data, reg_len); + err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0, + MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_WRAPPED); + if (err) + goto out; + + if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) { + err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK; + mlx4_err(dev, + "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n", + reg_id, err); + goto out; + } + + memcpy(reg_data, outbuf->reg_data, reg_len); +out: + mlx4_free_cmd_mailbox(dev, inbox); + mlx4_free_cmd_mailbox(dev, outbox); + return err; +} + +/* ConnectX registers IDs */ +enum mlx4_reg_id { + MLX4_REG_ID_PTYS = 0x5004, +}; + +/** + * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed) + * register + * @dev: mlx4_dev. + * @method: Access method Read/Write. + * @ptys_reg: PTYS register data pointer. + * + * Access ConnectX PTYS register, to Read/Write Port Type/Speed + * configuration + * Returns 0 on success or a negative error code. 
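The len_const field built in mlx4_ACCESS_REG() packs the register length in dwords plus one header dword into the low 11 bits, with the constant 0x3 in bits 12-13 (the byte swap to big-endian is omitted here). The packing as a one-liner:

#include <stdint.h>
#include <stdio.h>

#define LEN_MASK 0x7ff

static uint16_t pack_len_const(uint16_t reg_len_bytes)
{
    return (uint16_t)(((reg_len_bytes / 4 + 1) & LEN_MASK) | (0x3 << 12));
}

int main(void)
{
    /* 64 bytes -> 16 dwords + 1 header dword = 17 = 0x11 -> 0x3011 */
    printf("0x%04x\n", (unsigned)pack_len_const(64));
    return 0;
}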
+ */ +int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, + enum mlx4_access_reg_method method, + struct mlx4_ptys_reg *ptys_reg) +{ + return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS, + method, sizeof(*ptys_reg), ptys_reg); +} +EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG); + +int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_access_reg *inbuf = inbox->buf; + u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK; + u16 reg_id = be16_to_cpu(inbuf->reg_id); + + if (slave != mlx4_master_func_num(dev) && + method == MLX4_ACCESS_REG_WRITE) + return -EPERM; + + if (reg_id == MLX4_REG_ID_PTYS) { + struct mlx4_ptys_reg *ptys_reg = + (struct mlx4_ptys_reg *)inbuf->reg_data; + + ptys_reg->local_port = + mlx4_slave_convert_port(dev, slave, + ptys_reg->local_port); + } + + return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier, + 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 9b835aecac96..475215ee370f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -56,6 +56,7 @@ struct mlx4_dev_cap { int max_mpts; int reserved_eqs; int max_eqs; + int num_sys_eqs; int reserved_mtts; int max_mrw_sz; int reserved_mrws; @@ -145,6 +146,16 @@ struct mlx4_func_cap { u64 phys_port_id; }; +struct mlx4_func { + int bus; + int device; + int function; + int physical_function; + int rsvd_eqs; + int max_eq; + int rsvd_uars; +}; + struct mlx4_adapter { char board_id[MLX4_BOARD_ID_LEN]; u8 inta_pin; @@ -170,6 +181,7 @@ struct mlx4_init_hca_param { u8 log_num_srqs; u8 log_num_cqs; u8 log_num_eqs; + u16 num_sys_eqs; u8 log_rd_per_qp; u8 log_mc_table_sz; u8 log_mpt_sz; @@ -204,13 +216,14 @@ struct mlx4_set_ib_param { }; int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); -int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, +int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, struct mlx4_func_cap *func_cap); int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); +int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave); int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm); int mlx4_UNMAP_FA(struct mlx4_dev *dev); int mlx4_RUN_FW(struct mlx4_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 90de6e1ad06e..3044f9e623cb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -197,6 +197,29 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev) dev->caps.port_mask[i] = dev->caps.port_type[i]; } +enum { + MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, +}; + +static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) +{ + int err = 0; + struct mlx4_func func; + + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + err = mlx4_QUERY_FUNC(dev, &func, 0); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + return err; + } + dev_cap->max_eqs = func.max_eq; + dev_cap->reserved_eqs = func.rsvd_eqs; + dev_cap->reserved_uars = func.rsvd_uars; + err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; + } + return err; +} + static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) { struct 
mlx4_caps *dev_cap = &dev->caps; @@ -261,7 +284,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) } dev->caps.num_ports = dev_cap->num_ports; - dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM; + dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; + dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? + dev->caps.num_sys_eqs : + MLX4_MAX_EQ_NUM; for (i = 1; i <= dev->caps.num_ports; ++i) { dev->caps.vl_cap[i] = dev_cap->max_vl[i]; dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; @@ -631,7 +657,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) struct mlx4_dev_cap dev_cap; struct mlx4_func_cap func_cap; struct mlx4_init_hca_param hca_param; - int i; + u8 i; memset(&hca_param, 0, sizeof(hca_param)); err = mlx4_QUERY_HCA(dev, &hca_param); @@ -732,7 +758,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) } for (i = 1; i <= dev->caps.num_ports; ++i) { - err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap); + err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap); if (err) { mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", i, err); @@ -901,9 +927,12 @@ static ssize_t set_port_type(struct device *dev, struct mlx4_priv *priv = mlx4_priv(mdev); enum mlx4_port_type types[MLX4_MAX_PORTS]; enum mlx4_port_type new_types[MLX4_MAX_PORTS]; + static DEFINE_MUTEX(set_port_type_mutex); int i; int err = 0; + mutex_lock(&set_port_type_mutex); + if (!strcmp(buf, "ib\n")) info->tmp_type = MLX4_PORT_TYPE_IB; else if (!strcmp(buf, "eth\n")) @@ -912,7 +941,8 @@ static ssize_t set_port_type(struct device *dev, info->tmp_type = MLX4_PORT_TYPE_AUTO; else { mlx4_err(mdev, "%s is not supported port type\n", buf); - return -EINVAL; + err = -EINVAL; + goto err_out; } mlx4_stop_sense(mdev); @@ -958,6 +988,9 @@ static ssize_t set_port_type(struct device *dev, out: mlx4_start_sense(mdev); mutex_unlock(&priv->port_mutex); +err_out: + mutex_unlock(&set_port_type_mutex); + return err ? err : count; } @@ -1123,8 +1156,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, if (err) goto err_srq; - num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs : - dev->caps.num_eqs; + num_eqs = dev->phys_caps.num_phys_eqs; err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, cmpt_base + ((u64) (MLX4_CMPT_TYPE_EQ * @@ -1186,8 +1218,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, } - num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs : - dev->caps.num_eqs; + num_eqs = dev->phys_caps.num_phys_eqs; err = mlx4_init_icm_table(dev, &priv->eq_table.table, init_hca->eqc_base, dev_cap->eqc_entry_sz, num_eqs, num_eqs, 0, 0); @@ -1466,6 +1497,12 @@ static void mlx4_close_hca(struct mlx4_dev *dev) else { mlx4_CLOSE_HCA(dev, 0); mlx4_free_icms(dev); + } +} + +static void mlx4_close_fw(struct mlx4_dev *dev) +{ + if (!mlx4_is_slave(dev)) { mlx4_UNMAP_FA(dev); mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); } @@ -1612,16 +1649,10 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev, == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? 
"vxlan" : "none"); } -static int mlx4_init_hca(struct mlx4_dev *dev) +static int mlx4_init_fw(struct mlx4_dev *dev) { - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_adapter adapter; - struct mlx4_dev_cap dev_cap; struct mlx4_mod_stat_cfg mlx4_cfg; - struct mlx4_profile profile; - struct mlx4_init_hca_param init_hca; - u64 icm_size; - int err; + int err = 0; if (!mlx4_is_slave(dev)) { err = mlx4_QUERY_FW(dev); @@ -1644,7 +1675,23 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); if (err) mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); + } + + return err; +} + +static int mlx4_init_hca(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_adapter adapter; + struct mlx4_dev_cap dev_cap; + struct mlx4_profile profile; + struct mlx4_init_hca_param init_hca; + u64 icm_size; + struct mlx4_config_dev_params params; + int err; + if (!mlx4_is_slave(dev)) { err = mlx4_dev_cap(dev, &dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); @@ -1696,6 +1743,19 @@ static int mlx4_init_hca(struct mlx4_dev *dev) mlx4_err(dev, "INIT_HCA command failed, aborting\n"); goto err_free_icm; } + + if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + err = mlx4_query_func(dev, &dev_cap); + if (err < 0) { + mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); + goto err_stop_fw; + } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { + dev->caps.num_eqs = dev_cap.max_eqs; + dev->caps.reserved_eqs = dev_cap.reserved_eqs; + dev->caps.reserved_uars = dev_cap.reserved_uars; + } + } + /* * If TS is supported by FW * read HCA frequency by QUERY_HCA command @@ -1755,6 +1815,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev) goto unmap_bf; } + /* Query CONFIG_DEV parameters */ + err = mlx4_config_dev_retrieval(dev, ¶ms); + if (err && err != -ENOTSUPP) { + mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); + } else if (!err) { + dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; + dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; + } priv->eq_table.inta_pin = adapter.inta_pin; memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); @@ -2054,12 +2122,11 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct msix_entry *entries; - int nreq = min_t(int, dev->caps.num_ports * - min_t(int, num_online_cpus() + 1, - MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX); int i; if (msi_x) { + int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ; + nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, nreq); @@ -2259,6 +2326,71 @@ static void mlx4_free_ownership(struct mlx4_dev *dev) iounmap(owner); } +#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ + !!((flags) & MLX4_FLAG_MASTER)) + +static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, + u8 total_vfs, int existing_vfs) +{ + u64 dev_flags = dev->flags; + + dev->dev_vfs = kzalloc( + total_vfs * sizeof(*dev->dev_vfs), + GFP_KERNEL); + if (NULL == dev->dev_vfs) { + mlx4_err(dev, "Failed to allocate memory for VFs\n"); + goto disable_sriov; + } else if (!(dev->flags & MLX4_FLAG_SRIOV)) { + int err = 0; + + atomic_inc(&pf_loading); + if (existing_vfs) { + if (existing_vfs != total_vfs) + mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", + existing_vfs, total_vfs); + } else { + mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); + err = pci_enable_sriov(pdev, total_vfs); + 
} + if (err) { + mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", + err); + atomic_dec(&pf_loading); + goto disable_sriov; + } else { + mlx4_warn(dev, "Running in master mode\n"); + dev_flags |= MLX4_FLAG_SRIOV | + MLX4_FLAG_MASTER; + dev_flags &= ~MLX4_FLAG_SLAVE; + dev->num_vfs = total_vfs; + } + } + return dev_flags; + +disable_sriov: + dev->num_vfs = 0; + kfree(dev->dev_vfs); + return dev_flags & ~MLX4_FLAG_MASTER; +} + +enum { + MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, +}; + +static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, + int *nvfs) +{ + int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; + /* Checking for 64 VFs as a limitation of CX2 */ + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && + requested_vfs >= 64) { + mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", + requested_vfs); + return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; + } + return 0; +} + static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, int total_vfs, int *nvfs, struct mlx4_priv *priv) { @@ -2267,6 +2399,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, int err; int port; int i; + struct mlx4_dev_cap *dev_cap = NULL; int existing_vfs = 0; dev = &priv->dev; @@ -2303,40 +2436,6 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, } } - if (total_vfs) { - mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", - total_vfs); - dev->dev_vfs = kzalloc( - total_vfs * sizeof(*dev->dev_vfs), - GFP_KERNEL); - if (NULL == dev->dev_vfs) { - mlx4_err(dev, "Failed to allocate memory for VFs\n"); - err = -ENOMEM; - goto err_free_own; - } else { - atomic_inc(&pf_loading); - existing_vfs = pci_num_vf(pdev); - if (existing_vfs) { - err = 0; - if (existing_vfs != total_vfs) - mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", - existing_vfs, total_vfs); - } else { - err = pci_enable_sriov(pdev, total_vfs); - } - if (err) { - mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", - err); - atomic_dec(&pf_loading); - } else { - mlx4_warn(dev, "Running in master mode\n"); - dev->flags |= MLX4_FLAG_SRIOV | - MLX4_FLAG_MASTER; - dev->num_vfs = total_vfs; - } - } - } - atomic_set(&priv->opreq_count, 0); INIT_WORK(&priv->opreq_task, mlx4_opreq_action); @@ -2350,6 +2449,12 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, mlx4_err(dev, "Failed to reset HCA, aborting\n"); goto err_sriov; } + + if (total_vfs) { + existing_vfs = pci_num_vf(pdev); + dev->flags = MLX4_FLAG_MASTER; + dev->num_vfs = total_vfs; + } } slave_start: @@ -2363,9 +2468,10 @@ slave_start: * before posting commands. 
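[Editor's example] The SRIOV_VALID_STATE() macro introduced above simply demands that the SR-IOV and master flags agree: a PF that enabled SR-IOV must be in master mode, and a device demoted from master mode must not still claim SR-IOV. A quick standalone check (the flag bit values here are illustrative; the real enum lives in include/linux/mlx4/device.h):

#include <assert.h>
#include <stdio.h>

#define MLX4_FLAG_MASTER (1 << 2) /* illustrative value */
#define MLX4_FLAG_SRIOV  (1 << 4) /* illustrative value */

#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) == \
                                  !!((flags) & MLX4_FLAG_MASTER))

int main(void)
{
        assert(SRIOV_VALID_STATE(0));                                   /* neither set */
        assert(SRIOV_VALID_STATE(MLX4_FLAG_SRIOV | MLX4_FLAG_MASTER));  /* both set */
        assert(!SRIOV_VALID_STATE(MLX4_FLAG_SRIOV));                    /* SR-IOV without master */
        assert(!SRIOV_VALID_STATE(MLX4_FLAG_MASTER));                   /* master without SR-IOV */
        puts("SRIOV_VALID_STATE behaves as expected");
        return 0;
}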
Also, init num_slaves before calling * mlx4_init_hca */ if (mlx4_is_mfunc(dev)) { - if (mlx4_is_master(dev)) + if (mlx4_is_master(dev)) { dev->num_slaves = MLX4_MAX_NUM_SLAVES; - else { + + } else { dev->num_slaves = 0; err = mlx4_multi_func_init(dev); if (err) { @@ -2375,17 +2481,109 @@ slave_start: } } + err = mlx4_init_fw(dev); + if (err) { + mlx4_err(dev, "Failed to init fw, aborting.\n"); + goto err_mfunc; + } + + if (mlx4_is_master(dev)) { + if (!dev_cap) { + dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); + + if (!dev_cap) { + err = -ENOMEM; + goto err_fw; + } + + err = mlx4_QUERY_DEV_CAP(dev, dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + goto err_fw; + } + + if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) + goto err_fw; + + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { + u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, + existing_vfs); + + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); + dev->flags = dev_flags; + if (!SRIOV_VALID_STATE(dev->flags)) { + mlx4_err(dev, "Invalid SRIOV state\n"); + goto err_sriov; + } + err = mlx4_reset(dev); + if (err) { + mlx4_err(dev, "Failed to reset HCA, aborting.\n"); + goto err_sriov; + } + goto slave_start; + } + } else { + /* Legacy mode FW requires SRIOV to be enabled before + * doing QUERY_DEV_CAP, since max_eq's value is different if + * SRIOV is enabled. + */ + memset(dev_cap, 0, sizeof(*dev_cap)); + err = mlx4_QUERY_DEV_CAP(dev, dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + goto err_fw; + } + + if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) + goto err_fw; + } + } + err = mlx4_init_hca(dev); if (err) { if (err == -EACCES) { /* Not primary Physical function * Running in slave mode */ - mlx4_cmd_cleanup(dev); + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); + /* We're not a PF */ + if (dev->flags & MLX4_FLAG_SRIOV) { + if (!existing_vfs) + pci_disable_sriov(pdev); + if (mlx4_is_master(dev)) + atomic_dec(&pf_loading); + dev->flags &= ~MLX4_FLAG_SRIOV; + } + if (!mlx4_is_slave(dev)) + mlx4_free_ownership(dev); dev->flags |= MLX4_FLAG_SLAVE; dev->flags &= ~MLX4_FLAG_MASTER; goto slave_start; } else - goto err_mfunc; + goto err_fw; + } + + if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { + u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs); + + if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); + dev->flags = dev_flags; + err = mlx4_cmd_init(dev); + if (err) { + /* Only VHCR is cleaned up, so could still + * send FW commands + */ + mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); + goto err_close; + } + } else { + dev->flags = dev_flags; + } + + if (!SRIOV_VALID_STATE(dev->flags)) { + mlx4_err(dev, "Invalid SRIOV state\n"); + goto err_close; + } } /* check if the device is functioning at its maximum possible speed. 
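[Editor's example] The -EACCES branch above is the heart of the retry: a function that fails INIT_HCA because it is not the primary PF tears down its master-only state, swaps MLX4_FLAG_MASTER for MLX4_FLAG_SLAVE, and jumps back to slave_start. A toy model of that control flow (all names invented for illustration; this is not the driver's real API):

#include <stdio.h>
#include <errno.h>

enum mode { MODE_MASTER, MODE_SLAVE };

/* Pretend firmware: reports -EACCES to any function that is not
 * the primary physical function. */
static int init_hca(enum mode mode)
{
        return mode == MODE_MASTER ? -EACCES : 0;
}

int main(void)
{
        enum mode mode = MODE_MASTER;
        int err;

slave_start:
        err = init_hca(mode);
        if (err == -EACCES && mode == MODE_MASTER) {
                /* Not the primary PF: drop master-only state and
                 * retry as a slave, mirroring the driver flow. */
                mode = MODE_SLAVE;
                goto slave_start;
        }
        printf("initialized as %s (err=%d)\n",
               mode == MODE_MASTER ? "master" : "slave", err);
        return err;
}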
@@ -2540,12 +2738,15 @@ err_master_mfunc: err_close: mlx4_close_hca(dev); +err_fw: + mlx4_close_fw(dev); + err_mfunc: if (mlx4_is_slave(dev)) mlx4_multi_func_cleanup(dev); err_cmd: - mlx4_cmd_cleanup(dev); + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); err_sriov: if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) @@ -2556,10 +2757,10 @@ err_sriov: kfree(priv->dev.dev_vfs); -err_free_own: if (!mlx4_is_slave(dev)) mlx4_free_ownership(dev); + kfree(dev_cap); return err; } @@ -2787,15 +2988,17 @@ static void mlx4_unload_one(struct pci_dev *pdev) if (mlx4_is_master(dev)) mlx4_multi_func_cleanup(dev); mlx4_close_hca(dev); + mlx4_close_fw(dev); if (mlx4_is_slave(dev)) mlx4_multi_func_cleanup(dev); - mlx4_cmd_cleanup(dev); + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); if (dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { mlx4_warn(dev, "Disabling SR-IOV\n"); pci_disable_sriov(pdev); + dev->flags &= ~MLX4_FLAG_SRIOV; dev->num_vfs = 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index ca0f98c95105..872843179f44 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -955,6 +955,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str, cur->ib.dst_gid_msk); break; + case MLX4_NET_TRANS_RULE_ID_VXLAN: + len += snprintf(buf + len, BUF_SIZE - len, + "VNID = %d ", be32_to_cpu(cur->vxlan.vni)); + break; case MLX4_NET_TRANS_RULE_ID_IPV6: break; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index de10dbb2e6ed..f48e7c3eecf8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -606,6 +606,7 @@ struct mlx4_cmd { u8 use_events; u8 toggle; u8 comm_toggle; + u8 initialized; }; enum { @@ -947,6 +948,11 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); +int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -1121,8 +1127,16 @@ int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave, int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe); +enum { + MLX4_CMD_CLEANUP_STRUCT = 1UL << 0, + MLX4_CMD_CLEANUP_POOL = 1UL << 1, + MLX4_CMD_CLEANUP_HCR = 1UL << 2, + MLX4_CMD_CLEANUP_VHCR = 1UL << 3, + MLX4_CMD_CLEANUP_ALL = (MLX4_CMD_CLEANUP_VHCR << 1) - 1 +}; + int mlx4_cmd_init(struct mlx4_dev *dev); -void mlx4_cmd_cleanup(struct mlx4_dev *dev); +void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask); int mlx4_multi_func_init(struct mlx4_dev *dev); void mlx4_multi_func_cleanup(struct mlx4_dev *dev); void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); @@ -1273,6 +1287,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); +int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); int mlx4_get_mgm_entry_size(struct mlx4_dev *dev); int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); diff --git 
a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 8fef65840b3b..ac48a8d91501 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -326,6 +326,7 @@ struct mlx4_en_rx_ring { #endif unsigned long csum_ok; unsigned long csum_none; + unsigned long csum_complete; int hwtstamp_rx_filter; cpumask_var_t affinity_mask; }; @@ -375,7 +376,6 @@ struct mlx4_en_port_profile { }; struct mlx4_en_profile { - int rss_xor; int udp_rss; u8 rss_mask; u32 active_ports; @@ -421,10 +421,16 @@ struct mlx4_en_rss_map { enum mlx4_qp_state indir_state; }; +enum mlx4_en_port_flag { + MLX4_EN_PORT_ANC = 1<<0, /* Auto-negotiation complete */ + MLX4_EN_PORT_ANE = 1<<1, /* Auto-negotiation enabled */ +}; + struct mlx4_en_port_state { int link_state; int link_speed; - int transciver; + int transceiver; + u32 flags; }; struct mlx4_en_pkt_stats { @@ -443,6 +449,7 @@ struct mlx4_en_port_stats { unsigned long rx_alloc_failed; unsigned long rx_chksum_good; unsigned long rx_chksum_none; + unsigned long rx_chksum_complete; unsigned long tx_chksum_offload; #define NUM_PORT_STATS 9 }; @@ -475,7 +482,6 @@ struct mlx4_en_frag_info { u16 frag_size; u16 frag_prefix_size; u16 frag_stride; - u16 frag_align; }; #ifdef CONFIG_MLX4_EN_DCB @@ -502,7 +508,8 @@ enum { MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2), /* whether we need to drop packets that hardware loopback-ed */ MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3), - MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4) + MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4), + MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5), }; #define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE) @@ -610,6 +617,8 @@ struct mlx4_en_priv { __be16 vxlan_port; u32 pflags; + u8 rss_key[MLX4_EN_RSS_KEY_SIZE]; + u8 rss_hash_fn; }; enum mlx4_en_wol { @@ -829,6 +838,13 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv); void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); +#define DEV_FEATURE_CHANGED(dev, new_features, feature) \ + ((dev->features & feature) ^ (new_features & feature)) + +int mlx4_en_reset_config(struct net_device *dev, + struct hwtstamp_config ts_config, + netdev_features_t new_features); + /* * Functions for time stamping */ @@ -838,9 +854,6 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev, u64 timestamp); void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev); void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev); -int mlx4_en_timestamp_config(struct net_device *dev, - int tx_type, - int rx_filter); /* Globals */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 193a6adb5d04..d6f549685c0f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -130,10 +130,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) err_out_free: for (i = 0; i <= buddy->max_order; ++i) - if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i])) - vfree(buddy->bits[i]); - else - kfree(buddy->bits[i]); + kvfree(buddy->bits[i]); err_out: kfree(buddy->bits); @@ -147,10 +144,7 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) int i; for (i = 0; i <= buddy->max_order; ++i) - if (is_vmalloc_addr(buddy->bits[i])) - vfree(buddy->bits[i]); - else - kfree(buddy->bits[i]); + kvfree(buddy->bits[i]); kfree(buddy->bits); kfree(buddy->num_free); diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 
94eeb2c7d7e4..30eb1ead0fe6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -1311,3 +1311,159 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, return 0; } EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); + +/* Cable Module Info */ +#define MODULE_INFO_MAX_READ 48 + +#define I2C_ADDR_LOW 0x50 +#define I2C_ADDR_HIGH 0x51 +#define I2C_PAGE_SIZE 256 + +/* Module Info Data */ +struct mlx4_cable_info { + u8 i2c_addr; + u8 page_num; + __be16 dev_mem_address; + __be16 reserved1; + __be16 size; + __be32 reserved2[2]; + u8 data[MODULE_INFO_MAX_READ]; +}; + +enum cable_info_err { + CABLE_INF_INV_PORT = 0x1, + CABLE_INF_OP_NOSUP = 0x2, + CABLE_INF_NOT_CONN = 0x3, + CABLE_INF_NO_EEPRM = 0x4, + CABLE_INF_PAGE_ERR = 0x5, + CABLE_INF_INV_ADDR = 0x6, + CABLE_INF_I2C_ADDR = 0x7, + CABLE_INF_QSFP_VIO = 0x8, + CABLE_INF_I2C_BUSY = 0x9, +}; + +#define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF) + +static inline const char *cable_info_mad_err_str(u16 mad_status) +{ + u8 err = MAD_STATUS_2_CABLE_ERR(mad_status); + + switch (err) { + case CABLE_INF_INV_PORT: + return "invalid port selected"; + case CABLE_INF_OP_NOSUP: + return "operation not supported for this port (the port is of type CX4 or internal)"; + case CABLE_INF_NOT_CONN: + return "cable is not connected"; + case CABLE_INF_NO_EEPRM: + return "the connected cable has no EPROM (passive copper cable)"; + case CABLE_INF_PAGE_ERR: + return "page number is greater than 15"; + case CABLE_INF_INV_ADDR: + return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)"; + case CABLE_INF_I2C_ADDR: + return "invalid I2C slave address"; + case CABLE_INF_QSFP_VIO: + return "at least one cable violates the QSFP specification and ignores the modsel signal"; + case CABLE_INF_I2C_BUSY: + return "I2C bus is constantly busy"; + } + return "Unknown Error"; +} + +/** + * mlx4_get_module_info - Read cable module eeprom data + * @dev: mlx4_dev. + * @port: port number. + * @offset: byte offset in eeprom to start reading data from. + * @size: num of bytes to read. + * @data: output buffer to put the requested data into. + * + * Reads cable module eeprom data, puts the outcome data into + * data pointer paramer. + * Returns num of read bytes on success or a negative error + * code. 
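[Editor's example] The read helper defined just below never crosses the 256-byte I2C page boundary: a read that starts in the low page is clamped at offset 256, and an offset at or past 256 is rebased onto slave address 0x51. A standalone model of that offset mapping (the 48-byte MODULE_INFO_MAX_READ clamp and the MAD plumbing are left out):

#include <stdio.h>
#include <stdint.h>

#define I2C_ADDR_LOW  0x50
#define I2C_ADDR_HIGH 0x51
#define I2C_PAGE_SIZE 256

static void map_eeprom_read(uint16_t offset, uint16_t size,
                            uint8_t *i2c_addr, uint16_t *dev_offset,
                            uint16_t *read_size)
{
        if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
                size -= offset + size - I2C_PAGE_SIZE;  /* clamp to low page */

        *i2c_addr = I2C_ADDR_LOW;
        if (offset >= I2C_PAGE_SIZE) {
                *i2c_addr = I2C_ADDR_HIGH;              /* high page, rebased offset */
                offset -= I2C_PAGE_SIZE;
        }
        *dev_offset = offset;
        *read_size = size;
}

int main(void)
{
        uint8_t addr;
        uint16_t off, sz;

        map_eeprom_read(250, 20, &addr, &off, &sz);
        printf("addr=0x%02x off=%u size=%u\n", addr, off, sz); /* 0x50 250 6 */

        map_eeprom_read(300, 16, &addr, &off, &sz);
        printf("addr=0x%02x off=%u size=%u\n", addr, off, sz); /* 0x51 44 16 */
        return 0;
}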
+ */ +int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, + u16 offset, u16 size, u8 *data) +{ + struct mlx4_cmd_mailbox *inbox, *outbox; + struct mlx4_mad_ifc *inmad, *outmad; + struct mlx4_cable_info *cable_info; + u16 i2c_addr; + int ret; + + if (size > MODULE_INFO_MAX_READ) + size = MODULE_INFO_MAX_READ; + + inbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inbox)) + return PTR_ERR(inbox); + + outbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outbox)) { + mlx4_free_cmd_mailbox(dev, inbox); + return PTR_ERR(outbox); + } + + inmad = (struct mlx4_mad_ifc *)(inbox->buf); + outmad = (struct mlx4_mad_ifc *)(outbox->buf); + + inmad->method = 0x1; /* Get */ + inmad->class_version = 0x1; + inmad->mgmt_class = 0x1; + inmad->base_version = 0x1; + inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */ + + if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE) + /* Cross pages reads are not allowed + * read until offset 256 in low page + */ + size -= offset + size - I2C_PAGE_SIZE; + + i2c_addr = I2C_ADDR_LOW; + if (offset >= I2C_PAGE_SIZE) { + /* Reset offset to high page */ + i2c_addr = I2C_ADDR_HIGH; + offset -= I2C_PAGE_SIZE; + } + + cable_info = (struct mlx4_cable_info *)inmad->data; + cable_info->dev_mem_address = cpu_to_be16(offset); + cable_info->page_num = 0; + cable_info->i2c_addr = i2c_addr; + cable_info->size = cpu_to_be16(size); + + ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3, + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (ret) + goto out; + + if (be16_to_cpu(outmad->status)) { + /* Mad returned with bad status */ + ret = be16_to_cpu(outmad->status); + mlx4_warn(dev, + "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n", + 0xFF60, port, i2c_addr, offset, size, + ret, cable_info_mad_err_str(ret)); + + if (i2c_addr == I2C_ADDR_HIGH && + MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR) + /* Some SFP cables do not support i2c slave + * address 0x51 (high page), abort silently. + */ + ret = 0; + else + ret = -ret; + goto out; + } + cable_info = (struct mlx4_cable_info *)outmad->data; + memcpy(data, cable_info->data, size); + ret = size; +out: + mlx4_free_cmd_mailbox(dev, inbox); + mlx4_free_cmd_mailbox(dev, outbox); + return ret; +} +EXPORT_SYMBOL(mlx4_get_module_info); diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index 14089d9e1667..2bf437aafc53 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -126,8 +126,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, profile[MLX4_RES_AUXC].num = request->num_qp; profile[MLX4_RES_SRQ].num = request->num_srq; profile[MLX4_RES_CQ].num = request->num_cq; - profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? - dev->phys_caps.num_phys_eqs : + profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? 
dev->phys_caps.num_phys_eqs : min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); profile[MLX4_RES_DMPT].num = request->num_mpt; profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; @@ -216,10 +215,18 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, init_hca->log_num_cqs = profile[i].log_num; break; case MLX4_RES_EQ: - dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs, - MAX_MSIX)); - init_hca->eqc_base = profile[i].start; - init_hca->log_num_eqs = ilog2(dev->caps.num_eqs); + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + init_hca->log_num_eqs = 0x1f; + init_hca->eqc_base = profile[i].start; + init_hca->num_sys_eqs = dev_cap->num_sys_eqs; + } else { + dev->caps.num_eqs = roundup_pow_of_two( + min_t(unsigned, + dev_cap->max_eqs, + MAX_MSIX)); + init_hca->eqc_base = profile[i].start; + init_hca->log_num_eqs = ilog2(dev->caps.num_eqs); + } break; case MLX4_RES_DMPT: dev->caps.num_mpts = profile[i].num; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 5d2498dcf536..16f617b5749e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -1546,7 +1546,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, switch (op) { case RES_OP_RESERVE: - count = get_param_l(&in_param); + count = get_param_l(&in_param) & 0xffffff; align = get_param_h(&in_param); err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); if (err) @@ -2872,6 +2872,23 @@ out_add: return err; } +int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + u8 get = vhcr->op_modifier; + + if (get != 1) + return -EPERM; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + + return err; +} + static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, int len, struct res_mtt **res) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 368c6c5ea014..a2853057c779 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1363,7 +1363,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) goto err_map; } - if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) { + if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { dev_err(&dev->pdev->dev, "command queue size overflow\n"); err = -EINVAL; goto err_map; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ed53291468f3..ab684463780b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -225,8 +225,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; - mlx5_core_dbg(dev, "event %s(%d) arrived\n", - eqe_type_str(eqe->type), eqe->type); + mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n", + eqe_type_str(eqe->type), eqe->type, rsn); mlx5_rsc_event(dev, rsn, eqe->type); break; @@ -374,15 +374,14 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s", name, pci_name(dev->pdev)); eq->eqn = out.eq_number; + eq->irqn = vecidx; + eq->dev = dev; + eq->doorbell = uar->map + 
MLX5_EQ_DOORBEL_OFFSET; err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, eq->name, eq); if (err) goto err_eq; - eq->irqn = vecidx; - eq->dev = dev; - eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; - err = mlx5_debug_eq_add(dev, eq); if (err) goto err_irq; @@ -391,7 +390,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, */ eq_update_ci(eq, 1); - mlx5_vfree(in); + kvfree(in); return 0; err_irq: @@ -401,7 +400,7 @@ err_eq: mlx5_cmd_destroy_eq(dev, eq->eqn); err_in: - mlx5_vfree(in); + kvfree(in); err_buf: mlx5_buf_free(dev, &eq->buf); @@ -420,6 +419,7 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) if (err) mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", eq->eqn); + synchronize_irq(table->msix_arr[eq->irqn].vector); mlx5_buf_free(dev, &eq->buf); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 3d8e8e489b2d..3f4525619a07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -43,6 +43,7 @@ #include <linux/mlx5/qp.h> #include <linux/mlx5/srq.h> #include <linux/debugfs.h> +#include <linux/kmod.h> #include <linux/mlx5/mlx5_ifc.h> #include "mlx5_core.h" @@ -225,7 +226,7 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev) table->msix_arr[i].entry = i; nvec = pci_enable_msix_range(dev->pdev, table->msix_arr, - MLX5_EQ_VEC_COMP_BASE, nvec); + MLX5_EQ_VEC_COMP_BASE + 1, nvec); if (nvec < 0) return nvec; @@ -840,6 +841,8 @@ struct mlx5_core_event_handler { void *data); }; +#define MLX5_IB_MOD "mlx5_ib" + static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -864,20 +867,24 @@ static int init_one(struct pci_dev *pdev, dev->profile = &profile[prof_sel]; dev->event = mlx5_core_event; + INIT_LIST_HEAD(&priv->ctx_list); + spin_lock_init(&priv->ctx_lock); err = mlx5_dev_init(dev, pdev); if (err) { dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err); goto out; } - INIT_LIST_HEAD(&priv->ctx_list); - spin_lock_init(&priv->ctx_lock); err = mlx5_register_device(dev); if (err) { dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); goto out_init; } + err = request_module_nowait(MLX5_IB_MOD); + if (err) + pr_info("failed request module on %s\n", MLX5_IB_MOD); + return 0; out_init: @@ -896,8 +903,12 @@ static void remove_one(struct pci_dev *pdev) } static const struct pci_device_id mlx5_core_pci_table[] = { - { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */ + { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */ + { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */ { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */ + { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */ + { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */ + { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */ { 0, } }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index d476918ef269..4fdaae9b54d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -349,7 +349,7 @@ out_4k: for (i--; i >= 0; i--) free_4k(dev, be64_to_cpu(in->pas[i])); out_free: - mlx5_vfree(in); + kvfree(in); return err; } @@ -400,7 +400,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, } out_free: - mlx5_vfree(out); + kvfree(out); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c 
b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 313965853e10..72c2d002c3b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -68,9 +68,9 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, memcpy(data_out, out->data, size_out); ex2: - mlx5_vfree(out); + kvfree(out); ex1: - mlx5_vfree(in); + kvfree(in); return err; } EXPORT_SYMBOL_GPL(mlx5_core_access_reg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 0a6348cefc01..06801d6f595e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -96,6 +96,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) int err; memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR); in.uarn = cpu_to_be32(uarn); err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 9e7e3f1dce3e..af099057f0e9 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -2913,16 +2913,11 @@ again: flags |= MXGEFW_FLAGS_SMALL; /* pad frames to at least ETH_ZLEN bytes */ - if (unlikely(skb->len < ETH_ZLEN)) { - if (skb_padto(skb, ETH_ZLEN)) { - /* The packet is gone, so we must - * return 0 */ - ss->stats.tx_dropped += 1; - return NETDEV_TX_OK; - } - /* adjust the len to account for the zero pad - * so that the nic can know how long it is */ - skb->len = ETH_ZLEN; + if (eth_skb_pad(skb)) { + /* The packet is gone, so we must + * return 0 */ + ss->stats.tx_dropped += 1; + return NETDEV_TX_OK; } } diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c index 9e4ddbba7036..66c2d50d5b8d 100644 --- a/drivers/net/ethernet/natsemi/macsonic.c +++ b/drivers/net/ethernet/natsemi/macsonic.c @@ -326,13 +326,9 @@ static int mac_onboard_sonic_probe(struct net_device *dev) macintosh_config->ident == MAC_MODEL_P588 || macintosh_config->ident == MAC_MODEL_P575 || macintosh_config->ident == MAC_MODEL_C610) { - unsigned long flags; int card_present; - local_irq_save(flags); card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS); - local_irq_restore(flags); - if (!card_present) { printk("none.\n"); return -ENODEV; diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 30d934d66356..44e8d7d25547 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1837,10 +1837,8 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return err; out: - if (mac->iob_pdev) - pci_dev_put(mac->iob_pdev); - if (mac->dma_pdev) - pci_dev_put(mac->dma_pdev); + pci_dev_put(mac->iob_pdev); + pci_dev_put(mac->dma_pdev); free_netdev(dev); out_disable_device: diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 0b2a1ccd276d..613037584d08 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2762,7 +2762,8 @@ netxen_fw_poll_work(struct work_struct *work) if (test_bit(__NX_RESETTING, &adapter->state)) goto reschedule; - if (test_bit(__NX_DEV_UP, &adapter->state)) { + if (test_bit(__NX_DEV_UP, &adapter->state) && + !(adapter->capabilities & 
NX_FW_CAPABILITY_LINK_NOTIFICATION)) { if (!adapter->has_link_events) { netxen_nic_handle_phy_intr(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index f5e29f7bdae3..1aa25b13ace1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -376,13 +376,14 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) } static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *netdev, const unsigned char *addr) + struct net_device *netdev, + const unsigned char *addr, u16 vid) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = -EOPNOTSUPP; if (!adapter->fdb_mac_learn) - return ndo_dflt_fdb_del(ndm, tb, netdev, addr); + return ndo_dflt_fdb_del(ndm, tb, netdev, addr, vid); if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) || qlcnic_sriov_check(adapter)) { @@ -401,13 +402,13 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *netdev, - const unsigned char *addr, u16 flags) + const unsigned char *addr, u16 vid, u16 flags) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = 0; if (!adapter->fdb_mac_learn) - return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags); + return ndo_dflt_fdb_add(ndm, tb, netdev, addr, vid, flags); if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) && !qlcnic_sriov_check(adapter)) { @@ -460,7 +461,7 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter) } static int qlcnic_get_phys_port_id(struct net_device *netdev, - struct netdev_phys_port_id *ppid) + struct netdev_phys_item_id *ppid) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; @@ -503,6 +504,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev, adapter->flags |= QLCNIC_DEL_VXLAN_PORT; } + +static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev) +{ + return vxlan_gso_check(skb); +} #endif static const struct net_device_ops qlcnic_netdev_ops = { @@ -526,6 +532,7 @@ static const struct net_device_ops qlcnic_netdev_ops = { #ifdef CONFIG_QLCNIC_VXLAN .ndo_add_vxlan_port = qlcnic_add_vxlan_port, .ndo_del_vxlan_port = qlcnic_del_vxlan_port, + .ndo_gso_check = qlcnic_gso_check, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = qlcnic_poll_controller, diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig index f3a47147937d..9a49f42ac2ba 100644 --- a/drivers/net/ethernet/qualcomm/Kconfig +++ b/drivers/net/ethernet/qualcomm/Kconfig @@ -5,7 +5,6 @@ config NET_VENDOR_QUALCOMM bool "Qualcomm devices" default y - depends on SPI_MASTER && OF_GPIO ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -20,7 +19,7 @@ if NET_VENDOR_QUALCOMM config QCA7000 tristate "Qualcomm Atheros QCA7000 support" - depends on SPI_MASTER && OF_GPIO + depends on SPI_MASTER && OF ---help--- This SPI protocol driver supports the Qualcomm Atheros QCA7000. 
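[Editor's example] The netxen hunk above stops the periodic worker from poking the PHY when the firmware can deliver link-change notifications itself. A toy model of that gating (the capability bit value is an assumption for illustration, as is the struct layout):

#include <stdbool.h>
#include <stdio.h>

#define NX_FW_CAPABILITY_LINK_NOTIFICATION (1 << 5) /* illustrative value */

struct adapter {
        unsigned int capabilities;
        bool dev_up;
};

/* Poll the PHY from the timer only while the device is up and the
 * firmware lacks the push-notification capability. */
static bool needs_phy_poll(const struct adapter *a)
{
        return a->dev_up &&
               !(a->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION);
}

int main(void)
{
        struct adapter legacy = { .capabilities = 0, .dev_up = true };
        struct adapter modern = {
                .capabilities = NX_FW_CAPABILITY_LINK_NOTIFICATION,
                .dev_up = true,
        };

        printf("legacy fw: poll=%d\n", needs_phy_poll(&legacy)); /* 1 */
        printf("modern fw: poll=%d\n", needs_phy_poll(&modern)); /* 0 */
        return 0;
}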
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 007b38cce69a..63dc0f95d050 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -112,6 +112,7 @@ #include <linux/io.h> #include <linux/uaccess.h> #include <linux/gfp.h> +#include <linux/if_vlan.h> #include <asm/irq.h> #define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION @@ -182,10 +183,13 @@ static int debug = -1; /* Number of Tx descriptor registers. */ #define NUM_TX_DESC 4 -/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/ -#define MAX_ETH_FRAME_SIZE 1536 +/* max supported ethernet frame size -- must be at least (dev->mtu+18+4).*/ +#define MAX_ETH_FRAME_SIZE 1792 -/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */ +/* max supported payload size */ +#define MAX_ETH_DATA_SIZE (MAX_ETH_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN) + +/* Size of the Tx bounce buffers -- must be at least (dev->mtu+18+4). */ #define TX_BUF_SIZE MAX_ETH_FRAME_SIZE #define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC) @@ -920,11 +924,19 @@ static int rtl8139_set_features(struct net_device *dev, netdev_features_t featur return 0; } +static int rtl8139_change_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu < 68 || new_mtu > MAX_ETH_DATA_SIZE) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + static const struct net_device_ops rtl8139_netdev_ops = { .ndo_open = rtl8139_open, .ndo_stop = rtl8139_close, .ndo_get_stats64 = rtl8139_get_stats64, - .ndo_change_mtu = eth_change_mtu, + .ndo_change_mtu = rtl8139_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = rtl8139_set_mac_address, .ndo_start_xmit = rtl8139_start_xmit, diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h index 040b13739947..32497f0e537c 100644 --- a/drivers/net/ethernet/realtek/atp.h +++ b/drivers/net/ethernet/realtek/atp.h @@ -6,10 +6,10 @@ /* The header prepended to received packets. */ struct rx_header { - ushort pad; /* Pad. */ - ushort rx_count; - ushort rx_status; /* Unknown bit assignments :-<. */ - ushort cur_addr; /* Apparently the current buffer address(?) */ + ushort pad; /* Pad. */ + ushort rx_count; + ushort rx_status; /* Unknown bit assignments :-<. */ + ushort cur_addr; /* Apparently the current buffer address(?) */ }; #define PAR_DATA 0 @@ -29,22 +29,25 @@ struct rx_header { #define RdAddr 0xC0 #define HNib 0x10 -enum page0_regs -{ - /* The first six registers hold the ethernet physical station address. */ - PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5, - TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */ - TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */ - ISR = 10, IMR = 11, /* Interrupt status and mask. */ - CMR1 = 12, /* Command register 1. */ - CMR2 = 13, /* Command register 2. */ - MODSEL = 14, /* Mode select register. */ - MAR = 14, /* Memory address register (?). */ - CMR2_h = 0x1d, }; - -enum eepage_regs -{ PROM_CMD = 6, PROM_DATA = 7 }; /* Note that PROM_CMD is in the "high" bits. */ +enum page0_regs { + /* The first six registers hold + * the ethernet physical station address. + */ + PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5, + TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */ + TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */ + ISR = 10, IMR = 11, /* Interrupt status and mask. */ + CMR1 = 12, /* Command register 1. */ + CMR2 = 13, /* Command register 2. */ + MODSEL = 14, /* Mode select register. 
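[Editor's example] The new rtl8139_change_mtu() above bounds the MTU by the payload that fits in a 1792-byte Tx bounce buffer once the VLAN-tagged Ethernet header and the FCS are subtracted. The arithmetic, checked standalone (header length macros restated here from if_ether.h/if_vlan.h):

#include <stdio.h>
#include <errno.h>

#define ETH_HLEN        14
#define VLAN_HLEN       4
#define VLAN_ETH_HLEN   (ETH_HLEN + VLAN_HLEN)  /* 18 */
#define ETH_FCS_LEN     4

#define MAX_ETH_FRAME_SIZE 1792
#define MAX_ETH_DATA_SIZE  (MAX_ETH_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN)

static int change_mtu(int new_mtu)
{
        if (new_mtu < 68 || new_mtu > MAX_ETH_DATA_SIZE)
                return -EINVAL;
        return 0;
}

int main(void)
{
        printf("MAX_ETH_DATA_SIZE = %d\n", MAX_ETH_DATA_SIZE); /* 1770 */
        printf("mtu 1500 -> %d\n", change_mtu(1500));          /* 0 */
        printf("mtu 1771 -> %d\n", change_mtu(1771));          /* -EINVAL */
        return 0;
}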
*/ + MAR = 14, /* Memory address register (?). */ + CMR2_h = 0x1d, +}; +enum eepage_regs { + PROM_CMD = 6, + PROM_DATA = 7 /* Note that PROM_CMD is in the "high" bits. */ +}; #define ISR_TxOK 0x01 #define ISR_RxOK 0x04 @@ -72,141 +75,146 @@ enum eepage_regs #define CMR2h_Normal 2 /* Accept physical and broadcast address. */ #define CMR2h_PROMISC 3 /* Promiscuous mode. */ -/* An inline function used below: it differs from inb() by explicitly return an unsigned - char, saving a truncation. */ +/* An inline function used below: it differs from inb() by explicitly + * return an unsigned char, saving a truncation. + */ static inline unsigned char inbyte(unsigned short port) { - unsigned char _v; - __asm__ __volatile__ ("inb %w1,%b0" :"=a" (_v):"d" (port)); - return _v; + unsigned char _v; + + __asm__ __volatile__ ("inb %w1,%b0" : "=a" (_v) : "d" (port)); + return _v; } /* Read register OFFSET. - This command should always be terminated with read_end(). */ + * This command should always be terminated with read_end(). + */ static inline unsigned char read_nibble(short port, unsigned char offset) { - unsigned char retval; - outb(EOC+offset, port + PAR_DATA); - outb(RdAddr+offset, port + PAR_DATA); - inbyte(port + PAR_STATUS); /* Settling time delay */ - retval = inbyte(port + PAR_STATUS); - outb(EOC+offset, port + PAR_DATA); - - return retval; + unsigned char retval; + + outb(EOC+offset, port + PAR_DATA); + outb(RdAddr+offset, port + PAR_DATA); + inbyte(port + PAR_STATUS); /* Settling time delay */ + retval = inbyte(port + PAR_STATUS); + outb(EOC+offset, port + PAR_DATA); + + return retval; } /* Functions for bulk data read. The interrupt line is always disabled. */ /* Get a byte using read mode 0, reading data from the control lines. */ static inline unsigned char read_byte_mode0(short ioaddr) { - unsigned char low_nib; - - outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); - inbyte(ioaddr + PAR_STATUS); - low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; - outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL); - inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ - inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ - return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); + unsigned char low_nib; + + outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); + inbyte(ioaddr + PAR_STATUS); + low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; + outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL); + inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ + inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ + return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); } /* The same as read_byte_mode0(), but does multiple inb()s for stability. */ static inline unsigned char read_byte_mode2(short ioaddr) { - unsigned char low_nib; - - outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); - inbyte(ioaddr + PAR_STATUS); - low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; - outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL); - inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ - return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); + unsigned char low_nib; + + outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); + inbyte(ioaddr + PAR_STATUS); + low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; + outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL); + inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ + return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); } /* Read a byte through the data register. 
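[Editor's example] read_byte_mode0() and read_byte_mode2() above assemble one byte from two status-port reads: the adapter presents each nibble on status bits 3..6, so the low half comes out as (status >> 3) & 0x0f and the high half as (status << 1) & 0xf0. The same arithmetic, simulated standalone without any port I/O:

#include <stdio.h>

static unsigned char low_from_status(unsigned char status)
{
        return (status >> 3) & 0x0f;
}

static unsigned char high_from_status(unsigned char status)
{
        return (status << 1) & 0xf0;
}

int main(void)
{
        unsigned char status_lo = 0xA << 3; /* device drives nibble 0xA onto bits 3..6 */
        unsigned char status_hi = 0x5 << 3; /* then nibble 0x5 */
        unsigned char byte = low_from_status(status_lo) |
                             high_from_status(status_hi);

        printf("reassembled byte: 0x%02X\n", byte); /* 0x5A */
        return 0;
}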
*/ static inline unsigned char read_byte_mode4(short ioaddr) { - unsigned char low_nib; + unsigned char low_nib; - outb(RdAddr | MAR, ioaddr + PAR_DATA); - low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; - outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA); - return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); + outb(RdAddr | MAR, ioaddr + PAR_DATA); + low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; + outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA); + return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); } /* Read a byte through the data register, double reading to allow settling. */ static inline unsigned char read_byte_mode6(short ioaddr) { - unsigned char low_nib; - - outb(RdAddr | MAR, ioaddr + PAR_DATA); - inbyte(ioaddr + PAR_STATUS); - low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; - outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA); - inbyte(ioaddr + PAR_STATUS); - return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); + unsigned char low_nib; + + outb(RdAddr | MAR, ioaddr + PAR_DATA); + inbyte(ioaddr + PAR_STATUS); + low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; + outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA); + inbyte(ioaddr + PAR_STATUS); + return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); } static inline void write_reg(short port, unsigned char reg, unsigned char value) { - unsigned char outval; - outb(EOC | reg, port + PAR_DATA); - outval = WrAddr | reg; - outb(outval, port + PAR_DATA); - outb(outval, port + PAR_DATA); /* Double write for PS/2. */ - - outval &= 0xf0; - outval |= value; - outb(outval, port + PAR_DATA); - outval &= 0x1f; - outb(outval, port + PAR_DATA); - outb(outval, port + PAR_DATA); - - outb(EOC | outval, port + PAR_DATA); + unsigned char outval; + + outb(EOC | reg, port + PAR_DATA); + outval = WrAddr | reg; + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); /* Double write for PS/2. */ + + outval &= 0xf0; + outval |= value; + outb(outval, port + PAR_DATA); + outval &= 0x1f; + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); + + outb(EOC | outval, port + PAR_DATA); } static inline void write_reg_high(short port, unsigned char reg, unsigned char value) { - unsigned char outval = EOC | HNib | reg; + unsigned char outval = EOC | HNib | reg; - outb(outval, port + PAR_DATA); - outval &= WrAddr | HNib | 0x0f; - outb(outval, port + PAR_DATA); - outb(outval, port + PAR_DATA); /* Double write for PS/2. */ + outb(outval, port + PAR_DATA); + outval &= WrAddr | HNib | 0x0f; + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); /* Double write for PS/2. */ - outval = WrAddr | HNib | value; - outb(outval, port + PAR_DATA); - outval &= HNib | 0x0f; /* HNib | value */ - outb(outval, port + PAR_DATA); - outb(outval, port + PAR_DATA); + outval = WrAddr | HNib | value; + outb(outval, port + PAR_DATA); + outval &= HNib | 0x0f; /* HNib | value */ + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); - outb(EOC | HNib | outval, port + PAR_DATA); + outb(EOC | HNib | outval, port + PAR_DATA); } /* Write a byte out using nibble mode. The low nibble is written first. */ static inline void write_reg_byte(short port, unsigned char reg, unsigned char value) { - unsigned char outval; - outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */ - outval = WrAddr | reg; - outb(outval, port + PAR_DATA); - outb(outval, port + PAR_DATA); /* Double write for PS/2. 
*/ - - outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA); - outb(value & 0x0f, port + PAR_DATA); - value >>= 4; - outb(value, port + PAR_DATA); - outb(0x10 | value, port + PAR_DATA); - outb(0x10 | value, port + PAR_DATA); - - outb(EOC | value, port + PAR_DATA); /* Reset the address register. */ + unsigned char outval; + + outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */ + outval = WrAddr | reg; + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); /* Double write for PS/2. */ + + outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA); + outb(value & 0x0f, port + PAR_DATA); + value >>= 4; + outb(value, port + PAR_DATA); + outb(0x10 | value, port + PAR_DATA); + outb(0x10 | value, port + PAR_DATA); + + outb(EOC | value, port + PAR_DATA); /* Reset the address register. */ } -/* - * Bulk data writes to the packet buffer. The interrupt line remains enabled. +/* Bulk data writes to the packet buffer. The interrupt line remains enabled. * The first, faster method uses only the dataport (data modes 0, 2 & 4). * The second (backup) method uses data and control regs (modes 1, 3 & 5). * It should only be needed when there is skew between the individual data @@ -214,28 +222,28 @@ write_reg_byte(short port, unsigned char reg, unsigned char value) */ static inline void write_byte_mode0(short ioaddr, unsigned char value) { - outb(value & 0x0f, ioaddr + PAR_DATA); - outb((value>>4) | 0x10, ioaddr + PAR_DATA); + outb(value & 0x0f, ioaddr + PAR_DATA); + outb((value>>4) | 0x10, ioaddr + PAR_DATA); } static inline void write_byte_mode1(short ioaddr, unsigned char value) { - outb(value & 0x0f, ioaddr + PAR_DATA); - outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL); - outb((value>>4) | 0x10, ioaddr + PAR_DATA); - outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL); + outb(value & 0x0f, ioaddr + PAR_DATA); + outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL); + outb((value>>4) | 0x10, ioaddr + PAR_DATA); + outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL); } /* Write 16bit VALUE to the packet buffer: the same as above just doubled. */ static inline void write_word_mode0(short ioaddr, unsigned short value) { - outb(value & 0x0f, ioaddr + PAR_DATA); - value >>= 4; - outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA); - value >>= 4; - outb(value & 0x0f, ioaddr + PAR_DATA); - value >>= 4; - outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA); + outb(value & 0x0f, ioaddr + PAR_DATA); + value >>= 4; + outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA); + value >>= 4; + outb(value & 0x0f, ioaddr + PAR_DATA); + value >>= 4; + outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA); } /* EEPROM_Ctrl bits. */ @@ -248,10 +256,10 @@ static inline void write_word_mode0(short ioaddr, unsigned short value) /* Delay between EEPROM clock transitions. */ #define eeprom_delay(ticks) \ -do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0) +do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; } } while (0) /* The EEPROM commands include the alway-set leading bit. */ #define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17) -#define EE_READ(offset) (((6 << 6) + (offset)) << 17) +#define EE_READ(offset) (((6 << 6) + (offset)) << 17) #define EE_ERASE(offset) (((7 << 6) + (offset)) << 17) #define EE_CMD_SIZE 27 /* The command+address+data size. 
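[Editor's example] The EE_* macros above place the 3-bit opcode (its leading start bit always set, as the comment notes) and the 6-bit address in the top bits of a 27-bit frame that is clocked out MSB first, leaving the low bits free for data. A standalone peek at one command word:

#include <stdio.h>

#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
#define EE_CMD_SIZE 27  /* The command+address+data size. */

int main(void)
{
        unsigned int cmd = EE_READ(0x12);
        int i;

        printf("EE_READ(0x12) = 0x%07x, shifted out as:\n", cmd);
        for (i = EE_CMD_SIZE - 1; i >= 0; i--)
                putchar((cmd >> i) & 1 ? '1' : '0');
        putchar('\n'); /* a pad 0, "110" (start bit + read op), address 010010, zero tail */
        return 0;
}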
*/ diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index cf154f74cba1..b9c2f33b463d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1377,6 +1377,16 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond) return RTL_R8(IBISR0) & 0x02; } +static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); + rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000); + RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); + RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); +} + static void rtl8168dp_driver_start(struct rtl8169_private *tp) { rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); @@ -1417,12 +1427,7 @@ static void rtl8168dp_driver_stop(struct rtl8169_private *tp) static void rtl8168ep_driver_stop(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); - rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000); - RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); - RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); + rtl8168ep_stop_cmac(tp); ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01); rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10); @@ -5934,7 +5939,6 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC); - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); RTL_W8(MaxTxPacketSize, EarlySize); @@ -6027,7 +6031,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); RTL_W8(MaxTxPacketSize, EarlySize); @@ -6091,6 +6094,8 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; + rtl8168ep_stop_cmac(tp); + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); @@ -6109,7 +6114,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); RTL_W8(MaxTxPacketSize, EarlySize); @@ -6832,14 +6836,6 @@ err_out: return -EIO; } -static bool rtl_skb_pad(struct sk_buff *skb) -{ - if (skb_padto(skb, ETH_ZLEN)) - return false; - skb_put(skb, ETH_ZLEN - skb->len); - return true; -} - static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) { return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; @@ -6980,7 +6976,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, u8 ip_protocol; if (unlikely(rtl_test_hw_pad_bug(tp, skb))) - return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb); + return !(skb_checksum_help(skb) || eth_skb_pad(skb)); if (transport_offset > TCPHO_MAX) { netif_warn(tp, tx_err, tp->dev, @@ -7015,7 +7011,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, opts[1] |= transport_offset << TCPHO_SHIFT; } else { if (unlikely(rtl_test_hw_pad_bug(tp, skb))) - return rtl_skb_pad(skb); + return !eth_skb_pad(skb); } return true; @@ -8005,6 +8001,12 @@ static void rtl_hw_init_8168g(struct rtl8169_private *tp) return; } +static void rtl_hw_init_8168ep(struct rtl8169_private *tp) +{ + 
rtl8168ep_stop_cmac(tp); + rtl_hw_init_8168g(tp); +} + static void rtl_hw_initialize(struct rtl8169_private *tp) { switch (tp->mac_version) { @@ -8017,12 +8019,13 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: + rtl_hw_init_8168g(tp); + break; case RTL_GIGA_MAC_VER_49: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: - rtl_hw_init_8168g(tp); + rtl_hw_init_8168ep(tp); break; - default: break; } diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 60e9c2cd051e..ad2e285aefd4 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1,5 +1,6 @@ /* SuperH Ethernet device driver * + * Copyright (C) 2014 Renesas Electronics Corporation * Copyright (C) 2006-2012 Nobuhiro Iwamatsu * Copyright (C) 2008-2014 Renesas Solutions Corp. * Copyright (C) 2013-2014 Cogent Embedded, Inc. @@ -1141,7 +1142,7 @@ static void sh_eth_ring_format(struct net_device *ndev) /* RX descriptor */ rxdesc = &mdp->rx_ring[i]; - rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); + rxdesc->addr = virt_to_phys(skb->data); rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); /* The size of the buffer is 16 byte boundary. */ @@ -1394,10 +1395,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) int entry = mdp->cur_rx % mdp->num_rx_ring; int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; + int limit; struct sk_buff *skb; u16 pkt_len = 0; u32 desc_status; + boguscnt = min(boguscnt, *quota); + limit = boguscnt; rxdesc = &mdp->rx_ring[entry]; while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { desc_status = edmac_to_cpu(mdp, rxdesc->status); @@ -1406,11 +1410,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) if (--boguscnt < 0) break; - if (*quota <= 0) - break; - - (*quota)--; - if (!(desc_status & RDFEND)) ndev->stats.rx_length_errors++; @@ -1477,7 +1476,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) sh_eth_set_receive_align(skb); skb_checksum_none_assert(skb); - rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); + rxdesc->addr = virt_to_phys(skb->data); } if (entry >= mdp->num_rx_ring - 1) rxdesc->status |= @@ -1501,6 +1500,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) sh_eth_write(ndev, EDRRR_R, EDRRR); } + *quota -= limit - boguscnt - 1; + return *quota <= 0; } @@ -2746,6 +2747,7 @@ static const struct of_device_id sh_eth_match_table[] = { { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data }, { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data }, { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data }, + { .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data }, { .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data }, { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, { } @@ -2769,10 +2771,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* get base addr */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (unlikely(res == NULL)) { - dev_err(&pdev->dev, "invalid resource\n"); - return -EINVAL; - } ndev = alloc_etherdev(sizeof(struct sh_eth_private)); if (!ndev) @@ -2781,8 +2779,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); - /* The sh Ether-specific entries in the device structure. 
*/ - ndev->base_addr = res->start; devno = pdev->id; if (devno < 0) devno = 0; @@ -2806,6 +2802,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev) goto out_release; } + ndev->base_addr = res->start; + spin_lock_init(&mdp->lock); mdp->pdev = pdev; @@ -2887,6 +2885,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev) } } + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + /* MDIO bus init */ ret = sh_mdio_init(mdp, pd); if (ret) { @@ -2973,6 +2974,7 @@ static struct platform_device_id sh_eth_id_table[] = { { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data }, { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data }, { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data }, + { "r8a7793-ether", (kernel_ulong_t)&r8a779x_data }, { "r8a7794-ether", (kernel_ulong_t)&r8a779x_data }, { } }; diff --git a/drivers/net/ethernet/rocker/Kconfig b/drivers/net/ethernet/rocker/Kconfig new file mode 100644 index 000000000000..b9952ef040e4 --- /dev/null +++ b/drivers/net/ethernet/rocker/Kconfig @@ -0,0 +1,27 @@ +# +# Rocker device configuration +# + +config NET_VENDOR_ROCKER + bool "Rocker devices" + default y + ---help--- + If you have a network device belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Rocker devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_ROCKER + +config ROCKER + tristate "Rocker switch driver (EXPERIMENTAL)" + depends on PCI && NET_SWITCHDEV && BRIDGE + ---help--- + This driver supports Rocker switch device. + + To compile this driver as a module, choose M here: the + module will be called rocker. + +endif # NET_VENDOR_ROCKER diff --git a/drivers/net/ethernet/rocker/Makefile b/drivers/net/ethernet/rocker/Makefile new file mode 100644 index 000000000000..f85fb12f36f1 --- /dev/null +++ b/drivers/net/ethernet/rocker/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Rocker network device drivers. +# + +obj-$(CONFIG_ROCKER) += rocker.o diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c new file mode 100644 index 000000000000..55364359b868 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker.c @@ -0,0 +1,4391 @@ +/* + * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver + * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> + * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/spinlock.h> +#include <linux/hashtable.h> +#include <linux/crc32.h> +#include <linux/sort.h> +#include <linux/random.h> +#include <linux/netdevice.h> +#include <linux/inetdevice.h> +#include <linux/skbuff.h> +#include <linux/socket.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/if_bridge.h> +#include <linux/bitops.h> +#include <net/switchdev.h> +#include <net/rtnetlink.h> +#include <asm-generic/io-64-nonatomic-lo-hi.h> +#include <generated/utsrelease.h> + +#include "rocker.h" + +static const char rocker_driver_name[] = "rocker"; + +static const struct pci_device_id rocker_pci_id_table[] = { + {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0}, + {0, } +}; + +struct rocker_flow_tbl_key { + u32 priority; + enum rocker_of_dpa_table_id tbl_id; + union { + struct { + u32 in_lport; + u32 in_lport_mask; + enum rocker_of_dpa_table_id goto_tbl; + } ig_port; + struct { + u32 in_lport; + __be16 vlan_id; + __be16 vlan_id_mask; + enum rocker_of_dpa_table_id goto_tbl; + bool untagged; + __be16 new_vlan_id; + } vlan; + struct { + u32 in_lport; + u32 in_lport_mask; + __be16 eth_type; + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + __be16 vlan_id; + __be16 vlan_id_mask; + enum rocker_of_dpa_table_id goto_tbl; + bool copy_to_cpu; + } term_mac; + struct { + __be16 eth_type; + __be32 dst4; + __be32 dst4_mask; + enum rocker_of_dpa_table_id goto_tbl; + u32 group_id; + } ucast_routing; + struct { + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + int has_eth_dst; + int has_eth_dst_mask; + __be16 vlan_id; + u32 tunnel_id; + enum rocker_of_dpa_table_id goto_tbl; + u32 group_id; + bool copy_to_cpu; + } bridge; + struct { + u32 in_lport; + u32 in_lport_mask; + u8 eth_src[ETH_ALEN]; + u8 eth_src_mask[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + __be16 eth_type; + __be16 vlan_id; + __be16 vlan_id_mask; + u8 ip_proto; + u8 ip_proto_mask; + u8 ip_tos; + u8 ip_tos_mask; + u32 group_id; + } acl; + }; +}; + +struct rocker_flow_tbl_entry { + struct hlist_node entry; + u32 ref_count; + u64 cookie; + struct rocker_flow_tbl_key key; + u32 key_crc32; /* key */ +}; + +struct rocker_group_tbl_entry { + struct hlist_node entry; + u32 cmd; + u32 group_id; /* key */ + u16 group_count; + u32 *group_ids; + union { + struct { + u8 pop_vlan; + } l2_interface; + struct { + u8 eth_src[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + __be16 vlan_id; + u32 group_id; + } l2_rewrite; + struct { + u8 eth_src[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + __be16 vlan_id; + bool ttl_check; + u32 group_id; + } l3_unicast; + }; +}; + +struct rocker_fdb_tbl_entry { + struct hlist_node entry; + u32 key_crc32; /* key */ + bool learned; + struct rocker_fdb_tbl_key { + u32 lport; + u8 addr[ETH_ALEN]; + __be16 vlan_id; + } key; +}; + +struct rocker_internal_vlan_tbl_entry { + struct hlist_node entry; + int ifindex; /* key */ + u32 ref_count; + __be16 vlan_id; +}; + +struct rocker_desc_info { + char *data; /* mapped */ + size_t data_size; + size_t tlv_size; + struct rocker_desc *desc; + DEFINE_DMA_UNMAP_ADDR(mapaddr); +}; + +struct rocker_dma_ring_info { + size_t size; + u32 head; + u32 tail; + struct rocker_desc *desc; /* mapped */ + dma_addr_t mapaddr; + struct rocker_desc_info *desc_info; + unsigned int type; +}; + +struct rocker; + +enum { + 
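+ /* Classes of well-known control traffic; which of them are
+ * enabled per port is tracked in rocker_port->ctrls[] below.
+ */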
ROCKER_CTRL_LINK_LOCAL_MCAST, + ROCKER_CTRL_LOCAL_ARP, + ROCKER_CTRL_IPV4_MCAST, + ROCKER_CTRL_IPV6_MCAST, + ROCKER_CTRL_DFLT_BRIDGING, + ROCKER_CTRL_MAX, +}; + +#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00 +#define ROCKER_N_INTERNAL_VLANS 255 +#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID) +#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS) + +struct rocker_port { + struct net_device *dev; + struct net_device *bridge_dev; + struct rocker *rocker; + unsigned int port_number; + u32 lport; + __be16 internal_vlan_id; + int stp_state; + u32 brport_flags; + bool ctrls[ROCKER_CTRL_MAX]; + unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN]; + struct napi_struct napi_tx; + struct napi_struct napi_rx; + struct rocker_dma_ring_info tx_ring; + struct rocker_dma_ring_info rx_ring; +}; + +struct rocker { + struct pci_dev *pdev; + u8 __iomem *hw_addr; + struct msix_entry *msix_entries; + unsigned int port_count; + struct rocker_port **ports; + struct { + u64 id; + } hw; + spinlock_t cmd_ring_lock; + struct rocker_dma_ring_info cmd_ring; + struct rocker_dma_ring_info event_ring; + DECLARE_HASHTABLE(flow_tbl, 16); + spinlock_t flow_tbl_lock; + u64 flow_tbl_next_cookie; + DECLARE_HASHTABLE(group_tbl, 16); + spinlock_t group_tbl_lock; + DECLARE_HASHTABLE(fdb_tbl, 16); + spinlock_t fdb_tbl_lock; + unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN]; + DECLARE_HASHTABLE(internal_vlan_tbl, 8); + spinlock_t internal_vlan_tbl_lock; +}; + +static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; +static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; +static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 }; +static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; +static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; +static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; + +/* Rocker priority levels for flow table entries. Higher + * priority match takes precedence over lower priority match. 
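+ * Concrete entries outrank the default entries of their table (for
+ * example ROCKER_PRIORITY_BRIDGING_VLAN at 3 over its DFLT entries
+ * at 1 and 2), and control-frame ACL entries outrank normal and
+ * default ACL matches.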
+ */ + +enum { + ROCKER_PRIORITY_UNKNOWN = 0, + ROCKER_PRIORITY_IG_PORT = 1, + ROCKER_PRIORITY_VLAN = 1, + ROCKER_PRIORITY_TERM_MAC_UCAST = 0, + ROCKER_PRIORITY_TERM_MAC_MCAST = 1, + ROCKER_PRIORITY_UNICAST_ROUTING = 1, + ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1, + ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2, + ROCKER_PRIORITY_BRIDGING_VLAN = 3, + ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1, + ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2, + ROCKER_PRIORITY_BRIDGING_TENANT = 3, + ROCKER_PRIORITY_ACL_CTRL = 3, + ROCKER_PRIORITY_ACL_NORMAL = 2, + ROCKER_PRIORITY_ACL_DFLT = 1, +}; + +static bool rocker_vlan_id_is_internal(__be16 vlan_id) +{ + u16 start = ROCKER_INTERNAL_VLAN_ID_BASE; + u16 end = 0xffe; + u16 _vlan_id = ntohs(vlan_id); + + return (_vlan_id >= start && _vlan_id <= end); +} + +static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port, + u16 vid, bool *pop_vlan) +{ + __be16 vlan_id; + + if (pop_vlan) + *pop_vlan = false; + vlan_id = htons(vid); + if (!vlan_id) { + vlan_id = rocker_port->internal_vlan_id; + if (pop_vlan) + *pop_vlan = true; + } + + return vlan_id; +} + +static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port, + __be16 vlan_id) +{ + if (rocker_vlan_id_is_internal(vlan_id)) + return 0; + + return ntohs(vlan_id); +} + +static bool rocker_port_is_bridged(struct rocker_port *rocker_port) +{ + return !!rocker_port->bridge_dev; +} + +struct rocker_wait { + wait_queue_head_t wait; + bool done; + bool nowait; +}; + +static void rocker_wait_reset(struct rocker_wait *wait) +{ + wait->done = false; + wait->nowait = false; +} + +static void rocker_wait_init(struct rocker_wait *wait) +{ + init_waitqueue_head(&wait->wait); + rocker_wait_reset(wait); +} + +static struct rocker_wait *rocker_wait_create(gfp_t gfp) +{ + struct rocker_wait *wait; + + wait = kmalloc(sizeof(*wait), gfp); + if (!wait) + return NULL; + rocker_wait_init(wait); + return wait; +} + +static void rocker_wait_destroy(struct rocker_wait *wait) +{ + kfree(wait); +} + +static bool rocker_wait_event_timeout(struct rocker_wait *wait, + unsigned long timeout) +{ + wait_event_timeout(wait->wait, wait->done, timeout); + if (!wait->done) + return false; + return true; +} + +static void rocker_wait_wake_up(struct rocker_wait *wait) +{ + wait->done = true; + wake_up(&wait->wait); +} + +static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector) +{ + return rocker->msix_entries[vector].vector; +} + +static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port) +{ + return rocker_msix_vector(rocker_port->rocker, + ROCKER_MSIX_VEC_TX(rocker_port->port_number)); +} + +static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port) +{ + return rocker_msix_vector(rocker_port->rocker, + ROCKER_MSIX_VEC_RX(rocker_port->port_number)); +} + +#define rocker_write32(rocker, reg, val) \ + writel((val), (rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_read32(rocker, reg) \ + readl((rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_write64(rocker, reg, val) \ + writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_read64(rocker, reg) \ + readq((rocker)->hw_addr + (ROCKER_ ## reg)) + +/***************************** + * HW basic testing functions + *****************************/ + +static int rocker_reg_test(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + u64 test_reg; + u64 rnd; + + rnd = prandom_u32(); + rnd >>= 1; + rocker_write32(rocker, TEST_REG, rnd); + test_reg = rocker_read32(rocker, TEST_REG); + if (test_reg != rnd * 2) {
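+ /* The test register is expected to read back exactly double
+ * the value written (an assumption about the rocker qemu
+ * device model, inferred from this check).
+ */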
+ dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n", + test_reg, rnd * 2); + return -EIO; + } + + rnd = prandom_u32(); + rnd <<= 31; + rnd |= prandom_u32(); + rocker_write64(rocker, TEST_REG64, rnd); + test_reg = rocker_read64(rocker, TEST_REG64); + if (test_reg != rnd * 2) { + dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n", + test_reg, rnd * 2); + return -EIO; + } + + return 0; +} + +static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait, + u32 test_type, dma_addr_t dma_handle, + unsigned char *buf, unsigned char *expect, + size_t size) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + + rocker_wait_reset(wait); + rocker_write32(rocker, TEST_DMA_CTRL, test_type); + + if (!rocker_wait_event_timeout(wait, HZ / 10)) { + dev_err(&pdev->dev, "no interrupt received within a timeout\n"); + return -EIO; + } + + for (i = 0; i < size; i++) { + if (buf[i] != expect[i]) { + dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n", + buf[i], i, expect[i]); + return -EIO; + } + } + return 0; +} + +#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4) +#define ROCKER_TEST_DMA_FILL_PATTERN 0x96 + +static int rocker_dma_test_offset(struct rocker *rocker, + struct rocker_wait *wait, int offset) +{ + struct pci_dev *pdev = rocker->pdev; + unsigned char *alloc; + unsigned char *buf; + unsigned char *expect; + dma_addr_t dma_handle; + int i; + int err; + + alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset, + GFP_KERNEL | GFP_DMA); + if (!alloc) + return -ENOMEM; + buf = alloc + offset; + expect = buf + ROCKER_TEST_DMA_BUF_SIZE; + + dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(pdev, dma_handle)) { + err = -EIO; + goto free_alloc; + } + + rocker_write64(rocker, TEST_DMA_ADDR, dma_handle); + rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE); + + memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE); + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + + memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE); + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + + prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE); + for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++) + expect[i] = ~buf[i]; + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + +unmap: + pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); +free_alloc: + kfree(alloc); + + return err; +} + +static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait) +{ + int i; + int err; + + for (i = 0; i < 8; i++) { + err = rocker_dma_test_offset(rocker, wait, i); + if (err) + return err; + } + return 0; +} + +static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id) +{ + struct rocker_wait *wait = dev_id; + + rocker_wait_wake_up(wait); + + return IRQ_HANDLED; +} + +static int rocker_basic_hw_test(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + struct rocker_wait wait; + int err; + + err = rocker_reg_test(rocker); + if (err) { + dev_err(&pdev->dev, "reg test failed\n"); + return err; + } + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), + rocker_test_irq_handler, 0, + 
rocker_driver_name, &wait); + if (err) { + dev_err(&pdev->dev, "cannot assign test irq\n"); + return err; + } + + rocker_wait_init(&wait); + rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST); + + if (!rocker_wait_event_timeout(&wait, HZ / 10)) { + dev_err(&pdev->dev, "no interrupt received within a timeout\n"); + err = -EIO; + goto free_irq; + } + + err = rocker_dma_test(rocker, &wait); + if (err) + dev_err(&pdev->dev, "dma test failed\n"); + +free_irq: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait); + return err; +} + +/****** + * TLV + ******/ + +#define ROCKER_TLV_ALIGNTO 8U +#define ROCKER_TLV_ALIGN(len) \ + (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1)) +#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv)) + +/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) ---> + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * | Header | Pad | Payload | Pad | + * | (struct rocker_tlv) | ing | | ing | + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * <--------------------------- tlv->len --------------------------> + */ + +static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv, + int *remaining) +{ + int totlen = ROCKER_TLV_ALIGN(tlv->len); + + *remaining -= totlen; + return (struct rocker_tlv *) ((char *) tlv + totlen); +} + +static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining) +{ + return remaining >= (int) ROCKER_TLV_HDRLEN && + tlv->len >= ROCKER_TLV_HDRLEN && + tlv->len <= remaining; +} + +#define rocker_tlv_for_each(pos, head, len, rem) \ + for (pos = head, rem = len; \ + rocker_tlv_ok(pos, rem); \ + pos = rocker_tlv_next(pos, &(rem))) + +#define rocker_tlv_for_each_nested(pos, tlv, rem) \ + rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \ + rocker_tlv_len(tlv), rem) + +static int rocker_tlv_attr_size(int payload) +{ + return ROCKER_TLV_HDRLEN + payload; +} + +static int rocker_tlv_total_size(int payload) +{ + return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload)); +} + +static int rocker_tlv_padlen(int payload) +{ + return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload); +} + +static int rocker_tlv_type(const struct rocker_tlv *tlv) +{ + return tlv->type; +} + +static void *rocker_tlv_data(const struct rocker_tlv *tlv) +{ + return (char *) tlv + ROCKER_TLV_HDRLEN; +} + +static int rocker_tlv_len(const struct rocker_tlv *tlv) +{ + return tlv->len - ROCKER_TLV_HDRLEN; +} + +static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv) +{ + return *(u8 *) rocker_tlv_data(tlv); +} + +static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv) +{ + return *(u16 *) rocker_tlv_data(tlv); +} + +static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv) +{ + return *(__be16 *) rocker_tlv_data(tlv); +} + +static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv) +{ + return *(u32 *) rocker_tlv_data(tlv); +} + +static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv) +{ + return *(u64 *) rocker_tlv_data(tlv); +} + +static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype, + const char *buf, int buf_len) +{ + const struct rocker_tlv *tlv; + const struct rocker_tlv *head = (const struct rocker_tlv *) buf; + int rem; + + memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1)); + + rocker_tlv_for_each(tlv, head, buf_len, rem) { + u32 type = rocker_tlv_type(tlv); + + if (type > 0 && type <= maxtype) + tb[type] = (struct rocker_tlv *) tlv; + } +} + +static void rocker_tlv_parse_nested(struct rocker_tlv 
**tb, int maxtype, + const struct rocker_tlv *tlv) +{ + rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv), + rocker_tlv_len(tlv)); +} + +static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype, + struct rocker_desc_info *desc_info) +{ + rocker_tlv_parse(tb, maxtype, desc_info->data, + desc_info->desc->tlv_size); +} + +static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info) +{ + return (struct rocker_tlv *) ((char *) desc_info->data + + desc_info->tlv_size); +} + +static int rocker_tlv_put(struct rocker_desc_info *desc_info, + int attrtype, int attrlen, const void *data) +{ + int tail_room = desc_info->data_size - desc_info->tlv_size; + int total_size = rocker_tlv_total_size(attrlen); + struct rocker_tlv *tlv; + + if (unlikely(tail_room < total_size)) + return -EMSGSIZE; + + tlv = rocker_tlv_start(desc_info); + desc_info->tlv_size += total_size; + tlv->type = attrtype; + tlv->len = rocker_tlv_attr_size(attrlen); + memcpy(rocker_tlv_data(tlv), data, attrlen); + memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen)); + return 0; +} + +static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info, + int attrtype, u8 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); +} + +static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, + int attrtype, u16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); +} + +static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, + int attrtype, __be16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); +} + +static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, + int attrtype, u32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); +} + +static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, + int attrtype, __be32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); +} + +static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, + int attrtype, u64 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); +} + +static struct rocker_tlv * +rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype) +{ + struct rocker_tlv *start = rocker_tlv_start(desc_info); + + if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0) + return NULL; + + return start; +} + +static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info, + struct rocker_tlv *start) +{ + start->len = (char *) rocker_tlv_start(desc_info) - (char *) start; +} + +static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info, + struct rocker_tlv *start) +{ + desc_info->tlv_size = (char *) start - desc_info->data; +} + +/****************************************** + * DMA rings and descriptors manipulations + ******************************************/ + +static u32 __pos_inc(u32 pos, size_t limit) +{ + return ++pos == limit ? 0 : pos; +} + +static int rocker_desc_err(struct rocker_desc_info *desc_info) +{ + return -(desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN); +} + +static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info) +{ + desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN; +} + +static bool rocker_desc_gen(struct rocker_desc_info *desc_info) +{ + u32 comp_err = desc_info->desc->comp_err; + + return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? 
true : false; +} + +static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info) +{ + return (void *) desc_info->desc->cookie; +} + +static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info, + void *ptr) +{ + desc_info->desc->cookie = (long) ptr; +} + +static struct rocker_desc_info * +rocker_desc_head_get(struct rocker_dma_ring_info *info) +{ + struct rocker_desc_info *desc_info; + u32 head = __pos_inc(info->head, info->size); + + desc_info = &info->desc_info[info->head]; + if (head == info->tail) + return NULL; /* ring full */ + desc_info->tlv_size = 0; + return desc_info; +} + +static void rocker_desc_commit(struct rocker_desc_info *desc_info) +{ + desc_info->desc->buf_size = desc_info->data_size; + desc_info->desc->tlv_size = desc_info->tlv_size; +} + +static void rocker_desc_head_set(struct rocker *rocker, + struct rocker_dma_ring_info *info, + struct rocker_desc_info *desc_info) +{ + u32 head = __pos_inc(info->head, info->size); + + BUG_ON(head == info->tail); + rocker_desc_commit(desc_info); + info->head = head; + rocker_write32(rocker, DMA_DESC_HEAD(info->type), head); +} + +static struct rocker_desc_info * +rocker_desc_tail_get(struct rocker_dma_ring_info *info) +{ + struct rocker_desc_info *desc_info; + + if (info->tail == info->head) + return NULL; /* nothing to be done between head and tail */ + desc_info = &info->desc_info[info->tail]; + if (!rocker_desc_gen(desc_info)) + return NULL; /* gen bit not set, desc is not ready yet */ + info->tail = __pos_inc(info->tail, info->size); + desc_info->tlv_size = desc_info->desc->tlv_size; + return desc_info; +} + +static void rocker_dma_ring_credits_set(struct rocker *rocker, + struct rocker_dma_ring_info *info, + u32 credits) +{ + if (credits) + rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits); +} + +static unsigned long rocker_dma_ring_size_fix(size_t size) +{ + return max(ROCKER_DMA_SIZE_MIN, + min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX)); +} + +static int rocker_dma_ring_create(struct rocker *rocker, + unsigned int type, + size_t size, + struct rocker_dma_ring_info *info) +{ + int i; + + BUG_ON(size != rocker_dma_ring_size_fix(size)); + info->size = size; + info->type = type; + info->head = 0; + info->tail = 0; + info->desc_info = kcalloc(info->size, sizeof(*info->desc_info), + GFP_KERNEL); + if (!info->desc_info) + return -ENOMEM; + + info->desc = pci_alloc_consistent(rocker->pdev, + info->size * sizeof(*info->desc), + &info->mapaddr); + if (!info->desc) { + kfree(info->desc_info); + return -ENOMEM; + } + + for (i = 0; i < info->size; i++) + info->desc_info[i].desc = &info->desc[i]; + + rocker_write32(rocker, DMA_DESC_CTRL(info->type), + ROCKER_DMA_DESC_CTRL_RESET); + rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr); + rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size); + + return 0; +} + +static void rocker_dma_ring_destroy(struct rocker *rocker, + struct rocker_dma_ring_info *info) +{ + rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0); + + pci_free_consistent(rocker->pdev, + info->size * sizeof(struct rocker_desc), + info->desc, info->mapaddr); + kfree(info->desc_info); +} + +static void rocker_dma_ring_pass_to_producer(struct rocker *rocker, + struct rocker_dma_ring_info *info) +{ + int i; + + BUG_ON(info->head || info->tail); + + /* When ring is consumer, we need to advance head for each desc. + * That tells hw that the desc is ready to be used by it.
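+ * Only size - 1 descriptors are handed over; one slot is left
+ * unused so that head == tail still means "empty" while a full
+ * ring shows up as head + 1 == tail (see rocker_desc_head_get()).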
+ */ + for (i = 0; i < info->size - 1; i++) + rocker_desc_head_set(rocker, info, &info->desc_info[i]); + rocker_desc_commit(&info->desc_info[i]); +} + +static int rocker_dma_ring_bufs_alloc(struct rocker *rocker, + struct rocker_dma_ring_info *info, + int direction, size_t buf_size) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + int err; + + for (i = 0; i < info->size; i++) { + struct rocker_desc_info *desc_info = &info->desc_info[i]; + struct rocker_desc *desc = &info->desc[i]; + dma_addr_t dma_handle; + char *buf; + + buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA); + if (!buf) { + err = -ENOMEM; + goto rollback; + } + + dma_handle = pci_map_single(pdev, buf, buf_size, direction); + if (pci_dma_mapping_error(pdev, dma_handle)) { + kfree(buf); + err = -EIO; + goto rollback; + } + + desc_info->data = buf; + desc_info->data_size = buf_size; + dma_unmap_addr_set(desc_info, mapaddr, dma_handle); + + desc->buf_addr = dma_handle; + desc->buf_size = buf_size; + } + return 0; + +rollback: + for (i--; i >= 0; i--) { + struct rocker_desc_info *desc_info = &info->desc_info[i]; + + pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), + desc_info->data_size, direction); + kfree(desc_info->data); + } + return err; +} + +static void rocker_dma_ring_bufs_free(struct rocker *rocker, + struct rocker_dma_ring_info *info, + int direction) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + + for (i = 0; i < info->size; i++) { + struct rocker_desc_info *desc_info = &info->desc_info[i]; + struct rocker_desc *desc = &info->desc[i]; + + desc->buf_addr = 0; + desc->buf_size = 0; + pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), + desc_info->data_size, direction); + kfree(desc_info->data); + } +} + +static int rocker_dma_rings_init(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + int err; + + err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD, + ROCKER_DMA_CMD_DEFAULT_SIZE, + &rocker->cmd_ring); + if (err) { + dev_err(&pdev->dev, "failed to create command dma ring\n"); + return err; + } + + spin_lock_init(&rocker->cmd_ring_lock); + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL, PAGE_SIZE); + if (err) { + dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n"); + goto err_dma_cmd_ring_bufs_alloc; + } + + err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT, + ROCKER_DMA_EVENT_DEFAULT_SIZE, + &rocker->event_ring); + if (err) { + dev_err(&pdev->dev, "failed to create event dma ring\n"); + goto err_dma_event_ring_create; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring, + PCI_DMA_FROMDEVICE, PAGE_SIZE); + if (err) { + dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n"); + goto err_dma_event_ring_bufs_alloc; + } + rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring); + return 0; + +err_dma_event_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker->event_ring); +err_dma_event_ring_create: + rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL); +err_dma_cmd_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); + return err; +} + +static void rocker_dma_rings_fini(struct rocker *rocker) +{ + rocker_dma_ring_bufs_free(rocker, &rocker->event_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker->event_ring); + rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); +} + +static int rocker_dma_rx_ring_skb_map(struct rocker *rocker, + struct 
rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + struct sk_buff *skb, size_t buf_len) +{ + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + + dma_handle = pci_map_single(pdev, skb->data, buf_len, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(pdev, dma_handle)) + return -EIO; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle)) + goto tlv_put_failure; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len)) + goto tlv_put_failure; + return 0; + +tlv_put_failure: + pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE); + desc_info->tlv_size = 0; + return -EMSGSIZE; +} + +static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port) +{ + return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; +} + +static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct net_device *dev = rocker_port->dev; + struct sk_buff *skb; + size_t buf_len = rocker_port_rx_buf_len(rocker_port); + int err; + + /* Ensure that hw will see tlv_size zero in case of an error. + * That tells hw to use another descriptor. + */ + rocker_desc_cookie_ptr_set(desc_info, NULL); + desc_info->tlv_size = 0; + + skb = netdev_alloc_skb_ip_align(dev, buf_len); + if (!skb) + return -ENOMEM; + err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info, + skb, buf_len); + if (err) { + dev_kfree_skb_any(skb); + return err; + } + rocker_desc_cookie_ptr_set(desc_info, skb); + return 0; +} + +static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker, + struct rocker_tlv **attrs) +{ + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + size_t len; + + if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] || + !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]) + return; + dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]); + len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]); + pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE); +} + +static void rocker_dma_rx_ring_skb_free(struct rocker *rocker, + struct rocker_desc_info *desc_info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; + struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); + + if (!skb) + return; + rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); + rocker_dma_rx_ring_skb_unmap(rocker, attrs); + dev_kfree_skb_any(skb); +} + +static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker, + struct rocker_port *rocker_port) +{ + struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + int i; + int err; + + for (i = 0; i < rx_ring->size; i++) { + err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, + &rx_ring->desc_info[i]); + if (err) + goto rollback; + } + return 0; + +rollback: + for (i--; i >= 0; i--) + rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); + return err; +} + +static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker, + struct rocker_port *rocker_port) +{ + struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + int i; + + for (i = 0; i < rx_ring->size; i++) + rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); +} + +static int rocker_port_dma_rings_init(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + int err; + + err = rocker_dma_ring_create(rocker, + ROCKER_DMA_TX(rocker_port->port_number), + ROCKER_DMA_TX_DEFAULT_SIZE, + &rocker_port->tx_ring); + if (err) { + netdev_err(rocker_port->dev, "failed to create tx dma ring\n"); + return err; + } + + err = 
rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE, + ROCKER_DMA_TX_DESC_SIZE); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n"); + goto err_dma_tx_ring_bufs_alloc; + } + + err = rocker_dma_ring_create(rocker, + ROCKER_DMA_RX(rocker_port->port_number), + ROCKER_DMA_RX_DEFAULT_SIZE, + &rocker_port->rx_ring); + if (err) { + netdev_err(rocker_port->dev, "failed to create rx dma ring\n"); + goto err_dma_rx_ring_create; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL, + ROCKER_DMA_RX_DESC_SIZE); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n"); + goto err_dma_rx_ring_bufs_alloc; + } + + err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n"); + goto err_dma_rx_ring_skbs_alloc; + } + rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring); + + return 0; + +err_dma_rx_ring_skbs_alloc: + rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL); +err_dma_rx_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); +err_dma_rx_ring_create: + rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE); +err_dma_tx_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); + return err; +} + +static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + + rocker_dma_rx_ring_skbs_free(rocker, rocker_port); + rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); + rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE); + rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); +} + +static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable) +{ + u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); + + if (enable) + val |= 1 << rocker_port->lport; + else + val &= ~(1 << rocker_port->lport); + rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); +} + +/******************************** + * Interrupt handler and helpers + ********************************/ + +static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id) +{ + struct rocker *rocker = dev_id; + struct rocker_desc_info *desc_info; + struct rocker_wait *wait; + u32 credits = 0; + + spin_lock(&rocker->cmd_ring_lock); + while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) { + wait = rocker_desc_cookie_ptr_get(desc_info); + if (wait->nowait) { + rocker_desc_gen_clear(desc_info); + rocker_wait_destroy(wait); + } else { + rocker_wait_wake_up(wait); + } + credits++; + } + spin_unlock(&rocker->cmd_ring_lock); + rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits); + + return IRQ_HANDLED; +} + +static void rocker_port_link_up(struct rocker_port *rocker_port) +{ + netif_carrier_on(rocker_port->dev); + netdev_info(rocker_port->dev, "Link is up\n"); +} + +static void rocker_port_link_down(struct rocker_port *rocker_port) +{ + netif_carrier_off(rocker_port->dev); + netdev_info(rocker_port->dev, "Link is down\n"); +} + +static int rocker_event_link_change(struct rocker *rocker, + const struct rocker_tlv *info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1]; + unsigned int port_number; + bool link_up; + struct rocker_port *rocker_port; + + rocker_tlv_parse_nested(attrs, 
ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); + if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT] || + !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) + return -EIO; + port_number = + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT]) - 1; + link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); + + if (port_number >= rocker->port_count) + return -EINVAL; + + rocker_port = rocker->ports[port_number]; + if (netif_carrier_ok(rocker_port->dev) != link_up) { + if (link_up) + rocker_port_link_up(rocker_port); + else + rocker_port_link_down(rocker_port); + } + + return 0; +} + +#define ROCKER_OP_FLAG_REMOVE BIT(0) +#define ROCKER_OP_FLAG_NOWAIT BIT(1) +#define ROCKER_OP_FLAG_LEARNED BIT(2) +#define ROCKER_OP_FLAG_REFRESH BIT(3) + +static int rocker_port_fdb(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id, int flags); + +static int rocker_event_mac_vlan_seen(struct rocker *rocker, + const struct rocker_tlv *info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1]; + unsigned int port_number; + struct rocker_port *rocker_port; + unsigned char *addr; + int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED; + __be16 vlan_id; + + rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); + if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT] || + !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || + !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) + return -EIO; + port_number = + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT]) - 1; + addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); + vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); + + if (port_number >= rocker->port_count) + return -EINVAL; + + rocker_port = rocker->ports[port_number]; + + if (rocker_port->stp_state != BR_STATE_LEARNING && + rocker_port->stp_state != BR_STATE_FORWARDING) + return 0; + + return rocker_port_fdb(rocker_port, addr, vlan_id, flags); +} + +static int rocker_event_process(struct rocker *rocker, + struct rocker_desc_info *desc_info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1]; + struct rocker_tlv *info; + u16 type; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info); + if (!attrs[ROCKER_TLV_EVENT_TYPE] || + !attrs[ROCKER_TLV_EVENT_INFO]) + return -EIO; + + type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]); + info = attrs[ROCKER_TLV_EVENT_INFO]; + + switch (type) { + case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED: + return rocker_event_link_change(rocker, info); + case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN: + return rocker_event_mac_vlan_seen(rocker, info); + } + + return -EOPNOTSUPP; +} + +static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id) +{ + struct rocker *rocker = dev_id; + struct pci_dev *pdev = rocker->pdev; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) { + err = rocker_desc_err(desc_info); + if (err) { + dev_err(&pdev->dev, "event desc received with err %d\n", + err); + } else { + err = rocker_event_process(rocker, desc_info); + if (err) + dev_err(&pdev->dev, "event processing failed with err %d\n", + err); + } + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker->event_ring, desc_info); + credits++; + } + rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits); + + return IRQ_HANDLED; +} + +static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id) +{ + struct rocker_port *rocker_port = dev_id; + + napi_schedule(&rocker_port->napi_tx); + return 
IRQ_HANDLED; +} + +static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id) +{ + struct rocker_port *rocker_port = dev_id; + + napi_schedule(&rocker_port->napi_rx); + return IRQ_HANDLED; +} + +/******************** + * Command interface + ********************/ + +typedef int (*rocker_cmd_cb_t)(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv); + +static int rocker_cmd_exec(struct rocker *rocker, + struct rocker_port *rocker_port, + rocker_cmd_cb_t prepare, void *prepare_priv, + rocker_cmd_cb_t process, void *process_priv, + bool nowait) +{ + struct rocker_desc_info *desc_info; + struct rocker_wait *wait; + unsigned long flags; + int err; + + wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL); + if (!wait) + return -ENOMEM; + wait->nowait = nowait; + + spin_lock_irqsave(&rocker->cmd_ring_lock, flags); + desc_info = rocker_desc_head_get(&rocker->cmd_ring); + if (!desc_info) { + spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags); + err = -EAGAIN; + goto out; + } + err = prepare(rocker, rocker_port, desc_info, prepare_priv); + if (err) { + spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags); + goto out; + } + rocker_desc_cookie_ptr_set(desc_info, wait); + rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); + spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags); + + if (nowait) + return 0; + + if (!rocker_wait_event_timeout(wait, HZ / 10)) + return -EIO; + + err = rocker_desc_err(desc_info); + if (err) + return err; + + if (process) + err = process(rocker, rocker_port, desc_info, process_priv); + + rocker_desc_gen_clear(desc_info); +out: + rocker_wait_destroy(wait); + return err; +} + +static int +rocker_cmd_get_port_settings_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, + rocker_port->lport)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct ethtool_cmd *ecmd = priv; + struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + u32 speed; + u8 duplex; + u8 autoneg; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] || + !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] || + !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) + return -EIO; + + speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]); + duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); + autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = SUPPORTED_TP; + ecmd->phy_address = 0xff; + ecmd->port = PORT_TP; + ethtool_cmd_speed_set(ecmd, speed); + ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; + ecmd->autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + + return 0; +} + +static int +rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + unsigned char *macaddr = priv; + struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + struct rocker_tlv *attr; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]; + if (!attr) + return -EIO; + + if (rocker_tlv_len(attr) != ETH_ALEN) + return -EINVAL; + + ether_addr_copy(macaddr, rocker_tlv_data(attr)); + return 0; +} + +static int +rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct ethtool_cmd *ecmd = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, + rocker_port->lport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, + ethtool_cmd_speed(ecmd))) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, + ecmd->duplex)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, + ecmd->autoneg)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + unsigned char *macaddr = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, + rocker_port->lport)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, + ETH_ALEN, macaddr)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_learning_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, + rocker_port->lport)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, + !!(rocker_port->brport_flags & BR_LEARNING))) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_cmd *ecmd) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_get_port_settings_prep, NULL, + 
rocker_cmd_get_port_settings_ethtool_proc, + ecmd, false); +} + +static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, + unsigned char *macaddr) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_macaddr_proc, + macaddr, false); +} + +static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_cmd *ecmd) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_set_port_settings_ethtool_prep, + ecmd, NULL, NULL, false); +} + +static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, + unsigned char *macaddr) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_set_port_settings_macaddr_prep, + macaddr, NULL, NULL, false); +} + +static int rocker_port_set_learning(struct rocker_port *rocker_port) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_set_port_learning_prep, + NULL, NULL, NULL, false); +} + +static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, + entry->key.ig_port.in_lport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, + entry->key.ig_port.in_lport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.ig_port.goto_tbl)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, + entry->key.vlan.in_lport)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.vlan.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.vlan.vlan_id_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.vlan.goto_tbl)) + return -EMSGSIZE; + if (entry->key.vlan.untagged && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID, + entry->key.vlan.new_vlan_id)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, + entry->key.term_mac.in_lport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, + entry->key.term_mac.in_lport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.term_mac.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.term_mac.eth_dst)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.term_mac.eth_dst_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.term_mac.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.term_mac.vlan_id_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.term_mac.goto_tbl)) + return -EMSGSIZE; + if (entry->key.term_mac.copy_to_cpu && + rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, + entry->key.term_mac.copy_to_cpu)) + 
return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.ucast_routing.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP, + entry->key.ucast_routing.dst4)) + return -EMSGSIZE; + if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK, + entry->key.ucast_routing.dst4_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.ucast_routing.goto_tbl)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.ucast_routing.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (entry->key.bridge.has_eth_dst && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.bridge.eth_dst)) + return -EMSGSIZE; + if (entry->key.bridge.has_eth_dst_mask && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.bridge.eth_dst_mask)) + return -EMSGSIZE; + if (entry->key.bridge.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.bridge.vlan_id)) + return -EMSGSIZE; + if (entry->key.bridge.tunnel_id && + rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID, + entry->key.bridge.tunnel_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.bridge.goto_tbl)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.bridge.group_id)) + return -EMSGSIZE; + if (entry->key.bridge.copy_to_cpu && + rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, + entry->key.bridge.copy_to_cpu)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, + entry->key.acl.in_lport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, + entry->key.acl.in_lport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->key.acl.eth_src)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK, + ETH_ALEN, entry->key.acl.eth_src_mask)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.acl.eth_dst)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.acl.eth_dst_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.acl.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.acl.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.acl.vlan_id_mask)) + return -EMSGSIZE; + + switch (ntohs(entry->key.acl.eth_type)) { + case ETH_P_IP: + case ETH_P_IPV6: + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO, + entry->key.acl.ip_proto)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, + entry->key.acl.ip_proto_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP, + entry->key.acl.ip_tos & 0x3f)) + 
return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, + entry->key.acl.ip_tos_mask & 0x3f)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN, + (entry->key.acl.ip_tos & 0xc0) >> 6)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_ECN_MASK, + (entry->key.acl.ip_tos_mask & 0xc0) >> 6)) + return -EMSGSIZE; + break; + } + + if (entry->key.acl.group_id != ROCKER_GROUP_NONE && + rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.acl.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_flow_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + int err = 0; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID, + entry->key.tbl_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY, + entry->key.priority)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0)) + return -EMSGSIZE; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, + entry->cookie)) + return -EMSGSIZE; + + switch (entry->key.tbl_id) { + case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT: + err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_VLAN: + err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC: + err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING: + err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_BRIDGING: + err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY: + err = rocker_cmd_flow_tbl_add_acl(desc_info, entry); + break; + default: + err = -ENOTSUPP; + break; + } + + if (err) + return err; + + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int rocker_cmd_flow_tbl_del(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_flow_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, + entry->cookie)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int +rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, + struct rocker_group_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_LPORT, + ROCKER_GROUP_PORT_GET(entry->group_id))) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, + entry->l2_interface.pop_vlan)) + return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, + struct rocker_group_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, + entry->l2_rewrite.group_id)) + return -EMSGSIZE; 
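+ /* the src/dst MAC and VLAN rewrites below are optional actions: a
+ * zero value in the entry means no rewrite was requested, so the
+ * TLV is omitted from the command instead of being sent as zero
+ */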
+ if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->l2_rewrite.eth_src)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->l2_rewrite.eth_dst)) + return -EMSGSIZE; + if (entry->l2_rewrite.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->l2_rewrite.vlan_id)) + return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, + struct rocker_group_tbl_entry *entry) +{ + int i; + struct rocker_tlv *group_ids; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT, + entry->group_count)) + return -EMSGSIZE; + + group_ids = rocker_tlv_nest_start(desc_info, + ROCKER_TLV_OF_DPA_GROUP_IDS); + if (!group_ids) + return -EMSGSIZE; + + for (i = 0; i < entry->group_count; i++) + /* Note TLV array is 1-based */ + if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i])) + return -EMSGSIZE; + + rocker_tlv_nest_end(desc_info, group_ids); + + return 0; +} + +static int +rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, + struct rocker_group_tbl_entry *entry) +{ + if (!is_zero_ether_addr(entry->l3_unicast.eth_src) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->l3_unicast.eth_src)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->l3_unicast.eth_dst)) + return -EMSGSIZE; + if (entry->l3_unicast.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->l3_unicast.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK, + entry->l3_unicast.ttl_check)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, + entry->l3_unicast.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_group_tbl_add(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_group_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + int err = 0; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->group_id)) + return -EMSGSIZE; + + switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: + err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: + err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: + err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry); + break; + default: + err = -ENOTSUPP; + break; + } + + if (err) + return err; + + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int rocker_cmd_group_tbl_del(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_group_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + 
return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->group_id)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +/***************************************** + * Flow, group, FDB, internal VLAN tables + *****************************************/ + +static int rocker_init_tbls(struct rocker *rocker) +{ + hash_init(rocker->flow_tbl); + spin_lock_init(&rocker->flow_tbl_lock); + + hash_init(rocker->group_tbl); + spin_lock_init(&rocker->group_tbl_lock); + + hash_init(rocker->fdb_tbl); + spin_lock_init(&rocker->fdb_tbl_lock); + + hash_init(rocker->internal_vlan_tbl); + spin_lock_init(&rocker->internal_vlan_tbl_lock); + + return 0; +} + +static void rocker_free_tbls(struct rocker *rocker) +{ + unsigned long flags; + struct rocker_flow_tbl_entry *flow_entry; + struct rocker_group_tbl_entry *group_entry; + struct rocker_fdb_tbl_entry *fdb_entry; + struct rocker_internal_vlan_tbl_entry *internal_vlan_entry; + struct hlist_node *tmp; + int bkt; + + spin_lock_irqsave(&rocker->flow_tbl_lock, flags); + hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry) + hash_del(&flow_entry->entry); + spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); + + spin_lock_irqsave(&rocker->group_tbl_lock, flags); + hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry) + hash_del(&group_entry->entry); + spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); + + spin_lock_irqsave(&rocker->fdb_tbl_lock, flags); + hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry) + hash_del(&fdb_entry->entry); + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags); + + spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags); + hash_for_each_safe(rocker->internal_vlan_tbl, bkt, + tmp, internal_vlan_entry, entry) + hash_del(&internal_vlan_entry->entry); + spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags); +} + +static struct rocker_flow_tbl_entry * +rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match) +{ + struct rocker_flow_tbl_entry *found; + + hash_for_each_possible(rocker->flow_tbl, found, + entry, match->key_crc32) { + if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) + return found; + } + + return NULL; +} + +static int rocker_flow_tbl_add(struct rocker_port *rocker_port, + struct rocker_flow_tbl_entry *match, + bool nowait) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_flow_tbl_entry *found; + unsigned long flags; + bool add_to_hw = false; + int err = 0; + + match->key_crc32 = crc32(~0, &match->key, sizeof(match->key)); + + spin_lock_irqsave(&rocker->flow_tbl_lock, flags); + + found = rocker_flow_tbl_find(rocker, match); + + if (found) { + kfree(match); + } else { + found = match; + found->cookie = rocker->flow_tbl_next_cookie++; + hash_add(rocker->flow_tbl, &found->entry, found->key_crc32); + add_to_hw = true; + } + + found->ref_count++; + + spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); + + if (add_to_hw) { + err = rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_flow_tbl_add, + found, NULL, NULL, nowait); + if (err) { + spin_lock_irqsave(&rocker->flow_tbl_lock, flags); + hash_del(&found->entry); + spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); + kfree(found); + } + } + + return err; +} + +static int rocker_flow_tbl_del(struct rocker_port *rocker_port, + struct rocker_flow_tbl_entry *match, + bool nowait) +{ + struct rocker 
*rocker = rocker_port->rocker; + struct rocker_flow_tbl_entry *found; + unsigned long flags; + bool del_from_hw = false; + int err = 0; + + match->key_crc32 = crc32(~0, &match->key, sizeof(match->key)); + + spin_lock_irqsave(&rocker->flow_tbl_lock, flags); + + found = rocker_flow_tbl_find(rocker, match); + + if (found) { + found->ref_count--; + if (found->ref_count == 0) { + hash_del(&found->entry); + del_from_hw = true; + } + } + + spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); + + kfree(match); + + if (del_from_hw) { + err = rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_flow_tbl_del, + found, NULL, NULL, nowait); + kfree(found); + } + + return err; +} + +static gfp_t rocker_op_flags_gfp(int flags) +{ + return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL; +} + +static int rocker_flow_tbl_do(struct rocker_port *rocker_port, + int flags, struct rocker_flow_tbl_entry *entry) +{ + bool nowait = flags & ROCKER_OP_FLAG_NOWAIT; + + if (flags & ROCKER_OP_FLAG_REMOVE) + return rocker_flow_tbl_del(rocker_port, entry, nowait); + else + return rocker_flow_tbl_add(rocker_port, entry, nowait); +} + +static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, + int flags, u32 in_lport, u32 in_lport_mask, + enum rocker_of_dpa_table_id goto_tbl) +{ + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->key.priority = ROCKER_PRIORITY_IG_PORT; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; + entry->key.ig_port.in_lport = in_lport; + entry->key.ig_port.in_lport_mask = in_lport_mask; + entry->key.ig_port.goto_tbl = goto_tbl; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + +static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, + int flags, u32 in_lport, + __be16 vlan_id, __be16 vlan_id_mask, + enum rocker_of_dpa_table_id goto_tbl, + bool untagged, __be16 new_vlan_id) +{ + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->key.priority = ROCKER_PRIORITY_VLAN; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; + entry->key.vlan.in_lport = in_lport; + entry->key.vlan.vlan_id = vlan_id; + entry->key.vlan.vlan_id_mask = vlan_id_mask; + entry->key.vlan.goto_tbl = goto_tbl; + + entry->key.vlan.untagged = untagged; + entry->key.vlan.new_vlan_id = new_vlan_id; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + +static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, + u32 in_lport, u32 in_lport_mask, + __be16 eth_type, const u8 *eth_dst, + const u8 *eth_dst_mask, __be16 vlan_id, + __be16 vlan_id_mask, bool copy_to_cpu, + int flags) +{ + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + if (is_multicast_ether_addr(eth_dst)) { + entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST; + entry->key.term_mac.goto_tbl = + ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING; + } else { + entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST; + entry->key.term_mac.goto_tbl = + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + } + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + entry->key.term_mac.in_lport = in_lport; + entry->key.term_mac.in_lport_mask = in_lport_mask; + entry->key.term_mac.eth_type = eth_type; + ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); + ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); + entry->key.term_mac.vlan_id = 
vlan_id; + entry->key.term_mac.vlan_id_mask = vlan_id_mask; + entry->key.term_mac.copy_to_cpu = copy_to_cpu; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + +static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port, + int flags, + const u8 *eth_dst, const u8 *eth_dst_mask, + __be16 vlan_id, u32 tunnel_id, + enum rocker_of_dpa_table_id goto_tbl, + u32 group_id, bool copy_to_cpu) +{ + struct rocker_flow_tbl_entry *entry; + u32 priority; + bool vlan_bridging = !!vlan_id; + bool dflt = !eth_dst || (eth_dst && eth_dst_mask); + bool wild = false; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; + + if (eth_dst) { + entry->key.bridge.has_eth_dst = 1; + ether_addr_copy(entry->key.bridge.eth_dst, eth_dst); + } + if (eth_dst_mask) { + entry->key.bridge.has_eth_dst_mask = 1; + ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask); + if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN)) + wild = true; + } + + priority = ROCKER_PRIORITY_UNKNOWN; + if (vlan_bridging && dflt && wild) + priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD; + else if (vlan_bridging && dflt && !wild) + priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT; + else if (vlan_bridging && !dflt) + priority = ROCKER_PRIORITY_BRIDGING_VLAN; + else if (!vlan_bridging && dflt && wild) + priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD; + else if (!vlan_bridging && dflt && !wild) + priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT; + else if (!vlan_bridging && !dflt) + priority = ROCKER_PRIORITY_BRIDGING_TENANT; + + entry->key.priority = priority; + entry->key.bridge.vlan_id = vlan_id; + entry->key.bridge.tunnel_id = tunnel_id; + entry->key.bridge.goto_tbl = goto_tbl; + entry->key.bridge.group_id = group_id; + entry->key.bridge.copy_to_cpu = copy_to_cpu; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + +static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, + int flags, u32 in_lport, + u32 in_lport_mask, + const u8 *eth_src, const u8 *eth_src_mask, + const u8 *eth_dst, const u8 *eth_dst_mask, + __be16 eth_type, + __be16 vlan_id, __be16 vlan_id_mask, + u8 ip_proto, u8 ip_proto_mask, + u8 ip_tos, u8 ip_tos_mask, + u32 group_id) +{ + u32 priority; + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + priority = ROCKER_PRIORITY_ACL_NORMAL; + if (eth_dst && eth_dst_mask) { + if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0) + priority = ROCKER_PRIORITY_ACL_DFLT; + else if (is_link_local_ether_addr(eth_dst)) + priority = ROCKER_PRIORITY_ACL_CTRL; + } + + entry->key.priority = priority; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + entry->key.acl.in_lport = in_lport; + entry->key.acl.in_lport_mask = in_lport_mask; + + if (eth_src) + ether_addr_copy(entry->key.acl.eth_src, eth_src); + if (eth_src_mask) + ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask); + if (eth_dst) + ether_addr_copy(entry->key.acl.eth_dst, eth_dst); + if (eth_dst_mask) + ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask); + + entry->key.acl.eth_type = eth_type; + entry->key.acl.vlan_id = vlan_id; + entry->key.acl.vlan_id_mask = vlan_id_mask; + entry->key.acl.ip_proto = ip_proto; + entry->key.acl.ip_proto_mask = ip_proto_mask; + entry->key.acl.ip_tos = ip_tos; + entry->key.acl.ip_tos_mask = ip_tos_mask; + entry->key.acl.group_id = group_id; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + 
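All of the rocker_flow_tbl_* helpers above share one calling convention: allocate an entry with rocker_op_flags_gfp(), fill in the match key for a single OF-DPA table, and hand it to rocker_flow_tbl_do(), which turns ROCKER_OP_FLAG_REMOVE into a delete and anything else into an add. A minimal caller sketch, not part of the commit itself; the lport value and exact-match mask are made up for illustration:

static int example_ig_port_flow(struct rocker_port *rocker_port)
{
	u32 in_lport = 1;		/* illustrative port number */
	u32 in_lport_mask = 0xffffffff;	/* exact match on lport */
	int err;

	/* flags == 0: waiting add with a GFP_KERNEL allocation */
	err = rocker_flow_tbl_ig_port(rocker_port, 0,
				      in_lport, in_lport_mask,
				      ROCKER_OF_DPA_TABLE_ID_VLAN);
	if (err)
		return err;

	/* removal must pass a bit-identical key: rocker_flow_tbl_del()
	 * locates the entry by CRC32 of the key plus a memcmp()
	 */
	return rocker_flow_tbl_ig_port(rocker_port, ROCKER_OP_FLAG_REMOVE,
				       in_lport, in_lport_mask,
				       ROCKER_OF_DPA_TABLE_ID_VLAN);
}

The same pattern holds for the vlan, term_mac, bridge and acl variants; only the key fields and the priority selection differ.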
+static struct rocker_group_tbl_entry * +rocker_group_tbl_find(struct rocker *rocker, + struct rocker_group_tbl_entry *match) +{ + struct rocker_group_tbl_entry *found; + + hash_for_each_possible(rocker->group_tbl, found, + entry, match->group_id) { + if (found->group_id == match->group_id) + return found; + } + + return NULL; +} + +static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry) +{ + switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + kfree(entry->group_ids); + break; + default: + break; + } + kfree(entry); +} + +static int rocker_group_tbl_add(struct rocker_port *rocker_port, + struct rocker_group_tbl_entry *match, + bool nowait) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_group_tbl_entry *found; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&rocker->group_tbl_lock, flags); + + found = rocker_group_tbl_find(rocker, match); + + if (found) { + hash_del(&found->entry); + rocker_group_tbl_entry_free(found); + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD; + } else { + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD; + } + + hash_add(rocker->group_tbl, &found->entry, found->group_id); + + spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); + + if (found->cmd) + err = rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_group_tbl_add, + found, NULL, NULL, nowait); + + return err; +} + +static int rocker_group_tbl_del(struct rocker_port *rocker_port, + struct rocker_group_tbl_entry *match, + bool nowait) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_group_tbl_entry *found; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&rocker->group_tbl_lock, flags); + + found = rocker_group_tbl_find(rocker, match); + + if (found) { + hash_del(&found->entry); + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL; + } + + spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); + + rocker_group_tbl_entry_free(match); + + if (found) { + err = rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_group_tbl_del, + found, NULL, NULL, nowait); + rocker_group_tbl_entry_free(found); + } + + return err; +} + +static int rocker_group_tbl_do(struct rocker_port *rocker_port, + int flags, struct rocker_group_tbl_entry *entry) +{ + bool nowait = flags & ROCKER_OP_FLAG_NOWAIT; + + if (flags & ROCKER_OP_FLAG_REMOVE) + return rocker_group_tbl_del(rocker_port, entry, nowait); + else + return rocker_group_tbl_add(rocker_port, entry, nowait); +} + +static int rocker_group_l2_interface(struct rocker_port *rocker_port, + int flags, __be16 vlan_id, + u32 out_lport, int pop_vlan) +{ + struct rocker_group_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + entry->l2_interface.pop_vlan = pop_vlan; + + return rocker_group_tbl_do(rocker_port, flags, entry); +} + +static int rocker_group_l2_fan_out(struct rocker_port *rocker_port, + int flags, u8 group_count, + u32 *group_ids, u32 group_id) +{ + struct rocker_group_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->group_id = group_id; + entry->group_count = group_count; + + entry->group_ids = kcalloc(group_count, sizeof(u32), + rocker_op_flags_gfp(flags)); + if (!entry->group_ids) { + kfree(entry); + return -ENOMEM; + } + memcpy(entry->group_ids, group_ids, group_count 
* sizeof(u32)); + + return rocker_group_tbl_do(rocker_port, flags, entry); +} + +static int rocker_group_l2_flood(struct rocker_port *rocker_port, + int flags, __be16 vlan_id, + u8 group_count, u32 *group_ids, + u32 group_id) +{ + return rocker_group_l2_fan_out(rocker_port, flags, + group_count, group_ids, + group_id); +} + +static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, + int flags, __be16 vlan_id) +{ + struct rocker_port *p; + struct rocker *rocker = rocker_port->rocker; + u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); + u32 group_ids[rocker->port_count]; + u8 group_count = 0; + int err; + int i; + + /* Adjust the flood group for this VLAN. The flood group + * references an L2 interface group for each port in this + * VLAN. + */ + + for (i = 0; i < rocker->port_count; i++) { + p = rocker->ports[i]; + if (!rocker_port_is_bridged(p)) + continue; + if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { + group_ids[group_count++] = + ROCKER_GROUP_L2_INTERFACE(vlan_id, + p->lport); + } + } + + /* If there are no bridged ports in this VLAN, we're done */ + if (group_count == 0) + return 0; + + err = rocker_group_l2_flood(rocker_port, flags, vlan_id, + group_count, group_ids, + group_id); + if (err) + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 flood group\n", err); + + return err; +} + +static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, + int flags, __be16 vlan_id, + bool pop_vlan) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_port *p; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + u32 out_lport; + int ref = 0; + int err; + int i; + + /* An L2 interface group for this port in this VLAN, but + * only when port STP state is LEARNING|FORWARDING. + */ + + if (rocker_port->stp_state == BR_STATE_LEARNING || + rocker_port->stp_state == BR_STATE_FORWARDING) { + out_lport = rocker_port->lport; + err = rocker_group_l2_interface(rocker_port, flags, + vlan_id, out_lport, + pop_vlan); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 group for lport %d\n", + err, out_lport); + return err; + } + } + + /* An L2 interface group for this VLAN to CPU port. + * Add when first port joins this VLAN and destroy when + * last port leaves this VLAN. 
+ */ + + for (i = 0; i < rocker->port_count; i++) { + p = rocker->ports[i]; + if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) + ref++; + } + + if ((!adding || ref != 1) && (adding || ref != 0)) + return 0; + + out_lport = 0; + err = rocker_group_l2_interface(rocker_port, flags, + vlan_id, out_lport, + pop_vlan); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 group for CPU port\n", err); + return err; + } + + return 0; +} + +static struct rocker_ctrl { + const u8 *eth_dst; + const u8 *eth_dst_mask; + __be16 eth_type; + bool acl; + bool bridge; + bool term; + bool copy_to_cpu; +} rocker_ctrls[] = { + [ROCKER_CTRL_LINK_LOCAL_MCAST] = { + /* pass link local multicast pkts up to CPU for filtering */ + .eth_dst = ll_mac, + .eth_dst_mask = ll_mask, + .acl = true, + }, + [ROCKER_CTRL_LOCAL_ARP] = { + /* pass local ARP pkts up to CPU */ + .eth_dst = zero_mac, + .eth_dst_mask = zero_mac, + .eth_type = htons(ETH_P_ARP), + .acl = true, + }, + [ROCKER_CTRL_IPV4_MCAST] = { + /* pass IPv4 mcast pkts up to CPU, RFC 1112 */ + .eth_dst = ipv4_mcast, + .eth_dst_mask = ipv4_mask, + .eth_type = htons(ETH_P_IP), + .term = true, + .copy_to_cpu = true, + }, + [ROCKER_CTRL_IPV6_MCAST] = { + /* pass IPv6 mcast pkts up to CPU, RFC 2464 */ + .eth_dst = ipv6_mcast, + .eth_dst_mask = ipv6_mask, + .eth_type = htons(ETH_P_IPV6), + .term = true, + .copy_to_cpu = true, + }, + [ROCKER_CTRL_DFLT_BRIDGING] = { + /* flood any pkts on vlan */ + .bridge = true, + .copy_to_cpu = true, + }, +}; + +static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, + int flags, struct rocker_ctrl *ctrl, + __be16 vlan_id) +{ + u32 in_lport = rocker_port->lport; + u32 in_lport_mask = 0xffffffff; + u32 out_lport = 0; + u8 *eth_src = NULL; + u8 *eth_src_mask = NULL; + __be16 vlan_id_mask = htons(0xffff); + u8 ip_proto = 0; + u8 ip_proto_mask = 0; + u8 ip_tos = 0; + u8 ip_tos_mask = 0; + u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + int err; + + err = rocker_flow_tbl_acl(rocker_port, flags, + in_lport, in_lport_mask, + eth_src, eth_src_mask, + ctrl->eth_dst, ctrl->eth_dst_mask, + ctrl->eth_type, + vlan_id, vlan_id_mask, + ip_proto, ip_proto_mask, + ip_tos, ip_tos_mask, + group_id); + + if (err) + netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err); + + return err; +} + +static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port, + int flags, struct rocker_ctrl *ctrl, + __be16 vlan_id) +{ + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); + u32 tunnel_id = 0; + int err; + + if (!rocker_port_is_bridged(rocker_port)) + return 0; + + err = rocker_flow_tbl_bridge(rocker_port, flags, + ctrl->eth_dst, ctrl->eth_dst_mask, + vlan_id, tunnel_id, + goto_tbl, group_id, ctrl->copy_to_cpu); + + if (err) + netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err); + + return err; +} + +static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, + int flags, struct rocker_ctrl *ctrl, + __be16 vlan_id) +{ + u32 in_lport_mask = 0xffffffff; + __be16 vlan_id_mask = htons(0xffff); + int err; + + if (ntohs(vlan_id) == 0) + vlan_id = rocker_port->internal_vlan_id; + + err = rocker_flow_tbl_term_mac(rocker_port, + rocker_port->lport, in_lport_mask, + ctrl->eth_type, ctrl->eth_dst, + ctrl->eth_dst_mask, vlan_id, + vlan_id_mask, ctrl->copy_to_cpu, + flags); + + if (err) + netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err); + + return err; +} + +static int rocker_port_ctrl_vlan(struct rocker_port 
*rocker_port, int flags, + struct rocker_ctrl *ctrl, __be16 vlan_id) +{ + if (ctrl->acl) + return rocker_port_ctrl_vlan_acl(rocker_port, flags, + ctrl, vlan_id); + if (ctrl->bridge) + return rocker_port_ctrl_vlan_bridge(rocker_port, flags, + ctrl, vlan_id); + + if (ctrl->term) + return rocker_port_ctrl_vlan_term(rocker_port, flags, + ctrl, vlan_id); + + return -EOPNOTSUPP; +} + +static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port, + int flags, __be16 vlan_id) +{ + int err = 0; + int i; + + for (i = 0; i < ROCKER_CTRL_MAX; i++) { + if (rocker_port->ctrls[i]) { + err = rocker_port_ctrl_vlan(rocker_port, flags, + &rocker_ctrls[i], vlan_id); + if (err) + return err; + } + } + + return err; +} + +static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags, + struct rocker_ctrl *ctrl) +{ + u16 vid; + int err = 0; + + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, rocker_port->vlan_bitmap)) + continue; + err = rocker_port_ctrl_vlan(rocker_port, flags, + ctrl, htons(vid)); + if (err) + break; + } + + return err; +} + +static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, + u16 vid) +{ + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + u32 in_lport = rocker_port->lport; + __be16 vlan_id = htons(vid); + __be16 vlan_id_mask = htons(0xffff); + __be16 internal_vlan_id; + bool untagged; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + int err; + + internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged); + + if (adding && test_and_set_bit(ntohs(internal_vlan_id), + rocker_port->vlan_bitmap)) + return 0; /* already added */ + else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id), + rocker_port->vlan_bitmap)) + return 0; /* already removed */ + + if (adding) { + err = rocker_port_ctrl_vlan_add(rocker_port, flags, + internal_vlan_id); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port ctrl vlan add\n", err); + return err; + } + } + + err = rocker_port_vlan_l2_groups(rocker_port, flags, + internal_vlan_id, untagged); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 groups\n", err); + return err; + } + + err = rocker_port_vlan_flood_group(rocker_port, flags, + internal_vlan_id); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 flood group\n", err); + return err; + } + + err = rocker_flow_tbl_vlan(rocker_port, flags, + in_lport, vlan_id, vlan_id_mask, + goto_tbl, untagged, internal_vlan_id); + if (err) + netdev_err(rocker_port->dev, + "Error (%d) port VLAN table\n", err); + + return err; +} + +static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags) +{ + enum rocker_of_dpa_table_id goto_tbl; + u32 in_lport; + u32 in_lport_mask; + int err; + + /* Normal Ethernet Frames. Matches pkts from any local physical + * ports. Goto VLAN tbl. 
+ */ + + in_lport = 0; + in_lport_mask = 0xffff0000; + goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; + + err = rocker_flow_tbl_ig_port(rocker_port, flags, + in_lport, in_lport_mask, + goto_tbl); + if (err) + netdev_err(rocker_port->dev, + "Error (%d) ingress port table entry\n", err); + + return err; +} + +struct rocker_fdb_learn_work { + struct work_struct work; + struct net_device *dev; + int flags; + u8 addr[ETH_ALEN]; + u16 vid; +}; + +static void rocker_port_fdb_learn_work(struct work_struct *work) +{ + struct rocker_fdb_learn_work *lw = + container_of(work, struct rocker_fdb_learn_work, work); + bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE); + bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED); + + if (learned && removing) + br_fdb_external_learn_del(lw->dev, lw->addr, lw->vid); + else if (learned && !removing) + br_fdb_external_learn_add(lw->dev, lw->addr, lw->vid); + + kfree(work); +} + +static int rocker_port_fdb_learn(struct rocker_port *rocker_port, + int flags, const u8 *addr, __be16 vlan_id) +{ + struct rocker_fdb_learn_work *lw; + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 out_lport = rocker_port->lport; + u32 tunnel_id = 0; + u32 group_id = ROCKER_GROUP_NONE; + bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC); + bool copy_to_cpu = false; + int err; + + if (rocker_port_is_bridged(rocker_port)) + group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + + if (!(flags & ROCKER_OP_FLAG_REFRESH)) { + err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL, + vlan_id, tunnel_id, goto_tbl, + group_id, copy_to_cpu); + if (err) + return err; + } + + if (!syncing) + return 0; + + if (!rocker_port_is_bridged(rocker_port)) + return 0; + + lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags)); + if (!lw) + return -ENOMEM; + + INIT_WORK(&lw->work, rocker_port_fdb_learn_work); + + lw->dev = rocker_port->dev; + lw->flags = flags; + ether_addr_copy(lw->addr, addr); + lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id); + + schedule_work(&lw->work); + + return 0; +} + +static struct rocker_fdb_tbl_entry * +rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match) +{ + struct rocker_fdb_tbl_entry *found; + + hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32) + if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) + return found; + + return NULL; +} + +static int rocker_port_fdb(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id, int flags) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_fdb_tbl_entry *fdb; + struct rocker_fdb_tbl_entry *found; + bool removing = (flags & ROCKER_OP_FLAG_REMOVE); + unsigned long lock_flags; + + fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags)); + if (!fdb) + return -ENOMEM; + + fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED); + fdb->key.lport = rocker_port->lport; + ether_addr_copy(fdb->key.addr, addr); + fdb->key.vlan_id = vlan_id; + fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); + + spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); + + found = rocker_fdb_tbl_find(rocker, fdb); + + if (removing && found) { + kfree(fdb); + hash_del(&found->entry); + } else if (!removing && !found) { + hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32); + } + + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); + + /* Check if adding and already exists, or removing and can't find */ + if (!found != !removing) { + kfree(fdb); + if (!found && removing) + return 0; + /* Refreshing existing to 
update aging timers */ + flags |= ROCKER_OP_FLAG_REFRESH; + } + + return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id); +} + +static int rocker_port_fdb_flush(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_fdb_tbl_entry *found; + unsigned long lock_flags; + int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE; + struct hlist_node *tmp; + int bkt; + int err = 0; + + if (rocker_port->stp_state == BR_STATE_LEARNING || + rocker_port->stp_state == BR_STATE_FORWARDING) + return 0; + + spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); + + hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { + if (found->key.lport != rocker_port->lport) + continue; + if (!found->learned) + continue; + err = rocker_port_fdb_learn(rocker_port, flags, + found->key.addr, + found->key.vlan_id); + if (err) + goto err_out; + hash_del(&found->entry); + } + +err_out: + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); + + return err; +} + +static int rocker_port_router_mac(struct rocker_port *rocker_port, + int flags, __be16 vlan_id) +{ + u32 in_lport_mask = 0xffffffff; + __be16 eth_type; + const u8 *dst_mac_mask = ff_mac; + __be16 vlan_id_mask = htons(0xffff); + bool copy_to_cpu = false; + int err; + + if (ntohs(vlan_id) == 0) + vlan_id = rocker_port->internal_vlan_id; + + eth_type = htons(ETH_P_IP); + err = rocker_flow_tbl_term_mac(rocker_port, + rocker_port->lport, in_lport_mask, + eth_type, rocker_port->dev->dev_addr, + dst_mac_mask, vlan_id, vlan_id_mask, + copy_to_cpu, flags); + if (err) + return err; + + eth_type = htons(ETH_P_IPV6); + err = rocker_flow_tbl_term_mac(rocker_port, + rocker_port->lport, in_lport_mask, + eth_type, rocker_port->dev->dev_addr, + dst_mac_mask, vlan_id, vlan_id_mask, + copy_to_cpu, flags); + + return err; +} + +static int rocker_port_fwding(struct rocker_port *rocker_port) +{ + bool pop_vlan; + u32 out_lport; + __be16 vlan_id; + u16 vid; + int flags = ROCKER_OP_FLAG_NOWAIT; + int err; + + /* Port will be forwarding-enabled if its STP state is LEARNING + * or FORWARDING. Traffic from CPU can still egress, regardless of + * port STP state. Use L2 interface group on port VLANs as a way + * to toggle port forwarding: if forwarding is disabled, L2 + * interface group will not exist. 
+ */ + + if (rocker_port->stp_state != BR_STATE_LEARNING && + rocker_port->stp_state != BR_STATE_FORWARDING) + flags |= ROCKER_OP_FLAG_REMOVE; + + out_lport = rocker_port->lport; + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, rocker_port->vlan_bitmap)) + continue; + vlan_id = htons(vid); + pop_vlan = rocker_vlan_id_is_internal(vlan_id); + err = rocker_group_l2_interface(rocker_port, flags, + vlan_id, out_lport, + pop_vlan); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 group for lport %d\n", + err, out_lport); + return err; + } + } + + return 0; +} + +static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state) +{ + bool want[ROCKER_CTRL_MAX] = { 0, }; + int flags; + int err; + int i; + + if (rocker_port->stp_state == state) + return 0; + + rocker_port->stp_state = state; + + switch (state) { + case BR_STATE_DISABLED: + /* port is completely disabled */ + break; + case BR_STATE_LISTENING: + case BR_STATE_BLOCKING: + want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; + break; + case BR_STATE_LEARNING: + case BR_STATE_FORWARDING: + want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; + want[ROCKER_CTRL_IPV4_MCAST] = true; + want[ROCKER_CTRL_IPV6_MCAST] = true; + if (rocker_port_is_bridged(rocker_port)) + want[ROCKER_CTRL_DFLT_BRIDGING] = true; + else + want[ROCKER_CTRL_LOCAL_ARP] = true; + break; + } + + for (i = 0; i < ROCKER_CTRL_MAX; i++) { + if (want[i] != rocker_port->ctrls[i]) { + flags = ROCKER_OP_FLAG_NOWAIT | + (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE); + err = rocker_port_ctrl(rocker_port, flags, + &rocker_ctrls[i]); + if (err) + return err; + rocker_port->ctrls[i] = want[i]; + } + } + + err = rocker_port_fdb_flush(rocker_port); + if (err) + return err; + + return rocker_port_fwding(rocker_port); +} + +static struct rocker_internal_vlan_tbl_entry * +rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex) +{ + struct rocker_internal_vlan_tbl_entry *found; + + hash_for_each_possible(rocker->internal_vlan_tbl, found, + entry, ifindex) { + if (found->ifindex == ifindex) + return found; + } + + return NULL; +} + +static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port, + int ifindex) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_internal_vlan_tbl_entry *entry; + struct rocker_internal_vlan_tbl_entry *found; + unsigned long lock_flags; + int i; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return 0; + + entry->ifindex = ifindex; + + spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); + + found = rocker_internal_vlan_tbl_find(rocker, ifindex); + if (found) { + kfree(entry); + goto found; + } + + found = entry; + hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex); + + for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) { + if (test_and_set_bit(i, rocker->internal_vlan_bitmap)) + continue; + found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i); + goto found; + } + + netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n"); + +found: + found->ref_count++; + spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); + + return found->vlan_id; +} + +static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port, + int ifindex) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_internal_vlan_tbl_entry *found; + unsigned long lock_flags; + unsigned long bit; + + spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); + + found = rocker_internal_vlan_tbl_find(rocker, ifindex); + if (!found) { + 
netdev_err(rocker_port->dev, + "ifindex (%d) not found in internal VLAN tbl\n", + ifindex); + goto not_found; + } + + if (--found->ref_count <= 0) { + bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE; + clear_bit(bit, rocker->internal_vlan_bitmap); + hash_del(&found->entry); + kfree(found); + } + +not_found: + spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); +} + +/***************** + * Net device ops + *****************/ + +static int rocker_port_open(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + u8 stp_state = rocker_port_is_bridged(rocker_port) ? + BR_STATE_BLOCKING : BR_STATE_FORWARDING; + int err; + + err = rocker_port_dma_rings_init(rocker_port); + if (err) + return err; + + err = request_irq(rocker_msix_tx_vector(rocker_port), + rocker_tx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign tx irq\n"); + goto err_request_tx_irq; + } + + err = request_irq(rocker_msix_rx_vector(rocker_port), + rocker_rx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign rx irq\n"); + goto err_request_rx_irq; + } + + err = rocker_port_stp_update(rocker_port, stp_state); + if (err) + goto err_stp_update; + + napi_enable(&rocker_port->napi_tx); + napi_enable(&rocker_port->napi_rx); + rocker_port_set_enable(rocker_port, true); + netif_start_queue(dev); + return 0; + +err_stp_update: + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); +err_request_rx_irq: + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); +err_request_tx_irq: + rocker_port_dma_rings_fini(rocker_port); + return err; +} + +static int rocker_port_stop(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + netif_stop_queue(dev); + rocker_port_set_enable(rocker_port, false); + napi_disable(&rocker_port->napi_rx); + napi_disable(&rocker_port->napi_tx); + rocker_port_stp_update(rocker_port, BR_STATE_DISABLED); + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); + rocker_port_dma_rings_fini(rocker_port); + + return 0; +} + +static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1]; + struct rocker_tlv *attr; + int rem; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info); + if (!attrs[ROCKER_TLV_TX_FRAGS]) + return; + rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) { + struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1]; + dma_addr_t dma_handle; + size_t len; + + if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG) + continue; + rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX, + attr); + if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || + !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) + continue; + dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); + len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); + pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE); + } +} + +static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + char *buf, size_t buf_len) +{ + struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + struct rocker_tlv *frag; + + dma_handle = pci_map_single(pdev, 
buf, buf_len, DMA_TO_DEVICE); + if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); + return -EIO; + } + frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG); + if (!frag) + goto unmap_frag; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR, + dma_handle)) + goto nest_cancel; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN, + buf_len)) + goto nest_cancel; + rocker_tlv_nest_end(desc_info, frag); + return 0; + +nest_cancel: + rocker_tlv_nest_cancel(desc_info, frag); +unmap_frag: + pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE); + return -EMSGSIZE; +} + +static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + struct rocker_tlv *frags; + int i; + int err; + + desc_info = rocker_desc_head_get(&rocker_port->tx_ring); + if (unlikely(!desc_info)) { + if (net_ratelimit()) + netdev_err(dev, "tx ring full when queue awake\n"); + return NETDEV_TX_BUSY; + } + + rocker_desc_cookie_ptr_set(desc_info, skb); + + frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS); + if (!frags) + goto out; + err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, + skb->data, skb_headlen(skb)); + if (err) + goto nest_cancel; + if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) + goto nest_cancel; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, + skb_frag_address(frag), + skb_frag_size(frag)); + if (err) + goto unmap_frags; + } + rocker_tlv_nest_end(desc_info, frags); + + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info); + + desc_info = rocker_desc_head_get(&rocker_port->tx_ring); + if (!desc_info) + netif_stop_queue(dev); + + return NETDEV_TX_OK; + +unmap_frags: + rocker_tx_desc_frags_unmap(rocker_port, desc_info); +nest_cancel: + rocker_tlv_nest_cancel(desc_info, frags); +out: + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int rocker_port_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data); + if (err) + return err; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + return 0; +} + +static int rocker_port_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + err = rocker_port_vlan(rocker_port, 0, vid); + if (err) + return err; + + return rocker_port_router_mac(rocker_port, 0, htons(vid)); +} + +static int rocker_port_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE, + htons(vid)); + if (err) + return err; + + return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid); +} + +static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 nlm_flags) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL); + 
int flags = 0; + + if (!rocker_port_is_bridged(rocker_port)) + return -EINVAL; + + return rocker_port_fdb(rocker_port, addr, vlan_id, flags); +} + +static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL); + int flags = ROCKER_OP_FLAG_REMOVE; + + if (!rocker_port_is_bridged(rocker_port)) + return -EINVAL; + + return rocker_port_fdb(rocker_port, addr, vlan_id, flags); +} + +static int rocker_fdb_fill_info(struct sk_buff *skb, + struct rocker_port *rocker_port, + const unsigned char *addr, u16 vid, + u32 portid, u32 seq, int type, + unsigned int flags) +{ + struct nlmsghdr *nlh; + struct ndmsg *ndm; + + nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags); + if (!nlh) + return -EMSGSIZE; + + ndm = nlmsg_data(nlh); + ndm->ndm_family = AF_BRIDGE; + ndm->ndm_pad1 = 0; + ndm->ndm_pad2 = 0; + ndm->ndm_flags = NTF_SELF; + ndm->ndm_type = 0; + ndm->ndm_ifindex = rocker_port->dev->ifindex; + ndm->ndm_state = NUD_REACHABLE; + + if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) + goto nla_put_failure; + + if (vid && nla_put_u16(skb, NDA_VLAN, vid)) + goto nla_put_failure; + + return nlmsg_end(skb, nlh); + +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static int rocker_port_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + struct net_device *filter_dev, + int idx) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + struct rocker_fdb_tbl_entry *found; + struct hlist_node *tmp; + int bkt; + unsigned long lock_flags; + const unsigned char *addr; + u16 vid; + int err; + + spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); + hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { + if (found->key.lport != rocker_port->lport) + continue; + if (idx < cb->args[0]) + goto skip; + addr = found->key.addr; + vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id); + err = rocker_fdb_fill_info(skb, rocker_port, addr, vid, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNEIGH, NLM_F_MULTI); + if (err < 0) + break; +skip: + ++idx; + } + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); + return idx; +} + +static int rocker_port_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct nlattr *protinfo; + struct nlattr *afspec; + struct nlattr *attr; + u16 mode; + int err; + + protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), + IFLA_PROTINFO); + afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + if (afspec) { + attr = nla_find_nested(afspec, IFLA_BRIDGE_MODE); + if (attr) { + if (nla_len(attr) < sizeof(mode)) + return -EINVAL; + + mode = nla_get_u16(attr); + if (mode != BRIDGE_MODE_SWDEV) + return -EINVAL; + } + } + + if (protinfo) { + attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING); + if (attr) { + if (nla_len(attr) < sizeof(u8)) + return -EINVAL; + + if (nla_get_u8(attr)) + rocker_port->brport_flags |= BR_LEARNING; + else + rocker_port->brport_flags &= ~BR_LEARNING; + err = rocker_port_set_learning(rocker_port); + if (err) + return err; + } + attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC); + if (attr) { + if (nla_len(attr) < sizeof(u8)) + return -EINVAL; + + if (nla_get_u8(attr)) + rocker_port->brport_flags |= BR_LEARNING_SYNC; + else + 
rocker_port->brport_flags &= ~BR_LEARNING_SYNC; + } + } + + return 0; +} + +static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 filter_mask) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + u16 mode = BRIDGE_MODE_SWDEV; + u32 mask = BR_LEARNING | BR_LEARNING_SYNC; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, + rocker_port->brport_flags, mask); +} + +static int rocker_port_switch_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + + psid->id_len = sizeof(rocker->hw.id); + memcpy(&psid->id, &rocker->hw.id, psid->id_len); + return 0; +} + +static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_port_stp_update(rocker_port, state); +} + +static const struct net_device_ops rocker_port_netdev_ops = { + .ndo_open = rocker_port_open, + .ndo_stop = rocker_port_stop, + .ndo_start_xmit = rocker_port_xmit, + .ndo_set_mac_address = rocker_port_set_mac_address, + .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid, + .ndo_fdb_add = rocker_port_fdb_add, + .ndo_fdb_del = rocker_port_fdb_del, + .ndo_fdb_dump = rocker_port_fdb_dump, + .ndo_bridge_setlink = rocker_port_bridge_setlink, + .ndo_bridge_getlink = rocker_port_bridge_getlink, + .ndo_switch_parent_id_get = rocker_port_switch_parent_id_get, + .ndo_switch_port_stp_update = rocker_port_switch_port_stp_update, +}; + +/******************** + * ethtool interface + ********************/ + +static int rocker_port_get_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd); +} + +static int rocker_port_set_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd); +} + +static void rocker_port_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); +} + +static const struct ethtool_ops rocker_port_ethtool_ops = { + .get_settings = rocker_port_get_settings, + .set_settings = rocker_port_set_settings, + .get_drvinfo = rocker_port_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + +/***************** + * NAPI interface + *****************/ + +static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi) +{ + return container_of(napi, struct rocker_port, napi_tx); +} + +static int rocker_port_poll_tx(struct napi_struct *napi, int budget) +{ + struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + /* Cleanup tx descriptors */ + while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) { + err = rocker_desc_err(desc_info); + if (err && net_ratelimit()) + netdev_err(rocker_port->dev, "tx desc received with err %d\n", + err); + rocker_tx_desc_frags_unmap(rocker_port, desc_info); + dev_kfree_skb_any(rocker_desc_cookie_ptr_get(desc_info)); + credits++; + } + + if (credits && netif_queue_stopped(rocker_port->dev)) + netif_wake_queue(rocker_port->dev); 
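+ /* tx completions are drained in a single pass above (the NAPI
+ * budget is not consumed), so polling always finishes here and the
+ * reclaimed descriptors are credited back to the ring
+ */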
+ + napi_complete(napi); + rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits); + + return 0; +} + +static int rocker_port_rx_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; + struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); + size_t rx_len; + + if (!skb) + return -ENOENT; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); + if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) + return -EINVAL; + + rocker_dma_rx_ring_skb_unmap(rocker, attrs); + + rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]); + skb_put(skb, rx_len); + skb->protocol = eth_type_trans(skb, rocker_port->dev); + netif_receive_skb(skb); + + return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info); +} + +static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi) +{ + return container_of(napi, struct rocker_port, napi_rx); +} + +static int rocker_port_poll_rx(struct napi_struct *napi, int budget) +{ + struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + /* Process rx descriptors */ + while (credits < budget && + (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) { + err = rocker_desc_err(desc_info); + if (err) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "rx desc received with err %d\n", + err); + } else { + err = rocker_port_rx_proc(rocker, rocker_port, + desc_info); + if (err && net_ratelimit()) + netdev_err(rocker_port->dev, "rx processing failed with err %d\n", + err); + } + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info); + credits++; + } + + if (credits < budget) + napi_complete(napi); + + rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); + + return credits; +} + +/***************** + * PCI driver ops + *****************/ + +static void rocker_carrier_init(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); + bool link_up; + + link_up = link_status & (1 << rocker_port->lport); + if (link_up) + netif_carrier_on(rocker_port->dev); + else + netif_carrier_off(rocker_port->dev); +} + +static void rocker_remove_ports(struct rocker *rocker) +{ + struct rocker_port *rocker_port; + int i; + + for (i = 0; i < rocker->port_count; i++) { + rocker_port = rocker->ports[i]; + rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE); + unregister_netdev(rocker_port->dev); + } + kfree(rocker->ports); +} + +static void rocker_port_dev_addr_init(struct rocker *rocker, + struct rocker_port *rocker_port) +{ + struct pci_dev *pdev = rocker->pdev; + int err; + + err = rocker_cmd_get_port_settings_macaddr(rocker_port, + rocker_port->dev->dev_addr); + if (err) { + dev_warn(&pdev->dev, "failed to get mac address, using random\n"); + eth_hw_addr_random(rocker_port->dev); + } +} + +static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) +{ + struct pci_dev *pdev = rocker->pdev; + struct rocker_port *rocker_port; + struct net_device *dev; + int err; + + dev = alloc_etherdev(sizeof(struct rocker_port)); + if (!dev) + return -ENOMEM; + rocker_port = netdev_priv(dev); + rocker_port->dev = dev; + rocker_port->rocker = rocker; + rocker_port->port_number = port_number; + rocker_port->lport = port_number + 1; + rocker_port->brport_flags = BR_LEARNING | 
BR_LEARNING_SYNC; + + rocker_port_dev_addr_init(rocker, rocker_port); + dev->netdev_ops = &rocker_port_netdev_ops; + dev->ethtool_ops = &rocker_port_ethtool_ops; + netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, + NAPI_POLL_WEIGHT); + netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, + NAPI_POLL_WEIGHT); + rocker_carrier_init(rocker_port); + + dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "register_netdev failed\n"); + goto err_register_netdev; + } + rocker->ports[port_number] = rocker_port; + + rocker_port_set_learning(rocker_port); + + rocker_port->internal_vlan_id = + rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex); + err = rocker_port_ig_tbl(rocker_port, 0); + if (err) { + dev_err(&pdev->dev, "install ig port table failed\n"); + goto err_port_ig_tbl; + } + + return 0; + +err_port_ig_tbl: + unregister_netdev(dev); +err_register_netdev: + free_netdev(dev); + return err; +} + +static int rocker_probe_ports(struct rocker *rocker) +{ + int i; + size_t alloc_size; + int err; + + alloc_size = sizeof(struct rocker_port *) * rocker->port_count; + rocker->ports = kmalloc(alloc_size, GFP_KERNEL); + if (!rocker->ports) + return -ENOMEM; + for (i = 0; i < rocker->port_count; i++) { + err = rocker_probe_port(rocker, i); + if (err) + goto remove_ports; + } + return 0; + +remove_ports: + rocker_remove_ports(rocker); + return err; +} + +static int rocker_msix_init(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + int msix_entries; + int i; + int err; + + msix_entries = pci_msix_vec_count(pdev); + if (msix_entries < 0) + return msix_entries; + + if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count)) + return -EINVAL; + + rocker->msix_entries = kmalloc_array(msix_entries, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!rocker->msix_entries) + return -ENOMEM; + + for (i = 0; i < msix_entries; i++) + rocker->msix_entries[i].entry = i; + + err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries); + if (err < 0) + goto err_enable_msix; + + return 0; + +err_enable_msix: + kfree(rocker->msix_entries); + return err; +} + +static void rocker_msix_fini(struct rocker *rocker) +{ + pci_disable_msix(rocker->pdev); + kfree(rocker->msix_entries); +} + +static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct rocker *rocker; + int err; + + rocker = kzalloc(sizeof(*rocker), GFP_KERNEL); + if (!rocker) + return -ENOMEM; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "pci_enable_device failed\n"); + goto err_pci_enable_device; + } + + err = pci_request_regions(pdev, rocker_driver_name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed\n"); + goto err_pci_request_regions; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "pci_set_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } + + if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { + dev_err(&pdev->dev, "invalid PCI region size\n"); + err = -EINVAL; + goto err_pci_resource_len_check; + } + + rocker->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!rocker->hw_addr) { + dev_err(&pdev->dev, "ioremap failed\n"); + err = -EIO; + goto err_ioremap; + } + pci_set_master(pdev); + +
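	/*
	 * Editor's note (illustrative): the 64-then-32-bit DMA mask
	 * fallback earlier in this probe can also be written with the
	 * combined helper, assuming the same struct pci_dev *pdev:
	 *
	 *	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	 *	if (err)
	 *		err = dma_set_mask_and_coherent(&pdev->dev,
	 *						DMA_BIT_MASK(32));
	 *	if (err)
	 *		goto err_pci_set_dma_mask;
	 */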
rocker->pdev = pdev; + pci_set_drvdata(pdev, rocker); + + rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT); + + err = rocker_msix_init(rocker); + if (err) { + dev_err(&pdev->dev, "MSI-X init failed\n"); + goto err_msix_init; + } + + err = rocker_basic_hw_test(rocker); + if (err) { + dev_err(&pdev->dev, "basic hw test failed\n"); + goto err_basic_hw_test; + } + + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); + + err = rocker_dma_rings_init(rocker); + if (err) + goto err_dma_rings_init; + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), + rocker_cmd_irq_handler, 0, + rocker_driver_name, rocker); + if (err) { + dev_err(&pdev->dev, "cannot assign cmd irq\n"); + goto err_request_cmd_irq; + } + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), + rocker_event_irq_handler, 0, + rocker_driver_name, rocker); + if (err) { + dev_err(&pdev->dev, "cannot assign event irq\n"); + goto err_request_event_irq; + } + + rocker->hw.id = rocker_read64(rocker, SWITCH_ID); + + err = rocker_init_tbls(rocker); + if (err) { + dev_err(&pdev->dev, "cannot init rocker tables\n"); + goto err_init_tbls; + } + + err = rocker_probe_ports(rocker); + if (err) { + dev_err(&pdev->dev, "failed to probe ports\n"); + goto err_probe_ports; + } + + dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id); + + return 0; + +err_probe_ports: + rocker_free_tbls(rocker); +err_init_tbls: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); +err_request_event_irq: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); +err_request_cmd_irq: + rocker_dma_rings_fini(rocker); +err_dma_rings_init: +err_basic_hw_test: + rocker_msix_fini(rocker); +err_msix_init: + iounmap(rocker->hw_addr); +err_ioremap: +err_pci_resource_len_check: +err_pci_set_dma_mask: + pci_release_regions(pdev); +err_pci_request_regions: + pci_disable_device(pdev); +err_pci_enable_device: + kfree(rocker); + return err; +} + +static void rocker_remove(struct pci_dev *pdev) +{ + struct rocker *rocker = pci_get_drvdata(pdev); + + rocker_free_tbls(rocker); + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); + rocker_remove_ports(rocker); + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); + rocker_dma_rings_fini(rocker); + rocker_msix_fini(rocker); + iounmap(rocker->hw_addr); + pci_release_regions(rocker->pdev); + pci_disable_device(rocker->pdev); + kfree(rocker); +} + +static struct pci_driver rocker_pci_driver = { + .name = rocker_driver_name, + .id_table = rocker_pci_id_table, + .probe = rocker_probe, + .remove = rocker_remove, +}; + +/************************************ + * Net device notifier event handler + ************************************/ + +static bool rocker_port_dev_check(struct net_device *dev) +{ + return dev->netdev_ops == &rocker_port_netdev_ops; +} + +static int rocker_port_bridge_join(struct rocker_port *rocker_port, + struct net_device *bridge) +{ + int err; + + rocker_port_internal_vlan_id_put(rocker_port, + rocker_port->dev->ifindex); + + rocker_port->bridge_dev = bridge; + + /* Use bridge internal VLAN ID for untagged pkts */ + err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0); + if (err) + return err; + rocker_port->internal_vlan_id = + rocker_port_internal_vlan_id_get(rocker_port, + bridge->ifindex); + err = rocker_port_vlan(rocker_port, 0, 0); + + return err; +} + +static int rocker_port_bridge_leave(struct rocker_port *rocker_port) +{ + int 
err; + + rocker_port_internal_vlan_id_put(rocker_port, + rocker_port->bridge_dev->ifindex); + + rocker_port->bridge_dev = NULL; + + /* Use port internal VLAN ID for untagged pkts */ + err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0); + if (err) + return err; + rocker_port->internal_vlan_id = + rocker_port_internal_vlan_id_get(rocker_port, + rocker_port->dev->ifindex); + err = rocker_port_vlan(rocker_port, 0, 0); + + return err; +} + +static int rocker_port_master_changed(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct net_device *master = netdev_master_upper_dev_get(dev); + int err = 0; + + if (master && master->rtnl_link_ops && + !strcmp(master->rtnl_link_ops->kind, "bridge")) + err = rocker_port_bridge_join(rocker_port, master); + else + err = rocker_port_bridge_leave(rocker_port); + + return err; +} + +static int rocker_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev; + int err; + + switch (event) { + case NETDEV_CHANGEUPPER: + dev = netdev_notifier_info_to_dev(ptr); + if (!rocker_port_dev_check(dev)) + return NOTIFY_DONE; + err = rocker_port_master_changed(dev); + if (err) + netdev_warn(dev, + "failed to reflect master change (err %d)\n", + err); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block rocker_netdevice_nb __read_mostly = { + .notifier_call = rocker_netdevice_event, +}; + +/*********************** + * Module init and exit + ***********************/ + +static int __init rocker_module_init(void) +{ + int err; + + register_netdevice_notifier(&rocker_netdevice_nb); + err = pci_register_driver(&rocker_pci_driver); + if (err) + goto err_pci_register_driver; + return 0; + +err_pci_register_driver: + unregister_netdevice_notifier(&rocker_netdevice_nb); + return err; +} + +static void __exit rocker_module_exit(void) +{ + unregister_netdevice_notifier(&rocker_netdevice_nb); + pci_unregister_driver(&rocker_pci_driver); +} + +module_init(rocker_module_init); +module_exit(rocker_module_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); +MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>"); +MODULE_DESCRIPTION("Rocker switch device driver"); +MODULE_DEVICE_TABLE(pci, rocker_pci_id_table); diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h new file mode 100644 index 000000000000..8d2865ba634c --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker.h @@ -0,0 +1,428 @@ +/* + * drivers/net/ethernet/rocker/rocker.h - Rocker switch device driver + * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> + * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _ROCKER_H +#define _ROCKER_H + +#include <linux/types.h> + +#define PCI_VENDOR_ID_REDHAT 0x1b36 +#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 + +#define ROCKER_PCI_BAR0_SIZE 0x2000 + +/* MSI-X vectors */ +enum { + ROCKER_MSIX_VEC_CMD, + ROCKER_MSIX_VEC_EVENT, + ROCKER_MSIX_VEC_TEST, + ROCKER_MSIX_VEC_RESERVED0, + __ROCKER_MSIX_VEC_TX, + __ROCKER_MSIX_VEC_RX, +#define ROCKER_MSIX_VEC_TX(port) \ + (__ROCKER_MSIX_VEC_TX + ((port) * 2)) +#define ROCKER_MSIX_VEC_RX(port) \ + (__ROCKER_MSIX_VEC_RX + ((port) * 2)) +#define ROCKER_MSIX_VEC_COUNT(portcnt) \ + (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1) +}; + +/* Rocker bogus registers */ +#define ROCKER_BOGUS_REG0 0x0000 +#define ROCKER_BOGUS_REG1 0x0004 +#define ROCKER_BOGUS_REG2 0x0008 +#define ROCKER_BOGUS_REG3 0x000c + +/* Rocker test registers */ +#define ROCKER_TEST_REG 0x0010 +#define ROCKER_TEST_REG64 0x0018 /* 8-byte */ +#define ROCKER_TEST_IRQ 0x0020 +#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */ +#define ROCKER_TEST_DMA_SIZE 0x0030 +#define ROCKER_TEST_DMA_CTRL 0x0034 + +/* Rocker test register ctrl */ +#define ROCKER_TEST_DMA_CTRL_CLEAR (1 << 0) +#define ROCKER_TEST_DMA_CTRL_FILL (1 << 1) +#define ROCKER_TEST_DMA_CTRL_INVERT (1 << 2) + +/* Rocker DMA ring register offsets */ +#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */ +#define ROCKER_DMA_DESC_SIZE(x) (0x1008 + (x) * 32) +#define ROCKER_DMA_DESC_HEAD(x) (0x100c + (x) * 32) +#define ROCKER_DMA_DESC_TAIL(x) (0x1010 + (x) * 32) +#define ROCKER_DMA_DESC_CTRL(x) (0x1014 + (x) * 32) +#define ROCKER_DMA_DESC_CREDITS(x) (0x1018 + (x) * 32) +#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32) + +/* Rocker dma ctrl register bits */ +#define ROCKER_DMA_DESC_CTRL_RESET (1 << 0) + +/* Rocker DMA ring types */ +enum rocker_dma_type { + ROCKER_DMA_CMD, + ROCKER_DMA_EVENT, + __ROCKER_DMA_TX, + __ROCKER_DMA_RX, +#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2) +#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2) +}; + +/* Rocker DMA ring size limits and default sizes */ +#define ROCKER_DMA_SIZE_MIN 2ul +#define ROCKER_DMA_SIZE_MAX 65536ul +#define ROCKER_DMA_CMD_DEFAULT_SIZE 32ul +#define ROCKER_DMA_EVENT_DEFAULT_SIZE 32ul +#define ROCKER_DMA_TX_DEFAULT_SIZE 64ul +#define ROCKER_DMA_TX_DESC_SIZE 256 +#define ROCKER_DMA_RX_DEFAULT_SIZE 64ul +#define ROCKER_DMA_RX_DESC_SIZE 256 + +/* Rocker DMA descriptor struct */ +struct rocker_desc { + u64 buf_addr; + u64 cookie; + u16 buf_size; + u16 tlv_size; + u16 resv[5]; + u16 comp_err; +}; + +#define ROCKER_DMA_DESC_COMP_ERR_GEN (1 << 15) + +/* Rocker DMA TLV struct */ +struct rocker_tlv { + u32 type; + u16 len; +}; + +/* TLVs */ +enum { + ROCKER_TLV_CMD_UNSPEC, + ROCKER_TLV_CMD_TYPE, /* u16 */ + ROCKER_TLV_CMD_INFO, /* nest */ + + __ROCKER_TLV_CMD_MAX, + ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_TYPE_UNSPEC, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS, + + __ROCKER_TLV_CMD_TYPE_MAX, + ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, + ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ + 
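	/*
	 * Editor's note (illustrative): struct rocker_tlv above mirrors a
	 * netlink attribute -- a (type, len) header followed by payload,
	 * with each TLV padded to an 8-byte boundary in the descriptor
	 * buffer (an assumption based on the rocker_tlv_* parse/get
	 * helpers used in rocker.c).  A u16 attribute such as
	 * ROCKER_TLV_RX_FRAG_LEN thus occupies 8 (header) + 2 (payload)
	 * bytes, rounded up to 16.
	 */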
ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */ + ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ + + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + ROCKER_TLV_CMD_PORT_SETTINGS_MAX = + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1, +}; + +enum rocker_port_mode { + ROCKER_PORT_MODE_OF_DPA, +}; + +enum { + ROCKER_TLV_EVENT_UNSPEC, + ROCKER_TLV_EVENT_TYPE, /* u16 */ + ROCKER_TLV_EVENT_INFO, /* nest */ + + __ROCKER_TLV_EVENT_MAX, + ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_TYPE_UNSPEC, + ROCKER_TLV_EVENT_TYPE_LINK_CHANGED, + ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN, + + __ROCKER_TLV_EVENT_TYPE_MAX, + ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, + ROCKER_TLV_EVENT_LINK_CHANGED_LPORT, /* u32 */ + ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ + + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, + ROCKER_TLV_EVENT_LINK_CHANGED_MAX = + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, + ROCKER_TLV_EVENT_MAC_VLAN_LPORT, /* u32 */ + ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ + ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ + + __ROCKER_TLV_EVENT_MAC_VLAN_MAX, + ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1, +}; + +enum { + ROCKER_TLV_RX_UNSPEC, + ROCKER_TLV_RX_FLAGS, /* u16, see ROCKER_RX_FLAGS_ */ + ROCKER_TLV_RX_CSUM, /* u16 */ + ROCKER_TLV_RX_FRAG_ADDR, /* u64 */ + ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */ + ROCKER_TLV_RX_FRAG_LEN, /* u16 */ + + __ROCKER_TLV_RX_MAX, + ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1, +}; + +#define ROCKER_RX_FLAGS_IPV4 (1 << 0) +#define ROCKER_RX_FLAGS_IPV6 (1 << 1) +#define ROCKER_RX_FLAGS_CSUM_CALC (1 << 2) +#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD (1 << 3) +#define ROCKER_RX_FLAGS_IP_FRAG (1 << 4) +#define ROCKER_RX_FLAGS_TCP (1 << 5) +#define ROCKER_RX_FLAGS_UDP (1 << 6) +#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD (1 << 7) + +enum { + ROCKER_TLV_TX_UNSPEC, + ROCKER_TLV_TX_OFFLOAD, /* u8, see ROCKER_TX_OFFLOAD_ */ + ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */ + ROCKER_TLV_TX_TSO_MSS, /* u16 */ + ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */ + ROCKER_TLV_TX_FRAGS, /* array */ + + __ROCKER_TLV_TX_MAX, + ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1, +}; + +#define ROCKER_TX_OFFLOAD_NONE 0 +#define ROCKER_TX_OFFLOAD_IP_CSUM 1 +#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2 +#define ROCKER_TX_OFFLOAD_L3_CSUM 3 +#define ROCKER_TX_OFFLOAD_TSO 4 + +#define ROCKER_TX_FRAGS_MAX 16 + +enum { + ROCKER_TLV_TX_FRAG_UNSPEC, + ROCKER_TLV_TX_FRAG, /* nest */ + + __ROCKER_TLV_TX_FRAG_MAX, + ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1, +}; + +enum { + ROCKER_TLV_TX_FRAG_ATTR_UNSPEC, + ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */ + ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */ + + __ROCKER_TLV_TX_FRAG_ATTR_MAX, + ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1, +}; + +/* cmd info nested for OF-DPA msgs */ +enum { + ROCKER_TLV_OF_DPA_UNSPEC, + ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */ + ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ + ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ + ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ + ROCKER_TLV_OF_DPA_IN_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_IN_LPORT_MASK, /* u32 */ + ROCKER_TLV_OF_DPA_OUT_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ + 
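	/*
	 * Editor's note (illustrative): an OF-DPA command message is a
	 * ROCKER_TLV_CMD_TYPE attribute naming the operation plus a
	 * ROCKER_TLV_CMD_INFO nest carrying attributes from this enum,
	 * roughly:
	 *
	 *	[CMD_TYPE u16 = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD]
	 *	[CMD_INFO nest]
	 *		[OF_DPA_TABLE_ID u16]
	 *		[OF_DPA_PRIORITY u32]
	 *		[OF_DPA_IN_LPORT u32]
	 *		...
	 */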
ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_IDS, /* u32 array */ + ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ + ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ + ROCKER_TLV_OF_DPA_TUN_LOG_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ + ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ + ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */ + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */ + ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */ + ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */ + ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */ + ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */ + + __ROCKER_TLV_OF_DPA_MAX, + ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1, +}; + +/* OF-DPA table IDs */ + +enum rocker_of_dpa_table_id { + ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0, + ROCKER_OF_DPA_TABLE_ID_VLAN = 10, + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20, + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30, + ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40, + ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50, + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60, +}; + +/* OF-DPA flow stats */ +enum { + ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC, + ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */ + + __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX, + ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1, +}; + +/* OF-DPA group types */ +enum rocker_of_dpa_group_type { + ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0, + ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE, + ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD, + ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE, + ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP, + ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY, +}; + +/* OF-DPA group L2 overlay types */ +enum rocker_of_dpa_overlay_type { + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0, + 
ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST, +}; + +/* OF-DPA group ID encoding */ +#define ROCKER_GROUP_TYPE_SHIFT 28 +#define ROCKER_GROUP_TYPE_MASK 0xf0000000 +#define ROCKER_GROUP_VLAN_SHIFT 16 +#define ROCKER_GROUP_VLAN_MASK 0x0fff0000 +#define ROCKER_GROUP_PORT_SHIFT 0 +#define ROCKER_GROUP_PORT_MASK 0x0000ffff +#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12 +#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000 +#define ROCKER_GROUP_SUBTYPE_SHIFT 10 +#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00 +#define ROCKER_GROUP_INDEX_SHIFT 0 +#define ROCKER_GROUP_INDEX_MASK 0x0000ffff +#define ROCKER_GROUP_INDEX_LONG_SHIFT 0 +#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff + +#define ROCKER_GROUP_TYPE_GET(group_id) \ + (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT) +#define ROCKER_GROUP_TYPE_SET(type) \ + (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK) +#define ROCKER_GROUP_VLAN_GET(group_id) \ + (((group_id) & ROCKER_GROUP_VLAN_MASK) >> ROCKER_GROUP_VLAN_SHIFT) +#define ROCKER_GROUP_VLAN_SET(vlan_id) \ + (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK) +#define ROCKER_GROUP_PORT_GET(group_id) \ + (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT) +#define ROCKER_GROUP_PORT_SET(port) \ + (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK) +#define ROCKER_GROUP_INDEX_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT) +#define ROCKER_GROUP_INDEX_SET(index) \ + (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK) +#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \ + ROCKER_GROUP_INDEX_LONG_SHIFT) +#define ROCKER_GROUP_INDEX_LONG_SET(index) \ + (((index) << ROCKER_GROUP_INDEX_LONG_SHIFT) & \ + ROCKER_GROUP_INDEX_LONG_MASK) + +#define ROCKER_GROUP_NONE 0 +#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port)) +#define ROCKER_GROUP_L2_REWRITE(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) +#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L3_UNICAST(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) + +/* Rocker general purpose registers */ +#define ROCKER_CONTROL 0x0300 +#define ROCKER_PORT_PHYS_COUNT 0x0304 +#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */ +#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */ +#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */ + +/* Rocker control bits */ +#define ROCKER_CONTROL_RESET (1 << 0) + +#endif diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index b147d469a799..7fd6e275d1c2 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -90,9 +90,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev) /* Get memory resource */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -
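/* Editor's sketch (illustrative, not part of the diff): the group-id
 * macros in rocker.h above pack type/VLAN/port bitfields into a u32.
 * For VLAN 100 on port 3:
 */
u32 gid = ROCKER_GROUP_L2_INTERFACE(htons(100), 3);
/* -> type 0 (L2 interface) in bits 31:28, VLAN 100 in bits 27:16 and
 * port 3 in bits 15:0, i.e. gid == 0x00640003.
 */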
if (!res) - goto err_out; - addr = devm_ioremap_resource(dev, res); if (IS_ERR(addr)) return PTR_ERR(addr); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 002d4cdc319f..fbb6cfa0f5f1 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -180,7 +180,8 @@ static int efx_ef10_probe(struct efx_nic *efx) EFX_MAX_CHANNELS, resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); - BUG_ON(efx->max_channels == 0); + if (WARN_ON(efx->max_channels == 0)) + return -EIO; nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); if (!nic_data) @@ -3688,6 +3689,11 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .ptp_write_host_time = efx_ef10_ptp_write_host_time, .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, + .sriov_init = efx_ef10_sriov_init, + .sriov_fini = efx_ef10_sriov_fini, + .sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed, + .sriov_wanted = efx_ef10_sriov_wanted, + .sriov_reset = efx_ef10_sriov_reset, .revision = EFX_REV_HUNT_A0, .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index b2cc590dd1dd..238482495e81 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1314,7 +1314,7 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) /* If RSS is requested for the PF *and* VFs then we can't write RSS * table entries that are inaccessible to VFs */ - if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 && + if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 && count > efx_vf_size(efx)) { netif_warn(efx, probe, efx->net_dev, "Reducing number of RSS channels from %u to %u for " @@ -1426,7 +1426,9 @@ static int efx_probe_interrupts(struct efx_nic *efx) } /* RSS might be usable on VFs even if it is disabled on the PF */ - efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ? + + efx->rss_spread = ((efx->n_rx_channels > 1 || + !efx->type->sriov_wanted(efx)) ? 
efx->n_rx_channels : efx_vf_size(efx)); return 0; @@ -1614,7 +1616,7 @@ static int efx_probe_nic(struct efx_nic *efx) goto fail2; if (efx->n_channels > 1) - get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); + netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) efx->rx_indir_table[i] = ethtool_rxfh_indir_default(i, efx->rss_spread); @@ -2166,7 +2168,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) } ether_addr_copy(net_dev->dev_addr, new_addr); - efx_sriov_mac_address_changed(efx); + efx->type->sriov_mac_address_changed(efx); /* Reconfigure the MAC */ mutex_lock(&efx->mac_lock); @@ -2210,10 +2212,10 @@ static const struct net_device_ops efx_farch_netdev_ops = { .ndo_set_rx_mode = efx_set_rx_mode, .ndo_set_features = efx_set_features, #ifdef CONFIG_SFC_SRIOV - .ndo_set_vf_mac = efx_sriov_set_vf_mac, - .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, - .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, - .ndo_get_vf_config = efx_sriov_get_vf_config, + .ndo_set_vf_mac = efx_siena_sriov_set_vf_mac, + .ndo_set_vf_vlan = efx_siena_sriov_set_vf_vlan, + .ndo_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk, + .ndo_get_vf_config = efx_siena_sriov_get_vf_config, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = efx_netpoll, @@ -2433,7 +2435,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) if (rc) goto fail; efx_restore_filters(efx); - efx_sriov_reset(efx); + efx->type->sriov_reset(efx); mutex_unlock(&efx->mac_lock); @@ -2826,7 +2828,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev) efx_disable_interrupts(efx); rtnl_unlock(); - efx_sriov_fini(efx); + efx->type->sriov_fini(efx); efx_unregister_netdev(efx); efx_mtd_remove(efx); @@ -3023,7 +3025,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev, if (rc) goto fail4; - rc = efx_sriov_init(efx); + rc = efx->type->sriov_init(efx); if (rc) netif_err(efx, probe, efx->net_dev, "SR-IOV can't be enabled rc %d\n", rc); diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index cad258a78708..4835bc0d0de8 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -1086,19 +1086,29 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) 0 : ARRAY_SIZE(efx->rx_indir_table)); } -static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key) +static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, + u8 *hfunc) { struct efx_nic *efx = netdev_priv(net_dev); - memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table)); + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (indir) + memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table)); return 0; } -static int efx_ethtool_set_rxfh(struct net_device *net_dev, - const u32 *indir, const u8 *key) +static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, + const u8 *key, const u8 hfunc) { struct efx_nic *efx = netdev_priv(net_dev); + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table)); efx->type->rx_push_rss_config(efx); return 0; diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 157037546d30..f166c8ef38a3 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ 
-2766,6 +2766,11 @@ const struct efx_nic_type falcon_a1_nic_type = { .mtd_write = falcon_mtd_write, .mtd_sync = falcon_mtd_sync, #endif + .sriov_init = efx_falcon_sriov_init, + .sriov_fini = efx_falcon_sriov_fini, + .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed, + .sriov_wanted = efx_falcon_sriov_wanted, + .sriov_reset = efx_falcon_sriov_reset, .revision = EFX_REV_FALCON_A1, .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER, @@ -2862,6 +2867,11 @@ const struct efx_nic_type falcon_b0_nic_type = { .mtd_write = falcon_mtd_write, .mtd_sync = falcon_mtd_sync, #endif + .sriov_init = efx_falcon_sriov_init, + .sriov_fini = efx_falcon_sriov_fini, + .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed, + .sriov_wanted = efx_falcon_sriov_wanted, + .sriov_reset = efx_falcon_sriov_reset, .revision = EFX_REV_FALCON_B0, .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 6859437b59fb..75975328e020 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -226,6 +226,9 @@ static int efx_alloc_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer, unsigned int len) { +#ifdef CONFIG_SFC_SRIOV + struct siena_nic_data *nic_data = efx->nic_data; +#endif len = ALIGN(len, EFX_BUF_SIZE); if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) @@ -237,8 +240,8 @@ static int efx_alloc_special_buffer(struct efx_nic *efx, buffer->index = efx->next_buffer_table; efx->next_buffer_table += buffer->entries; #ifdef CONFIG_SFC_SRIOV - BUG_ON(efx_sriov_enabled(efx) && - efx->vf_buftbl_base < efx->next_buffer_table); + BUG_ON(efx_siena_sriov_enabled(efx) && + nic_data->vf_buftbl_base < efx->next_buffer_table); #endif netif_dbg(efx, probe, efx->net_dev, @@ -667,7 +670,7 @@ static int efx_farch_do_flush(struct efx_nic *efx) * the firmware (though we will still have to poll for * completion). If that fails, fall back to the old scheme. 
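 *
 * Editor's note (illustrative): the resulting control flow below is
 *
 *	if (efx_siena_sriov_enabled(efx)) {
 *		rc = efx_mcdi_flush_rxqs(efx);
 *		if (!rc)
 *			goto wait;	(firmware handled the flush)
 *	}
 *	(otherwise fall through and poke each queue's flush register
 *	 directly)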
*/ - if (efx_sriov_enabled(efx)) { + if (efx_siena_sriov_enabled(efx)) { rc = efx_mcdi_flush_rxqs(efx); if (!rc) goto wait; @@ -1195,13 +1198,13 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", channel->channel, ev_sub_data); efx_farch_handle_tx_flush_done(efx, event); - efx_sriov_tx_flush_done(efx, event); + efx_siena_sriov_tx_flush_done(efx, event); break; case FSE_AZ_RX_DESCQ_FLS_DONE_EV: netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", channel->channel, ev_sub_data); efx_farch_handle_rx_flush_done(efx, event); - efx_sriov_rx_flush_done(efx, event); + efx_siena_sriov_rx_flush_done(efx, event); break; case FSE_AZ_EVQ_INIT_DONE_EV: netif_dbg(efx, hw, @@ -1240,7 +1243,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) ev_sub_data); efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); } else - efx_sriov_desc_fetch_err(efx, ev_sub_data); + efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); break; case FSE_BZ_TX_DSC_ERROR_EV: if (ev_sub_data < EFX_VI_BASE) { @@ -1250,7 +1253,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) ev_sub_data); efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); } else - efx_sriov_desc_fetch_err(efx, ev_sub_data); + efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); break; default: netif_vdbg(efx, hw, @@ -1315,7 +1318,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget) efx_farch_handle_driver_event(channel, &event); break; case FSE_CZ_EV_CODE_USER_EV: - efx_sriov_event(channel, &event); + efx_siena_sriov_event(channel, &event); break; case FSE_CZ_EV_CODE_MCDI_EV: efx_mcdi_process_event(channel, &event); @@ -1668,6 +1671,10 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) { unsigned vi_count, buftbl_min; +#ifdef CONFIG_SFC_SRIOV + struct siena_nic_data *nic_data = efx->nic_data; +#endif + /* Account for the buffer table entries backing the datapath channels * and the descriptor caches for those channels. */ @@ -1678,10 +1685,10 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); #ifdef CONFIG_SFC_SRIOV - if (efx_sriov_wanted(efx)) { + if (efx->type->sriov_wanted(efx)) { unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; - efx->vf_buftbl_base = buftbl_min; + nic_data->vf_buftbl_base = buftbl_min; vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; vi_count = max(vi_count, EFX_VI_BASE); diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 5239cf9bdc56..d37928f01949 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -1035,7 +1035,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, /* MAC stats are gathered lazily. We can ignore this. */ break; case MCDI_EVENT_CODE_FLR: - efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); + efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); break; case MCDI_EVENT_CODE_PTP_RX: case MCDI_EVENT_CODE_PTP_FAULT: diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 9ede32064685..325dd94bca46 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -913,13 +913,6 @@ struct vfdi_status; * @vf_count: Number of VFs intended to be enabled. * @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF. - * @vf_buftbl_base: The zeroth buffer table index used to back VF queues. - * @vfdi_status: Common VFDI status page to be dmad to VF address space. - * @local_addr_list: List of local addresses. Protected by %local_lock. - * @local_page_list: List of DMA addressable pages used to broadcast - * %local_addr_list. Protected by %local_lock. - * @local_lock: Mutex protecting %local_addr_list and %local_page_list. - * @peer_work: Work item to broadcast peer addresses to VMs. * @ptp_data: PTP state data * @vpd_sn: Serial number read from VPD * @monitor_work: Hardware monitor workitem @@ -1060,17 +1053,10 @@ struct efx_nic { wait_queue_head_t flush_wq; #ifdef CONFIG_SFC_SRIOV - struct efx_channel *vfdi_channel; struct efx_vf *vf; unsigned vf_count; unsigned vf_init_count; unsigned vi_scale; - unsigned vf_buftbl_base; - struct efx_buffer vfdi_status; - struct list_head local_addr_list; - struct list_head local_page_list; - struct mutex local_lock; - struct work_struct peer_work; #endif struct efx_ptp_data *ptp_data; @@ -1344,6 +1330,11 @@ struct efx_nic_type { int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp); int (*ptp_set_ts_config)(struct efx_nic *efx, struct hwtstamp_config *init); + int (*sriov_init)(struct efx_nic *efx); + void (*sriov_fini)(struct efx_nic *efx); + void (*sriov_mac_address_changed)(struct efx_nic *efx); + bool (*sriov_wanted)(struct efx_nic *efx); + void (*sriov_reset)(struct efx_nic *efx); int revision; unsigned int txd_ptr_tbl_base; diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index f77cce034ad4..93d10cbbd1cf 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -378,12 +378,30 @@ enum { /** * struct siena_nic_data - Siena NIC state + * @efx: Pointer back to main interface structure * @wol_filter_id: Wake-on-LAN packet filter id * @stats: Hardware statistics + * @vf_buftbl_base: The zeroth buffer table index used to back VF queues. + * @vfdi_status: Common VFDI status page to be dmad to VF address space. + * @local_addr_list: List of local addresses. Protected by %local_lock. + * @local_page_list: List of DMA addressable pages used to broadcast + * %local_addr_list. Protected by %local_lock. + * @local_lock: Mutex protecting %local_addr_list and %local_page_list. + * @peer_work: Work item to broadcast peer addresses to VMs. 
*/ struct siena_nic_data { + struct efx_nic *efx; int wol_filter_id; u64 stats[SIENA_STAT_COUNT]; +#ifdef CONFIG_SFC_SRIOV + struct efx_channel *vfdi_channel; + unsigned vf_buftbl_base; + struct efx_buffer vfdi_status; + struct list_head local_addr_list; + struct list_head local_page_list; + struct mutex local_lock; + struct work_struct peer_work; +#endif }; enum { @@ -522,62 +540,88 @@ struct efx_ef10_nic_data { #ifdef CONFIG_SFC_SRIOV -static inline bool efx_sriov_wanted(struct efx_nic *efx) +/* SIENA */ +static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return efx->vf_count != 0; } -static inline bool efx_sriov_enabled(struct efx_nic *efx) + +static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return efx->vf_init_count != 0; } + static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 1 << efx->vi_scale; } int efx_init_sriov(void); -void efx_sriov_probe(struct efx_nic *efx); -int efx_sriov_init(struct efx_nic *efx); -void efx_sriov_mac_address_changed(struct efx_nic *efx); -void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); -void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); -void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event); -void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); -void efx_sriov_flr(struct efx_nic *efx, unsigned flr); -void efx_sriov_reset(struct efx_nic *efx); -void efx_sriov_fini(struct efx_nic *efx); +void efx_siena_sriov_probe(struct efx_nic *efx); +int efx_siena_sriov_init(struct efx_nic *efx); +void efx_siena_sriov_mac_address_changed(struct efx_nic *efx); +void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); +void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); +void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event); +void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); +void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr); +void efx_siena_sriov_reset(struct efx_nic *efx); +void efx_siena_sriov_fini(struct efx_nic *efx); void efx_fini_sriov(void); +/* EF10 */ +static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; } +static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } +static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {} +static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {} +static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {} + #else -static inline bool efx_sriov_wanted(struct efx_nic *efx) { return false; } -static inline bool efx_sriov_enabled(struct efx_nic *efx) { return false; } +/* SIENA */ +static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; } +static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; } static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; } - static inline int efx_init_sriov(void) { return 0; } -static inline void efx_sriov_probe(struct efx_nic *efx) {} -static inline int efx_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } -static inline void efx_sriov_mac_address_changed(struct efx_nic *efx) {} -static inline void efx_sriov_tx_flush_done(struct efx_nic *efx, - efx_qword_t *event) {} -static inline void efx_sriov_rx_flush_done(struct efx_nic *efx, - efx_qword_t *event) {} -static inline void efx_sriov_event(struct efx_channel *channel, - efx_qword_t *event) {} -static inline void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) {} -static inline 
void efx_sriov_flr(struct efx_nic *efx, unsigned flr) {} -static inline void efx_sriov_reset(struct efx_nic *efx) {} -static inline void efx_sriov_fini(struct efx_nic *efx) {} +static inline void efx_siena_sriov_probe(struct efx_nic *efx) {} +static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } +static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {} +static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, + efx_qword_t *event) {} +static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, + efx_qword_t *event) {} +static inline void efx_siena_sriov_event(struct efx_channel *channel, + efx_qword_t *event) {} +static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, + unsigned dmaq) {} +static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {} +static inline void efx_siena_sriov_reset(struct efx_nic *efx) {} +static inline void efx_siena_sriov_fini(struct efx_nic *efx) {} static inline void efx_fini_sriov(void) {} +/* EF10 */ +static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; } +static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } +static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {} +static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {} +static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {} + #endif -int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac); -int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos); -int efx_sriov_get_vf_config(struct net_device *dev, int vf, - struct ifla_vf_info *ivf); -int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, - bool spoofchk); +/* FALCON */ +static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; } +static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } +static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {} +static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {} +static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {} + +int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac); +int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf, + u16 vlan, u8 qos); +int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf, + struct ifla_vf_info *ivf); +int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, + bool spoofchk); struct ethtool_ts_info; int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index ae696855f21a..3583f0208a6e 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -251,6 +251,7 @@ static int siena_probe_nic(struct efx_nic *efx) nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); if (!nic_data) return -ENOMEM; + nic_data->efx = efx; efx->nic_data = nic_data; if (efx_farch_fpga_ver(efx) != 0) { @@ -306,7 +307,7 @@ static int siena_probe_nic(struct efx_nic *efx) if (rc) goto fail5; - efx_sriov_probe(efx); + efx_siena_sriov_probe(efx); efx_ptp_defer_probe_with_channel(efx); return 0; @@ -996,6 +997,11 @@ const struct efx_nic_type siena_a0_nic_type = { #endif .ptp_write_host_time = siena_ptp_write_host_time, .ptp_set_ts_config = siena_ptp_set_ts_config, + .sriov_init = efx_siena_sriov_init, + .sriov_fini = efx_siena_sriov_fini, + .sriov_mac_address_changed = efx_siena_sriov_mac_address_changed, + 
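	/*
	 * Editor's note (illustrative): with these per-generation methods
	 * in struct efx_nic_type, core code dispatches unconditionally,
	 * e.g.
	 *
	 *	rc = efx->type->sriov_init(efx);
	 *
	 * and generations without SR-IOV support (Falcon, and EF10 at
	 * this point) plug in the stub ops above, which return
	 * -EOPNOTSUPP or do nothing.
	 */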
.sriov_wanted = efx_siena_sriov_wanted, + .sriov_reset = efx_siena_sriov_reset, .revision = EFX_REV_SIENA_A0, .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 43d2e64546ed..a8bbbad68a88 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -66,7 +66,7 @@ enum efx_vf_tx_filter_mode { * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr, * @peer_page_addrs and @peer_page_count from simultaneous * updates by the VM and consumption by - * efx_sriov_update_vf_addr() + * efx_siena_sriov_update_vf_addr() * @peer_page_addrs: Pointer to an array of guest pages for local addresses. * @peer_page_count: Number of entries in @peer_page_addrs. * @evq0_addrs: Array of guest pages backing evq0. @@ -194,8 +194,8 @@ static unsigned abs_index(struct efx_vf *vf, unsigned index) return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index; } -static int efx_sriov_cmd(struct efx_nic *efx, bool enable, - unsigned *vi_scale_out, unsigned *vf_total_out) +static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable, + unsigned *vi_scale_out, unsigned *vf_total_out) { MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN); @@ -227,18 +227,20 @@ static int efx_sriov_cmd(struct efx_nic *efx, bool enable, return 0; } -static void efx_sriov_usrev(struct efx_nic *efx, bool enabled) +static void efx_siena_sriov_usrev(struct efx_nic *efx, bool enabled) { + struct siena_nic_data *nic_data = efx->nic_data; efx_oword_t reg; EFX_POPULATE_OWORD_2(reg, FRF_CZ_USREV_DIS, enabled ? 0 : 1, - FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel); + FRF_CZ_DFLT_EVQ, nic_data->vfdi_channel->channel); efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG); } -static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req, - unsigned int count) +static int efx_siena_sriov_memcpy(struct efx_nic *efx, + struct efx_memcpy_req *req, + unsigned int count) { MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1); MCDI_DECLARE_STRUCT_PTR(record); @@ -297,7 +299,7 @@ out: /* The TX filter is entirely controlled by this driver, and is modified * underneath the feet of the VF */ -static void efx_sriov_reset_tx_filter(struct efx_vf *vf) +static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; struct efx_filter_spec filter; @@ -341,7 +343,7 @@ static void efx_sriov_reset_tx_filter(struct efx_vf *vf) } /* The RX filter is managed here on behalf of the VF driver */ -static void efx_sriov_reset_rx_filter(struct efx_vf *vf) +static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; struct efx_filter_spec filter; @@ -380,22 +382,26 @@ static void efx_sriov_reset_rx_filter(struct efx_vf *vf) } } -static void __efx_sriov_update_vf_addr(struct efx_vf *vf) +static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf) { - efx_sriov_reset_tx_filter(vf); - efx_sriov_reset_rx_filter(vf); - queue_work(vfdi_workqueue, &vf->efx->peer_work); + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + + efx_siena_sriov_reset_tx_filter(vf); + efx_siena_sriov_reset_rx_filter(vf); + queue_work(vfdi_workqueue, &nic_data->peer_work); } /* Push the peer list to this VF.
The caller must hold status_lock to interlock * with VFDI requests, and they must be serialised against manipulation of * local_page_list, either by acquiring local_lock or by running from - * efx_sriov_peer_work() + * efx_siena_sriov_peer_work() */ -static void __efx_sriov_push_vf_status(struct efx_vf *vf) +static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; - struct vfdi_status *status = efx->vfdi_status.addr; + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_status *status = nic_data->vfdi_status.addr; struct efx_memcpy_req copy[4]; struct efx_endpoint_page *epp; unsigned int pos, count; @@ -421,7 +427,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) */ data_offset = offsetof(struct vfdi_status, version); copy[1].from_rid = efx->pci_dev->devfn; - copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset; + copy[1].from_addr = nic_data->vfdi_status.dma_addr + data_offset; copy[1].to_rid = vf->pci_rid; copy[1].to_addr = vf->status_addr + data_offset; copy[1].length = status->length - data_offset; @@ -429,7 +435,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) /* Copy the peer pages */ pos = 2; count = 0; - list_for_each_entry(epp, &efx->local_page_list, link) { + list_for_each_entry(epp, &nic_data->local_page_list, link) { if (count == vf->peer_page_count) { /* The VF driver will know they need to provide more * pages because peer_addr_count is too large. @@ -444,7 +450,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) copy[pos].length = EFX_PAGE_SIZE; if (++pos == ARRAY_SIZE(copy)) { - efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); + efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); pos = 0; } ++count; @@ -456,7 +462,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status, generation_end); copy[pos].length = sizeof(status->generation_end); - efx_sriov_memcpy(efx, copy, pos + 1); + efx_siena_sriov_memcpy(efx, copy, pos + 1); /* Notify the guest */ EFX_POPULATE_QWORD_3(event, @@ -469,8 +475,8 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) &event); } -static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset, - u64 *addr, unsigned count) +static void efx_siena_sriov_bufs(struct efx_nic *efx, unsigned offset, + u64 *addr, unsigned count) { efx_qword_t buf; unsigned pos; @@ -539,7 +545,7 @@ static int efx_vfdi_init_evq(struct efx_vf *vf) return VFDI_RC_EINVAL; } - efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count); + efx_siena_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count); EFX_POPULATE_OWORD_3(reg, FRF_CZ_TIMER_Q_EN, 1, @@ -584,7 +590,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf) } if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask)) ++vf->rxq_count; - efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count); + efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count); label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL); EFX_POPULATE_OWORD_6(reg, @@ -628,7 +634,7 @@ static int efx_vfdi_init_txq(struct efx_vf *vf) if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask)) ++vf->txq_count; mutex_unlock(&vf->txq_lock); - efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count); + efx_siena_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count); eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON; @@ -742,8 +748,8 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf) efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, vf_offset +
index); } - efx_sriov_bufs(efx, vf->buftbl_base, NULL, - EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx)); + efx_siena_sriov_bufs(efx, vf->buftbl_base, NULL, + EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx)); efx_vfdi_flush_clear(vf); vf->evq0_count = 0; @@ -754,6 +760,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf) static int efx_vfdi_insert_filter(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; struct vfdi_req *req = vf->buf.addr; unsigned vf_rxq = req->u.mac_filter.rxq; unsigned flags; @@ -776,17 +783,20 @@ static int efx_vfdi_insert_filter(struct efx_vf *vf) vf->rx_filter_qid = vf_rxq; vf->rx_filtering = true; - efx_sriov_reset_rx_filter(vf); - queue_work(vfdi_workqueue, &efx->peer_work); + efx_siena_sriov_reset_rx_filter(vf); + queue_work(vfdi_workqueue, &nic_data->peer_work); return VFDI_RC_SUCCESS; } static int efx_vfdi_remove_all_filters(struct efx_vf *vf) { + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + vf->rx_filtering = false; - efx_sriov_reset_rx_filter(vf); - queue_work(vfdi_workqueue, &vf->efx->peer_work); + efx_siena_sriov_reset_rx_filter(vf); + queue_work(vfdi_workqueue, &nic_data->peer_work); return VFDI_RC_SUCCESS; } @@ -794,6 +804,7 @@ static int efx_vfdi_remove_all_filters(struct efx_vf *vf) static int efx_vfdi_set_status_page(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; struct vfdi_req *req = vf->buf.addr; u64 page_count = req->u.set_status_page.peer_page_count; u64 max_page_count = @@ -809,7 +820,7 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf) return VFDI_RC_EINVAL; } - mutex_lock(&efx->local_lock); + mutex_lock(&nic_data->local_lock); mutex_lock(&vf->status_lock); vf->status_addr = req->u.set_status_page.dma_addr; @@ -828,9 +839,9 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf) } } - __efx_sriov_push_vf_status(vf); + __efx_siena_sriov_push_vf_status(vf); mutex_unlock(&vf->status_lock); - mutex_unlock(&efx->local_lock); + mutex_unlock(&nic_data->local_lock); return VFDI_RC_SUCCESS; } @@ -857,7 +868,7 @@ static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = { [VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page, }; -static void efx_sriov_vfdi(struct work_struct *work) +static void efx_siena_sriov_vfdi(struct work_struct *work) { struct efx_vf *vf = container_of(work, struct efx_vf, req); struct efx_nic *efx = vf->efx; @@ -872,7 +883,7 @@ static void efx_sriov_vfdi(struct work_struct *work) copy[0].to_rid = efx->pci_dev->devfn; copy[0].to_addr = vf->buf.dma_addr; copy[0].length = EFX_PAGE_SIZE; - rc = efx_sriov_memcpy(efx, copy, 1); + rc = efx_siena_sriov_memcpy(efx, copy, 1); if (rc) { /* If we can't get the request, we can't reply to the caller */ if (net_ratelimit()) @@ -916,7 +927,7 @@ static void efx_sriov_vfdi(struct work_struct *work) copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op); copy[1].length = sizeof(req->op); - (void) efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); + (void)efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); } @@ -925,7 +936,8 @@ static void efx_sriov_vfdi(struct work_struct *work) * event ring in guest memory with VFDI reset events, then (re-initialise) the * event queue to raise an interrupt. The guest driver will then recover. 
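 *
 * Editor's note (illustrative): VFDI requests themselves are dispatched
 * through the vfdi_ops[] table above; the core of the work function is
 * roughly
 *
 *	if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op])
 *		rc = vfdi_ops[req->op](vf);
 *	else
 *		rc = VFDI_RC_EOPNOTSUPP;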
*/ -static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer) +static void efx_siena_sriov_reset_vf(struct efx_vf *vf, + struct efx_buffer *buffer) { struct efx_nic *efx = vf->efx; struct efx_memcpy_req copy_req[4]; @@ -961,7 +973,7 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer) copy_req[k].to_addr = vf->evq0_addrs[pos + k]; copy_req[k].length = EFX_PAGE_SIZE; } - rc = efx_sriov_memcpy(efx, copy_req, count); + rc = efx_siena_sriov_memcpy(efx, copy_req, count); if (rc) { if (net_ratelimit()) netif_err(efx, hw, efx->net_dev, @@ -974,7 +986,7 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer) /* Reinitialise, arm and trigger evq0 */ abs_evq = abs_index(vf, 0); buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0); - efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count); + efx_siena_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count); EFX_POPULATE_OWORD_3(reg, FRF_CZ_TIMER_Q_EN, 1, @@ -992,19 +1004,19 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer) mutex_unlock(&vf->status_lock); } -static void efx_sriov_reset_vf_work(struct work_struct *work) +static void efx_siena_sriov_reset_vf_work(struct work_struct *work) { struct efx_vf *vf = container_of(work, struct efx_vf, req); struct efx_nic *efx = vf->efx; struct efx_buffer buf; if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) { - efx_sriov_reset_vf(vf, &buf); + efx_siena_sriov_reset_vf(vf, &buf); efx_nic_free_buffer(efx, &buf); } } -static void efx_sriov_handle_no_channel(struct efx_nic *efx) +static void efx_siena_sriov_handle_no_channel(struct efx_nic *efx) { netif_err(efx, drv, efx->net_dev, "ERROR: IOV requires MSI-X and 1 additional interrupt" @@ -1012,35 +1024,38 @@ static void efx_sriov_handle_no_channel(struct efx_nic *efx) efx->vf_count = 0; } -static int efx_sriov_probe_channel(struct efx_channel *channel) +static int efx_siena_sriov_probe_channel(struct efx_channel *channel) { - channel->efx->vfdi_channel = channel; + struct siena_nic_data *nic_data = channel->efx->nic_data; + nic_data->vfdi_channel = channel; + return 0; } static void -efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len) +efx_siena_sriov_get_channel_name(struct efx_channel *channel, + char *buf, size_t len) { snprintf(buf, len, "%s-iov", channel->efx->name); } -static const struct efx_channel_type efx_sriov_channel_type = { - .handle_no_channel = efx_sriov_handle_no_channel, - .pre_probe = efx_sriov_probe_channel, +static const struct efx_channel_type efx_siena_sriov_channel_type = { + .handle_no_channel = efx_siena_sriov_handle_no_channel, + .pre_probe = efx_siena_sriov_probe_channel, .post_remove = efx_channel_dummy_op_void, - .get_name = efx_sriov_get_channel_name, + .get_name = efx_siena_sriov_get_channel_name, /* no copy operation; channel must not be reallocated */ .keep_eventq = true, }; -void efx_sriov_probe(struct efx_nic *efx) +void efx_siena_sriov_probe(struct efx_nic *efx) { unsigned count; if (!max_vfs) return; - if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count)) + if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) return; if (count > 0 && count > max_vfs) count = max_vfs; @@ -1048,17 +1063,20 @@ void efx_sriov_probe(struct efx_nic *efx) /* efx_nic_dimension_resources() will reduce vf_count as appropriate */ efx->vf_count = count; - efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type; + efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_siena_sriov_channel_type; } /* Copy the
list of individual addresses into the vfdi_status.peers * array and auxiliary pages, protected by %local_lock. Drop that lock * and then broadcast the address list to every VF. */ -static void efx_sriov_peer_work(struct work_struct *data) +static void efx_siena_sriov_peer_work(struct work_struct *data) { - struct efx_nic *efx = container_of(data, struct efx_nic, peer_work); - struct vfdi_status *vfdi_status = efx->vfdi_status.addr; + struct siena_nic_data *nic_data = container_of(data, + struct siena_nic_data, + peer_work); + struct efx_nic *efx = nic_data->efx; + struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; struct efx_vf *vf; struct efx_local_addr *local_addr; struct vfdi_endpoint *peer; @@ -1068,11 +1086,11 @@ static void efx_sriov_peer_work(struct work_struct *data) unsigned int peer_count; unsigned int pos; - mutex_lock(&efx->local_lock); + mutex_lock(&nic_data->local_lock); /* Move the existing peer pages off %local_page_list */ INIT_LIST_HEAD(&pages); - list_splice_tail_init(&efx->local_page_list, &pages); + list_splice_tail_init(&nic_data->local_page_list, &pages); /* Populate the VF addresses starting from entry 1 (entry 0 is * the PF address) @@ -1094,7 +1112,7 @@ static void efx_sriov_peer_work(struct work_struct *data) } /* Fill the remaining addresses */ - list_for_each_entry(local_addr, &efx->local_addr_list, link) { + list_for_each_entry(local_addr, &nic_data->local_addr_list, link) { ether_addr_copy(peer->mac_addr, local_addr->addr); peer->tci = 0; ++peer; @@ -1117,13 +1135,13 @@ static void efx_sriov_peer_work(struct work_struct *data) list_del(&epp->link); } - list_add_tail(&epp->link, &efx->local_page_list); + list_add_tail(&epp->link, &nic_data->local_page_list); peer = (struct vfdi_endpoint *)epp->ptr; peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint); } } vfdi_status->peer_count = peer_count; - mutex_unlock(&efx->local_lock); + mutex_unlock(&nic_data->local_lock); /* Free any now unused endpoint pages */ while (!list_empty(&pages)) { @@ -1141,25 +1159,26 @@ static void efx_sriov_peer_work(struct work_struct *data) mutex_lock(&vf->status_lock); if (vf->status_addr) - __efx_sriov_push_vf_status(vf); + __efx_siena_sriov_push_vf_status(vf); mutex_unlock(&vf->status_lock); } } -static void efx_sriov_free_local(struct efx_nic *efx) +static void efx_siena_sriov_free_local(struct efx_nic *efx) { + struct siena_nic_data *nic_data = efx->nic_data; struct efx_local_addr *local_addr; struct efx_endpoint_page *epp; - while (!list_empty(&efx->local_addr_list)) { - local_addr = list_first_entry(&efx->local_addr_list, + while (!list_empty(&nic_data->local_addr_list)) { + local_addr = list_first_entry(&nic_data->local_addr_list, struct efx_local_addr, link); list_del(&local_addr->link); kfree(local_addr); } - while (!list_empty(&efx->local_page_list)) { - epp = list_first_entry(&efx->local_page_list, + while (!list_empty(&nic_data->local_page_list)) { + epp = list_first_entry(&nic_data->local_page_list, struct efx_endpoint_page, link); list_del(&epp->link); dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, @@ -1168,7 +1187,7 @@ static void efx_sriov_free_local(struct efx_nic *efx) } } -static int efx_sriov_vf_alloc(struct efx_nic *efx) +static int efx_siena_sriov_vf_alloc(struct efx_nic *efx) { unsigned index; struct efx_vf *vf; @@ -1185,8 +1204,8 @@ static int efx_sriov_vf_alloc(struct efx_nic *efx) vf->rx_filter_id = -1; vf->tx_filter_mode = VF_TX_FILTER_AUTO; vf->tx_filter_id = -1; - INIT_WORK(&vf->req, efx_sriov_vfdi); - INIT_WORK(&vf->reset_work, 
efx_sriov_reset_vf_work); + INIT_WORK(&vf->req, efx_siena_sriov_vfdi); + INIT_WORK(&vf->reset_work, efx_siena_sriov_reset_vf_work); init_waitqueue_head(&vf->flush_waitq); mutex_init(&vf->status_lock); mutex_init(&vf->txq_lock); @@ -1195,7 +1214,7 @@ static int efx_sriov_vf_alloc(struct efx_nic *efx) return 0; } -static void efx_sriov_vfs_fini(struct efx_nic *efx) +static void efx_siena_sriov_vfs_fini(struct efx_nic *efx) { struct efx_vf *vf; unsigned int pos; @@ -1212,9 +1231,10 @@ static void efx_sriov_vfs_fini(struct efx_nic *efx) } } -static int efx_sriov_vfs_init(struct efx_nic *efx) +static int efx_siena_sriov_vfs_init(struct efx_nic *efx) { struct pci_dev *pci_dev = efx->pci_dev; + struct siena_nic_data *nic_data = efx->nic_data; unsigned index, devfn, sriov, buftbl_base; u16 offset, stride; struct efx_vf *vf; @@ -1227,7 +1247,7 @@ static int efx_sriov_vfs_init(struct efx_nic *efx) pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset); pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride); - buftbl_base = efx->vf_buftbl_base; + buftbl_base = nic_data->vf_buftbl_base; devfn = pci_dev->devfn + offset; for (index = 0; index < efx->vf_count; ++index) { vf = efx->vf + index; @@ -1253,13 +1273,14 @@ static int efx_sriov_vfs_init(struct efx_nic *efx) return 0; fail: - efx_sriov_vfs_fini(efx); + efx_siena_sriov_vfs_fini(efx); return rc; } -int efx_sriov_init(struct efx_nic *efx) +int efx_siena_sriov_init(struct efx_nic *efx) { struct net_device *net_dev = efx->net_dev; + struct siena_nic_data *nic_data = efx->nic_data; struct vfdi_status *vfdi_status; int rc; @@ -1271,15 +1292,15 @@ int efx_sriov_init(struct efx_nic *efx) if (efx->vf_count == 0) return 0; - rc = efx_sriov_cmd(efx, true, NULL, NULL); + rc = efx_siena_sriov_cmd(efx, true, NULL, NULL); if (rc) goto fail_cmd; - rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status), - GFP_KERNEL); + rc = efx_nic_alloc_buffer(efx, &nic_data->vfdi_status, + sizeof(*vfdi_status), GFP_KERNEL); if (rc) goto fail_status; - vfdi_status = efx->vfdi_status.addr; + vfdi_status = nic_data->vfdi_status.addr; memset(vfdi_status, 0, sizeof(*vfdi_status)); vfdi_status->version = 1; vfdi_status->length = sizeof(*vfdi_status); @@ -1289,16 +1310,16 @@ int efx_sriov_init(struct efx_nic *efx) vfdi_status->peer_count = 1 + efx->vf_count; vfdi_status->timer_quantum_ns = efx->timer_quantum_ns; - rc = efx_sriov_vf_alloc(efx); + rc = efx_siena_sriov_vf_alloc(efx); if (rc) goto fail_alloc; - mutex_init(&efx->local_lock); - INIT_WORK(&efx->peer_work, efx_sriov_peer_work); - INIT_LIST_HEAD(&efx->local_addr_list); - INIT_LIST_HEAD(&efx->local_page_list); + mutex_init(&nic_data->local_lock); + INIT_WORK(&nic_data->peer_work, efx_siena_sriov_peer_work); + INIT_LIST_HEAD(&nic_data->local_addr_list); + INIT_LIST_HEAD(&nic_data->local_page_list); - rc = efx_sriov_vfs_init(efx); + rc = efx_siena_sriov_vfs_init(efx); if (rc) goto fail_vfs; @@ -1307,7 +1328,7 @@ int efx_sriov_init(struct efx_nic *efx) efx->vf_init_count = efx->vf_count; rtnl_unlock(); - efx_sriov_usrev(efx, true); + efx_siena_sriov_usrev(efx, true); /* At this point we must be ready to accept VFDI requests */ @@ -1321,34 +1342,35 @@ int efx_sriov_init(struct efx_nic *efx) return 0; fail_pci: - efx_sriov_usrev(efx, false); + efx_siena_sriov_usrev(efx, false); rtnl_lock(); efx->vf_init_count = 0; rtnl_unlock(); - efx_sriov_vfs_fini(efx); + efx_siena_sriov_vfs_fini(efx); fail_vfs: - cancel_work_sync(&efx->peer_work); - efx_sriov_free_local(efx); + 
cancel_work_sync(&nic_data->peer_work); + efx_siena_sriov_free_local(efx); kfree(efx->vf); fail_alloc: - efx_nic_free_buffer(efx, &efx->vfdi_status); + efx_nic_free_buffer(efx, &nic_data->vfdi_status); fail_status: - efx_sriov_cmd(efx, false, NULL, NULL); + efx_siena_sriov_cmd(efx, false, NULL, NULL); fail_cmd: return rc; } -void efx_sriov_fini(struct efx_nic *efx) +void efx_siena_sriov_fini(struct efx_nic *efx) { struct efx_vf *vf; unsigned int pos; + struct siena_nic_data *nic_data = efx->nic_data; if (efx->vf_init_count == 0) return; /* Disable all interfaces to reconfiguration */ - BUG_ON(efx->vfdi_channel->enabled); - efx_sriov_usrev(efx, false); + BUG_ON(nic_data->vfdi_channel->enabled); + efx_siena_sriov_usrev(efx, false); rtnl_lock(); efx->vf_init_count = 0; rtnl_unlock(); @@ -1359,19 +1381,19 @@ void efx_sriov_fini(struct efx_nic *efx) cancel_work_sync(&vf->req); cancel_work_sync(&vf->reset_work); } - cancel_work_sync(&efx->peer_work); + cancel_work_sync(&nic_data->peer_work); pci_disable_sriov(efx->pci_dev); /* Tear down back-end state */ - efx_sriov_vfs_fini(efx); - efx_sriov_free_local(efx); + efx_siena_sriov_vfs_fini(efx); + efx_siena_sriov_free_local(efx); kfree(efx->vf); - efx_nic_free_buffer(efx, &efx->vfdi_status); - efx_sriov_cmd(efx, false, NULL, NULL); + efx_nic_free_buffer(efx, &nic_data->vfdi_status); + efx_siena_sriov_cmd(efx, false, NULL, NULL); } -void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event) +void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event) { struct efx_nic *efx = channel->efx; struct efx_vf *vf; @@ -1428,7 +1450,7 @@ error: vf->req_seqno = seq + 1; } -void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i) +void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i) { struct efx_vf *vf; @@ -1445,18 +1467,19 @@ void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i) vf->evq0_count = 0; } -void efx_sriov_mac_address_changed(struct efx_nic *efx) +void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) { - struct vfdi_status *vfdi_status = efx->vfdi_status.addr; + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; if (!efx->vf_init_count) return; ether_addr_copy(vfdi_status->peers[0].mac_addr, efx->net_dev->dev_addr); - queue_work(vfdi_workqueue, &efx->peer_work); + queue_work(vfdi_workqueue, &nic_data->peer_work); } -void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) +void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) { struct efx_vf *vf; unsigned queue, qid; @@ -1475,7 +1498,7 @@ void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) wake_up(&vf->flush_waitq); } -void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) +void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) { struct efx_vf *vf; unsigned ev_failed, queue, qid; @@ -1500,7 +1523,7 @@ void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) } /* Called from napi. 
Schedule the reset work item */ -void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) +void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) { struct efx_vf *vf; unsigned int rel; @@ -1516,7 +1539,7 @@ void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) } /* Reset all VFs */ -void efx_sriov_reset(struct efx_nic *efx) +void efx_siena_sriov_reset(struct efx_nic *efx) { unsigned int vf_i; struct efx_buffer buf; @@ -1527,15 +1550,15 @@ void efx_sriov_reset(struct efx_nic *efx) if (efx->vf_init_count == 0) return; - efx_sriov_usrev(efx, true); - (void)efx_sriov_cmd(efx, true, NULL, NULL); + efx_siena_sriov_usrev(efx, true); + (void)efx_siena_sriov_cmd(efx, true, NULL, NULL); if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) return; for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) { vf = efx->vf + vf_i; - efx_sriov_reset_vf(vf, &buf); + efx_siena_sriov_reset_vf(vf, &buf); } efx_nic_free_buffer(efx, &buf); @@ -1543,8 +1566,8 @@ void efx_sriov_reset(struct efx_nic *efx) int efx_init_sriov(void) { - /* A single threaded workqueue is sufficient. efx_sriov_vfdi() and - * efx_sriov_peer_work() spend almost all their time sleeping for + /* A single threaded workqueue is sufficient. efx_siena_sriov_vfdi() and + * efx_siena_sriov_peer_work() spend almost all their time sleeping for * MCDI to complete anyway */ vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi"); @@ -1559,7 +1582,7 @@ void efx_fini_sriov(void) destroy_workqueue(vfdi_workqueue); } -int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) +int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_vf *vf; @@ -1570,14 +1593,14 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) mutex_lock(&vf->status_lock); ether_addr_copy(vf->addr.mac_addr, mac); - __efx_sriov_update_vf_addr(vf); + __efx_siena_sriov_update_vf_addr(vf); mutex_unlock(&vf->status_lock); return 0; } -int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, - u16 vlan, u8 qos) +int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, + u16 vlan, u8 qos) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_vf *vf; @@ -1590,14 +1613,14 @@ int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, mutex_lock(&vf->status_lock); tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT); vf->addr.tci = htons(tci); - __efx_sriov_update_vf_addr(vf); + __efx_siena_sriov_update_vf_addr(vf); mutex_unlock(&vf->status_lock); return 0; } -int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, - bool spoofchk) +int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, + bool spoofchk) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_vf *vf; @@ -1620,8 +1643,8 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, return rc; } -int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, - struct ifla_vf_info *ivi) +int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i, + struct ifla_vf_info *ivi) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_vf *vf; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index ee84a90e371c..aaf2987512b5 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -343,8 +343,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) unsigned short dma_flags; int i = 0; - 
EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count); - if (skb_shinfo(skb)->gso_size) return efx_enqueue_skb_tso(tx_queue, skb); @@ -1258,8 +1256,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, /* Find the packet protocol and sanity-check it */ state.protocol = efx_tso_check_protocol(skb); - EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count); - rc = tso_start(&state, efx, skb); if (rc) goto mem_err; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 5e94d00b96b3..6cc3cf6f17c8 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -81,6 +81,7 @@ static const char version[] = #include <linux/workqueue.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/of_gpio.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -2188,6 +2189,41 @@ static const struct of_device_id smc91x_match[] = { {}, }; MODULE_DEVICE_TABLE(of, smc91x_match); + +/** + * try_toggle_control_gpio - configure a gpio if it exists + */ +static int try_toggle_control_gpio(struct device *dev, + struct gpio_desc **desc, + const char *name, int index, + int value, unsigned int nsdelay) +{ + struct gpio_desc *gpio = *desc; + int res; + + gpio = devm_gpiod_get_index(dev, name, index); + if (IS_ERR(gpio)) { + if (PTR_ERR(gpio) == -ENOENT) { + *desc = NULL; + return 0; + } + + return PTR_ERR(gpio); + } + res = gpiod_direction_output(gpio, !value); + if (res) { + dev_err(dev, "unable to toggle gpio %s: %i\n", name, res); + devm_gpiod_put(dev, gpio); + gpio = NULL; + return res; + } + if (nsdelay) + usleep_range(nsdelay, 2 * nsdelay); + gpiod_set_value_cansleep(gpio, value); + *desc = gpio; + + return 0; +} #endif /* @@ -2207,9 +2243,10 @@ static int smc_drv_probe(struct platform_device *pdev) const struct of_device_id *match = NULL; struct smc_local *lp; struct net_device *ndev; - struct resource *res, *ires; + struct resource *res; unsigned int __iomem *addr; unsigned long irq_flags = SMC_IRQ_FLAGS; + unsigned long irq_resflags; int ret; ndev = alloc_etherdev(sizeof(struct smc_local)); @@ -2237,6 +2274,28 @@ static int smc_drv_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; u32 val; + /* Optional pwrdwn GPIO configured? */ + ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio, + "power", 0, 0, 100); + if (ret) + return ret; + + /* + * Optional reset GPIO configured? Minimum 100 ns reset needed + * according to LAN91C96 datasheet page 14. + */ + ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio, + "reset", 0, 0, 100); + if (ret) + return ret; + + /* + * Need to wait for optional EEPROM to load, max 750 us according + * to LAN91C96 datasheet page 55. + */ + if (lp->reset_gpio) + usleep_range(750, 1000); + /* Combination of IO widths supported, default to 16-bit */ if (!of_property_read_u32(np, "reg-io-width", &val)) { if (val & 1) @@ -2279,16 +2338,19 @@ static int smc_drv_probe(struct platform_device *pdev) goto out_free_netdev; } - ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!ires) { + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq <= 0) { ret = -ENODEV; goto out_release_io; } - - ndev->irq = ires->start; - - if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) - irq_flags = ires->flags & IRQF_TRIGGER_MASK; + /* + * If this platform does not specify any special irqflags, or if + * the resource supplies a trigger, override the irqflags with + * the trigger flags from the resource. + */ + irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq)); + if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK) + irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
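/* Editorial sketch (not part of the patch): the hunk above reads the
 * trigger type back from the irq_data instead of the old IORESOURCE_IRQ
 * flags. With DT, the trigger encoded in the "interrupts" property ends
 * up in the irq_data, so the same idiom in minimal form is:
 *
 *	int irq = platform_get_irq(pdev, 0);
 *	unsigned long flags;
 *
 *	if (irq <= 0)
 *		return -ENODEV;
 *	flags = irqd_get_trigger_type(irq_get_irq_data(irq));
 *	if (!(flags & IRQF_TRIGGER_MASK))
 *		flags = IRQF_TRIGGER_RISING;
 *	ret = request_irq(irq, handler, flags, dev_name(&pdev->dev), ndev);
 *
 * The IRQF_TRIGGER_RISING fallback and the handler symbol are
 * illustrative assumptions, not values taken from smc91x.
 */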
ret = smc_request_attrib(pdev, ndev); if (ret) diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index 47dce918eb0f..2a38dacbbd27 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -298,6 +298,9 @@ struct smc_local { struct sk_buff *pending_tx_skb; struct tasklet_struct tx_task; + struct gpio_desc *power_gpio; + struct gpio_desc *reset_gpio; + /* version/revision of the SMC91x chip */ int version; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index affb29da353e..f9c87624a0af 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -59,6 +59,8 @@ #include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/of_net.h> +#include <linux/pm_runtime.h> + #include "smsc911x.h" #define SMSC_CHIPNAME "smsc911x" @@ -1342,6 +1344,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) spin_unlock(&pdata->mac_lock); } +static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) +{ + int rc = 0; + + if (!pdata->phy_dev) + return rc; + + /* If the internal PHY is in General Power-Down mode, everything except + * the management interface is powered down and stays in that condition + * as long as PHY register bit 0.11 is HIGH. + * + * In that case, clear bit 0.11 so that the PHY powers up and we can + * access the PHY registers. + */ + rc = phy_read(pdata->phy_dev, MII_BMCR); + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); + return rc; + } + + /* If the PHY general power-down bit is not set, it is not necessary + * to disable the general power-down mode. + */ + if (rc & BMCR_PDOWN) { + rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN); + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); + return rc; + } + + usleep_range(1000, 1500); + } + + return 0; +} + static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) { int rc = 0; @@ -1356,12 +1394,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) return rc; } - /* - * If energy is detected the PHY is already awake so is not necessary - * to disable the energy detect power-down mode. 
- */ - if ((rc & MII_LAN83C185_EDPWRDOWN) && - !(rc & MII_LAN83C185_ENERGYON)) { + /* Only disable if energy detect mode is already enabled */ + if (rc & MII_LAN83C185_EDPWRDOWN) { /* Disable energy detect mode for this SMSC transceiver */ rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, rc & (~MII_LAN83C185_EDPWRDOWN)); @@ -1370,8 +1404,8 @@ SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); return rc; } - - mdelay(1); + /* Allow PHY to wake up */ + mdelay(2); } return 0; @@ -1393,7 +1427,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) /* Only enable if energy detect mode is already disabled */ if (!(rc & MII_LAN83C185_EDPWRDOWN)) { - mdelay(100); /* Enable energy detect mode for this SMSC transceiver */ rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, rc | MII_LAN83C185_EDPWRDOWN); @@ -1402,8 +1435,6 @@ SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); return rc; } - - mdelay(1); } return 0; } @@ -1415,6 +1446,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata) int ret; /* + * Make sure to power up the PHY chip before doing a reset, otherwise + * the reset fails. + */ + ret = smsc911x_phy_general_power_up(pdata); + if (ret) { + SMSC_WARN(pdata, drv, "Failed to power up the PHY chip"); + return ret; + } + + /* * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that * are initialized in an Energy Detect Power-Down mode that prevents * the MAC chip from being software reset. So we have to wake up the PHY @@ -2299,6 +2340,9 @@ static int smsc911x_drv_remove(struct platform_device *pdev) free_netdev(dev); + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return 0; } @@ -2452,6 +2496,9 @@ static int smsc911x_drv_probe(struct platform_device *pdev) if (pdata->config.shift) pdata->ops = &shifted_smsc911x_ops; + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + retval = smsc911x_init(dev); if (retval < 0) goto out_disable_resources; @@ -2533,6 +2580,8 @@ out_unregister_netdev_5: out_free_irq: free_irq(dev->irq, dev); out_disable_resources: + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); (void)smsc911x_disable_resources(pdev); out_enable_resources_fail: smsc911x_free_resources(pdev); diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index b02d4a3ffa37..7d3af190be55 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -14,62 +14,20 @@ config STMMAC_ETH if STMMAC_ETH config STMMAC_PLATFORM - bool "STMMAC Platform bus support" + tristate "STMMAC Platform bus support" depends on STMMAC_ETH default y ---help--- - This selects the platform specific bus support for - the stmmac device driver. This is the driver used - on many embedded STM platforms based on ARM and SuperH - processors. + This selects the platform specific bus support for the stmmac driver. + This is the driver used on several SoCs: + STi, Allwinner, Amlogic Meson, Altera SOCFPGA. + If you have a controller with this interface, say Y or M here. If unsure, say N. -config DWMAC_MESON - bool "Amlogic Meson dwmac support" - depends on STMMAC_PLATFORM && ARCH_MESON - help - Support for Ethernet controller on Amlogic Meson SoCs. - - This selects the Amlogic Meson SoC glue layer support for - the stmmac device driver. This driver is used for Meson6 and 
This driver is used for Meson6 and - Meson8 SoCs. - -config DWMAC_SOCFPGA - bool "SOCFPGA dwmac support" - depends on STMMAC_PLATFORM && MFD_SYSCON && (ARCH_SOCFPGA || COMPILE_TEST) - help - Support for ethernet controller on Altera SOCFPGA - - This selects the Altera SOCFPGA SoC glue layer support - for the stmmac device driver. This driver is used for - arria5 and cyclone5 FPGA SoCs. - -config DWMAC_SUNXI - bool "Allwinner GMAC support" - depends on STMMAC_PLATFORM && ARCH_SUNXI - default y - ---help--- - Support for Allwinner A20/A31 GMAC ethernet controllers. - - This selects Allwinner SoC glue layer support for the - stmmac device driver. This driver is used for A20/A31 - GMAC ethernet controller. - -config DWMAC_STI - bool "STi GMAC support" - depends on STMMAC_PLATFORM && ARCH_STI - default y - ---help--- - Support for ethernet controller on STi SOCs. - - This selects STi SoC glue layer support for the stmmac - device driver. This driver is used on for the STi series - SOCs GMAC ethernet controller. - config STMMAC_PCI - bool "STMMAC PCI bus support" + tristate "STMMAC PCI bus support" depends on STMMAC_ETH && PCI ---help--- This is to select the Synopsys DWMAC available on PCI devices, @@ -79,22 +37,4 @@ config STMMAC_PCI D1215994A VIRTEX FPGA board. If unsure, say N. - -config STMMAC_DEBUG_FS - bool "Enable monitoring via sysFS " - default n - depends on STMMAC_ETH && DEBUG_FS - ---help--- - The stmmac entry in /sys reports DMA TX/RX rings - or (if supported) the HW cap register. - -config STMMAC_DA - bool "STMMAC DMA arbitration scheme" - default n - ---help--- - Selecting this option, rx has priority over Tx (only for Giga - Ethernet device). - By default, the DMA arbitration scheme is based on Round-robin - (rx:tx priority is 1:1). - endif diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 0533d0ba783d..ac4d5629d905 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -1,11 +1,12 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o -stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o -stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o -stmmac-$(CONFIG_DWMAC_MESON) += dwmac-meson.o -stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o -stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o -stmmac-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ - chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ - dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ + chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ + dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y) + +obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o +stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o \ + dwmac-sti.o dwmac-socfpga.o + +obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o +stmmac-pci-objs:= stmmac_pci.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 593e6c4144a7..cd77289c3cfe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -44,6 +44,7 @@ #undef FRAME_FILTER_DEBUG /* #define FRAME_FILTER_DEBUG */ +/* Extra statistic and debug information exposed by ethtool */ struct stmmac_extra_stats { /* Transmit errors */ unsigned long tx_underflow ____cacheline_aligned; @@ -220,6 +221,7 @@ enum dma_irq_status { handle_tx = 0x8, }; +/* EEE and LPI defines */ 
#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0) #define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1) #define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) @@ -229,6 +231,7 @@ enum dma_irq_status { #define CORE_PCS_LINK_STATUS (1 << 6) #define CORE_RGMII_IRQ (1 << 7) +/* Physical Coding Sublayer */ struct rgmii_adv { unsigned int pause; unsigned int duplex; @@ -294,6 +297,7 @@ struct dma_features { #define JUMBO_LEN 9000 +/* Descriptors helpers */ struct stmmac_desc_ops { /* DMA RX descriptor ring initialization */ void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode, @@ -341,6 +345,10 @@ struct stmmac_desc_ops { int (*get_rx_timestamp_status) (void *desc, u32 ats); }; +extern const struct stmmac_desc_ops enh_desc_ops; +extern const struct stmmac_desc_ops ndesc_ops; + +/* Specific DMA helpers */ struct stmmac_dma_ops { /* DMA core initialization */ int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb, @@ -370,6 +378,7 @@ struct stmmac_dma_ops { struct mac_device_info; +/* Helpers to program the MAC core */ struct stmmac_ops { /* MAC core initialization */ void (*core_init)(struct mac_device_info *hw, int mtu); @@ -400,6 +409,7 @@ struct stmmac_ops { void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv); }; +/* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); void (*config_sub_second_increment) (void __iomem *ioaddr); @@ -410,6 +420,8 @@ struct stmmac_hwtimestamp { u64(*get_systime) (void __iomem *ioaddr); }; +extern const struct stmmac_hwtimestamp stmmac_ptp; + struct mac_link { int port; int duplex; @@ -421,6 +433,7 @@ struct mii_regs { unsigned int data; /* MII Data */ }; +/* Helpers to manage the descriptors for chain and ring modes */ struct stmmac_mode_ops { void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, unsigned int extend_desc); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index d225a603e604..cca028d632f6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -18,6 +18,8 @@ #include <linux/platform_device.h> #include <linux/stmmac.h> +#include "stmmac_platform.h" + #define ETHMAC_SPEED_100 BIT(1) struct meson_dwmac { @@ -56,7 +58,7 @@ static void *meson6_dwmac_setup(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 1); dwmac->reg = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dwmac->reg)) - return dwmac->reg; + return ERR_CAST(dwmac->reg); return dwmac; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 3aad413e74b4..e97074cd5800 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -23,7 +23,9 @@ #include <linux/regmap.h> #include <linux/reset.h> #include <linux/stmmac.h> + #include "stmmac.h" +#include "stmmac_platform.h" #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index ccfe7e510418..0e137751e76e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -1,4 +1,4 @@ -/** +/* * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer * * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited @@ -22,6 +22,8 @@ #include <linux/of.h> 
#include <linux/of_net.h> +#include "stmmac_platform.h" + #define DWMAC_125MHZ 125000000 #define DWMAC_50MHZ 50000000 #define DWMAC_25MHZ 25000000 @@ -35,9 +37,8 @@ #define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \ iface == PHY_INTERFACE_MODE_GMII) -/* STiH4xx register definitions (STiH415/STiH416/STiH407/STiH410 families) */ - -/** +/* STiH4xx register definitions (STiH415/STiH416/STiH407/STiH410 families) + * * Below table summarizes the clock requirement and clock sources for * supported phy interface modes with link speeds. * ________________________________________________ @@ -76,9 +77,7 @@ #define STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7) #define STIH4XX_ETH_SEL_TXCLK_NOT_CLK125 BIT(6) -/* STiD127 register definitions */ - -/** +/* STiD127 register definitions *----------------------- * src |BIT(6)| BIT(7)| *----------------------- @@ -104,13 +103,13 @@ #define EN_MASK GENMASK(1, 1) #define EN BIT(1) -/** +/* * 3 bits [4:2] * 000-GMII/MII * 001-RGMII * 010-SGMII * 100-RMII -*/ + */ #define MII_PHY_SEL_MASK GENMASK(4, 2) #define ETH_PHY_SEL_RMII BIT(4) #define ETH_PHY_SEL_SGMII BIT(3) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 771cd15fca18..c5ea9ab75b03 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -1,4 +1,4 @@ -/** +/* * dwmac-sunxi.c - Allwinner sunxi DWMAC specific glue layer * * Copyright (C) 2013 Chen-Yu Tsai @@ -22,6 +22,8 @@ #include <linux/of_net.h> #include <linux/regulator/consumer.h> +#include "stmmac_platform.h" + struct sunxi_priv_data { int interface; int clk_enabled; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 0c2058a69fd2..59d92e811750 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -70,10 +70,6 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, if (mb) value |= DMA_BUS_MODE_MB; -#ifdef CONFIG_STMMAC_DA - value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */ -#endif - if (atds) value |= DMA_BUS_MODE_ATDS; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index c3c40650b309..c0a391983372 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -122,9 +122,7 @@ int stmmac_mdio_unregister(struct net_device *ndev); int stmmac_mdio_register(struct net_device *ndev); int stmmac_mdio_reset(struct mii_bus *mii); void stmmac_set_ethtool_ops(struct net_device *netdev); -extern const struct stmmac_desc_ops enh_desc_ops; -extern const struct stmmac_desc_ops ndesc_ops; -extern const struct stmmac_hwtimestamp stmmac_ptp; + int stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); int stmmac_resume(struct net_device *ndev); @@ -136,77 +134,4 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, void stmmac_disable_eee_mode(struct stmmac_priv *priv); bool stmmac_eee_init(struct stmmac_priv *priv); -#ifdef CONFIG_STMMAC_PLATFORM -#ifdef CONFIG_DWMAC_MESON -extern const struct stmmac_of_data meson6_dwmac_data; -#endif -#ifdef CONFIG_DWMAC_SUNXI -extern const struct stmmac_of_data sun7i_gmac_data; -#endif -#ifdef CONFIG_DWMAC_STI -extern const struct stmmac_of_data stih4xx_dwmac_data; -extern const struct stmmac_of_data stid127_dwmac_data; -#endif -#ifdef 
CONFIG_DWMAC_SOCFPGA -extern const struct stmmac_of_data socfpga_gmac_data; -#endif -extern struct platform_driver stmmac_pltfr_driver; -static inline int stmmac_register_platform(void) -{ - int err; - - err = platform_driver_register(&stmmac_pltfr_driver); - if (err) - pr_err("stmmac: failed to register the platform driver\n"); - - return err; -} - -static inline void stmmac_unregister_platform(void) -{ - platform_driver_unregister(&stmmac_pltfr_driver); -} -#else -static inline int stmmac_register_platform(void) -{ - pr_debug("stmmac: do not register the platf driver\n"); - - return 0; -} - -static inline void stmmac_unregister_platform(void) -{ -} -#endif /* CONFIG_STMMAC_PLATFORM */ - -#ifdef CONFIG_STMMAC_PCI -extern struct pci_driver stmmac_pci_driver; -static inline int stmmac_register_pci(void) -{ - int err; - - err = pci_register_driver(&stmmac_pci_driver); - if (err) - pr_err("stmmac: failed to register the PCI driver\n"); - - return err; -} - -static inline void stmmac_unregister_pci(void) -{ - pci_unregister_driver(&stmmac_pci_driver); -} -#else -static inline int stmmac_register_pci(void) -{ - pr_debug("stmmac: do not register the PCI driver\n"); - - return 0; -} - -static inline void stmmac_unregister_pci(void) -{ -} -#endif /* CONFIG_STMMAC_PCI */ - #endif /* __STMMAC_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6f77a46c7e2c..118a427d1942 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -44,10 +44,10 @@ #include <linux/slab.h> #include <linux/prefetch.h> #include <linux/pinctrl/consumer.h> -#ifdef CONFIG_STMMAC_DEBUG_FS +#ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> #include <linux/seq_file.h> -#endif /* CONFIG_STMMAC_DEBUG_FS */ +#endif /* CONFIG_DEBUG_FS */ #include <linux/net_tstamp.h> #include "stmmac_ptp.h" #include "stmmac.h" @@ -116,7 +116,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); static irqreturn_t stmmac_interrupt(int irq, void *dev_id); -#ifdef CONFIG_STMMAC_DEBUG_FS +#ifdef CONFIG_DEBUG_FS static int stmmac_init_fs(struct net_device *dev); static void stmmac_exit_fs(void); #endif @@ -125,8 +125,8 @@ static void stmmac_exit_fs(void); /** * stmmac_verify_args - verify the driver parameters. - * Description: it verifies if some wrong parameter is passed to the driver. - * Note that wrong parameters are replaced with the default values. + * Description: it checks the driver parameters and sets a default in case of + * errors. */ static void stmmac_verify_args(void) { @@ -191,14 +191,8 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) static void print_pkt(unsigned char *buf, int len) { - int j; - pr_debug("len = %d byte, buf addr: 0x%p", len, buf); - for (j = 0; j < len; j++) { - if ((j % 16) == 0) - pr_debug("\n %03x:", j); - pr_debug(" %02x", buf[j]); - } - pr_debug("\n"); + pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); } /* minimum number of free TX descriptors required to wake up TX process */ @@ -210,7 +204,7 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) } /** - * stmmac_hw_fix_mac_speed: callback for speed selection + * stmmac_hw_fix_mac_speed - callback for speed selection * @priv: driver private structure * Description: on some platforms (e.g. ST), some HW system configuration * registers have to be set according to the link speed negotiated. 
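/* Editorial sketch: the fix_mac_speed hook described in the kernel-doc
 * above is registered by platform glue in plat_stmmacenet_data and is
 * invoked on every link change. The callback signature comes from the
 * driver; the syscon register and mask names below are hypothetical:
 *
 *	static void board_fix_mac_speed(void *bsp_priv, unsigned int speed)
 *	{
 *		struct board_glue *glue = bsp_priv;
 *
 *		regmap_update_bits(glue->syscon, BOARD_ETH_CTRL,
 *				   BOARD_ETH_SPEED_MASK,
 *				   speed == SPEED_1000 ? BOARD_ETH_SPEED_1000
 *							: BOARD_ETH_SPEED_100);
 *	}
 */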
@@ -224,9 +218,10 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) } /** - * stmmac_enable_eee_mode: Check and enter in LPI mode + * stmmac_enable_eee_mode - check and enter in LPI mode * @priv: driver private structure - * Description: this function is to verify and enter in LPI mode for EEE. + * Description: this function checks and enters LPI mode in case of + * EEE. */ static void stmmac_enable_eee_mode(struct stmmac_priv *priv) { @@ -237,7 +232,7 @@ } /** - * stmmac_disable_eee_mode: disable/exit from EEE + * stmmac_disable_eee_mode - disable and exit from LPI mode * @priv: driver private structure * Description: this function is to exit and disable EEE in case the * LPI state is true. This is called by the xmit. @@ -250,7 +245,7 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv) { } /** - * stmmac_eee_ctrl_timer: EEE TX SW timer. + * stmmac_eee_ctrl_timer - EEE TX SW timer. * @arg : data hook * Description: * if there is no data transfer and if we are not in LPI state, @@ -265,17 +260,17 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) } /** - * stmmac_eee_init: init EEE + * stmmac_eee_init - init EEE * @priv: driver private structure * Description: - * If the EEE support has been enabled while configuring the driver, - * if the GMAC actually supports the EEE (from the HW cap reg) and the - * phy can also manage EEE, so enable the LPI state and start the timer - * to verify if the tx path can enter in LPI state. + * if the GMAC supports the EEE (from the HW cap reg) and the phy device + * can also manage EEE, this function enables the LPI state and starts the + * related timer. */ bool stmmac_eee_init(struct stmmac_priv *priv) { char *phy_bus_name = priv->plat->phy_bus_name; + unsigned long flags; bool ret = false; /* Using PCS we cannot deal with the phy registers at this stage @@ -300,6 +295,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv) * changed). * In that case the driver disables its own timers. */ + spin_lock_irqsave(&priv->lock, flags); if (priv->eee_active) { pr_debug("stmmac: disable EEE\n"); del_timer_sync(&priv->eee_ctrl_timer); @@ -307,9 +303,11 @@ tx_lpi_timer); } priv->eee_active = 0; + spin_unlock_irqrestore(&priv->lock, flags); goto out; } /* Activate the EEE and start timers */ + spin_lock_irqsave(&priv->lock, flags); if (!priv->eee_active) { priv->eee_active = 1; init_timer(&priv->eee_ctrl_timer); @@ -325,15 +323,16 @@ bool stmmac_eee_init(struct stmmac_priv *priv) /* Set HW EEE according to the speed */ priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); - pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); - ret = true; + spin_unlock_irqrestore(&priv->lock, flags); + + pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); } out: return ret; } -/* stmmac_get_tx_hwtstamp: get HW TX timestamps +/* stmmac_get_tx_hwtstamp - get HW TX timestamps * @priv: driver private structure * @entry : descriptor index to be used. * @skb : the socket buffer @@ -375,7 +374,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, return; }
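/* Editorial note on the stmmac_eee_init() hunks above: the function now
 * takes priv->lock itself, which is why the stmmac_adjust_link() hunk
 * later in this patch releases the lock before calling it; with the old
 * ordering the callee would try to acquire a spinlock already held by
 * its caller. Sketch of the resulting rule:
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	...update the link state...
 *	spin_unlock_irqrestore(&priv->lock, flags);
 *	priv->eee_enabled = stmmac_eee_init(priv);
 */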
-/* stmmac_get_rx_hwtstamp: get HW RX timestamps +/* stmmac_get_rx_hwtstamp - get HW RX timestamps * @priv: driver private structure * @entry : descriptor index to be used. * @skb : the socket buffer @@ -631,11 +630,11 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) } /** - * stmmac_init_ptp: init PTP + * stmmac_init_ptp - init PTP * @priv: driver private structure - * Description: this is to verify if the HW supports the PTPv1 or v2. + * Description: this is to verify if the HW supports the PTPv1 or PTPv2. * This is done by looking at the HW cap. register. - * Also it registers the ptp driver. + * This function also registers the ptp driver. */ static int stmmac_init_ptp(struct stmmac_priv *priv) { @@ -677,9 +676,13 @@ static void stmmac_release_ptp(struct stmmac_priv *priv) } /** - * stmmac_adjust_link + * stmmac_adjust_link - adjusts the link parameters * @dev: net device structure - * Description: it adjusts the link parameters. + * Description: this is the helper called by the physical abstraction layer + * drivers to communicate the phy link status. According to the speed and + * duplex this driver can invoke registered glue-logic as well. + * It also invokes the EEE initialization, which can be needed when + * switching between networks (that are EEE capable). */ static void stmmac_adjust_link(struct net_device *dev) { @@ -760,16 +763,16 @@ static void stmmac_adjust_link(struct net_device *dev) if (new_state && netif_msg_link(priv)) phy_print_status(phydev); + spin_unlock_irqrestore(&priv->lock, flags); + /* At this stage, it could be needed to setup the EEE or adjust some * MAC related HW registers. */ priv->eee_enabled = stmmac_eee_init(priv); - - spin_unlock_irqrestore(&priv->lock, flags); } /** - * stmmac_check_pcs_mode: verify if RGMII/SGMII is supported + * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported * @priv: driver private structure * Description: this is to verify if the HW supports the PCS. * Physical Coding Sublayer (PCS) interface that can be used when the MAC is @@ -858,7 +861,7 @@ static int stmmac_init_phy(struct net_device *dev) } /** - * stmmac_display_ring: display ring + * stmmac_display_ring - display ring * @head: pointer to the head of the ring passed. * @size: size of the ring. * @extend_desc: to verify if extended descriptors are used. @@ -926,7 +929,7 @@ static int stmmac_set_bfsize(int mtu, int bufsize) } /** - * stmmac_clear_descriptors: clear descriptors + * stmmac_clear_descriptors - clear descriptors * @priv: driver private structure * Description: this function is called to clear the tx and rx descriptors * whether basic or extended descriptors are used. @@ -958,13 +961,22 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv) (i == txsize - 1)); } +/** + * stmmac_init_rx_buffers - init the RX descriptor buffer. + * @priv: driver private structure + * @p: descriptor pointer + * @i: descriptor index + * @flags: gfp flag. + * Description: this function is called to allocate a receive buffer, perform + * the DMA mapping and init the descriptor. + */ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, - int i) + int i, gfp_t flags) { struct sk_buff *skb; skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, - GFP_KERNEL); + flags); if (!skb) { pr_err("%s: Rx init fails; skb is NULL\n", __func__); return -ENOMEM; @@ -1002,11 +1014,12 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) /** * init_dma_desc_rings - init the RX/TX descriptor rings * @dev: net device structure - * Description: this function initializes the DMA RX/TX descriptors + * @flags: gfp flag. 
+ * Description: this function initializes the DMA RX/TX descriptors * and allocates the socket buffers. It supports the chained and ring * modes. */ -static int init_dma_desc_rings(struct net_device *dev) +static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) { int i; struct stmmac_priv *priv = netdev_priv(dev); @@ -1041,7 +1054,7 @@ static int init_dma_desc_rings(struct net_device *dev) else p = priv->dma_rx + i; - ret = stmmac_init_rx_buffers(priv, p, i); + ret = stmmac_init_rx_buffers(priv, p, i, flags); if (ret) goto err_init_rx_buffers; @@ -1139,6 +1152,14 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) } } +/** + * alloc_dma_desc_resources - alloc TX/RX resources. + * @priv: private structure + * Description: according to which descriptor can be used (extend or basic) + * this function allocates the resources for TX and RX paths. In case of + * reception, for example, it pre-allocates the RX socket buffer in order to + * allow the zero-copy mechanism. + */ static int alloc_dma_desc_resources(struct stmmac_priv *priv) { unsigned int txsize = priv->dma_tx_size; @@ -1250,8 +1271,8 @@ static void free_dma_desc_resources(struct stmmac_priv *priv) /** * stmmac_dma_operation_mode - HW DMA operation mode * @priv: driver private structure - * Description: it sets the DMA operation mode: tx/rx DMA thresholds - * or Store-And-Forward capability. + * Description: it is used for configuring the DMA operation mode register in + * order to program the tx/rx DMA thresholds or Store-And-Forward mode. */ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) { @@ -1272,9 +1293,9 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) } /** - * stmmac_tx_clean: + * stmmac_tx_clean - to manage the transmission completion * @priv: driver private structure - * Description: it reclaims resources after transmission completes. + * Description: it reclaims the transmit resources after transmission completes. */ static void stmmac_tx_clean(struct stmmac_priv *priv) { @@ -1373,10 +1394,10 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv) } /** - * stmmac_tx_err: irq tx error mng function + * stmmac_tx_err - to manage the tx error * @priv: driver private structure * Description: it cleans the descriptors and restarts the transmission - * in case of errors. + * in case of transmission errors. */ static void stmmac_tx_err(struct stmmac_priv *priv) { @@ -1404,12 +1425,11 @@ static void stmmac_tx_err(struct stmmac_priv *priv) } /** - * stmmac_dma_interrupt: DMA ISR + * stmmac_dma_interrupt - DMA ISR * @priv: driver private structure * Description: this is the DMA ISR. It is called by the main ISR. - * It calls the dwmac dma routine to understand which type of interrupt - * happened. In case of there is a Normal interrupt and either TX or RX - * interrupt happened so the NAPI is scheduled. + * It calls the dwmac dma routine and schedules the poll method when there + * is work to be done. */ static void stmmac_dma_interrupt(struct stmmac_priv *priv) { @@ -1452,6 +1472,12 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv) pr_info(" No MAC Management Counters available\n"); } +/** + * stmmac_get_synopsys_id - return the Synopsys ID. + * @priv: driver private structure + * Description: this simple function is to decode and return the Synopsys ID + * starting from the HW core register. 
+ */ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) { u32 hwid = priv->hw->synopsys_uid; @@ -1470,11 +1496,11 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) } /** - * stmmac_selec_desc_mode: to select among: normal/alternate/extend descriptors + * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors * @priv: driver private structure * Description: select the Enhanced/Alternate or Normal descriptors. - * In case of Enhanced/Alternate, it looks at the extended descriptors are - * supported by the HW cap. register. + * In case of Enhanced/Alternate, it checks if the extended descriptors are + * supported by the HW capability register. */ static void stmmac_selec_desc_mode(struct stmmac_priv *priv) { @@ -1496,7 +1522,7 @@ } /** - * stmmac_get_hw_features: get MAC capabilities from the HW cap. register. + * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. * @priv: driver private structure * Description: * new GMAC chip generations have a new register to indicate the @@ -1554,7 +1580,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv) } /** - * stmmac_check_ether_addr: check if the MAC addr is valid + * stmmac_check_ether_addr - check if the MAC addr is valid * @priv: driver private structure * Description: * it is to verify if the MAC address is valid, in case of failures it @@ -1573,7 +1599,7 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv) } /** - * stmmac_init_dma_engine: DMA init. + * stmmac_init_dma_engine - DMA init. * @priv: driver private structure * Description: * It inits the DMA invoking the specific MAC/GMAC callback. @@ -1602,7 +1628,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) } /** - * stmmac_tx_timer: mitigation sw timer for tx. + * stmmac_tx_timer - mitigation sw timer for tx. * @data: data pointer * Description: * This is the timer handler to directly invoke the stmmac_tx_clean. @@ -1615,7 +1641,7 @@ static void stmmac_tx_timer(unsigned long data) } /** - * stmmac_init_tx_coalesce: init tx mitigation options. + * stmmac_init_tx_coalesce - init tx mitigation options. * @priv: driver private structure * Description: * This inits the transmit coalesce parameters: i.e. timer rate, @@ -1634,10 +1660,13 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) } /** - * stmmac_hw_setup: setup mac in a usable state. + * stmmac_hw_setup - setup mac in a usable state. * @dev : pointer to the device structure. * Description: - * This function sets up the ip in a usable state. + * this is the main function to set up the HW in a usable state: the dma + * engine is reset, the core registers are configured (e.g. AXI, + * checksum features, timers) and the DMA is ready to start receiving and + * transmitting. * Return value: * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. 
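/* Editorial note: init_dma_desc_rings() gains a gfp_t because, after
 * this patch, the same helper runs in two different contexts:
 *
 *	stmmac_open()   -> init_dma_desc_rings(dev, GFP_KERNEL);
 *	stmmac_resume() -> init_dma_desc_rings(ndev, GFP_ATOMIC);
 *
 * open runs in process context and may sleep, while the resume path may
 * not (hence GFP_ATOMIC), so every allocation inside the helper forwards
 * the flag, e.g. __netdev_alloc_skb(priv->dev,
 * priv->dma_buf_sz + NET_IP_ALIGN, flags).
 */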
@@ -1647,11 +1676,6 @@ static int stmmac_hw_setup(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); int ret; - ret = init_dma_desc_rings(dev); - if (ret < 0) { - pr_err("%s: DMA descriptors initialization failed\n", __func__); - return ret; - } /* DMA initialization and SW reset */ ret = stmmac_init_dma_engine(priv); if (ret < 0) { @@ -1688,7 +1712,7 @@ static int stmmac_hw_setup(struct net_device *dev) if (ret && ret != -EOPNOTSUPP) pr_warn("%s: failed PTP initialisation\n", __func__); -#ifdef CONFIG_STMMAC_DEBUG_FS +#ifdef CONFIG_DEBUG_FS ret = stmmac_init_fs(dev); if (ret < 0) pr_warn("%s: failed debugFS registration\n", __func__); @@ -1705,10 +1729,6 @@ static int stmmac_hw_setup(struct net_device *dev) } priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; - priv->eee_enabled = stmmac_eee_init(priv); - - stmmac_init_tx_coalesce(priv); - if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { priv->rx_riwt = MAX_DMA_RIWT; priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); @@ -1761,12 +1781,20 @@ static int stmmac_open(struct net_device *dev) goto dma_desc_error; } + ret = init_dma_desc_rings(dev, GFP_KERNEL); + if (ret < 0) { + pr_err("%s: DMA descriptors initialization failed\n", __func__); + goto init_error; + } + ret = stmmac_hw_setup(dev); if (ret < 0) { pr_err("%s: Hw setup failed\n", __func__); goto init_error; } + stmmac_init_tx_coalesce(priv); + if (priv->phydev) phy_start(priv->phydev); @@ -1866,7 +1894,7 @@ static int stmmac_release(struct net_device *dev) netif_carrier_off(dev); -#ifdef CONFIG_STMMAC_DEBUG_FS +#ifdef CONFIG_DEBUG_FS stmmac_exit_fs(); #endif @@ -1876,7 +1904,7 @@ static int stmmac_release(struct net_device *dev) } /** - * stmmac_xmit: Tx entry point of the driver + * stmmac_xmit - Tx entry point of the driver * @skb : the socket buffer * @dev : device pointer * Description : this is the tx entry point of the driver. @@ -1894,7 +1922,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int nopaged_len = skb_headlen(skb); unsigned int enh_desc = priv->plat->enh_desc; + spin_lock(&priv->tx_lock); + if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { + spin_unlock(&priv->tx_lock); if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); /* This is a hard error, log it. */ @@ -1903,8 +1934,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - spin_lock(&priv->tx_lock); - if (priv->tx_path_in_lpi_mode) stmmac_disable_eee_mode(priv); @@ -2025,6 +2054,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; dma_map_err: + spin_unlock(&priv->tx_lock); dev_err(priv->device, "Tx dma map failed\n"); dev_kfree_skb(skb); priv->dev->stats.tx_dropped++; @@ -2049,7 +2079,7 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) /** - * stmmac_rx_refill: refill used skb preallocated buffers + * stmmac_rx_refill - refill used skb preallocated buffers * @priv: driver private structure * Description : this is to reallocate the skb for the reception process * that is based on zero-copy. @@ -2100,7 +2130,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) } /** - * stmmac_rx_refill: refill used skb preallocated buffers + * stmmac_rx - manage the receive process * @priv: driver private structure * @limit: napi budget. * Description : this is the function called by the napi poll method. 
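/* Editorial sketch: the NAPI contract under which stmmac_rx() is called
 * (generic shape of a poll method, not this driver's exact code):
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct stmmac_priv *priv =
 *			container_of(napi, struct stmmac_priv, napi);
 *		int work_done = stmmac_rx(priv, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			stmmac_enable_dma_irq(priv);
 *		}
 *		return work_done;
 *	}
 */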
@@ -2281,9 +2311,7 @@ static void stmmac_set_rx_mode(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); - spin_lock(&priv->lock); priv->hw->mac->set_filter(priv->hw, dev); - spin_unlock(&priv->lock); } /** @@ -2371,8 +2399,11 @@ static int stmmac_set_features(struct net_device *netdev, * @irq: interrupt number. * @dev_id: to pass the net device pointer. * Description: this is the main driver interrupt service routine. - * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI - * interrupts. + * It can call: + * o DMA service routine (to manage incoming frame reception and transmission + * status) + * o Core interrupts to manage: remote wake-up, management counter, LPI + * interrupts. */ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) { @@ -2453,7 +2484,7 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return ret; } -#ifdef CONFIG_STMMAC_DEBUG_FS +#ifdef CONFIG_DEBUG_FS static struct dentry *stmmac_fs_dir; static struct dentry *stmmac_rings_status; static struct dentry *stmmac_dma_cap; @@ -2638,7 +2669,7 @@ static void stmmac_exit_fs(void) debugfs_remove(stmmac_dma_cap); debugfs_remove(stmmac_fs_dir); } -#endif /* CONFIG_STMMAC_DEBUG_FS */ +#endif /* CONFIG_DEBUG_FS */ static const struct net_device_ops stmmac_netdev_ops = { .ndo_open = stmmac_open, @@ -2659,11 +2690,10 @@ static const struct net_device_ops stmmac_netdev_ops = { /** * stmmac_hw_init - Init the MAC device * @priv: driver private structure - * Description: this function detects which MAC device - * (GMAC/MAC10-100) has to attached, checks the HW capability - * (if supported) and sets the driver's features (for example - * to use the ring or chaine mode or support the normal/enh - * descriptor structure). + * Description: this function is to configure the MAC device according to + * some platform parameters or the HW capability register. It prepares the + * driver to use either ring or chain modes and to set up either enhanced or + * normal descriptors. */ static int stmmac_hw_init(struct stmmac_priv *priv) { @@ -2887,6 +2917,7 @@ error_clk_get: return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(stmmac_dvr_probe); /** * stmmac_dvr_remove @@ -2916,8 +2947,15 @@ int stmmac_dvr_remove(struct net_device *ndev) return 0; } +EXPORT_SYMBOL_GPL(stmmac_dvr_remove); -#ifdef CONFIG_PM +/** + * stmmac_suspend - suspend callback + * @ndev: net device pointer + * Description: this is the function to suspend the device and it is called + * by the platform driver to stop the network queue, release the resources, + * program the PMT register (for WoL), clean and release driver resources. + */ int stmmac_suspend(struct net_device *ndev) { struct stmmac_priv *priv = netdev_priv(ndev); @@ -2950,7 +2988,7 @@ int stmmac_suspend(struct net_device *ndev) stmmac_set_mac(priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case PWM is off */ - clk_disable_unprepare(priv->stmmac_clk); + clk_disable(priv->stmmac_clk); } spin_unlock_irqrestore(&priv->lock, flags); @@ -2959,7 +2997,14 @@ int stmmac_suspend(struct net_device *ndev) priv->oldduplex = -1; return 0; } +EXPORT_SYMBOL_GPL(stmmac_suspend);
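/* Editorial note: the suspend hunk above replaces clk_disable_unprepare()
 * with clk_disable(), i.e. the clock now stays prepared across suspend.
 * In the common clock framework the calls pair up as:
 *
 *	clk_prepare_enable(clk)    = clk_prepare(clk)  then clk_enable(clk)
 *	clk_disable_unprepare(clk) = clk_disable(clk)  then clk_unprepare(clk)
 *
 * clk_enable()/clk_disable() may be called from atomic context while
 * clk_prepare()/clk_unprepare() may sleep; keeping the clock prepared
 * lets the resume path below re-enable it with plain clk_enable()
 * without sleeping.
 */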
+ */ int stmmac_resume(struct net_device *ndev) { struct stmmac_priv *priv = netdev_priv(ndev); @@ -2982,7 +3027,7 @@ int stmmac_resume(struct net_device *ndev) } else { pinctrl_pm_select_default_state(priv->device); /* enable the clk prevously disabled */ - clk_prepare_enable(priv->stmmac_clk); + clk_enable(priv->stmmac_clk); /* reset the phy so that it's ready */ if (priv->mii) stmmac_mdio_reset(priv->mii); @@ -2990,7 +3035,9 @@ int stmmac_resume(struct net_device *ndev) netif_device_attach(ndev); + init_dma_desc_rings(ndev, GFP_ATOMIC); stmmac_hw_setup(ndev); + stmmac_init_tx_coalesce(priv); napi_enable(&priv->napi); @@ -3003,37 +3050,7 @@ int stmmac_resume(struct net_device *ndev) return 0; } -#endif /* CONFIG_PM */ - -/* Driver can be configured w/ and w/ both PCI and Platf drivers - * depending on the configuration selected. - */ -static int __init stmmac_init(void) -{ - int ret; - - ret = stmmac_register_platform(); - if (ret) - goto err; - ret = stmmac_register_pci(); - if (ret) - goto err_pci; - return 0; -err_pci: - stmmac_unregister_platform(); -err: - pr_err("stmmac: driver registration failed\n"); - return ret; -} - -static void __exit stmmac_exit(void) -{ - stmmac_unregister_platform(); - stmmac_unregister_pci(); -} - -module_init(stmmac_init); -module_exit(stmmac_exit); +EXPORT_SYMBOL_GPL(stmmac_resume); #ifndef MODULE static int __init stmmac_cmdline_opt(char *str) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 655a23bbc451..054520d67de4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -26,27 +26,26 @@ #include <linux/pci.h> #include "stmmac.h" -static struct plat_stmmacenet_data plat_dat; -static struct stmmac_mdio_bus_data mdio_data; -static struct stmmac_dma_cfg dma_cfg; - -static void stmmac_default_data(void) +static void stmmac_default_data(struct plat_stmmacenet_data *plat) { - memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data)); - plat_dat.bus_id = 1; - plat_dat.phy_addr = 0; - plat_dat.interface = PHY_INTERFACE_MODE_GMII; - plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ - plat_dat.has_gmac = 1; - plat_dat.force_sf_dma_mode = 1; - - mdio_data.phy_reset = NULL; - mdio_data.phy_mask = 0; - plat_dat.mdio_bus_data = &mdio_data; - - dma_cfg.pbl = 32; - dma_cfg.burst_len = DMA_AXI_BLEN_256; - plat_dat.dma_cfg = &dma_cfg; + plat->bus_id = 1; + plat->phy_addr = 0; + plat->interface = PHY_INTERFACE_MODE_GMII; + plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ + plat->has_gmac = 1; + plat->force_sf_dma_mode = 1; + + plat->mdio_bus_data->phy_reset = NULL; + plat->mdio_bus_data->phy_mask = 0; + + plat->dma_cfg->pbl = 32; + plat->dma_cfg->burst_len = DMA_AXI_BLEN_256; + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; } /** @@ -64,64 +63,61 @@ static void stmmac_default_data(void) static int stmmac_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - int ret = 0; - void __iomem *addr = NULL; - struct stmmac_priv *priv = NULL; + struct plat_stmmacenet_data *plat; + struct stmmac_priv *priv; int i; + int ret; + + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return -ENOMEM; + + plat->mdio_bus_data = devm_kzalloc(&pdev->dev, + sizeof(*plat->mdio_bus_data), + GFP_KERNEL); + if (!plat->mdio_bus_data) + return 
-ENOMEM; + + plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), + GFP_KERNEL); + if (!plat->dma_cfg) + return -ENOMEM; /* Enable pci device */ - ret = pci_enable_device(pdev); + ret = pcim_enable_device(pdev); if (ret) { - pr_err("%s : ERROR: failed to enable %s device\n", __func__, - pci_name(pdev)); + dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", + __func__); return ret; } - if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) { - pr_err("%s: ERROR: failed to get PCI region\n", __func__); - ret = -ENODEV; - goto err_out_req_reg_failed; - } /* Get the base address of device */ - for (i = 0; i <= 5; i++) { + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { if (pci_resource_len(pdev, i) == 0) continue; - addr = pci_iomap(pdev, i, 0); - if (addr == NULL) { - pr_err("%s: ERROR: cannot map register memory aborting", - __func__); - ret = -EIO; - goto err_out_map_failed; - } + ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev)); + if (ret) + return ret; break; } + pci_set_master(pdev); - stmmac_default_data(); + stmmac_default_data(plat); - priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr); + priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]); if (IS_ERR(priv)) { - pr_err("%s: main driver probe failed", __func__); - ret = PTR_ERR(priv); - goto err_out; + dev_err(&pdev->dev, "%s: main driver probe failed\n", __func__); + return PTR_ERR(priv); } priv->dev->irq = pdev->irq; priv->wol_irq = pdev->irq; pci_set_drvdata(pdev, priv->dev); - pr_debug("STMMAC platform driver registration completed"); + dev_dbg(&pdev->dev, "STMMAC PCI driver registration completed\n"); return 0; - -err_out: - pci_clear_master(pdev); -err_out_map_failed: - pci_release_regions(pdev); -err_out_req_reg_failed: - pci_disable_device(pdev); - - return ret; } /** @@ -134,39 +130,30 @@ err_out_req_reg_failed: static void stmmac_pci_remove(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); - struct stmmac_priv *priv = netdev_priv(ndev); stmmac_dvr_remove(ndev); - - pci_iounmap(pdev, priv->ioaddr); - pci_release_regions(pdev); - pci_disable_device(pdev); } -#ifdef CONFIG_PM -static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state) +#ifdef CONFIG_PM_SLEEP +static int stmmac_pci_suspend(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct net_device *ndev = pci_get_drvdata(pdev); - int ret; - ret = stmmac_suspend(ndev); - pci_save_state(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return ret; + return stmmac_suspend(ndev); } -static int stmmac_pci_resume(struct pci_dev *pdev) +static int stmmac_pci_resume(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct net_device *ndev = pci_get_drvdata(pdev); - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - return stmmac_resume(ndev); } #endif +static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); + #define STMMAC_VENDOR_ID 0x700 #define STMMAC_DEVICE_ID 0x1108 @@ -178,17 +165,18 @@ static const struct pci_device_id stmmac_id_table[] = { MODULE_DEVICE_TABLE(pci, stmmac_id_table); -struct pci_driver stmmac_pci_driver = { +static struct pci_driver stmmac_pci_driver = { .name = STMMAC_RESOURCE_NAME, .id_table = stmmac_id_table, .probe = stmmac_pci_probe, .remove = stmmac_pci_remove, -#ifdef CONFIG_PM - .suspend = stmmac_pci_suspend, - .resume = stmmac_pci_resume, -#endif + .driver = { + .pm = &stmmac_pm_ops, + }, }; +module_pci_driver(stmmac_pci_driver); + MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI 
driver"); MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>"); MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index db56fa7ce8f9..07054ce84ba8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -27,25 +27,19 @@ #include <linux/of.h> #include <linux/of_net.h> #include <linux/of_device.h> + #include "stmmac.h" +#include "stmmac_platform.h" static const struct of_device_id stmmac_dt_ids[] = { -#ifdef CONFIG_DWMAC_MESON + /* SoC specific glue layers should come before generic bindings */ { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data}, -#endif -#ifdef CONFIG_DWMAC_SUNXI { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, -#endif -#ifdef CONFIG_DWMAC_STI { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data}, { .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data}, { .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data}, { .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data}, -#endif -#ifdef CONFIG_DWMAC_SOCFPGA { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data }, -#endif - /* SoC specific glue layers should come before generic bindings */ { .compatible = "st,spear600-gmac"}, { .compatible = "snps,dwmac-3.610"}, { .compatible = "snps,dwmac-3.70a"}, @@ -57,7 +51,11 @@ MODULE_DEVICE_TABLE(of, stmmac_dt_ids); #ifdef CONFIG_OF -/* This function validates the number of Multicast filtering bins specified +/** + * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins + * @mcast_bins: Multicast filtering bins + * Description: + * this function validates the number of Multicast filtering bins specified * by the configuration through the device tree. The Synopsys GMAC supports * 64 bins, 128 bins, or 256 bins. "bins" refer to the division of CRC * number space. 64 bins correspond to 6 bits of the CRC, 128 corresponds @@ -83,7 +81,11 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins) return x; } -/* This function validates the number of Unicast address entries supported +/** + * dwmac1000_validate_ucast_entries - validate the Unicast address entries + * @ucast_entries: number of Unicast address entries + * Description: + * This function validates the number of Unicast address entries supported * by a particular Synopsys 10/100/1000 controller. The Synopsys controller * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter * logic. This function validates a valid, supported configuration is @@ -109,6 +111,15 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries) return x; } +/** + * stmmac_probe_config_dt - parse device-tree driver parameters + * @pdev: platform_device structure + * @plat: driver data platform structure + * @mac: MAC address to use + * Description: + * this function is to read the driver parameters from device-tree and + * set some private fields that will be used by the main at runtime. 
+ */ static int stmmac_probe_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat, const char **mac) @@ -177,12 +188,6 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, */ plat->maxmtu = JUMBO_LEN; - /* Set default value for multicast hash bins */ - plat->multicast_filter_bins = HASH_TABLE_SIZE; - - /* Set default value for unicast filter entries */ - plat->unicast_filter_entries = 1; - /* * Currently only the properties needed on SPEAr600 * are provided. All other properties should be added @@ -248,11 +253,11 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, #endif /* CONFIG_OF */ /** - * stmmac_pltfr_probe + * stmmac_pltfr_probe - platform driver probe. * @pdev: platform device pointer - * Description: platform_device probe function. It allocates - * the necessary resources and invokes the main to init - * the net device, register the mdio bus etc. + * Description: platform_device probe function. It is to allocate + * the necessary platform resources, invoke custom helper (if required) and + * invoke the main probe function. */ static int stmmac_pltfr_probe(struct platform_device *pdev) { @@ -270,6 +275,13 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) return PTR_ERR(addr); plat_dat = dev_get_platdata(&pdev->dev); + + /* Set default value for multicast hash bins */ + plat_dat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat_dat->unicast_filter_entries = 1; + if (pdev->dev.of_node) { if (!plat_dat) plat_dat = devm_kzalloc(&pdev->dev, @@ -368,7 +380,14 @@ static int stmmac_pltfr_remove(struct platform_device *pdev) return ret; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP +/** + * stmmac_pltfr_suspend + * @dev: device pointer + * Description: this function is invoked when suspend the driver and it direcly + * call the main suspend function and then, if required, on some platform, it + * can call an exit helper. + */ static int stmmac_pltfr_suspend(struct device *dev) { int ret; @@ -383,6 +402,13 @@ static int stmmac_pltfr_suspend(struct device *dev) return ret; } +/** + * stmmac_pltfr_resume + * @dev: device pointer + * Description: this function is invoked when resume the driver before calling + * the main resume function, on some platforms, it can call own init helper + * if required. 
+ */ static int stmmac_pltfr_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); @@ -394,13 +420,12 @@ static int stmmac_pltfr_resume(struct device *dev) return stmmac_resume(ndev); } - -#endif /* CONFIG_PM */ +#endif /* CONFIG_PM_SLEEP */ static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, - stmmac_pltfr_suspend, stmmac_pltfr_resume); + stmmac_pltfr_suspend, stmmac_pltfr_resume); -struct platform_driver stmmac_pltfr_driver = { +static struct platform_driver stmmac_pltfr_driver = { .probe = stmmac_pltfr_probe, .remove = stmmac_pltfr_remove, .driver = { @@ -408,9 +433,11 @@ struct platform_driver stmmac_pltfr_driver = { .owner = THIS_MODULE, .pm = &stmmac_pltfr_pm_ops, .of_match_table = of_match_ptr(stmmac_dt_ids), - }, + }, }; +module_platform_driver(stmmac_pltfr_driver); + MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h new file mode 100644 index 000000000000..25dd1f7ace02 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -0,0 +1,28 @@ +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#ifndef __STMMAC_PLATFORM_H__ +#define __STMMAC_PLATFORM_H__ + +extern const struct stmmac_of_data meson6_dwmac_data; +extern const struct stmmac_of_data sun7i_gmac_data; +extern const struct stmmac_of_data stih4xx_dwmac_data; +extern const struct stmmac_of_data stid127_dwmac_data; +extern const struct stmmac_of_data socfpga_gmac_data; + +#endif /* __STMMAC_PLATFORM_H__ */ diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 02d370e58110..3dc1f68b322d 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -5179,8 +5179,7 @@ static void cas_remove_one(struct pci_dev *pdev) cp = netdev_priv(dev); unregister_netdev(dev); - if (cp->fw_data) - vfree(cp->fw_data); + vfree(cp->fw_data); mutex_lock(&cp->pm_mutex); cancel_work_sync(&cp->reset_task); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 904fd1ab5f6e..4aaa3240453a 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6651,13 +6651,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, return NETDEV_TX_BUSY; } - if (skb->len < ETH_ZLEN) { - unsigned int pad_bytes = ETH_ZLEN - skb->len; - - if (skb_pad(skb, pad_bytes)) - goto out; - skb_put(skb, pad_bytes); - } + if (eth_skb_pad(skb)) + goto out; len = sizeof(struct tx_pkt_hdr) + 15; if (skb_headroom(skb) < len) { diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 72c8525d5457..9c014803b03b 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp) HMD(("init rxring, ")); for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb; + u32 mapping; skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); if (!skb) { @@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp) /* Because we reserve afterwards. */ skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); + mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(hp->dma_dev, mapping)) { + dev_kfree_skb_any(skb); + hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0); + continue; + } hme_write_rxd(hp, &hb->happy_meal_rxd[i], (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), - dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, - DMA_FROM_DEVICE)); + mapping); skb_reserve(skb, RX_OFFSET); } @@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) skb = hp->rx_skbs[elem]; if (len > RX_COPY_THRESHOLD) { struct sk_buff *new_skb; + u32 mapping; /* Now refill the entry, if we can. 
*/ new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); @@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) drops++; goto drop_it; } + skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); + mapping = dma_map_single(hp->dma_dev, new_skb->data, + RX_BUF_ALLOC_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) { + dev_kfree_skb_any(new_skb); + drops++; + goto drop_it; + } + dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); hp->rx_skbs[elem] = new_skb; - skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), - dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE, - DMA_FROM_DEVICE)); + mapping); skb_reserve(new_skb, RX_OFFSET); /* Trim the original skb for the netif. */ @@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } +static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping, + u32 first_len, u32 first_entry, u32 entry) +{ + struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0]; + + dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE); + + first_entry = NEXT_TX(first_entry); + while (first_entry != entry) { + struct happy_meal_txd *this = &txbase[first_entry]; + u32 addr, len; + + addr = hme_read_desc32(hp, &this->tx_addr); + len = hme_read_desc32(hp, &this->tx_flags); + len &= TXFLAG_SIZE; + dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE); + } +} + static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, len = skb->len; mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) + goto out_dma_error; tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], (tx_flags | (len & TXFLAG_SIZE)), @@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, first_len = skb_headlen(skb); first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping))) + goto out_dma_error; entry = NEXT_TX(entry); for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { @@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, len = skb_frag_size(this_frag); mapping = skb_frag_dma_map(hp->dma_dev, this_frag, 0, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) { + unmap_partial_tx_skb(hp, first_mapping, first_len, + first_entry, entry); + goto out_dma_error; + } this_txflags = tx_flags; if (frag == skb_shinfo(skb)->nr_frags - 1) this_txflags |= TXFLAG_EOP; @@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); return NETDEV_TX_OK; + +out_dma_error: + hp->tx_skbs[hp->tx_new] = NULL; + spin_unlock_irq(&hp->happy_lock); + + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; } static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 3652afd3ec78..90c86cd3be14 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -15,12 +15,14 @@ #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/mutex.h> 
+#include <linux/highmem.h> #include <linux/if_vlan.h> #if IS_ENABLED(CONFIG_IPV6) #include <linux/icmpv6.h> #endif +#include <net/ip.h> #include <net/icmp.h> #include <net/route.h> @@ -40,6 +42,8 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); +#define VNET_MAX_TXQS 16 + /* Heuristic for the number of times to exponentially backoff and * retry sending an LDC trigger when EAGAIN is encountered */ @@ -49,6 +53,8 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start); /* Ordered from largest major to lowest */ static struct vio_version vnet_versions[] = { + { .major = 1, .minor = 8 }, + { .major = 1, .minor = 7 }, { .major = 1, .minor = 6 }, { .major = 1, .minor = 0 }, }; @@ -71,13 +77,19 @@ static int vnet_handle_unknown(struct vnet_port *port, void *arg) return -ECONNRESET; } +static int vnet_port_alloc_tx_ring(struct vnet_port *port); + static int vnet_send_attr(struct vio_driver_state *vio) { struct vnet_port *port = to_vnet_port(vio); struct net_device *dev = port->vp->dev; struct vio_net_attr_info pkt; int framelen = ETH_FRAME_LEN; - int i; + int i, err; + + err = vnet_port_alloc_tx_ring(to_vnet_port(vio)); + if (err) + return err; memset(&pkt, 0, sizeof(pkt)); pkt.tag.type = VIO_TYPE_CTRL; @@ -108,8 +120,15 @@ static int vnet_send_attr(struct vio_driver_state *vio) pkt.mtu = framelen + VLAN_HLEN; } - pkt.plnk_updt = PHYSLINK_UPDATE_NONE; pkt.cflags = 0; + if (vio_version_after_eq(vio, 1, 7) && port->tso) { + pkt.cflags |= VNET_LSO_IPV4_CAPAB; + if (!port->tsolen) + port->tsolen = VNET_MAXTSO; + pkt.ipv4_lso_maxlen = port->tsolen; + } + + pkt.plnk_updt = PHYSLINK_UPDATE_NONE; viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " @@ -163,6 +182,26 @@ static int handle_attr_info(struct vio_driver_state *vio, } port->rmtu = localmtu; + /* LSO negotiation */ + if (vio_version_after_eq(vio, 1, 7)) + port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB); + else + port->tso = false; + if (port->tso) { + if (!port->tsolen) + port->tsolen = VNET_MAXTSO; + port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen); + if (port->tsolen < VNET_MINTSO) { + port->tso = false; + port->tsolen = 0; + pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; + } + pkt->ipv4_lso_maxlen = port->tsolen; + } else { + pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; + pkt->ipv4_lso_maxlen = 0; + } + /* for version >= 1.6, ACK packet mode we support */ if (vio_version_after_eq(vio, 1, 6)) { pkt->xfer_mode = VIO_NEW_DRING_MODE; @@ -274,10 +313,42 @@ static struct sk_buff *alloc_and_align_skb(struct net_device *dev, return skb; } -static int vnet_rx_one(struct vnet_port *port, unsigned int len, - struct ldc_trans_cookie *cookies, int ncookies) +static inline void vnet_fullcsum(struct sk_buff *skb) +{ + struct iphdr *iph = ip_hdr(skb); + int offset = skb_transport_offset(skb); + + if (skb->protocol != htons(ETH_P_IP)) + return; + if (iph->protocol != IPPROTO_TCP && + iph->protocol != IPPROTO_UDP) + return; + skb->ip_summed = CHECKSUM_NONE; + skb->csum_level = 1; + skb->csum = 0; + if (iph->protocol == IPPROTO_TCP) { + struct tcphdr *ptcp = tcp_hdr(skb); + + ptcp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - offset, IPPROTO_TCP, + skb->csum); + } else if (iph->protocol == IPPROTO_UDP) { + struct udphdr *pudp = udp_hdr(skb); + + pudp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + 
pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - offset, IPPROTO_UDP, + skb->csum); + } +} + +static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) { struct net_device *dev = port->vp->dev; + unsigned int len = desc->size; unsigned int copy_len; struct sk_buff *skb; int err; @@ -299,7 +370,7 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len, skb_put(skb, copy_len); err = ldc_copy(port->vio.lp, LDC_COPY_IN, skb->data, copy_len, 0, - cookies, ncookies); + desc->cookies, desc->ncookies); if (unlikely(err < 0)) { dev->stats.rx_frame_errors++; goto out_free_skb; @@ -309,11 +380,33 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len, skb_trim(skb, len); skb->protocol = eth_type_trans(skb, dev); - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; + if (vio_version_after_eq(&port->vio, 1, 8)) { + struct vio_net_dext *dext = vio_net_ext(desc); - netif_rx(skb); + if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { + if (skb->protocol == ETH_P_IP) { + struct iphdr *iph = (struct iphdr *)skb->data; + iph->check = 0; + ip_send_check(iph); + } + } + if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && + skb->ip_summed == CHECKSUM_NONE) + vnet_fullcsum(skb); + if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_level = 0; + if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK) + skb->csum_level = 1; + } + } + + skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL; + + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + napi_gro_receive(&port->napi, skb); return 0; out_free_skb: @@ -430,6 +523,7 @@ static int vnet_walk_rx_one(struct vnet_port *port, struct vio_driver_state *vio = &port->vio; int err; + BUG_ON(desc == NULL); if (IS_ERR(desc)) return PTR_ERR(desc); @@ -444,7 +538,7 @@ static int vnet_walk_rx_one(struct vnet_port *port, desc->cookies[0].cookie_addr, desc->cookies[0].cookie_size); - err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); + err = vnet_rx_one(port, desc); if (err == -ECONNRESET) return err; desc->hdr.state = VIO_DESC_DONE; @@ -456,10 +550,11 @@ static int vnet_walk_rx_one(struct vnet_port *port, } static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, - u32 start, u32 end) + u32 start, u32 end, int *npkts, int budget) { struct vio_driver_state *vio = &port->vio; int ack_start = -1, ack_end = -1; + bool send_ack = true; end = (end == (u32) -1) ? 
prev_idx(start, dr) : next_idx(end, dr); @@ -471,6 +566,7 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, return err; if (err != 0) break; + (*npkts)++; if (ack_start == -1) ack_start = start; ack_end = start; @@ -482,13 +578,26 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, return err; ack_start = -1; } + if ((*npkts) >= budget) { + send_ack = false; + break; + } } if (unlikely(ack_start == -1)) ack_start = ack_end = prev_idx(start, dr); - return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED); + if (send_ack) { + port->napi_resume = false; + return vnet_send_ack(port, dr, ack_start, ack_end, + VIO_DRING_STOPPED); + } else { + port->napi_resume = true; + port->napi_stop_idx = ack_end; + return 1; + } } -static int vnet_rx(struct vnet_port *port, void *msgbuf) +static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts, + int budget) { struct vio_dring_data *pkt = msgbuf; struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; @@ -505,11 +614,13 @@ static int vnet_rx(struct vnet_port *port, void *msgbuf) return 0; } - dr->rcv_nxt++; + if (!port->napi_resume) + dr->rcv_nxt++; /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ - return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx); + return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, + npkts, budget); } static int idx_is_pending(struct vio_dring_state *dr, u32 end) @@ -535,19 +646,26 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) struct vnet *vp; u32 end; struct vio_net_desc *desc; + struct netdev_queue *txq; + if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) return 0; end = pkt->end_idx; - if (unlikely(!idx_is_pending(dr, end))) + vp = port->vp; + dev = vp->dev; + netif_tx_lock(dev); + if (unlikely(!idx_is_pending(dr, end))) { + netif_tx_unlock(dev); return 0; + } /* sync for race conditions with vnet_start_xmit() and tell xmit it * is time to send a trigger. */ dr->cons = next_idx(end, dr); desc = vio_dring_entry(dr, dr->cons); - if (desc->hdr.state == VIO_DESC_READY && port->start_cons) { + if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) { /* vnet_start_xmit() just populated this dring but missed * sending the "start" LDC message to the consumer. * Send a "start" trigger on its behalf. @@ -559,11 +677,10 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) } else { port->start_cons = true; } + netif_tx_unlock(dev); - - vp = port->vp; - dev = vp->dev; - if (unlikely(netif_queue_stopped(dev) && + txq = netdev_get_tx_queue(dev, port->q_index); + if (unlikely(netif_tx_queue_stopped(txq) && vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) return 1; @@ -591,58 +708,64 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf) return 0; } -static void maybe_tx_wakeup(unsigned long param) +/* Got back a STOPPED LDC message on port. If the queue is stopped, + * wake it up so that we'll send out another START message at the + * next TX. 
+ */ +static void maybe_tx_wakeup(struct vnet_port *port) { - struct vnet *vp = (struct vnet *)param; - struct net_device *dev = vp->dev; + struct netdev_queue *txq; - netif_tx_lock(dev); - if (likely(netif_queue_stopped(dev))) { - struct vnet_port *port; - int wake = 1; + txq = netdev_get_tx_queue(port->vp->dev, port->q_index); + __netif_tx_lock(txq, smp_processor_id()); + if (likely(netif_tx_queue_stopped(txq))) { + struct vio_dring_state *dr; - list_for_each_entry(port, &vp->port_list, list) { - struct vio_dring_state *dr; - - dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - if (vnet_tx_dring_avail(dr) < - VNET_TX_WAKEUP_THRESH(dr)) { - wake = 0; - break; - } - } - if (wake) - netif_wake_queue(dev); + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + netif_tx_wake_queue(txq); } - netif_tx_unlock(dev); + __netif_tx_unlock(txq); } -static void vnet_event(void *arg, int event) +static inline bool port_is_up(struct vnet_port *vnet) +{ + struct vio_driver_state *vio = &vnet->vio; + + return !!(vio->hs_state & VIO_HS_COMPLETE); +} + +static int vnet_event_napi(struct vnet_port *port, int budget) { - struct vnet_port *port = arg; struct vio_driver_state *vio = &port->vio; - unsigned long flags; int tx_wakeup, err; + int npkts = 0; + int event = (port->rx_event & LDC_EVENT_RESET); - spin_lock_irqsave(&vio->lock, flags); - +ldc_ctrl: if (unlikely(event == LDC_EVENT_RESET || event == LDC_EVENT_UP)) { vio_link_state_change(vio, event); - spin_unlock_irqrestore(&vio->lock, flags); if (event == LDC_EVENT_RESET) { port->rmtu = 0; + port->tso = true; + port->tsolen = 0; vio_port_up(vio); } - return; + port->rx_event = 0; + return 0; } + /* We may have multiple LDC events in rx_event. Unroll send_events() */ + event = (port->rx_event & LDC_EVENT_UP); + port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP); + if (event == LDC_EVENT_UP) + goto ldc_ctrl; + event = port->rx_event; + if (!(event & LDC_EVENT_DATA_READY)) + return 0; - if (unlikely(event != LDC_EVENT_DATA_READY)) { - pr_warn("Unexpected LDC event %d\n", event); - spin_unlock_irqrestore(&vio->lock, flags); - return; - } + /* we dont expect any other bits than RESET, UP, DATA_READY */ + BUG_ON(event != LDC_EVENT_DATA_READY); tx_wakeup = err = 0; while (1) { @@ -651,6 +774,20 @@ static void vnet_event(void *arg, int event) u64 raw[8]; } msgbuf; + if (port->napi_resume) { + struct vio_dring_data *pkt = + (struct vio_dring_data *)&msgbuf; + struct vio_dring_state *dr = + &port->vio.drings[VIO_DRIVER_RX_RING]; + + pkt->tag.type = VIO_TYPE_DATA; + pkt->tag.stype = VIO_SUBTYPE_INFO; + pkt->tag.stype_env = VIO_DRING_DATA; + pkt->seq = dr->rcv_nxt; + pkt->start_idx = next_idx(port->napi_stop_idx, dr); + pkt->end_idx = -1; + goto napi_resume; + } err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); if (unlikely(err < 0)) { if (err == -ECONNRESET) @@ -667,10 +804,22 @@ static void vnet_event(void *arg, int event) err = vio_validate_sid(vio, &msgbuf.tag); if (err < 0) break; - +napi_resume: if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { - err = vnet_rx(port, &msgbuf); + if (!port_is_up(port)) { + /* failures like handshake_failure() + * may have cleaned up dring, but + * NAPI polling may bring us here. 
+ */ + err = -ECONNRESET; + break; + } + err = vnet_rx(port, &msgbuf, &npkts, budget); + if (npkts >= budget) + break; + if (npkts == 0) + break; } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { err = vnet_ack(port, &msgbuf); if (err > 0) @@ -691,15 +840,34 @@ static void vnet_event(void *arg, int event) if (err == -ECONNRESET) break; } - spin_unlock(&vio->lock); - /* Kick off a tasklet to wake the queue. We cannot call - * maybe_tx_wakeup directly here because we could deadlock on - * netif_tx_lock() with dev_watchdog() - */ if (unlikely(tx_wakeup && err != -ECONNRESET)) - tasklet_schedule(&port->vp->vnet_tx_wakeup); + maybe_tx_wakeup(port); + return npkts; +} + +static int vnet_poll(struct napi_struct *napi, int budget) +{ + struct vnet_port *port = container_of(napi, struct vnet_port, napi); + struct vio_driver_state *vio = &port->vio; + int processed = vnet_event_napi(port, budget); + + if (processed < budget) { + napi_complete(napi); + port->rx_event &= ~LDC_EVENT_DATA_READY; + vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); + } + return processed; +} + +static void vnet_event(void *arg, int event) +{ + struct vnet_port *port = arg; + struct vio_driver_state *vio = &port->vio; + + port->rx_event |= event; + vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED); + napi_schedule(&port->napi); - local_irq_restore(flags); } static int __vnet_tx_trigger(struct vnet_port *port, u32 start) @@ -746,26 +914,19 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start) return err; } -static inline bool port_is_up(struct vnet_port *vnet) -{ - struct vio_driver_state *vio = &vnet->vio; - - return !!(vio->hs_state & VIO_HS_COMPLETE); -} - struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) { unsigned int hash = vnet_hashfn(skb->data); struct hlist_head *hp = &vp->port_hash[hash]; struct vnet_port *port; - hlist_for_each_entry(port, hp, hash) { + hlist_for_each_entry_rcu(port, hp, hash) { if (!port_is_up(port)) continue; if (ether_addr_equal(port->raddr, skb->data)) return port; } - list_for_each_entry(port, &vp->port_list, list) { + list_for_each_entry_rcu(port, &vp->port_list, list) { if (!port->switch_port) continue; if (!port_is_up(port)) @@ -775,18 +936,6 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) return NULL; } -struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb) -{ - struct vnet_port *ret; - unsigned long flags; - - spin_lock_irqsave(&vp->lock, flags); - ret = __tx_port_find(vp, skb); - spin_unlock_irqrestore(&vp->lock, flags); - - return ret; -} - static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, unsigned *pending) { @@ -847,11 +996,10 @@ static void vnet_clean_timer_expire(unsigned long port0) struct vnet_port *port = (struct vnet_port *)port0; struct sk_buff *freeskbs; unsigned pending; - unsigned long flags; - spin_lock_irqsave(&port->vio.lock, flags); + netif_tx_lock(port->vp->dev); freeskbs = vnet_clean_tx_ring(port, &pending); - spin_unlock_irqrestore(&port->vio.lock, flags); + netif_tx_unlock(port->vp->dev); vnet_free_skbs(freeskbs); @@ -862,11 +1010,54 @@ static void vnet_clean_timer_expire(unsigned long port0) del_timer(&port->clean_timer); } -static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart, - int *plen) +static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb, + struct ldc_trans_cookie *cookies, int ncookies, + unsigned int map_perm) +{ + int i, nc, err, blen; + + /* header */ + blen = skb_headlen(skb); + if (blen < ETH_ZLEN) + blen = 
ETH_ZLEN; + blen += VNET_PACKET_SKIP; + blen += 8 - (blen & 7); + + err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies, + ncookies, map_perm); + if (err < 0) + return err; + nc = err; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + u8 *vaddr; + + if (nc < ncookies) { + vaddr = kmap_atomic(skb_frag_page(f)); + blen = skb_frag_size(f); + blen += 8 - (blen & 7); + err = ldc_map_single(lp, vaddr + f->page_offset, + blen, cookies + nc, ncookies - nc, + map_perm); + kunmap_atomic(vaddr); + } else { + err = -EMSGSIZE; + } + + if (err < 0) { + ldc_unmap(lp, cookies, nc); + return err; + } + nc += err; + } + return nc; +} + +static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) { struct sk_buff *nskb; - int len, pad; + int i, len, pad, docopy; len = skb->len; pad = 0; @@ -876,51 +1067,223 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart, } len += VNET_PACKET_SKIP; pad += 8 - (len & 7); - len += 8 - (len & 7); + /* make sure we have enough cookies and alignment in every frag */ + docopy = skb_shinfo(skb)->nr_frags >= ncookies; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + + docopy |= f->page_offset & 7; + } if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP || skb_tailroom(skb) < pad || - skb_headroom(skb) < VNET_PACKET_SKIP) { - nskb = alloc_and_align_skb(skb->dev, skb->len); + skb_headroom(skb) < VNET_PACKET_SKIP || docopy) { + int start = 0, offset; + __wsum csum; + + len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN; + nskb = alloc_and_align_skb(skb->dev, len); + if (nskb == NULL) { + dev_kfree_skb(skb); + return NULL; + } skb_reserve(nskb, VNET_PACKET_SKIP); - if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { + + nskb->protocol = skb->protocol; + offset = skb_mac_header(skb) - skb->data; + skb_set_mac_header(nskb, offset); + offset = skb_network_header(skb) - skb->data; + skb_set_network_header(nskb, offset); + offset = skb_transport_header(skb) - skb->data; + skb_set_transport_header(nskb, offset); + + offset = 0; + nskb->csum_offset = skb->csum_offset; + nskb->ip_summed = skb->ip_summed; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + start = skb_checksum_start_offset(skb); + if (start) { + struct iphdr *iph = ip_hdr(nskb); + int offset = start + nskb->csum_offset; + + if (skb_copy_bits(skb, 0, nskb->data, start)) { + dev_kfree_skb(nskb); + dev_kfree_skb(skb); + return NULL; + } + *(__sum16 *)(skb->data + offset) = 0; + csum = skb_copy_and_csum_bits(skb, start, + nskb->data + start, + skb->len - start, 0); + if (iph->protocol == IPPROTO_TCP || + iph->protocol == IPPROTO_UDP) { + csum = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - start, + iph->protocol, csum); + } + *(__sum16 *)(nskb->data + offset) = csum; + + nskb->ip_summed = CHECKSUM_NONE; + } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { dev_kfree_skb(nskb); dev_kfree_skb(skb); return NULL; } (void)skb_put(nskb, skb->len); + if (skb_is_gso(skb)) { + skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; + skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; + } dev_kfree_skb(skb); skb = nskb; } - - *pstart = skb->data - VNET_PACKET_SKIP; - *plen = len; return skb; } +static u16 +vnet_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port = __tx_port_find(vp, skb); + + if (port == NULL) + return 0; + return 
port->q_index; +} + +static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev); + +static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb) +{ + struct net_device *dev = port->vp->dev; + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct sk_buff *segs; + int maclen, datalen; + int status; + int gso_size, gso_type, gso_segs; + int hlen = skb_transport_header(skb) - skb_mac_header(skb); + int proto = IPPROTO_IP; + + if (skb->protocol == htons(ETH_P_IP)) + proto = ip_hdr(skb)->protocol; + else if (skb->protocol == htons(ETH_P_IPV6)) + proto = ipv6_hdr(skb)->nexthdr; + + if (proto == IPPROTO_TCP) + hlen += tcp_hdr(skb)->doff * 4; + else if (proto == IPPROTO_UDP) + hlen += sizeof(struct udphdr); + else { + pr_err("vnet_handle_offloads GSO with unknown transport " + "protocol %d tproto %d\n", skb->protocol, proto); + hlen = 128; /* XXX */ + } + datalen = port->tsolen - hlen; + + gso_size = skb_shinfo(skb)->gso_size; + gso_type = skb_shinfo(skb)->gso_type; + gso_segs = skb_shinfo(skb)->gso_segs; + + if (port->tso && gso_size < datalen) + gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen); + + if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) { + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(dev, port->q_index); + netif_tx_stop_queue(txq); + if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs) + return NETDEV_TX_BUSY; + netif_tx_wake_queue(txq); + } + + maclen = skb_network_header(skb) - skb_mac_header(skb); + skb_pull(skb, maclen); + + if (port->tso && gso_size < datalen) { + /* segment to TSO size */ + skb_shinfo(skb)->gso_size = datalen; + skb_shinfo(skb)->gso_segs = gso_segs; + + segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); + + /* restore gso_size & gso_segs */ + skb_shinfo(skb)->gso_size = gso_size; + skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len - hlen, + gso_size); + } else + segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); + if (IS_ERR(segs)) { + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + skb_push(skb, maclen); + skb_reset_mac_header(skb); + + status = 0; + while (segs) { + struct sk_buff *curr = segs; + + segs = segs->next; + curr->next = NULL; + if (port->tso && curr->len > dev->mtu) { + skb_shinfo(curr)->gso_size = gso_size; + skb_shinfo(curr)->gso_type = gso_type; + skb_shinfo(curr)->gso_segs = + DIV_ROUND_UP(curr->len - hlen, gso_size); + } else + skb_shinfo(curr)->gso_size = 0; + + skb_push(curr, maclen); + skb_reset_mac_header(curr); + memcpy(skb_mac_header(curr), skb_mac_header(skb), + maclen); + curr->csum_start = skb_transport_header(curr) - curr->head; + if (ip_hdr(curr)->protocol == IPPROTO_TCP) + curr->csum_offset = offsetof(struct tcphdr, check); + else if (ip_hdr(curr)->protocol == IPPROTO_UDP) + curr->csum_offset = offsetof(struct udphdr, check); + + if (!(status & NETDEV_TX_MASK)) + status = vnet_start_xmit(curr, dev); + if (status & NETDEV_TX_MASK) + dev_kfree_skb_any(curr); + } + + if (!(status & NETDEV_TX_MASK)) + dev_kfree_skb_any(skb); + return status; +} + static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct vnet *vp = netdev_priv(dev); - struct vnet_port *port = tx_port_find(vp, skb); + struct vnet_port *port = NULL; struct vio_dring_state *dr; struct vio_net_desc *d; - unsigned long flags; unsigned int len; struct sk_buff *freeskbs = NULL; int i, err, txi; - void *start = NULL; - int nlen = 0; unsigned pending = 0; + struct netdev_queue *txq; - if (unlikely(!port)) + rcu_read_lock(); + port = __tx_port_find(vp, skb); + if 
(unlikely(!port)) { + rcu_read_unlock(); goto out_dropped; + } - skb = vnet_skb_shape(skb, &start, &nlen); - - if (unlikely(!skb)) - goto out_dropped; + if (skb_is_gso(skb) && skb->len > port->tsolen) { + err = vnet_handle_offloads(port, skb); + rcu_read_unlock(); + return err; + } - if (skb->len > port->rmtu) { + if (!skb_is_gso(skb) && skb->len > port->rmtu) { unsigned long localmtu = port->rmtu - ETH_HLEN; if (vio_version_after_eq(&port->vio, 1, 3)) @@ -937,6 +1300,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) fl4.saddr = ip_hdr(skb)->saddr; rt = ip_route_output_key(dev_net(dev), &fl4); + rcu_read_unlock(); if (!IS_ERR(rt)) { skb_dst_set(skb, &rt->dst); icmp_send(skb, ICMP_DEST_UNREACH, @@ -951,18 +1315,26 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) goto out_dropped; } - spin_lock_irqsave(&port->vio.lock, flags); + skb = vnet_skb_shape(skb, 2); + + if (unlikely(!skb)) + goto out_dropped; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + vnet_fullcsum(skb); dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + i = skb_get_queue_mapping(skb); + txq = netdev_get_tx_queue(dev, i); if (unlikely(vnet_tx_dring_avail(dr) < 1)) { - if (!netif_queue_stopped(dev)) { - netif_stop_queue(dev); + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); /* This is a hard error, log it. */ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); dev->stats.tx_errors++; } - spin_unlock_irqrestore(&port->vio.lock, flags); + rcu_read_unlock(); return NETDEV_TX_BUSY; } @@ -978,16 +1350,15 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (len < ETH_ZLEN) len = ETH_ZLEN; - port->tx_bufs[txi].skb = skb; - skb = NULL; - - err = ldc_map_single(port->vio.lp, start, nlen, - port->tx_bufs[txi].cookies, VNET_MAXCOOKIES, - (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); + err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2, + (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); if (err < 0) { netdev_info(dev, "tx buffer map error %d\n", err); - goto out_dropped_unlock; + goto out_dropped; } + + port->tx_bufs[txi].skb = skb; + skb = NULL; port->tx_bufs[txi].ncookies = err; /* We don't rely on the ACKs to free the skb in vnet_start_xmit(), @@ -1003,6 +1374,21 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) d->ncookies = port->tx_bufs[txi].ncookies; for (i = 0; i < d->ncookies; i++) d->cookies[i] = port->tx_bufs[txi].cookies[i]; + if (vio_version_after_eq(&port->vio, 1, 7)) { + struct vio_net_dext *dext = vio_net_ext(d); + + memset(dext, 0, sizeof(*dext)); + if (skb_is_gso(port->tx_bufs[txi].skb)) { + dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb) + ->gso_size; + dext->flags |= VNET_PKT_IPV4_LSO; + } + if (vio_version_after_eq(&port->vio, 1, 8) && + !port->switch_port) { + dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK; + dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK; + } + } /* This has to be a non-SMP write barrier because we are writing * to memory which is shared with the peer LDOM. 
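The vnet_start_xmit rework above replaces tx_port_find()'s spinlock with an RCU read-side section: the lookup must use the _rcu list walker, and the port may only be dereferenced while rcu_read_lock() is held, which is why every return path in the new xmit code pairs with an rcu_read_unlock(). A sketch of the reader half, with port_matches() as a hypothetical stand-in for the MAC/switch-port checks:

static struct vnet_port *find_port_rcu(struct vnet *vp, struct sk_buff *skb)
{
	struct vnet_port *port;

	/* caller holds rcu_read_lock() across the lookup *and* the use */
	list_for_each_entry_rcu(port, &vp->port_list, list) {
		if (port_matches(port, skb))	/* hypothetical predicate */
			return port;
	}
	return NULL;
}

The matching writer-side discipline, list_del_rcu() followed by synchronize_rcu() before the port memory is reused, appears in the vnet_port_remove() hunk further down.
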
@@ -1039,7 +1425,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) netdev_info(dev, "TX trigger error %d\n", err); d->hdr.state = VIO_DESC_FREE; dev->stats.tx_carrier_errors++; - goto out_dropped_unlock; + goto out_dropped; } ldc_start_done: @@ -1050,31 +1436,29 @@ ldc_start_done: dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); if (unlikely(vnet_tx_dring_avail(dr) < 1)) { - netif_stop_queue(dev); + netif_tx_stop_queue(txq); if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) - netif_wake_queue(dev); + netif_tx_wake_queue(txq); } - spin_unlock_irqrestore(&port->vio.lock, flags); + (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); + rcu_read_unlock(); vnet_free_skbs(freeskbs); - (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); - return NETDEV_TX_OK; -out_dropped_unlock: - spin_unlock_irqrestore(&port->vio.lock, flags); - out_dropped: - if (skb) - dev_kfree_skb(skb); - vnet_free_skbs(freeskbs); if (pending) (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); else if (port) del_timer(&port->clean_timer); + if (port) + rcu_read_unlock(); + if (skb) + dev_kfree_skb(skb); + vnet_free_skbs(freeskbs); dev->stats.tx_dropped++; return NETDEV_TX_OK; } @@ -1087,14 +1471,14 @@ static void vnet_tx_timeout(struct net_device *dev) static int vnet_open(struct net_device *dev) { netif_carrier_on(dev); - netif_start_queue(dev); + netif_tx_start_all_queues(dev); return 0; } static int vnet_close(struct net_device *dev) { - netif_stop_queue(dev); + netif_tx_stop_all_queues(dev); netif_carrier_off(dev); return 0; @@ -1204,18 +1588,17 @@ static void vnet_set_rx_mode(struct net_device *dev) { struct vnet *vp = netdev_priv(dev); struct vnet_port *port; - unsigned long flags; - spin_lock_irqsave(&vp->lock, flags); - if (!list_empty(&vp->port_list)) { - port = list_entry(vp->port_list.next, struct vnet_port, list); + rcu_read_lock(); + list_for_each_entry_rcu(port, &vp->port_list, list) { if (port->switch_port) { __update_mc_list(vp, dev); __send_mc_list(vp, port); + break; } } - spin_unlock_irqrestore(&vp->lock, flags); + rcu_read_unlock(); } static int vnet_change_mtu(struct net_device *dev, int new_mtu) @@ -1295,18 +1678,20 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port) } } -static int vnet_port_alloc_tx_bufs(struct vnet_port *port) +static int vnet_port_alloc_tx_ring(struct vnet_port *port) { struct vio_dring_state *dr; - unsigned long len; + unsigned long len, elen; int i, err, ncookies; void *dring; dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - len = (VNET_TX_RING_SIZE * - (sizeof(struct vio_net_desc) + - (sizeof(struct ldc_trans_cookie) * 2))); + elen = sizeof(struct vio_net_desc) + + sizeof(struct ldc_trans_cookie) * 2; + if (vio_version_after_eq(&port->vio, 1, 7)) + elen += sizeof(struct vio_net_dext); + len = VNET_TX_RING_SIZE * elen; ncookies = VIO_MAX_RING_COOKIES; dring = ldc_alloc_exp_dring(port->vio.lp, len, @@ -1320,8 +1705,7 @@ static int vnet_port_alloc_tx_bufs(struct vnet_port *port) } dr->base = dring; - dr->entry_size = (sizeof(struct vio_net_desc) + - (sizeof(struct ldc_trans_cookie) * 2)); + dr->entry_size = elen; dr->num_entries = VNET_TX_RING_SIZE; dr->prod = dr->cons = 0; port->start_cons = true; /* need an initial trigger */ @@ -1342,6 +1726,21 @@ err_out: return err; } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void vnet_poll_controller(struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port; + unsigned long flags; + + spin_lock_irqsave(&vp->lock, flags); 
+ if (!list_empty(&vp->port_list)) { + port = list_entry(vp->port_list.next, struct vnet_port, list); + napi_schedule(&port->napi); + } + spin_unlock_irqrestore(&vp->lock, flags); +} +#endif static LIST_HEAD(vnet_list); static DEFINE_MUTEX(vnet_list_mutex); @@ -1354,6 +1753,10 @@ static const struct net_device_ops vnet_ops = { .ndo_tx_timeout = vnet_tx_timeout, .ndo_change_mtu = vnet_change_mtu, .ndo_start_xmit = vnet_start_xmit, + .ndo_select_queue = vnet_select_queue, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = vnet_poll_controller, +#endif }; static struct vnet *vnet_new(const u64 *local_mac) @@ -1362,7 +1765,7 @@ static struct vnet *vnet_new(const u64 *local_mac) struct vnet *vp; int err, i; - dev = alloc_etherdev(sizeof(*vp)); + dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1); if (!dev) return ERR_PTR(-ENOMEM); dev->needed_headroom = VNET_PACKET_SKIP + 8; @@ -1374,7 +1777,6 @@ static struct vnet *vnet_new(const u64 *local_mac) vp = netdev_priv(dev); spin_lock_init(&vp->lock); - tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp); vp->dev = dev; INIT_LIST_HEAD(&vp->port_list); @@ -1387,6 +1789,10 @@ static struct vnet *vnet_new(const u64 *local_mac) dev->ethtool_ops = &vnet_ethtool_ops; dev->watchdog_timeo = VNET_TX_TIMEOUT; + dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | + NETIF_F_HW_CSUM | NETIF_F_SG; + dev->features = dev->hw_features; + err = register_netdev(dev); if (err) { pr_err("Cannot register net device, aborting\n"); @@ -1434,7 +1840,6 @@ static void vnet_cleanup(void) vp = list_first_entry(&vnet_list, struct vnet, list); list_del(&vp->list); dev = vp->dev; - tasklet_kill(&vp->vnet_tx_wakeup); /* vio_unregister_driver() should have cleaned up port_list */ BUG_ON(!list_empty(&vp->port_list)); unregister_netdev(dev); @@ -1489,6 +1894,25 @@ static void print_version(void) const char *remote_macaddr_prop = "remote-mac-address"; +static void +vnet_port_add_txq(struct vnet_port *port) +{ + struct vnet *vp = port->vp; + int n; + + n = vp->nports++; + n = n & (VNET_MAX_TXQS - 1); + port->q_index = n; + netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index)); +} + +static void +vnet_port_rm_txq(struct vnet_port *port) +{ + port->vp->nports--; + netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index)); +} + static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) { struct mdesc_handle *hp; @@ -1536,9 +1960,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) if (err) goto err_out_free_port; - err = vnet_port_alloc_tx_bufs(port); - if (err) - goto err_out_free_ldc; + netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT); INIT_HLIST_NODE(&port->hash); INIT_LIST_HEAD(&port->list); @@ -1547,13 +1969,17 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL) switch_port = 1; port->switch_port = switch_port; + port->tso = true; + port->tsolen = 0; spin_lock_irqsave(&vp->lock, flags); if (switch_port) - list_add(&port->list, &vp->port_list); + list_add_rcu(&port->list, &vp->port_list); else - list_add_tail(&port->list, &vp->port_list); - hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]); + list_add_tail_rcu(&port->list, &vp->port_list); + hlist_add_head_rcu(&port->hash, + &vp->port_hash[vnet_hashfn(port->raddr)]); + vnet_port_add_txq(port); spin_unlock_irqrestore(&vp->lock, flags); dev_set_drvdata(&vdev->dev, port); 
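Taken together, the sunvnet hunks above convert receive processing from the LDC interrupt handler to a per-port NAPI instance: the interrupt handler only latches the event bits, masks the interrupt source and schedules NAPI, while the poll callback re-arms the interrupt once it completes under budget. This is also what lets the RX path use napi_gro_receive() instead of netif_rx(). Condensed from the hunks above:

static void vnet_event(void *arg, int event)
{
	struct vnet_port *port = arg;

	port->rx_event |= event;			/* latch what fired */
	vio_set_intr(port->vio.vdev->rx_ino, HV_INTR_DISABLED);
	napi_schedule(&port->napi);			/* defer real work to softirq */
}

static int vnet_poll(struct napi_struct *napi, int budget)
{
	struct vnet_port *port = container_of(napi, struct vnet_port, napi);
	int processed = vnet_event_napi(port, budget);

	if (processed < budget) {			/* ring drained: re-arm */
		napi_complete(napi);
		port->rx_event &= ~LDC_EVENT_DATA_READY;
		vio_set_intr(port->vio.vdev->rx_ino, HV_INTR_ENABLED);
	}
	return processed;
}
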
@@ -1564,15 +1990,13 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) setup_timer(&port->clean_timer, vnet_clean_timer_expire, (unsigned long)port); + napi_enable(&port->napi); vio_port_up(&port->vio); mdesc_release(hp); return 0; -err_out_free_ldc: - vio_ldc_free(&port->vio); - err_out_free_port: kfree(port); @@ -1586,17 +2010,18 @@ static int vnet_port_remove(struct vio_dev *vdev) struct vnet_port *port = dev_get_drvdata(&vdev->dev); if (port) { - struct vnet *vp = port->vp; - unsigned long flags; del_timer_sync(&port->vio.timer); - del_timer_sync(&port->clean_timer); - spin_lock_irqsave(&vp->lock, flags); - list_del(&port->list); - hlist_del(&port->hash); - spin_unlock_irqrestore(&vp->lock, flags); + napi_disable(&port->napi); + + list_del_rcu(&port->list); + hlist_del_rcu(&port->hash); + synchronize_rcu(); + del_timer_sync(&port->clean_timer); + vnet_port_rm_txq(port); + netif_napi_del(&port->napi); vnet_port_free_tx_bufs(port); vio_ldc_free(&port->vio); diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h index c91104542619..01ca78191683 100644 --- a/drivers/net/ethernet/sun/sunvnet.h +++ b/drivers/net/ethernet/sun/sunvnet.h @@ -20,6 +20,9 @@ #define VNET_TX_RING_SIZE 512 #define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4) +#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */ +#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */ + /* VNET packets are sent in buffers with the first 6 bytes skipped * so that after the ethernet header the IPv4/IPv6 headers are aligned * properly. @@ -40,8 +43,9 @@ struct vnet_port { struct hlist_node hash; u8 raddr[ETH_ALEN]; - u8 switch_port; - u8 __pad; + unsigned switch_port:1; + unsigned tso:1; + unsigned __pad:14; struct vnet *vp; @@ -56,6 +60,13 @@ struct vnet_port { struct timer_list clean_timer; u64 rmtu; + u16 tsolen; + + struct napi_struct napi; + u32 napi_stop_idx; + bool napi_resume; + int rx_event; + u16 q_index; }; static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio) @@ -97,7 +108,7 @@ struct vnet { struct list_head list; u64 local_mac; - struct tasklet_struct vnet_tx_wakeup; + int nports; }; #endif /* _SUNVNET_H */ diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 5d8cb7956113..605dd909bcc3 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_TI bool "Texas Instruments (TI) devices" default y - depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX || ARCH_KEYSTONE)) + depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC config TI_DAVINCI_MDIO tristate "TI DaVinci MDIO Support" - depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX || ARCH_KEYSTONE ) + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE select PHYLIB ---help--- This driver supports TI's DaVinci MDIO module. @@ -42,7 +42,7 @@ config TI_DAVINCI_MDIO config TI_DAVINCI_CPDMA tristate "TI DaVinci CPDMA Support" - depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX ) + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS ---help--- This driver supports TI's DaVinci CPDMA dma engine. 
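The ordering in the reworked vnet_port_remove() above matters once readers are lockless: NAPI must be quiesced, the port unlinked with the _rcu primitives, and a grace period observed before any per-port state is torn down. The same sequence, annotated (a sketch assuming the rest of the teardown is unchanged):

static int port_remove_ordered(struct vio_dev *vdev)
{
	struct vnet_port *port = dev_get_drvdata(&vdev->dev);

	del_timer_sync(&port->vio.timer);	/* stop the handshake timer */
	napi_disable(&port->napi);		/* no poll in flight past here */

	list_del_rcu(&port->list);		/* new lookups now miss the port */
	hlist_del_rcu(&port->hash);
	synchronize_rcu();			/* wait out lookups already running */

	del_timer_sync(&port->clean_timer);	/* safe: nothing can re-arm it */
	vnet_port_rm_txq(port);
	netif_napi_del(&port->napi);
	vnet_port_free_tx_bufs(port);
	vio_ldc_free(&port->vio);
	dev_set_drvdata(&vdev->dev, NULL);
	kfree(port);
	return 0;
}
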
@@ -58,7 +58,7 @@ config TI_CPSW_PHY_SEL config TI_CPSW tristate "TI CPSW Switch Support" - depends on ARM && (ARCH_DAVINCI || SOC_AM33XX) + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS select TI_DAVINCI_CPDMA select TI_DAVINCI_MDIO select TI_CPSW_PHY_SEL diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 952e1e4764b7..c560f9aeb55d 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -129,9 +129,9 @@ do { \ #define CPSW_VLAN_AWARE BIT(1) #define CPSW_ALE_VLAN_AWARE 1 -#define CPSW_FIFO_NORMAL_MODE (0 << 15) -#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15) -#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15) +#define CPSW_FIFO_NORMAL_MODE (0 << 16) +#define CPSW_FIFO_DUAL_MAC_MODE (1 << 16) +#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16) #define CPSW_INTPACEEN (0x3f << 16) #define CPSW_INTPRESCALE_MASK (0x7FF << 0) @@ -591,8 +591,8 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) if (enable) { unsigned long timeout = jiffies + HZ; - /* Disable Learn for all ports */ - for (i = 0; i < priv->data.slaves; i++) { + /* Disable Learn for all ports (host is port 0 and slaves are port 1 and up) */ + for (i = 0; i <= priv->data.slaves; i++) { cpsw_ale_control_set(ale, i, ALE_PORT_NOLEARN, 1); cpsw_ale_control_set(ale, i, @@ -616,11 +616,11 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); dev_dbg(&ndev->dev, "promiscuity enabled\n"); } else { - /* Flood All Unicast Packets to Host port */ + /* Don't Flood All Unicast Packets to Host port */ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); - /* Enable Learn for all ports */ - for (i = 0; i < priv->data.slaves; i++) { + /* Enable Learn for all ports (host is port 0 and slaves are port 1 and up) */ + for (i = 0; i <= priv->data.slaves; i++) { cpsw_ale_control_set(ale, i, ALE_PORT_NOLEARN, 0); cpsw_ale_control_set(ale, i, @@ -638,12 +638,16 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) if (ndev->flags & IFF_PROMISC) { /* Enable promiscuous mode */ cpsw_set_promiscious(ndev, true); + cpsw_ale_set_allmulti(priv->ale, IFF_ALLMULTI); return; } else { /* Disable promiscuous mode */ cpsw_set_promiscious(ndev, false); } + /* Restore allmulti on vlans if necessary */ + cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); + /* Clear all mcast from ALE */ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port); @@ -1149,6 +1153,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) const int port = priv->host_port; u32 reg; int i; + int unreg_mcast_mask; reg = (priv->version == CPSW_VERSION_1) ?
CPSW1_PORT_VLAN : CPSW2_PORT_VLAN; @@ -1158,9 +1163,14 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) for (i = 0; i < priv->data.slaves; i++) slave_write(priv->slaves + i, vlan, reg); + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = ALE_ALL_PORTS; + else + unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; + cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, - (ALE_PORT_1 | ALE_PORT_2) << port); + unreg_mcast_mask << port); } static void cpsw_init_host_port(struct cpsw_priv *priv) @@ -1620,11 +1630,17 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, unsigned short vid) { int ret; + int unreg_mcast_mask; + + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = ALE_ALL_PORTS; + else + unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; ret = cpsw_ale_add_vlan(priv->ale, vid, ALE_ALL_PORTS << priv->host_port, 0, ALE_ALL_PORTS << priv->host_port, - (ALE_PORT_1 | ALE_PORT_2) << priv->host_port); + unreg_mcast_mask << priv->host_port); if (ret != 0) return ret; @@ -2006,7 +2022,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, parp = of_get_property(slave_node, "phy_id", &lenp); if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); - return -EINVAL; + goto no_phy_slave; } mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); phyid = be32_to_cpup(parp+1); @@ -2019,6 +2035,14 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), PHY_ID_FMT, mdio->name, phyid); + slave_data->phy_if = of_get_phy_mode(slave_node); + if (slave_data->phy_if < 0) { + dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", + i); + return slave_data->phy_if; + } + +no_phy_slave: mac_addr = of_get_mac_address(slave_node); if (mac_addr) { memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); @@ -2030,14 +2054,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, return ret; } } - - slave_data->phy_if = of_get_phy_mode(slave_node); - if (slave_data->phy_if < 0) { - dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", - i); - return slave_data->phy_if; - } - if (data->dual_emac) { if (of_property_read_u32(slave_node, "dual_emac_res_vlan", &prop)) { diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 0579b2243bb6..097ebe7077ac 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -443,6 +443,35 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) return 0; } +void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int type, idx; + int unreg_mcast = 0; + + /* Only bother doing the work if the setting is actually changing */ + if (ale->allmulti == allmulti) + return; + + /* Remember the new setting to check against next time */ + ale->allmulti = allmulti; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + cpsw_ale_read(ale, idx, ale_entry); + type = cpsw_ale_get_entry_type(ale_entry); + if (type != ALE_TYPE_VLAN) + continue; + + unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry); + if (allmulti) + unreg_mcast |= 1; + else + unreg_mcast &= ~1; + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); + cpsw_ale_write(ale, idx, ale_entry); + } +} + struct ale_control_info { const char *name; int offset, port_offset; @@ -756,7 +785,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale) { if (!ale) return 
-EINVAL; - cpsw_ale_stop(ale); cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); kfree(ale); return 0; diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 31cf43cab42e..c0d4127aa549 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -27,6 +27,7 @@ struct cpsw_ale { struct cpsw_ale_params params; struct timer_list timer; unsigned long ageout; + int allmulti; }; enum cpsw_ale_control { @@ -103,6 +104,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, int reg_mcast, int unreg_mcast); int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port); +void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti); int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control); int cpsw_ale_control_set(struct cpsw_ale *ale, int port, diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index ab92f67da035..4a4388b813ac 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, switch (ptp_class & PTP_CLASS_PMASK) { case PTP_CLASS_IPV4: - offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; + offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; break; case PTP_CLASS_IPV6: offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 0f56b1c0e082..70a930ac4fa9 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -638,14 +638,12 @@ static int w5100_hw_probe(struct platform_device *pdev) } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) - return -ENXIO; - mem_size = resource_size(mem); - priv->base = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); + mem_size = resource_size(mem); + spin_lock_init(&priv->reg_lock); priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE; if (priv->indirect) { diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index f961f14a0473..7974b7d90fcc 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -558,14 +558,12 @@ static int w5300_hw_probe(struct platform_device *pdev) } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) - return -ENXIO; - mem_size = resource_size(mem); - priv->base = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); + mem_size = resource_size(mem); + spin_lock_init(&priv->reg_lock); priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE; if (priv->indirect) { diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 629077050fce..9c2d91ea0af4 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -224,8 +224,7 @@ static void temac_dma_bd_release(struct net_device *ndev) dma_free_coherent(ndev->dev.parent, sizeof(*lp->tx_bd_v) * TX_BD_NUM, lp->tx_bd_v, lp->tx_bd_p); - if (lp->rx_skb) - kfree(lp->rx_skb); + kfree(lp->rx_skb); } /** diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 28dbbdc393eb..24858799c204 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1200,8 +1200,7 @@ static int xemaclite_of_remove(struct 
platform_device *of_dev) unregister_netdev(ndev); - if (lp->phy_node) - of_node_put(lp->phy_node); + of_node_put(lp->phy_node); lp->phy_node = NULL; xemaclite_remove_ndev(ndev); diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index caed6eee289c..7f975a2c8990 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -414,7 +414,7 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) * ================ * * Overview: - * Retrieves the address range used to access control and status + * Retrieves the address ranges used to access control and status * registers. * * Returns: @@ -422,8 +422,8 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) * * Arguments: * bdev - pointer to device information - * bar_start - pointer to store the start address - * bar_len - pointer to store the length of the area + * bar_start - pointer to store the start addresses + * bar_len - pointer to store the lengths of the areas * * Assumptions: * I am sure there are some. @@ -442,38 +442,47 @@ static void dfx_get_bars(struct device *bdev, if (dfx_bus_pci) { int num = dfx_use_mmio ? 0 : 1; - *bar_start = pci_resource_start(to_pci_dev(bdev), num); - *bar_len = pci_resource_len(to_pci_dev(bdev), num); + bar_start[0] = pci_resource_start(to_pci_dev(bdev), num); + bar_len[0] = pci_resource_len(to_pci_dev(bdev), num); + bar_start[2] = bar_start[1] = 0; + bar_len[2] = bar_len[1] = 0; } if (dfx_bus_eisa) { unsigned long base_addr = to_eisa_device(bdev)->base_addr; - resource_size_t bar; + resource_size_t bar_lo; + resource_size_t bar_hi; if (dfx_use_mmio) { - bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2); - bar <<= 8; - bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1); - bar <<= 8; - bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0); - bar <<= 16; - *bar_start = bar; - bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2); - bar <<= 8; - bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1); - bar <<= 8; - bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0); - bar <<= 16; - *bar_len = (bar | PI_MEM_ADD_MASK_M) + 1; + bar_lo = inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_2); + bar_lo <<= 8; + bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_1); + bar_lo <<= 8; + bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_0); + bar_lo <<= 8; + bar_start[0] = bar_lo; + bar_hi = inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_2); + bar_hi <<= 8; + bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_1); + bar_hi <<= 8; + bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_0); + bar_hi <<= 8; + bar_len[0] = ((bar_hi - bar_lo) | PI_MEM_ADD_MASK_M) + + 1; } else { - *bar_start = base_addr; - *bar_len = PI_ESIC_K_CSR_IO_LEN + - PI_ESIC_K_BURST_HOLDOFF_LEN; + bar_start[0] = base_addr; + bar_len[0] = PI_ESIC_K_CSR_IO_LEN; } + bar_start[1] = base_addr + PI_DEFEA_K_BURST_HOLDOFF; + bar_len[1] = PI_ESIC_K_BURST_HOLDOFF_LEN; + bar_start[2] = base_addr + PI_ESIC_K_ESIC_CSR; + bar_len[2] = PI_ESIC_K_ESIC_CSR_LEN; } if (dfx_bus_tc) { - *bar_start = to_tc_dev(bdev)->resource.start + - PI_TC_K_CSR_OFFSET; - *bar_len = PI_TC_K_CSR_LEN; + bar_start[0] = to_tc_dev(bdev)->resource.start + + PI_TC_K_CSR_OFFSET; + bar_len[0] = PI_TC_K_CSR_LEN; + bar_start[2] = bar_start[1] = 0; + bar_len[2] = bar_len[1] = 0; } } @@ -518,13 +527,14 @@ static int dfx_register(struct device *bdev) { static int version_disp; int dfx_bus_pci = dev_is_pci(bdev); + int dfx_bus_eisa = DFX_BUS_EISA(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; const char *print_name = dev_name(bdev); struct net_device 
*dev; DFX_board_t *bp; /* board pointer */ - resource_size_t bar_start = 0; /* pointer to port */ - resource_size_t bar_len = 0; /* resource length */ + resource_size_t bar_start[3]; /* pointers to ports */ + resource_size_t bar_len[3]; /* resource lengths */ int alloc_size; /* total buffer size used */ struct resource *region; int err = 0; @@ -542,10 +552,13 @@ static int dfx_register(struct device *bdev) } /* Enable PCI device. */ - if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) { - printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n", - print_name); - goto err_out; + if (dfx_bus_pci) { + err = pci_enable_device(to_pci_dev(bdev)); + if (err) { + pr_err("%s: Cannot enable PCI device, aborting\n", + print_name); + goto err_out; + } } SET_NETDEV_DEV(dev, bdev); @@ -554,31 +567,62 @@ static int dfx_register(struct device *bdev) bp->bus_dev = bdev; dev_set_drvdata(bdev, dev); - dfx_get_bars(bdev, &bar_start, &bar_len); + dfx_get_bars(bdev, bar_start, bar_len); + if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) { + pr_err("%s: Cannot use MMIO, no address set, aborting\n", + print_name); + pr_err("%s: Run ECU and set adapter's MMIO location\n", + print_name); + pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\"" + "\n", print_name); + err = -ENXIO; + goto err_out; + } if (dfx_use_mmio) - region = request_mem_region(bar_start, bar_len, print_name); + region = request_mem_region(bar_start[0], bar_len[0], + print_name); else - region = request_region(bar_start, bar_len, print_name); + region = request_region(bar_start[0], bar_len[0], print_name); if (!region) { - printk(KERN_ERR "%s: Cannot reserve I/O resource " - "0x%lx @ 0x%lx, aborting\n", - print_name, (long)bar_len, (long)bar_start); + pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, " + "aborting\n", print_name, dfx_use_mmio ? "MMIO" : "I/O", + (long)bar_len[0], (long)bar_start[0]); err = -EBUSY; goto err_out_disable; } + if (bar_start[1] != 0) { + region = request_region(bar_start[1], bar_len[1], print_name); + if (!region) { + pr_err("%s: Cannot reserve I/O resource " + "0x%lx @ 0x%lx, aborting\n", print_name, + (long)bar_len[1], (long)bar_start[1]); + err = -EBUSY; + goto err_out_csr_region; + } + } + if (bar_start[2] != 0) { + region = request_region(bar_start[2], bar_len[2], print_name); + if (!region) { + pr_err("%s: Cannot reserve I/O resource " + "0x%lx @ 0x%lx, aborting\n", print_name, + (long)bar_len[2], (long)bar_start[2]); + err = -EBUSY; + goto err_out_bh_region; + } + } /* Set up I/O base address.
*/ if (dfx_use_mmio) { - bp->base.mem = ioremap_nocache(bar_start, bar_len); + bp->base.mem = ioremap_nocache(bar_start[0], bar_len[0]); if (!bp->base.mem) { printk(KERN_ERR "%s: Cannot map MMIO\n", print_name); err = -ENOMEM; - goto err_out_region; + goto err_out_esic_region; } } else { - bp->base.port = bar_start; - dev->base_addr = bar_start; + bp->base.port = bar_start[0]; + dev->base_addr = bar_start[0]; } /* Initialize new device structure */ @@ -587,7 +631,7 @@ static int dfx_register(struct device *bdev) if (dfx_bus_pci) pci_set_master(to_pci_dev(bdev)); - if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) { + if (dfx_driver_init(dev, print_name, bar_start[0]) != DFX_K_SUCCESS) { err = -ENODEV; goto err_out_unmap; } @@ -615,11 +659,19 @@ err_out_unmap: if (dfx_use_mmio) iounmap(bp->base.mem); -err_out_region: +err_out_esic_region: + if (bar_start[2] != 0) + release_region(bar_start[2], bar_len[2]); + +err_out_bh_region: + if (bar_start[1] != 0) + release_region(bar_start[1], bar_len[1]); + +err_out_csr_region: if (dfx_use_mmio) - release_mem_region(bar_start, bar_len); + release_mem_region(bar_start[0], bar_len[0]); else - release_region(bar_start, bar_len); + release_region(bar_start[0], bar_len[0]); err_out_disable: if (dfx_bus_pci) @@ -711,13 +763,14 @@ static void dfx_bus_init(struct net_device *dev) } /* - * Enable memory decoding (MEMCS0) and/or port decoding + * Enable memory decoding (MEMCS1) and/or port decoding * (IOCS1/IOCS0) as appropriate in Function Control - * Register. IOCS0 is used for PDQ registers, taking 16 - * 32-bit words, while IOCS1 is used for the Burst Holdoff - * register, taking a single 32-bit word only. We use the - * slot-specific I/O range as per the ESIC spec, that is - * set bits 15:12 in the mask registers to mask them out. + * Register. MEMCS1 or IOCS0 is used for PDQ registers, + * taking 16 32-bit words, while IOCS1 is used for the + * Burst Holdoff register, taking a single 32-bit word + * only. We use the slot-specific I/O range as per the + * ESIC spec, that is set bits 15:12 in the mask registers + * to mask them out. */ /* Set the decode range of the board. */ @@ -742,9 +795,11 @@ static void dfx_bus_init(struct net_device *dev) outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0); /* Enable the decoders. */ - val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0; + val = PI_FUNCTION_CNTRL_M_IOCS1; if (dfx_use_mmio) - val |= PI_FUNCTION_CNTRL_M_MEMCS0; + val |= PI_FUNCTION_CNTRL_M_MEMCS1; + else + val |= PI_FUNCTION_CNTRL_M_IOCS0; outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL); /* @@ -838,6 +893,12 @@ static void dfx_bus_uninit(struct net_device *dev) val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); val &= ~PI_CONFIG_STAT_0_M_INT_ENB; outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); + + /* Disable the board. */ + outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL); + + /* Disable memory and port decoders. */ + outb(0, base_addr + PI_ESIC_K_FUNCTION_CNTRL); } if (dfx_bus_pci) { /* Disable interrupts at PCI bus interface chip (PFI) */ @@ -1061,8 +1122,8 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name, board_name = "DEFEA"; if (dfx_bus_pci) board_name = "DEFPA"; - pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n", - print_name, board_name, dfx_use_mmio ? "" : "I/O ", + pr_info("%s: %s at %s addr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n", + print_name, board_name, dfx_use_mmio ? 
"MMIO" : "I/O", (long long)bar_start, dev->irq, dev->dev_addr); /* @@ -3636,8 +3697,8 @@ static void dfx_unregister(struct device *bdev) int dfx_bus_pci = dev_is_pci(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; - resource_size_t bar_start = 0; /* pointer to port */ - resource_size_t bar_len = 0; /* resource length */ + resource_size_t bar_start[3]; /* pointers to ports */ + resource_size_t bar_len[3]; /* resource lengths */ int alloc_size; /* total buffer size used */ unregister_netdev(dev); @@ -3655,12 +3716,16 @@ static void dfx_unregister(struct device *bdev) dfx_bus_uninit(dev); - dfx_get_bars(bdev, &bar_start, &bar_len); + dfx_get_bars(bdev, bar_start, bar_len); + if (bar_start[2] != 0) + release_region(bar_start[2], bar_len[2]); + if (bar_start[1] != 0) + release_region(bar_start[1], bar_len[1]); if (dfx_use_mmio) { iounmap(bp->base.mem); - release_mem_region(bar_start, bar_len); + release_mem_region(bar_start[0], bar_len[0]); } else - release_region(bar_start, bar_len); + release_region(bar_start[0], bar_len[0]); if (dfx_bus_pci) pci_disable_device(to_pci_dev(bdev)); diff --git a/drivers/net/fddi/defxx.h b/drivers/net/fddi/defxx.h index 9527f0182fd4..9d30fde2ef3c 100644 --- a/drivers/net/fddi/defxx.h +++ b/drivers/net/fddi/defxx.h @@ -1481,9 +1481,11 @@ typedef union #define PI_ESIC_K_CSR_IO_LEN 0x40 /* 64 bytes */ #define PI_ESIC_K_BURST_HOLDOFF_LEN 0x04 /* 4 bytes */ +#define PI_ESIC_K_ESIC_CSR_LEN 0x40 /* 64 bytes */ #define PI_DEFEA_K_CSR_IO 0x000 #define PI_DEFEA_K_BURST_HOLDOFF 0x040 +#define PI_ESIC_K_ESIC_CSR 0xC80 #define PI_ESIC_K_SLOT_ID 0xC80 #define PI_ESIC_K_SLOT_CNTRL 0xC84 @@ -1556,7 +1558,7 @@ typedef union #define PI_BURST_HOLDOFF_V_RESERVED 1 #define PI_BURST_HOLDOFF_V_MEM_MAP 0 -/* Define the implicit mask of the Memory Address Mask Register. */ +/* Define the implicit mask of the Memory Address Compare registers. 
*/ #define PI_MEM_ADD_MASK_M 0x3ff diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index c3c4051a089d..daca0dee88f3 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -675,8 +675,7 @@ out_free: kfree(xbuff); kfree(rbuff); - if (dev) - free_netdev(dev); + free_netdev(dev); out: return err; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 7d76c9523395..dd867e6cabd6 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -440,7 +440,8 @@ static int negotiate_nvsp_ver(struct hv_device *device, /* NVSPv2 only: Send NDIS config */ memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG; - init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu; + init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu + + ETH_HLEN; init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1; ret = vmbus_sendpacket(device->channel, init_packet, @@ -560,9 +561,7 @@ int netvsc_device_remove(struct hv_device *device) vmbus_close(device->channel); /* Release all resources */ - if (net_device->sub_cb_buf) - vfree(net_device->sub_cb_buf); - + vfree(net_device->sub_cb_buf); free_netvsc_device(net_device); return 0; } @@ -765,6 +764,9 @@ int netvsc_send(struct hv_device *device, out_channel = device->channel; packet->channel = out_channel; + if (out_channel->rescind) + return -ENODEV; + if (packet->page_buf_cnt) { ret = vmbus_sendpacket_pagebuffer(out_channel, packet->page_buf, diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 9e17d1a91e71..15d82eda0baf 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -193,7 +193,9 @@ static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb) struct flow_keys flow; int data_len; - if (!skb_flow_dissect(skb, &flow) || flow.n_proto != htons(ETH_P_IP)) + if (!skb_flow_dissect(skb, &flow) || + !(flow.n_proto == htons(ETH_P_IP) || + flow.n_proto == htons(ETH_P_IPV6))) return false; if (flow.ip_proto == IPPROTO_TCP) @@ -550,6 +552,7 @@ do_lso: do_send: /* Start filling in the page buffers with the rndis hdr */ rndis_msg->msg_len += rndis_msg_size; + packet->total_data_buflen = rndis_msg->msg_len; packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size, skb, &packet->page_buf[0]); @@ -696,9 +699,10 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) return -ENODEV; if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) - limit = NETVSC_MTU; + limit = NETVSC_MTU - ETH_HLEN; - if (mtu < 68 || mtu > limit) + /* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */ + if (mtu < ETH_DATA_LEN || mtu > limit) return -EINVAL; nvdev->start_remove = true; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 2b86f0b6f6d1..ec0c40a8f653 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -728,7 +728,8 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) rssp->hdr.size = sizeof(struct ndis_recv_scale_param); rssp->flag = 0; rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | - NDIS_HASH_TCP_IPV4; + NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 | + NDIS_HASH_TCP_IPV6; rssp->indirect_tabsize = 4*ITAB_NUM; rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param); rssp->hashkey_size = HASH_KEYLEN; @@ -957,6 +958,9 @@ static int rndis_filter_close_device(struct rndis_device *dev) return 0; ret = rndis_filter_set_packet_filter(dev, 
0); + if (ret == -ENODEV) + ret = 0; + if (ret == 0) dev->state = RNDIS_DEV_INITIALIZED; @@ -997,6 +1001,7 @@ int rndis_filter_device_add(struct hv_device *dev, int t; struct ndis_recv_scale_cap rsscap; u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); + u32 mtu, size; rndis_device = get_rndis_device(); if (!rndis_device) @@ -1028,6 +1033,14 @@ int rndis_filter_device_add(struct hv_device *dev, return ret; } + /* Get the MTU from the host */ + size = sizeof(u32); + ret = rndis_filter_query_device(rndis_device, + RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, + &mtu, &size); + if (ret == 0 && size == sizeof(u32)) + net_device->ndev->mtu = mtu; + /* Get the mac address */ ret = rndis_filter_query_device_mac(rndis_device); if (ret != 0) { diff --git a/drivers/net/ipvlan/Makefile b/drivers/net/ipvlan/Makefile new file mode 100644 index 000000000000..df79910192d6 --- /dev/null +++ b/drivers/net/ipvlan/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Ethernet Ipvlan driver +# + +obj-$(CONFIG_IPVLAN) += ipvlan.o + +ipvlan-objs := ipvlan_core.o ipvlan_main.o diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h new file mode 100644 index 000000000000..2729f64b3e7e --- /dev/null +++ b/drivers/net/ipvlan/ipvlan.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + */ +#ifndef __IPVLAN_H +#define __IPVLAN_H + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/rculist.h> +#include <linux/notifier.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_arp.h> +#include <linux/if_link.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/inetdevice.h> +#include <net/ip.h> +#include <net/ip6_route.h> +#include <net/rtnetlink.h> +#include <net/route.h> +#include <net/addrconf.h> + +#define IPVLAN_DRV "ipvlan" +#define IPV_DRV_VER "0.1" + +#define IPVLAN_HASH_SIZE (1 << BITS_PER_BYTE) +#define IPVLAN_HASH_MASK (IPVLAN_HASH_SIZE - 1) + +#define IPVLAN_MAC_FILTER_BITS 8 +#define IPVLAN_MAC_FILTER_SIZE (1 << IPVLAN_MAC_FILTER_BITS) +#define IPVLAN_MAC_FILTER_MASK (IPVLAN_MAC_FILTER_SIZE - 1) + +typedef enum { + IPVL_IPV6 = 0, + IPVL_ICMPV6, + IPVL_IPV4, + IPVL_ARP, +} ipvl_hdr_type; + +struct ipvl_pcpu_stats { + u64 rx_pkts; + u64 rx_bytes; + u64 rx_mcast; + u64 tx_pkts; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 rx_errs; + u32 tx_drps; +}; + +struct ipvl_port; + +struct ipvl_dev { + struct net_device *dev; + struct list_head pnode; + struct ipvl_port *port; + struct net_device *phy_dev; + struct list_head addrs; + int ipv4cnt; + int ipv6cnt; + struct ipvl_pcpu_stats *pcpu_stats; + DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); + netdev_features_t sfeatures; + u32 msg_enable; + u16 mtu_adj; +}; + +struct ipvl_addr { + struct ipvl_dev *master; /* Back pointer to master */ + union { + struct in6_addr ip6; /* IPv6 address on logical interface */ + struct in_addr ip4; /* IPv4 address on logical interface */ + } ipu; +#define ip6addr ipu.ip6 +#define ip4addr ipu.ip4 + struct hlist_node hlnode; /* Hash-table linkage */ + struct list_head anode; /* logical-interface linkage */ + struct rcu_head rcu; + ipvl_hdr_type atype; +}; + +struct ipvl_port { + struct net_device *dev; + struct 
hlist_head hlhead[IPVLAN_HASH_SIZE]; + struct list_head ipvlans; + struct rcu_head rcu; + int count; + u16 mode; +}; + +static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d) +{ + return rcu_dereference(d->rx_handler_data); +} + +static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) +{ + return rtnl_dereference(d->rx_handler_data); +} + +void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev); +void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval); +void ipvlan_init_secret(void); +unsigned int ipvlan_mac_hash(const unsigned char *addr); +rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb); +int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev); +void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr); +bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6); +struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, + const void *iaddr, bool is_v6); +void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); +#endif /* __IPVLAN_H */ diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c new file mode 100644 index 000000000000..a14d87783245 --- /dev/null +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -0,0 +1,607 @@ +/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + */ + +#include "ipvlan.h" + +static u32 ipvlan_jhash_secret; + +void ipvlan_init_secret(void) +{ + net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret)); +} + +static void ipvlan_count_rx(const struct ipvl_dev *ipvlan, + unsigned int len, bool success, bool mcast) +{ + if (!ipvlan) + return; + + if (likely(success)) { + struct ipvl_pcpu_stats *pcptr; + + pcptr = this_cpu_ptr(ipvlan->pcpu_stats); + u64_stats_update_begin(&pcptr->syncp); + pcptr->rx_pkts++; + pcptr->rx_bytes += len; + if (mcast) + pcptr->rx_mcast++; + u64_stats_update_end(&pcptr->syncp); + } else { + this_cpu_inc(ipvlan->pcpu_stats->rx_errs); + } +} + +static u8 ipvlan_get_v6_hash(const void *iaddr) +{ + const struct in6_addr *ip6_addr = iaddr; + + return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) & + IPVLAN_HASH_MASK; +} + +static u8 ipvlan_get_v4_hash(const void *iaddr) +{ + const struct in_addr *ip4_addr = iaddr; + + return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) & + IPVLAN_HASH_MASK; +} + +struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, + const void *iaddr, bool is_v6) +{ + struct ipvl_addr *addr; + u8 hash; + + hash = is_v6 ? ipvlan_get_v6_hash(iaddr) : + ipvlan_get_v4_hash(iaddr); + hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) { + if (is_v6 && addr->atype == IPVL_IPV6 && + ipv6_addr_equal(&addr->ip6addr, iaddr)) + return addr; + else if (!is_v6 && addr->atype == IPVL_IPV4 && + addr->ip4addr.s_addr == + ((struct in_addr *)iaddr)->s_addr) + return addr; + } + return NULL; +} + +void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr) +{ + struct ipvl_port *port = ipvlan->port; + u8 hash; + + hash = (addr->atype == IPVL_IPV6) ? 
+ ipvlan_get_v6_hash(&addr->ip6addr) : + ipvlan_get_v4_hash(&addr->ip4addr); + hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); +} + +void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) +{ + hlist_del_rcu(&addr->hlnode); + if (sync) + synchronize_rcu(); +} + +bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) +{ + struct ipvl_port *port = ipvlan->port; + struct ipvl_addr *addr; + + list_for_each_entry(addr, &ipvlan->addrs, anode) { + if ((is_v6 && addr->atype == IPVL_IPV6 && + ipv6_addr_equal(&addr->ip6addr, iaddr)) || + (!is_v6 && addr->atype == IPVL_IPV4 && + addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr)) + return true; + } + + if (ipvlan_ht_addr_lookup(port, iaddr, is_v6)) + return true; + + return false; +} + +static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type) +{ + void *lyr3h = NULL; + + switch (skb->protocol) { + case htons(ETH_P_ARP): { + struct arphdr *arph; + + if (unlikely(!pskb_may_pull(skb, sizeof(*arph)))) + return NULL; + + arph = arp_hdr(skb); + *type = IPVL_ARP; + lyr3h = arph; + break; + } + case htons(ETH_P_IP): { + u32 pktlen; + struct iphdr *ip4h; + + if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h)))) + return NULL; + + ip4h = ip_hdr(skb); + pktlen = ntohs(ip4h->tot_len); + if (ip4h->ihl < 5 || ip4h->version != 4) + return NULL; + if (skb->len < pktlen || pktlen < (ip4h->ihl * 4)) + return NULL; + + *type = IPVL_IPV4; + lyr3h = ip4h; + break; + } + case htons(ETH_P_IPV6): { + struct ipv6hdr *ip6h; + + if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h)))) + return NULL; + + ip6h = ipv6_hdr(skb); + if (ip6h->version != 6) + return NULL; + + *type = IPVL_IPV6; + lyr3h = ip6h; + /* Only Neighbour Solicitation pkts need different treatment */ + if (ipv6_addr_any(&ip6h->saddr) && + ip6h->nexthdr == NEXTHDR_ICMP) { + *type = IPVL_ICMPV6; + lyr3h = ip6h + 1; + } + break; + } + default: + return NULL; + } + + return lyr3h; +} + +unsigned int ipvlan_mac_hash(const unsigned char *addr) +{ + u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2), + ipvlan_jhash_secret); + + return hash & IPVLAN_MAC_FILTER_MASK; +} + +static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb, + const struct ipvl_dev *in_dev, bool local) +{ + struct ethhdr *eth = eth_hdr(skb); + struct ipvl_dev *ipvlan; + struct sk_buff *nskb; + unsigned int len; + unsigned int mac_hash; + int ret; + + if (skb->protocol == htons(ETH_P_PAUSE)) + return; + + list_for_each_entry(ipvlan, &port->ipvlans, pnode) { + if (local && (ipvlan == in_dev)) + continue; + + mac_hash = ipvlan_mac_hash(eth->h_dest); + if (!test_bit(mac_hash, ipvlan->mac_filters)) + continue; + + ret = NET_RX_DROP; + len = skb->len + ETH_HLEN; + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + goto mcast_acct; + + if (ether_addr_equal(eth->h_dest, ipvlan->phy_dev->broadcast)) + nskb->pkt_type = PACKET_BROADCAST; + else + nskb->pkt_type = PACKET_MULTICAST; + + nskb->dev = ipvlan->dev; + if (local) + ret = dev_forward_skb(ipvlan->dev, nskb); + else + ret = netif_rx(nskb); +mcast_acct: + ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true); + } + + /* Locally generated? ...Forward a copy to the main-device as + * well. On the RX side we'll ignore it (won't give it to any + * of the virtual devices).
+ */ + if (local) { + nskb = skb_clone(skb, GFP_ATOMIC); + if (nskb) { + if (ether_addr_equal(eth->h_dest, port->dev->broadcast)) + nskb->pkt_type = PACKET_BROADCAST; + else + nskb->pkt_type = PACKET_MULTICAST; + + dev_forward_skb(port->dev, nskb); + } + } +} + +static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb, + bool local) +{ + struct ipvl_dev *ipvlan = addr->master; + struct net_device *dev = ipvlan->dev; + unsigned int len; + rx_handler_result_t ret = RX_HANDLER_CONSUMED; + bool success = false; + + len = skb->len + ETH_HLEN; + if (unlikely(!(dev->flags & IFF_UP))) { + kfree_skb(skb); + goto out; + } + + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + goto out; + + skb->dev = dev; + skb->pkt_type = PACKET_HOST; + + if (local) { + if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS) + success = true; + } else { + ret = RX_HANDLER_ANOTHER; + success = true; + } + +out: + ipvlan_count_rx(ipvlan, len, success, false); + return ret; +} + +static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, + void *lyr3h, int addr_type, + bool use_dest) +{ + struct ipvl_addr *addr = NULL; + + if (addr_type == IPVL_IPV6) { + struct ipv6hdr *ip6h; + struct in6_addr *i6addr; + + ip6h = (struct ipv6hdr *)lyr3h; + i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr; + addr = ipvlan_ht_addr_lookup(port, i6addr, true); + } else if (addr_type == IPVL_ICMPV6) { + struct nd_msg *ndmh; + struct in6_addr *i6addr; + + /* Make sure that the Neighbor Solicitation ICMPv6 packets + * are handled to avoid DAD issue. + */ + ndmh = (struct nd_msg *)lyr3h; + if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) { + i6addr = &ndmh->target; + addr = ipvlan_ht_addr_lookup(port, i6addr, true); + } + } else if (addr_type == IPVL_IPV4) { + struct iphdr *ip4h; + __be32 *i4addr; + + ip4h = (struct iphdr *)lyr3h; + i4addr = use_dest ?
&ip4h->daddr : &ip4h->saddr; + addr = ipvlan_ht_addr_lookup(port, i4addr, false); + } else if (addr_type == IPVL_ARP) { + struct arphdr *arph; + unsigned char *arp_ptr; + __be32 dip; + + arph = (struct arphdr *)lyr3h; + arp_ptr = (unsigned char *)(arph + 1); + if (use_dest) + arp_ptr += (2 * port->dev->addr_len) + 4; + else + arp_ptr += port->dev->addr_len; + + memcpy(&dip, arp_ptr, 4); + addr = ipvlan_ht_addr_lookup(port, &dip, false); + } + + return addr; +} + +static int ipvlan_process_v4_outbound(struct sk_buff *skb) +{ + const struct iphdr *ip4h = ip_hdr(skb); + struct net_device *dev = skb->dev; + struct rtable *rt; + int err, ret = NET_XMIT_DROP; + struct flowi4 fl4 = { + .flowi4_oif = dev->iflink, + .flowi4_tos = RT_TOS(ip4h->tos), + .flowi4_flags = FLOWI_FLAG_ANYSRC, + .daddr = ip4h->daddr, + .saddr = ip4h->saddr, + }; + + rt = ip_route_output_flow(dev_net(dev), &fl4, NULL); + if (IS_ERR(rt)) + goto err; + + if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { + ip_rt_put(rt); + goto err; + } + skb_dst_drop(skb); + skb_dst_set(skb, &rt->dst); + err = ip_local_out(skb); + if (unlikely(net_xmit_eval(err))) + dev->stats.tx_errors++; + else + ret = NET_XMIT_SUCCESS; + goto out; +err: + dev->stats.tx_errors++; + kfree_skb(skb); +out: + return ret; +} + +static int ipvlan_process_v6_outbound(struct sk_buff *skb) +{ + const struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct net_device *dev = skb->dev; + struct dst_entry *dst; + int err, ret = NET_XMIT_DROP; + struct flowi6 fl6 = { + .flowi6_iif = skb->dev->ifindex, + .daddr = ip6h->daddr, + .saddr = ip6h->saddr, + .flowi6_flags = FLOWI_FLAG_ANYSRC, + .flowlabel = ip6_flowinfo(ip6h), + .flowi6_mark = skb->mark, + .flowi6_proto = ip6h->nexthdr, + }; + + dst = ip6_route_output(dev_net(dev), NULL, &fl6); + if (IS_ERR(dst)) + goto err; + + skb_dst_drop(skb); + skb_dst_set(skb, dst); + err = ip6_local_out(skb); + if (unlikely(net_xmit_eval(err))) + dev->stats.tx_errors++; + else + ret = NET_XMIT_SUCCESS; + goto out; +err: + dev->stats.tx_errors++; + kfree_skb(skb); +out: + return ret; +} + +static int ipvlan_process_outbound(struct sk_buff *skb, + const struct ipvl_dev *ipvlan) +{ + struct ethhdr *ethh = eth_hdr(skb); + int ret = NET_XMIT_DROP; + + /* In this mode we don't care about multicast and broadcast traffic */ + if (is_multicast_ether_addr(ethh->h_dest)) { + pr_warn_ratelimited("Dropped {multi|broad}cast of type= [%x]\n", + ntohs(skb->protocol)); + kfree_skb(skb); + goto out; + } + + /* The ipvlan is a pseudo-L2 device, so the packets that we receive + * will have an L2 header, which needs to be discarded before the + * packet is processed further in the net-ns of the main-device.
+ */ + if (skb_mac_header_was_set(skb)) { + skb_pull(skb, sizeof(*ethh)); + skb->mac_header = (typeof(skb->mac_header))~0U; + skb_reset_network_header(skb); + } + + if (skb->protocol == htons(ETH_P_IPV6)) + ret = ipvlan_process_v6_outbound(skb); + else if (skb->protocol == htons(ETH_P_IP)) + ret = ipvlan_process_v4_outbound(skb); + else { + pr_warn_ratelimited("Dropped outbound packet type=%x\n", + ntohs(skb->protocol)); + kfree_skb(skb); + } +out: + return ret; +} + +static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) +{ + const struct ipvl_dev *ipvlan = netdev_priv(dev); + void *lyr3h; + struct ipvl_addr *addr; + int addr_type; + + lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + if (!lyr3h) + goto out; + + addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); + if (addr) + return ipvlan_rcv_frame(addr, skb, true); + +out: + skb->dev = ipvlan->phy_dev; + return ipvlan_process_outbound(skb, ipvlan); +} + +static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) +{ + const struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ethhdr *eth = eth_hdr(skb); + struct ipvl_addr *addr; + void *lyr3h; + int addr_type; + + if (ether_addr_equal(eth->h_dest, eth->h_source)) { + lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + if (lyr3h) { + addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); + if (addr) + return ipvlan_rcv_frame(addr, skb, true); + } + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return NET_XMIT_DROP; + + /* Packet definitely does not belong to any of the + * virtual devices, but the dest is local. So forward + * the skb for the main-dev. At the RX side we just return + * RX_PASS for it to be processed further on the stack. + */ + return dev_forward_skb(ipvlan->phy_dev, skb); + + } else if (is_multicast_ether_addr(eth->h_dest)) { + u8 ip_summed = skb->ip_summed; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + ipvlan_multicast_frame(ipvlan->port, skb, ipvlan, true); + skb->ip_summed = ip_summed; + } + + skb->dev = ipvlan->phy_dev; + return dev_queue_xmit(skb); +} + +int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev); + + if (!port) + goto out; + + if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) + goto out; + + switch(port->mode) { + case IPVLAN_MODE_L2: + return ipvlan_xmit_mode_l2(skb, dev); + case IPVLAN_MODE_L3: + return ipvlan_xmit_mode_l3(skb, dev); + } + + /* Should not reach here */ + WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n", + port->mode); +out: + kfree_skb(skb); + return NET_XMIT_DROP; +} + +static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port) +{ + struct ethhdr *eth = eth_hdr(skb); + struct ipvl_addr *addr; + void *lyr3h; + int addr_type; + + if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) { + lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + if (!lyr3h) + return true; + + addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false); + if (addr) + return false; + } + + return true; +} + +static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb, + struct ipvl_port *port) +{ + void *lyr3h; + int addr_type; + struct ipvl_addr *addr; + struct sk_buff *skb = *pskb; + rx_handler_result_t ret = RX_HANDLER_PASS; + + lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + if (!lyr3h) + goto out; + + addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); + if (addr) + ret = ipvlan_rcv_frame(addr, skb, false); + +out: + return 
ret; +} + +static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb, + struct ipvl_port *port) +{ + struct sk_buff *skb = *pskb; + struct ethhdr *eth = eth_hdr(skb); + rx_handler_result_t ret = RX_HANDLER_PASS; + void *lyr3h; + int addr_type; + + if (is_multicast_ether_addr(eth->h_dest)) { + if (ipvlan_external_frame(skb, port)) + ipvlan_multicast_frame(port, skb, NULL, false); + } else { + struct ipvl_addr *addr; + + lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + if (!lyr3h) + return ret; + + addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); + if (addr) + ret = ipvlan_rcv_frame(addr, skb, false); + } + + return ret; +} + +rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev); + + if (!port) + return RX_HANDLER_PASS; + + switch (port->mode) { + case IPVLAN_MODE_L2: + return ipvlan_handle_mode_l2(pskb, port); + case IPVLAN_MODE_L3: + return ipvlan_handle_mode_l3(pskb, port); + } + + /* Should not reach here */ + WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n", + port->mode); + kfree_skb(skb); + return NET_RX_DROP; +} diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c new file mode 100644 index 000000000000..4f4099d5603d --- /dev/null +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -0,0 +1,795 @@ +/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + */ + +#include "ipvlan.h" + +void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev) +{ + ipvlan->dev->mtu = dev->mtu - ipvlan->mtu_adj; +} + +void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval) +{ + struct ipvl_dev *ipvlan; + + if (port->mode != nval) { + list_for_each_entry(ipvlan, &port->ipvlans, pnode) { + if (nval == IPVLAN_MODE_L3) + ipvlan->dev->flags |= IFF_NOARP; + else + ipvlan->dev->flags &= ~IFF_NOARP; + } + port->mode = nval; + } +} + +static int ipvlan_port_create(struct net_device *dev) +{ + struct ipvl_port *port; + int err, idx; + + if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) { + netdev_err(dev, "Master is either lo or non-ether device\n"); + return -EINVAL; + } + + if (netif_is_macvlan_port(dev)) { + netdev_err(dev, "Master is a macvlan port.\n"); + return -EBUSY; + } + + port = kzalloc(sizeof(struct ipvl_port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + port->dev = dev; + port->mode = IPVLAN_MODE_L3; + INIT_LIST_HEAD(&port->ipvlans); + for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++) + INIT_HLIST_HEAD(&port->hlhead[idx]); + + err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port); + if (err) + goto err; + + dev->priv_flags |= IFF_IPVLAN_MASTER; + return 0; + +err: + kfree_rcu(port, rcu); + return err; +} + +static void ipvlan_port_destroy(struct net_device *dev) +{ + struct ipvl_port *port = ipvlan_port_get_rtnl(dev); + + dev->priv_flags &= ~IFF_IPVLAN_MASTER; + netdev_rx_handler_unregister(dev); + kfree_rcu(port, rcu); +} + +/* ipvlan network devices have devices nesting below them and are a special + * "super class" of normal network devices; split their locks off into a + * separate class since they always nest.
+ */ +static struct lock_class_key ipvlan_netdev_xmit_lock_key; +static struct lock_class_key ipvlan_netdev_addr_lock_key; + +#define IPVLAN_FEATURES \ + (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ + NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ + NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) + +#define IPVLAN_STATE_MASK \ + ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) + +static void ipvlan_set_lockdep_class_one(struct net_device *dev, + struct netdev_queue *txq, + void *_unused) +{ + lockdep_set_class(&txq->_xmit_lock, &ipvlan_netdev_xmit_lock_key); +} + +static void ipvlan_set_lockdep_class(struct net_device *dev) +{ + lockdep_set_class(&dev->addr_list_lock, &ipvlan_netdev_addr_lock_key); + netdev_for_each_tx_queue(dev, ipvlan_set_lockdep_class_one, NULL); +} + +static int ipvlan_init(struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + const struct net_device *phy_dev = ipvlan->phy_dev; + + dev->state = (dev->state & ~IPVLAN_STATE_MASK) | + (phy_dev->state & IPVLAN_STATE_MASK); + dev->features = phy_dev->features & IPVLAN_FEATURES; + dev->features |= NETIF_F_LLTX; + dev->gso_max_size = phy_dev->gso_max_size; + dev->iflink = phy_dev->ifindex; + dev->hard_header_len = phy_dev->hard_header_len; + + ipvlan_set_lockdep_class(dev); + + ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats); + if (!ipvlan->pcpu_stats) + return -ENOMEM; + + return 0; +} + +static void ipvlan_uninit(struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ipvl_port *port = ipvlan->port; + + free_percpu(ipvlan->pcpu_stats); + + port->count -= 1; + if (!port->count) + ipvlan_port_destroy(port->dev); +} + +static int ipvlan_open(struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct net_device *phy_dev = ipvlan->phy_dev; + struct ipvl_addr *addr; + + if (ipvlan->port->mode == IPVLAN_MODE_L3) + dev->flags |= IFF_NOARP; + else + dev->flags &= ~IFF_NOARP; + + if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { + list_for_each_entry(addr, &ipvlan->addrs, anode) + ipvlan_ht_addr_add(ipvlan, addr); + } + return dev_uc_add(phy_dev, phy_dev->dev_addr); +} + +static int ipvlan_stop(struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct net_device *phy_dev = ipvlan->phy_dev; + struct ipvl_addr *addr; + + dev_uc_unsync(phy_dev, dev); + dev_mc_unsync(phy_dev, dev); + + dev_uc_del(phy_dev, phy_dev->dev_addr); + + if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { + list_for_each_entry(addr, &ipvlan->addrs, anode) + ipvlan_ht_addr_del(addr, !dev->dismantle); + } + return 0; +} + +static netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + const struct ipvl_dev *ipvlan = netdev_priv(dev); + int skblen = skb->len; + int ret; + + ret = ipvlan_queue_xmit(skb, dev); + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { + struct ipvl_pcpu_stats *pcptr; + + pcptr = this_cpu_ptr(ipvlan->pcpu_stats); + + u64_stats_update_begin(&pcptr->syncp); + pcptr->tx_pkts++; + pcptr->tx_bytes += skblen; + u64_stats_update_end(&pcptr->syncp); + } else { + this_cpu_inc(ipvlan->pcpu_stats->tx_drps); + } + return ret; +} + +static netdev_features_t ipvlan_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + + return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES); +} + +static void ipvlan_change_rx_flags(struct 
net_device *dev, int change) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct net_device *phy_dev = ipvlan->phy_dev; + + if (change & IFF_ALLMULTI) + dev_set_allmulti(phy_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); +} + +static void ipvlan_set_broadcast_mac_filter(struct ipvl_dev *ipvlan, bool set) +{ + struct net_device *dev = ipvlan->dev; + unsigned int hashbit = ipvlan_mac_hash(dev->broadcast); + + if (set && !test_bit(hashbit, ipvlan->mac_filters)) + __set_bit(hashbit, ipvlan->mac_filters); + else if (!set && test_bit(hashbit, ipvlan->mac_filters)) + __clear_bit(hashbit, ipvlan->mac_filters); +} + +static void ipvlan_set_multicast_mac_filter(struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + + if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { + bitmap_fill(ipvlan->mac_filters, IPVLAN_MAC_FILTER_SIZE); + } else { + struct netdev_hw_addr *ha; + DECLARE_BITMAP(mc_filters, IPVLAN_MAC_FILTER_SIZE); + + bitmap_zero(mc_filters, IPVLAN_MAC_FILTER_SIZE); + netdev_for_each_mc_addr(ha, dev) + __set_bit(ipvlan_mac_hash(ha->addr), mc_filters); + + bitmap_copy(ipvlan->mac_filters, mc_filters, + IPVLAN_MAC_FILTER_SIZE); + } + dev_uc_sync(ipvlan->phy_dev, dev); + dev_mc_sync(ipvlan->phy_dev, dev); +} + +static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *s) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + + if (ipvlan->pcpu_stats) { + struct ipvl_pcpu_stats *pcptr; + u64 rx_pkts, rx_bytes, rx_mcast, tx_pkts, tx_bytes; + u32 rx_errs = 0, tx_drps = 0; + u32 strt; + int idx; + + for_each_possible_cpu(idx) { + pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx); + do { + strt = u64_stats_fetch_begin_irq(&pcptr->syncp); + rx_pkts = pcptr->rx_pkts; + rx_bytes = pcptr->rx_bytes; + rx_mcast = pcptr->rx_mcast; + tx_pkts = pcptr->tx_pkts; + tx_bytes = pcptr->tx_bytes; + } while (u64_stats_fetch_retry_irq(&pcptr->syncp, + strt)); + + s->rx_packets += rx_pkts; + s->rx_bytes += rx_bytes; + s->multicast += rx_mcast; + s->tx_packets += tx_pkts; + s->tx_bytes += tx_bytes; + + /* u32 values are updated without syncp protection.
*/ + rx_errs += pcptr->rx_errs; + tx_drps += pcptr->tx_drps; + } + s->rx_errors = rx_errs; + s->rx_dropped = rx_errs; + s->tx_dropped = tx_drps; + } + return s; +} + +static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct net_device *phy_dev = ipvlan->phy_dev; + + return vlan_vid_add(phy_dev, proto, vid); +} + +static int ipvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct net_device *phy_dev = ipvlan->phy_dev; + + vlan_vid_del(phy_dev, proto, vid); + return 0; +} + +static const struct net_device_ops ipvlan_netdev_ops = { + .ndo_init = ipvlan_init, + .ndo_uninit = ipvlan_uninit, + .ndo_open = ipvlan_open, + .ndo_stop = ipvlan_stop, + .ndo_start_xmit = ipvlan_start_xmit, + .ndo_fix_features = ipvlan_fix_features, + .ndo_change_rx_flags = ipvlan_change_rx_flags, + .ndo_set_rx_mode = ipvlan_set_multicast_mac_filter, + .ndo_get_stats64 = ipvlan_get_stats64, + .ndo_vlan_rx_add_vid = ipvlan_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ipvlan_vlan_rx_kill_vid, +}; + +static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned len) +{ + const struct ipvl_dev *ipvlan = netdev_priv(dev); + struct net_device *phy_dev = ipvlan->phy_dev; + + /* TODO Probably use a different field than dev_addr so that the + * mac-address on the virtual device is portable and can be carried + * while the packets use the mac-addr on the physical device. + */ + return dev_hard_header(skb, phy_dev, type, daddr, + saddr ? : dev->dev_addr, len); +} + +static const struct header_ops ipvlan_header_ops = { + .create = ipvlan_hard_header, + .rebuild = eth_rebuild_header, + .parse = eth_header_parse, + .cache = eth_header_cache, + .cache_update = eth_header_cache_update, +}; + +static int ipvlan_ethtool_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + const struct ipvl_dev *ipvlan = netdev_priv(dev); + + return __ethtool_get_settings(ipvlan->phy_dev, cmd); +} + +static void ipvlan_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version)); +} + +static u32 ipvlan_ethtool_get_msglevel(struct net_device *dev) +{ + const struct ipvl_dev *ipvlan = netdev_priv(dev); + + return ipvlan->msg_enable; +} + +static void ipvlan_ethtool_set_msglevel(struct net_device *dev, u32 value) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + + ipvlan->msg_enable = value; +} + +static const struct ethtool_ops ipvlan_ethtool_ops = { + .get_link = ethtool_op_get_link, + .get_settings = ipvlan_ethtool_get_settings, + .get_drvinfo = ipvlan_ethtool_get_drvinfo, + .get_msglevel = ipvlan_ethtool_get_msglevel, + .set_msglevel = ipvlan_ethtool_set_msglevel, +}; + +static int ipvlan_nl_changelink(struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev); + + if (data && data[IFLA_IPVLAN_MODE]) { + u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]); + + ipvlan_set_port_mode(port, nmode); + } + return 0; +} + +static size_t ipvlan_nl_getsize(const struct net_device *dev) +{ + return (0 + + nla_total_size(2) /* IFLA_IPVLAN_MODE */ + ); +} + +static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + if (data 
&& data[IFLA_IPVLAN_MODE]) { + u16 mode = nla_get_u16(data[IFLA_IPVLAN_MODE]); + + if (mode < IPVLAN_MODE_L2 || mode >= IPVLAN_MODE_MAX) + return -EINVAL; + } + return 0; +} + +static int ipvlan_nl_fillinfo(struct sk_buff *skb, + const struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev); + int ret = -EINVAL; + + if (!port) + goto err; + + ret = -EMSGSIZE; + if (nla_put_u16(skb, IFLA_IPVLAN_MODE, port->mode)) + goto err; + + return 0; + +err: + return ret; +} + +static int ipvlan_link_new(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ipvl_port *port; + struct net_device *phy_dev; + int err; + + if (!tb[IFLA_LINK]) + return -EINVAL; + + phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); + if (!phy_dev) + return -ENODEV; + + if (netif_is_ipvlan(phy_dev)) { + struct ipvl_dev *tmp = netdev_priv(phy_dev); + + phy_dev = tmp->phy_dev; + } else if (!netif_is_ipvlan_port(phy_dev)) { + err = ipvlan_port_create(phy_dev); + if (err < 0) + return err; + } + + port = ipvlan_port_get_rtnl(phy_dev); + if (data && data[IFLA_IPVLAN_MODE]) + port->mode = nla_get_u16(data[IFLA_IPVLAN_MODE]); + + ipvlan->phy_dev = phy_dev; + ipvlan->dev = dev; + ipvlan->port = port; + ipvlan->sfeatures = IPVLAN_FEATURES; + INIT_LIST_HEAD(&ipvlan->addrs); + ipvlan->ipv4cnt = 0; + ipvlan->ipv6cnt = 0; + + /* TODO Probably put random address here to be presented to the + * world but keep using the physical-dev address for the outgoing + * packets. + */ + memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN); + + dev->priv_flags |= IFF_IPVLAN_SLAVE; + + port->count += 1; + err = register_netdevice(dev); + if (err < 0) + goto ipvlan_destroy_port; + + err = netdev_upper_dev_link(phy_dev, dev); + if (err) + goto ipvlan_destroy_port; + + list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans); + netif_stacked_transfer_operstate(phy_dev, dev); + return 0; + +ipvlan_destroy_port: + port->count -= 1; + if (!port->count) + ipvlan_port_destroy(phy_dev); + + return err; +} + +static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ipvl_addr *addr, *next; + + if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { + list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { + ipvlan_ht_addr_del(addr, !dev->dismantle); + list_del_rcu(&addr->anode); + } + } + list_del_rcu(&ipvlan->pnode); + unregister_netdevice_queue(dev, head); + netdev_upper_dev_unlink(ipvlan->phy_dev, dev); +} + +static void ipvlan_link_setup(struct net_device *dev) +{ + ether_setup(dev); + + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); + dev->priv_flags |= IFF_UNICAST_FLT; + dev->netdev_ops = &ipvlan_netdev_ops; + dev->destructor = free_netdev; + dev->header_ops = &ipvlan_header_ops; + dev->ethtool_ops = &ipvlan_ethtool_ops; + dev->tx_queue_len = 0; +} + +static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] = +{ + [IFLA_IPVLAN_MODE] = { .type = NLA_U16 }, +}; + +static struct rtnl_link_ops ipvlan_link_ops = { + .kind = "ipvlan", + .priv_size = sizeof(struct ipvl_dev), + + .get_size = ipvlan_nl_getsize, + .policy = ipvlan_nl_policy, + .validate = ipvlan_nl_validate, + .fill_info = ipvlan_nl_fillinfo, + .changelink = ipvlan_nl_changelink, + .maxtype = IFLA_IPVLAN_MAX, + + .setup = ipvlan_link_setup, + .newlink = ipvlan_link_new, + .dellink = ipvlan_link_delete, +}; + 
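One detail of the ipvlan driver above worth spelling out: multicast delivery is a lossy hash filter, not an exact match. ipvlan_set_multicast_mac_filter() hashes every subscribed group address into a 256-entry per-slave bitmap (mac_filters), and ipvlan_multicast_frame() clones a packet to a slave only when the bit for the destination MAC is set, so a hash collision can cost an extra copy (which that slave's stack then drops) but never loses traffic. A self-contained sketch of the scheme; the kernel's jhash_1word()/__get_unaligned_cpu32() pair is replaced here by a stand-in mixer, so bucket numbers will not match the driver's:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define FILTER_BITS 8
#define FILTER_SIZE (1u << FILTER_BITS)	/* 256 buckets, as in ipvlan */
#define FILTER_MASK (FILTER_SIZE - 1)

static uint64_t filter[FILTER_SIZE / 64];	/* 256-bit membership bitmap */

/* Stand-in for jhash_1word(); any decent 32-bit mixer works for a demo. */
static uint32_t mac_hash(const uint8_t *addr)
{
	uint32_t w;

	memcpy(&w, addr + 2, 4);	/* low four bytes vary the most */
	w *= 0x9e3779b1u;		/* golden-ratio multiplicative hash */
	return (w >> (32 - FILTER_BITS)) & FILTER_MASK;
}

static void filter_set(const uint8_t *addr)
{
	uint32_t h = mac_hash(addr);

	filter[h / 64] |= 1ull << (h % 64);
}

static int filter_test(const uint8_t *addr)
{
	uint32_t h = mac_hash(addr);

	return !!(filter[h / 64] & (1ull << (h % 64)));
}

int main(void)
{
	uint8_t mdns[6]  = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint8_t other[6] = { 0x01, 0x00, 0x5e, 0x7f, 0x12, 0x34 };

	filter_set(mdns);
	/* "other" may report 1 on a collision: a false positive, never a miss */
	printf("mdns: %d, other: %d\n", filter_test(mdns), filter_test(other));
	return 0;
}

The same trade-off shows up in ipvlan_set_broadcast_mac_filter(), which simply pins the broadcast address's bucket while the slave holds at least one IPv4 address.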
+static int ipvlan_link_register(struct rtnl_link_ops *ops) +{ + return rtnl_link_register(ops); +} + +static int ipvlan_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct ipvl_dev *ipvlan, *next; + struct ipvl_port *port; + LIST_HEAD(lst_kill); + + if (!netif_is_ipvlan_port(dev)) + return NOTIFY_DONE; + + port = ipvlan_port_get_rtnl(dev); + + switch (event) { + case NETDEV_CHANGE: + list_for_each_entry(ipvlan, &port->ipvlans, pnode) + netif_stacked_transfer_operstate(ipvlan->phy_dev, + ipvlan->dev); + break; + + case NETDEV_UNREGISTER: + if (dev->reg_state != NETREG_UNREGISTERING) + break; + + list_for_each_entry_safe(ipvlan, next, &port->ipvlans, + pnode) + ipvlan->dev->rtnl_link_ops->dellink(ipvlan->dev, + &lst_kill); + unregister_netdevice_many(&lst_kill); + break; + + case NETDEV_FEAT_CHANGE: + list_for_each_entry(ipvlan, &port->ipvlans, pnode) { + ipvlan->dev->features = dev->features & IPVLAN_FEATURES; + ipvlan->dev->gso_max_size = dev->gso_max_size; + netdev_features_change(ipvlan->dev); + } + break; + + case NETDEV_CHANGEMTU: + list_for_each_entry(ipvlan, &port->ipvlans, pnode) + ipvlan_adjust_mtu(ipvlan, dev); + break; + + case NETDEV_PRE_TYPE_CHANGE: + /* Forbid underlying device to change its type. */ + return NOTIFY_BAD; + } + return NOTIFY_DONE; +} + +static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) +{ + struct ipvl_addr *addr; + + if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) { + netif_err(ipvlan, ifup, ipvlan->dev, + "Failed to add IPv6=%pI6c addr for %s intf\n", + ip6_addr, ipvlan->dev->name); + return -EINVAL; + } + addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC); + if (!addr) + return -ENOMEM; + + addr->master = ipvlan; + memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); + addr->atype = IPVL_IPV6; + list_add_tail_rcu(&addr->anode, &ipvlan->addrs); + ipvlan->ipv6cnt++; + ipvlan_ht_addr_add(ipvlan, addr); + + return 0; +} + +static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) +{ + struct ipvl_addr *addr; + + addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true); + if (!addr) + return; + + ipvlan_ht_addr_del(addr, true); + list_del_rcu(&addr->anode); + ipvlan->ipv6cnt--; + WARN_ON(ipvlan->ipv6cnt < 0); + kfree_rcu(addr, rcu); + + return; +} + +static int ipvlan_addr6_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct inet6_ifaddr *if6 = (struct inet6_ifaddr *)ptr; + struct net_device *dev = (struct net_device *)if6->idev->dev; + struct ipvl_dev *ipvlan = netdev_priv(dev); + + if (!netif_is_ipvlan(dev)) + return NOTIFY_DONE; + + if (!ipvlan || !ipvlan->port) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_UP: + if (ipvlan_add_addr6(ipvlan, &if6->addr)) + return NOTIFY_BAD; + break; + + case NETDEV_DOWN: + ipvlan_del_addr6(ipvlan, &if6->addr); + break; + } + + return NOTIFY_OK; +} + +static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) +{ + struct ipvl_addr *addr; + + if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) { + netif_err(ipvlan, ifup, ipvlan->dev, + "Failed to add IPv4=%pI4 on %s intf.\n", + ip4_addr, ipvlan->dev->name); + return -EINVAL; + } + addr = kzalloc(sizeof(struct ipvl_addr), GFP_KERNEL); + if (!addr) + return -ENOMEM; + + addr->master = ipvlan; + memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); + addr->atype = IPVL_IPV4; + list_add_tail_rcu(&addr->anode, &ipvlan->addrs); + ipvlan->ipv4cnt++; + 
ipvlan_ht_addr_add(ipvlan, addr); + ipvlan_set_broadcast_mac_filter(ipvlan, true); + + return 0; +} + +static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) +{ + struct ipvl_addr *addr; + + addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false); + if (!addr) + return; + + ipvlan_ht_addr_del(addr, true); + list_del_rcu(&addr->anode); + ipvlan->ipv4cnt--; + WARN_ON(ipvlan->ipv4cnt < 0); + if (!ipvlan->ipv4cnt) + ipvlan_set_broadcast_mac_filter(ipvlan, false); + kfree_rcu(addr, rcu); + + return; +} + +static int ipvlan_addr4_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct in_ifaddr *if4 = (struct in_ifaddr *)ptr; + struct net_device *dev = (struct net_device *)if4->ifa_dev->dev; + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct in_addr ip4_addr; + + if (!netif_is_ipvlan(dev)) + return NOTIFY_DONE; + + if (!ipvlan || !ipvlan->port) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_UP: + ip4_addr.s_addr = if4->ifa_address; + if (ipvlan_add_addr4(ipvlan, &ip4_addr)) + return NOTIFY_BAD; + break; + + case NETDEV_DOWN: + ip4_addr.s_addr = if4->ifa_address; + ipvlan_del_addr4(ipvlan, &ip4_addr); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = { + .notifier_call = ipvlan_addr4_event, +}; + +static struct notifier_block ipvlan_notifier_block __read_mostly = { + .notifier_call = ipvlan_device_event, +}; + +static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = { + .notifier_call = ipvlan_addr6_event, +}; + +static int __init ipvlan_init_module(void) +{ + int err; + + ipvlan_init_secret(); + register_netdevice_notifier(&ipvlan_notifier_block); + register_inet6addr_notifier(&ipvlan_addr6_notifier_block); + register_inetaddr_notifier(&ipvlan_addr4_notifier_block); + + err = ipvlan_link_register(&ipvlan_link_ops); + if (err < 0) + goto error; + + return 0; +error: + unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block); + unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block); + unregister_netdevice_notifier(&ipvlan_notifier_block); + return err; +} + +static void __exit ipvlan_cleanup_module(void) +{ + rtnl_link_unregister(&ipvlan_link_ops); + unregister_netdevice_notifier(&ipvlan_notifier_block); + unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block); + unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block); +} + +module_init(ipvlan_init_module); +module_exit(ipvlan_cleanup_module); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>"); +MODULE_DESCRIPTION("Driver for L3 (IPv6/IPv4) based VLANs"); +MODULE_ALIAS_RTNL_LINK("ipvlan"); diff --git a/drivers/net/irda/act200l-sir.c b/drivers/net/irda/act200l-sir.c index 8ff084f1d236..e8917511e1aa 100644 --- a/drivers/net/irda/act200l-sir.c +++ b/drivers/net/irda/act200l-sir.c @@ -107,8 +107,6 @@ static int act200l_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Power on the dongle */ sirdev_set_dtr_rts(dev, TRUE, TRUE); @@ -124,8 +122,6 @@ static int act200l_open(struct sir_dev *dev) static int act200l_close(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Power off the dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); @@ -143,8 +139,6 @@ static int act200l_change_speed(struct sir_dev *dev, unsigned speed) u8 control[3]; int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Clear DTR and set RTS to enter command mode */ sirdev_set_dtr_rts(dev, FALSE, TRUE); @@ -212,8 +206,6 @@ 
static int act200l_reset(struct sir_dev *dev) }; int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__ ); - switch (state) { case SIRDEV_STATE_DONGLE_RESET: /* Reset the dongle : set RTS low for 25 ms */ @@ -240,7 +232,8 @@ static int act200l_reset(struct sir_dev *dev) dev->speed = 9600; break; default: - IRDA_ERROR("%s(), unknown state %d\n", __func__, state); + net_err_ratelimited("%s(), unknown state %d\n", + __func__, state); ret = -1; break; } diff --git a/drivers/net/irda/actisys-sir.c b/drivers/net/irda/actisys-sir.c index 50b2141a6103..e224b8b99517 100644 --- a/drivers/net/irda/actisys-sir.c +++ b/drivers/net/irda/actisys-sir.c @@ -165,8 +165,7 @@ static int actisys_change_speed(struct sir_dev *dev, unsigned speed) int ret = 0; int i = 0; - IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __func__, - speed, dev->speed); + pr_debug("%s(), speed=%d (was %d)\n", __func__, speed, dev->speed); /* dongle was already resetted from irda_request state machine, * we are in known state (dongle default) diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index befa45f809c3..588680a72fa1 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -154,12 +154,10 @@ static int __init ali_ircc_init(void) int reg, revision; int i = 0; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); - ret = platform_driver_register(&ali_ircc_driver); if (ret) { - IRDA_ERROR("%s, Can't register driver!\n", - ALI_IRCC_DRIVER_NAME); + net_err_ratelimited("%s, Can't register driver!\n", + ALI_IRCC_DRIVER_NAME); return ret; } @@ -168,7 +166,7 @@ static int __init ali_ircc_init(void) /* Probe for all the ALi chipsets we know about */ for (chip= chips; chip->name; chip++, i++) { - IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__, chip->name); + pr_debug("%s(), Probing for %s ...\n", __func__, chip->name); /* Try all config registers for this chip */ for (cfg=0; cfg<2; cfg++) @@ -198,12 +196,13 @@ static int __init ali_ircc_init(void) if (reg == chip->cid_value) { - IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __func__, cfg_base); + pr_debug("%s(), Chip found at 0x%03x\n", + __func__, cfg_base); outb(0x1F, cfg_base); revision = inb(cfg_base+1); - IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __func__, - chip->name, revision); + pr_debug("%s(), Found %s chip, revision=%d\n", + __func__, chip->name, revision); /* * If the user supplies the base address, then @@ -225,15 +224,14 @@ static int __init ali_ircc_init(void) } else { - IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __func__, chip->name, cfg_base); + pr_debug("%s(), No %s chip at 0x%03x\n", + __func__, chip->name, cfg_base); } /* Exit configuration */ outb(0xbb, cfg_base); } } - IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); - if (ret) platform_driver_unregister(&ali_ircc_driver); @@ -250,8 +248,6 @@ static void __exit ali_ircc_cleanup(void) { int i; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); - for (i=0; i < ARRAY_SIZE(dev_self); i++) { if (dev_self[i]) ali_ircc_close(dev_self[i]); @@ -259,7 +255,6 @@ static void __exit ali_ircc_cleanup(void) platform_driver_unregister(&ali_ircc_driver); - IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); } static const struct net_device_ops ali_ircc_sir_ops = { @@ -289,11 +284,9 @@ static int ali_ircc_open(int i, chipio_t *info) int dongle_id; int err; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); - if (i >= ARRAY_SIZE(dev_self)) { - 
IRDA_ERROR("%s(), maximum number of supported chips reached!\n", - __func__); + net_err_ratelimited("%s(), maximum number of supported chips reached!\n", + __func__); return -ENOMEM; } @@ -303,8 +296,8 @@ static int ali_ircc_open(int i, chipio_t *info) dev = alloc_irdadev(sizeof(*self)); if (dev == NULL) { - IRDA_ERROR("%s(), can't allocate memory for control block!\n", - __func__); + net_err_ratelimited("%s(), can't allocate memory for control block!\n", + __func__); return -ENOMEM; } @@ -328,8 +321,8 @@ static int ali_ircc_open(int i, chipio_t *info) /* Reserve the ioports that we need */ if (!request_region(self->io.fir_base, self->io.fir_ext, ALI_IRCC_DRIVER_NAME)) { - IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __func__, - self->io.fir_base); + net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n", + __func__, self->io.fir_base); err = -ENODEV; goto err_out1; } @@ -380,19 +373,20 @@ static int ali_ircc_open(int i, chipio_t *info) err = register_netdev(dev); if (err) { - IRDA_ERROR("%s(), register_netdev() failed!\n", __func__); + net_err_ratelimited("%s(), register_netdev() failed!\n", + __func__); goto err_out4; } - IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); + net_info_ratelimited("IrDA: Registered device %s\n", dev->name); /* Check dongle id */ dongle_id = ali_ircc_read_dongle_id(i, info); - IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __func__, - ALI_IRCC_DRIVER_NAME, dongle_types[dongle_id]); + net_info_ratelimited("%s(), %s, Found dongle: %s\n", + __func__, ALI_IRCC_DRIVER_NAME, + dongle_types[dongle_id]); self->io.dongle_id = dongle_id; - IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); return 0; @@ -421,8 +415,6 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self) { int iobase; - IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); iobase = self->io.fir_base; @@ -431,7 +423,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self) unregister_netdev(self->netdev); /* Release the PORT that this driver is using */ - IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __func__, self->io.fir_base); + pr_debug("%s(), Releasing Region %03x\n", __func__, self->io.fir_base); release_region(self->io.fir_base, self->io.fir_ext); if (self->tx_buff.head) @@ -445,7 +437,6 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self) dev_self[self->index] = NULL; free_netdev(self->netdev); - IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); return 0; } @@ -488,7 +479,6 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info) int cfg_base = info->cfg_base; int hi, low, reg; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); /* Enter Configuration */ outb(chip->entr1, cfg_base); @@ -507,13 +497,13 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info) info->sir_base = info->fir_base; - IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__, info->fir_base); + pr_debug("%s(), probing fir_base=0x%03x\n", __func__, info->fir_base); /* Read IRQ control register */ outb(0x70, cfg_base); reg = inb(cfg_base+1); info->irq = reg & 0x0f; - IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq); + pr_debug("%s(), probing irq=%d\n", __func__, info->irq); /* Read DMA channel */ outb(0x74, cfg_base); @@ -521,26 +511,26 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info) info->dma = reg & 0x07; if(info->dma == 0x04) - IRDA_WARNING("%s(), No DMA channel assigned !\n", __func__); + 
net_warn_ratelimited("%s(), No DMA channel assigned !\n", + __func__); else - IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma); + pr_debug("%s(), probing dma=%d\n", __func__, info->dma); /* Read Enabled Status */ outb(0x30, cfg_base); reg = inb(cfg_base+1); info->enabled = (reg & 0x80) && (reg & 0x01); - IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __func__, info->enabled); + pr_debug("%s(), probing enabled=%d\n", __func__, info->enabled); /* Read Power Status */ outb(0x22, cfg_base); reg = inb(cfg_base+1); info->suspended = (reg & 0x20); - IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __func__, info->suspended); + pr_debug("%s(), probing suspended=%d\n", __func__, info->suspended); /* Exit configuration */ outb(0xbb, cfg_base); - IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); return 0; } @@ -558,7 +548,6 @@ static int ali_ircc_setup(chipio_t *info) int version; int iobase = info->fir_base; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); /* Locking comments : * Most operations here need to be protected. We are called before @@ -578,8 +567,8 @@ static int ali_ircc_setup(chipio_t *info) /* Should be 0x00 in the M1535/M1535D */ if(version != 0x00) { - IRDA_ERROR("%s, Wrong chip version %02x\n", - ALI_IRCC_DRIVER_NAME, version); + net_err_ratelimited("%s, Wrong chip version %02x\n", + ALI_IRCC_DRIVER_NAME, version); return -1; } @@ -612,14 +601,13 @@ static int ali_ircc_setup(chipio_t *info) /* Switch to SIR space */ FIR2SIR(iobase); - IRDA_MESSAGE("%s, driver loaded (Benjamin Kong)\n", - ALI_IRCC_DRIVER_NAME); + net_info_ratelimited("%s, driver loaded (Benjamin Kong)\n", + ALI_IRCC_DRIVER_NAME); /* Enable receive interrupts */ // outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM // Turn on the interrupts in ali_ircc_net_open - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); return 0; } @@ -636,7 +624,6 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info) int dongle_id, reg; int cfg_base = info->cfg_base; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); /* Enter Configuration */ outb(chips[i].entr1, cfg_base); @@ -650,13 +637,12 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info) outb(0xf0, cfg_base); reg = inb(cfg_base+1); dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01); - IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __func__, - dongle_id, dongle_types[dongle_id]); + pr_debug("%s(), probing dongle_id=%d, dongle_types=%s\n", + __func__, dongle_id, dongle_types[dongle_id]); /* Exit configuration */ outb(0xbb, cfg_base); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); return dongle_id; } @@ -673,7 +659,6 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id) struct ali_ircc_cb *self; int ret; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); self = netdev_priv(dev); @@ -687,7 +672,6 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id) spin_unlock(&self->lock); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); return ret; } /* @@ -701,7 +685,6 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) __u8 eir, OldMessageCount; int iobase, tmp; - IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__); iobase = self->io.fir_base; @@ -714,10 +697,10 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) //self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM eir = 
self->InterruptID & self->ier; /* Mask out the interesting ones */ - IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __func__,self->InterruptID); - IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __func__,self->LineStatus); - IRDA_DEBUG(1, "%s(), self->ier = %x\n", __func__,self->ier); - IRDA_DEBUG(1, "%s(), eir = %x\n", __func__,eir); + pr_debug("%s(), self->InterruptID = %x\n", __func__, self->InterruptID); + pr_debug("%s(), self->LineStatus = %x\n", __func__, self->LineStatus); + pr_debug("%s(), self->ier = %x\n", __func__, self->ier); + pr_debug("%s(), eir = %x\n", __func__, eir); /* Disable interrupts */ SetCOMInterrupts(self, FALSE); @@ -728,7 +711,8 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) { if (self->io.direction == IO_XMIT) /* TX */ { - IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __func__); + pr_debug("%s(), ******* IIR_EOM (Tx) *******\n", + __func__); if(ali_ircc_dma_xmit_complete(self)) { @@ -747,23 +731,27 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) } else /* RX */ { - IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __func__); + pr_debug("%s(), ******* IIR_EOM (Rx) *******\n", + __func__); if(OldMessageCount > ((self->LineStatus+1) & 0x07)) { self->rcvFramesOverflow = TRUE; - IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__); + pr_debug("%s(), ******* self->rcvFramesOverflow = TRUE ********\n", + __func__); } if (ali_ircc_dma_receive_complete(self)) { - IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__); + pr_debug("%s(), ******* receive complete ********\n", + __func__); self->ier = IER_EOM; } else { - IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__); + pr_debug("%s(), ******* Not receive complete ********\n", + __func__); self->ier = IER_EOM | IER_TIMER; } @@ -776,7 +764,8 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) if(OldMessageCount > ((self->LineStatus+1) & 0x07)) { self->rcvFramesOverflow = TRUE; - IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE *******\n", __func__); + pr_debug("%s(), ******* self->rcvFramesOverflow = TRUE *******\n", + __func__); } /* Disable Timer */ switch_bank(iobase, BANK1); @@ -808,7 +797,6 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) /* Restore Interrupt */ SetCOMInterrupts(self, TRUE); - IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __func__); return IRQ_RETVAL(eir); } @@ -823,7 +811,6 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self) int iobase; int iir, lsr; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); iobase = self->io.sir_base; @@ -832,13 +819,13 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self) /* Clear interrupt */ lsr = inb(iobase+UART_LSR); - IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __func__, - iir, lsr, iobase); + pr_debug("%s(), iir=%02x, lsr=%02x, iobase=%#x\n", + __func__, iir, lsr, iobase); switch (iir) { case UART_IIR_RLSI: - IRDA_DEBUG(2, "%s(), RLSI\n", __func__); + pr_debug("%s(), RLSI\n", __func__); break; case UART_IIR_RDI: /* Receive interrupt */ @@ -852,15 +839,14 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self) } break; default: - IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __func__, iir); + pr_debug("%s(), unhandled IIR=%#x\n", + __func__, iir); break; } } - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); - return IRQ_RETVAL(iir); } @@ -876,7 +862,6 @@ static 
void ali_ircc_sir_receive(struct ali_ircc_cb *self) int boguscount = 0; int iobase; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); IRDA_ASSERT(self != NULL, return;); iobase = self->io.sir_base; @@ -891,12 +876,11 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self) /* Make sure we don't stay here too long */ if (boguscount++ > 32) { - IRDA_DEBUG(2,"%s(), breaking!\n", __func__); + pr_debug("%s(), breaking!\n", __func__); break; } } while (inb(iobase+UART_LSR) & UART_LSR_DR); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); } /* @@ -913,7 +897,6 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self) IRDA_ASSERT(self != NULL, return;); - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); iobase = self->io.sir_base; @@ -932,16 +915,18 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self) { /* We must wait until all data are gone */ while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT)) - IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __func__ ); + pr_debug("%s(), UART_LSR_THRE\n", __func__); - IRDA_DEBUG(1, "%s(), Changing speed! self->new_speed = %d\n", __func__ , self->new_speed); + pr_debug("%s(), Changing speed! self->new_speed = %d\n", + __func__, self->new_speed); ali_ircc_change_speed(self, self->new_speed); self->new_speed = 0; // benjamin 2000/11/10 06:32PM if (self->io.speed > 115200) { - IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ ); + pr_debug("%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", + __func__); self->ier = IER_EOM; // SetCOMInterrupts(self, TRUE); @@ -959,7 +944,6 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self) outb(UART_IER_RDI, iobase+UART_IER); } - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); } static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud) @@ -967,9 +951,8 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud) struct net_device *dev = self->netdev; int iobase; - IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); - IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud); + pr_debug("%s(), setting speed = %d\n", __func__, baud); /* This function *must* be called with irq off and spin-lock. 
* - Jean II */ @@ -1008,7 +991,6 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud) netif_wake_queue(self->netdev); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); } static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud) @@ -1018,14 +1000,14 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud) struct ali_ircc_cb *self = priv; struct net_device *dev; - IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); IRDA_ASSERT(self != NULL, return;); dev = self->netdev; iobase = self->io.fir_base; - IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __func__ ,self->io.speed,baud); + pr_debug("%s(), self->io.speed = %d, change to speed = %d\n", + __func__, self->io.speed, baud); /* Come from SIR speed */ if(self->io.speed <=115200) @@ -1039,7 +1021,6 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud) // Set Dongle Speed mode ali_ircc_change_dongle_speed(self, baud); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); } /* @@ -1057,9 +1038,8 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) int lcr; /* Line control reg */ int divisor; - IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); - IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __func__ , speed); + pr_debug("%s(), Setting speed to: %d\n", __func__, speed); IRDA_ASSERT(self != NULL, return;); @@ -1113,7 +1093,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) spin_unlock_irqrestore(&self->lock, flags); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); } static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) @@ -1123,14 +1102,14 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) int iobase,dongle_id; int tmp = 0; - IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */ dongle_id = self->io.dongle_id; /* We are already locked, no need to do it again */ - IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __func__ , dongle_types[dongle_id], speed); + pr_debug("%s(), Set Speed for %s , Speed = %d\n", + __func__, dongle_types[dongle_id], speed); switch_bank(iobase, BANK2); tmp = inb(iobase+FIR_IRDA_CR); @@ -1294,7 +1273,6 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) switch_bank(iobase, BANK0); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); } /* @@ -1307,11 +1285,10 @@ static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len) { int actual = 0; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); /* Tx FIFO should be empty! 
*/ if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) { - IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __func__ ); + pr_debug("%s(), failed, fifo not empty!\n", __func__); return 0; } @@ -1323,7 +1300,6 @@ static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len) actual++; } - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); return actual; } @@ -1339,7 +1315,6 @@ static int ali_ircc_net_open(struct net_device *dev) int iobase; char hwname[32]; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); IRDA_ASSERT(dev != NULL, return -1;); @@ -1352,9 +1327,8 @@ static int ali_ircc_net_open(struct net_device *dev) /* Request IRQ and install Interrupt Handler */ if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev)) { - IRDA_WARNING("%s, unable to allocate irq=%d\n", - ALI_IRCC_DRIVER_NAME, - self->io.irq); + net_warn_ratelimited("%s, unable to allocate irq=%d\n", + ALI_IRCC_DRIVER_NAME, self->io.irq); return -EAGAIN; } @@ -1363,9 +1337,8 @@ static int ali_ircc_net_open(struct net_device *dev) * failure. */ if (request_dma(self->io.dma, dev->name)) { - IRDA_WARNING("%s, unable to allocate dma=%d\n", - ALI_IRCC_DRIVER_NAME, - self->io.dma); + net_warn_ratelimited("%s, unable to allocate dma=%d\n", + ALI_IRCC_DRIVER_NAME, self->io.dma); free_irq(self->io.irq, dev); return -EAGAIN; } @@ -1385,7 +1358,6 @@ static int ali_ircc_net_open(struct net_device *dev) */ self->irlap = irlap_open(dev, &self->qos, hwname); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); return 0; } @@ -1402,7 +1374,6 @@ static int ali_ircc_net_close(struct net_device *dev) struct ali_ircc_cb *self; //int iobase; - IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__ ); IRDA_ASSERT(dev != NULL, return -1;); @@ -1425,7 +1396,6 @@ static int ali_ircc_net_close(struct net_device *dev) free_irq(self->io.irq, dev); free_dma(self->io.dma); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); return 0; } @@ -1445,7 +1415,6 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, __u32 speed; int mtt, diff; - IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); self = netdev_priv(dev); iobase = self->io.fir_base; @@ -1499,7 +1468,8 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, diff = self->now.tv_usec - self->stamp.tv_usec; /* self->stamp is set from ali_ircc_dma_receive_complete() */ - IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff); + pr_debug("%s(), ******* diff = %d *******\n", + __func__, diff); if (diff < 0) diff += 1000000; @@ -1521,7 +1491,8 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, /* Adjust for timer resolution */ mtt = (mtt+250) / 500; /* 4 discard, 5 get advanced, Let's round off */ - IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __func__ , mtt); + pr_debug("%s(), ************** mtt = %d ***********\n", + __func__, mtt); /* Setup timer */ if (mtt == 1) /* 500 us */ @@ -1578,7 +1549,6 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, spin_unlock_irqrestore(&self->lock, flags); dev_kfree_skb(skb); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); return NETDEV_TX_OK; } @@ -1589,7 +1559,6 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self) unsigned char FIFO_OPTI, Hi, Lo; - IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); iobase = self->io.fir_base; @@ -1640,7 +1609,8 @@ 
static void ali_ircc_dma_xmit(struct ali_ircc_cb *self) tmp = inb(iobase+FIR_LCR_B); tmp &= ~0x20; // Disable SIP outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B); - IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B)); + pr_debug("%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", + __func__, inb(iobase + FIR_LCR_B)); outb(0, iobase+FIR_LSR); @@ -1650,7 +1620,6 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self) switch_bank(iobase, BANK0); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); } static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self) @@ -1658,7 +1627,6 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self) int iobase; int ret = TRUE; - IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); iobase = self->io.fir_base; @@ -1671,7 +1639,8 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self) if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT) { - IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__); + net_err_ratelimited("%s(), ********* LSR_FRAME_ABORT *********\n", + __func__); self->netdev->stats.tx_errors++; self->netdev->stats.tx_fifo_errors++; } @@ -1714,7 +1683,6 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self) switch_bank(iobase, BANK0); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); return ret; } @@ -1729,7 +1697,6 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self) { int iobase, tmp; - IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); iobase = self->io.fir_base; @@ -1767,7 +1734,8 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self) //switch_bank(iobase, BANK0); tmp = inb(iobase+FIR_LCR_B); outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM - IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B)); + pr_debug("%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", + __func__, inb(iobase + FIR_LCR_B)); /* Set Rx Threshold */ switch_bank(iobase, BANK1); @@ -1779,7 +1747,6 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self) outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR); switch_bank(iobase, BANK0); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); return 0; } @@ -1790,8 +1757,6 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) __u8 status, MessageCount; int len, i, iobase, val; - IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); - st_fifo = &self->st_fifo; iobase = self->io.fir_base; @@ -1799,7 +1764,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) MessageCount = inb(iobase+ FIR_LSR)&0x07; if (MessageCount > 0) - IRDA_DEBUG(0, "%s(), Message count = %d,\n", __func__ , MessageCount); + pr_debug("%s(), Message count = %d\n", __func__, MessageCount); for (i=0; i<=MessageCount; i++) { @@ -1812,11 +1777,11 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) len = len << 8; len |= inb(iobase+FIR_RX_DSR_LO); - IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __func__ , len); - IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __func__ , status); + pr_debug("%s(), RX Length = 0x%.2x,\n", __func__ , len); + pr_debug("%s(), RX Status = 0x%.2x,\n", __func__ , status); if (st_fifo->tail >= MAX_RX_WINDOW) { - IRDA_DEBUG(0, "%s(), window is full!\n", __func__ ); + pr_debug("%s(), 
window is full!\n", __func__); continue; } @@ -1839,7 +1804,8 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) /* Check for errors */ if ((status & 0xd8) || self->rcvFramesOverflow || (len==0)) { - IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ ); + pr_debug("%s(), ************* RX Errors ************\n", + __func__); /* Skip frame */ self->netdev->stats.rx_errors++; @@ -1849,29 +1815,34 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) if (status & LSR_FIFO_UR) { self->netdev->stats.rx_frame_errors++; - IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ ); + pr_debug("%s(), ************* FIFO Errors ************\n", + __func__); } if (status & LSR_FRAME_ERROR) { self->netdev->stats.rx_frame_errors++; - IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ ); + pr_debug("%s(), ************* FRAME Errors ************\n", + __func__); } if (status & LSR_CRC_ERROR) { self->netdev->stats.rx_crc_errors++; - IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ ); + pr_debug("%s(), ************* CRC Errors ************\n", + __func__); } if(self->rcvFramesOverflow) { self->netdev->stats.rx_frame_errors++; - IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ ); + pr_debug("%s(), ************* Overran DMA buffer ************\n", + __func__); } if(len == 0) { self->netdev->stats.rx_frame_errors++; - IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 *********\n", __func__ ); + pr_debug("%s(), ********** Receive Frame Size = 0 *********\n", + __func__); } } else @@ -1883,7 +1854,8 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) val = inb(iobase+FIR_BSR); if ((val& BSR_FIFO_NOT_EMPTY)== 0x80) { - IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ ); + pr_debug("%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", + __func__); /* Put this entry back in fifo */ st_fifo->head--; @@ -1918,9 +1890,6 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) skb = dev_alloc_skb(len+1); if (skb == NULL) { - IRDA_WARNING("%s(), memory squeeze, " - "dropping frame.\n", - __func__); self->netdev->stats.rx_dropped++; return FALSE; @@ -1947,7 +1916,6 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) switch_bank(iobase, BANK0); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); return TRUE; } @@ -1967,7 +1935,6 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, int iobase; __u32 speed; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;); @@ -2016,7 +1983,6 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, dev_kfree_skb(skb); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); return NETDEV_TX_OK; } @@ -2035,7 +2001,6 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) unsigned long flags; int ret = 0; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); IRDA_ASSERT(dev != NULL, return -1;); @@ -2043,11 +2008,11 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) IRDA_ASSERT(self != NULL, return -1;); - IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd); + pr_debug("%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd); switch (cmd) { case SIOCSBANDWIDTH: /* Set bandwidth */ - IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", 
__func__ ); + pr_debug("%s(), SIOCSBANDWIDTH\n", __func__); /* * This function will also be used by IrLAP to change the * speed, so we still must allow for speed change within @@ -2061,13 +2026,13 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) spin_unlock_irqrestore(&self->lock, flags); break; case SIOCSMEDIABUSY: /* Set media busy */ - IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __func__ ); + pr_debug("%s(), SIOCSMEDIABUSY\n", __func__); if (!capable(CAP_NET_ADMIN)) return -EPERM; irda_device_set_media_busy(self->netdev, TRUE); break; case SIOCGRECEIVING: /* Check if we are receiving right now */ - IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __func__ ); + pr_debug("%s(), SIOCGRECEIVING\n", __func__); /* This is protected */ irq->ifr_receiving = ali_ircc_is_receiving(self); break; @@ -2075,7 +2040,6 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) ret = -EOPNOTSUPP; } - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); return ret; } @@ -2092,7 +2056,6 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self) int status = FALSE; int iobase; - IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __func__ ); IRDA_ASSERT(self != NULL, return FALSE;); @@ -2106,7 +2069,8 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self) if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0) { /* We are receiving something */ - IRDA_DEBUG(1, "%s(), We are receiving something\n", __func__ ); + pr_debug("%s(), We are receiving something\n", + __func__); status = TRUE; } switch_bank(iobase, BANK0); @@ -2118,7 +2082,6 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self) spin_unlock_irqrestore(&self->lock, flags); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); return status; } @@ -2127,7 +2090,7 @@ static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state) { struct ali_ircc_cb *self = platform_get_drvdata(dev); - IRDA_MESSAGE("%s, Suspending\n", ALI_IRCC_DRIVER_NAME); + net_info_ratelimited("%s, Suspending\n", ALI_IRCC_DRIVER_NAME); if (self->io.suspended) return 0; @@ -2148,7 +2111,7 @@ static int ali_ircc_resume(struct platform_device *dev) ali_ircc_net_open(self->netdev); - IRDA_MESSAGE("%s, Waking up\n", ALI_IRCC_DRIVER_NAME); + net_info_ratelimited("%s, Waking up\n", ALI_IRCC_DRIVER_NAME); self->io.suspended = 0; @@ -2164,7 +2127,8 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable) int iobase = self->io.fir_base; /* or sir_base */ - IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __func__ , enable); + pr_debug("%s(), -------- Start -------- ( Enable = %d )\n", + __func__, enable); /* Enable the interrupt which we wish to */ if (enable){ @@ -2205,14 +2169,12 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable) else outb(newMask, iobase+UART_IER); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); } static void SIR2FIR(int iobase) { //unsigned char tmp; - IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); /* Already protected (change_speed() or setup()), no need to lock. 
* Jean II */ @@ -2228,14 +2190,12 @@ static void SIR2FIR(int iobase) //tmp |= 0x20; //outb(tmp, iobase+FIR_LCR_B); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); } static void FIR2SIR(int iobase) { unsigned char val; - IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); /* Already protected (change_speed() or setup()), no need to lock. * Jean II */ @@ -2251,7 +2211,6 @@ static void FIR2SIR(int iobase) val = inb(iobase+UART_LSR); val = inb(iobase+UART_MSR); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); } MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>"); diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index a87a82ca111f..b337e6d23a88 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c @@ -232,7 +232,7 @@ char head=tete; for (i=0;i<len;i+=16) { for (j=0;j<16 && i+j<len;j++) { sprintf(&dump[3*j],"%02x.",data[i+j]); } dump [3*j]=0; - IRDA_DEBUG (2, "%c%s\n",head , dump); + pr_debug("%c%s\n", head, dump); head='+'; } } @@ -245,8 +245,6 @@ toshoboe_dumpregs (struct toshoboe_cb *self) { __u32 ringbase; - IRDA_DEBUG (4, "%s()\n", __func__); - ringbase = INB (OBOE_RING_BASE0) << 10; ringbase |= INB (OBOE_RING_BASE1) << 18; ringbase |= INB (OBOE_RING_BASE2) << 26; @@ -293,8 +291,6 @@ static void toshoboe_disablebm (struct toshoboe_cb *self) { __u8 command; - IRDA_DEBUG (4, "%s()\n", __func__); - pci_read_config_byte (self->pdev, PCI_COMMAND, &command); command &= ~PCI_COMMAND_MASTER; pci_write_config_byte (self->pdev, PCI_COMMAND, command); @@ -305,8 +301,6 @@ toshoboe_disablebm (struct toshoboe_cb *self) static void toshoboe_stopchip (struct toshoboe_cb *self) { - IRDA_DEBUG (4, "%s()\n", __func__); - /*Disable interrupts */ OUTB (0x0, OBOE_IER); /*Disable DMA, Disable Rx, Disable Tx */ @@ -350,7 +344,7 @@ toshoboe_setbaud (struct toshoboe_cb *self) __u16 pconfig = 0; __u8 config0l = 0; - IRDA_DEBUG (2, "%s(%d/%d)\n", __func__, self->speed, self->io.speed); + pr_debug("%s(%d/%d)\n", __func__, self->speed, self->io.speed); switch (self->speed) { @@ -482,7 +476,6 @@ toshoboe_setbaud (struct toshoboe_cb *self) static void toshoboe_enablebm (struct toshoboe_cb *self) { - IRDA_DEBUG (4, "%s()\n", __func__); pci_set_master (self->pdev); } @@ -492,8 +485,6 @@ toshoboe_initring (struct toshoboe_cb *self) { int i; - IRDA_DEBUG (4, "%s()\n", __func__); - for (i = 0; i < TX_SLOTS; ++i) { self->ring->tx[i].len = 0; @@ -550,8 +541,6 @@ toshoboe_startchip (struct toshoboe_cb *self) { __u32 physaddr; - IRDA_DEBUG (4, "%s()\n", __func__); - toshoboe_initring (self); toshoboe_enablebm (self); OUTBP (OBOE_CONFIG1_RESET, OBOE_CONFIG1); @@ -636,9 +625,8 @@ toshoboe_makemttpacket (struct toshoboe_cb *self, void *buf, int mtt) xbofs=xbofs/80000; /*Eight bits per byte, and mtt is in us*/ xbofs++; - IRDA_DEBUG (2, DRIVER_NAME - ": generated mtt of %d bytes for %d us at %d baud\n" - , xbofs,mtt,self->speed); + pr_debug(DRIVER_NAME ": generated mtt of %d bytes for %d us at %d baud\n", + xbofs, mtt, self->speed); if (xbofs > TX_LEN) { @@ -824,8 +812,6 @@ toshoboe_probe (struct toshoboe_cb *self) #endif unsigned long flags; - IRDA_DEBUG (4, "%s()\n", __func__); - if (request_irq (self->io.irq, toshoboe_probeinterrupt, self->io.irqflags, "toshoboe", (void *) self)) { @@ -983,10 +969,10 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev) IRDA_ASSERT (self != NULL, return NETDEV_TX_OK; ); - IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __func__ - ,skb->len,self->txpending,INB 
(OBOE_ENABLEH)); + pr_debug("%s.tx:%x(%x)%x\n", + __func__, skb->len, self->txpending, INB(OBOE_ENABLEH)); if (!cb->magic) { - IRDA_DEBUG (2, "%s.Not IrLAP:%x\n", __func__, cb->magic); + pr_debug("%s.Not IrLAP:%x\n", __func__, cb->magic); #ifdef DUMP_PACKETS _dumpbufs(skb->data,skb->len,'>'); #endif @@ -1012,8 +998,8 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev) if (self->txpending || skb->len) { self->new_speed = speed; - IRDA_DEBUG (1, "%s: Queued TxDone scheduled speed change %d\n" , - __func__, speed); + pr_debug("%s: Queued TxDone scheduled speed change %d\n" , + __func__, speed); /* if no data, that's all! */ if (!skb->len) { @@ -1055,8 +1041,7 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev) /* which we will add a wrong checksum to */ mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt); - IRDA_DEBUG (1, "%s.mtt:%x(%x)%d\n", __func__ - ,skb->len,mtt,self->txpending); + pr_debug("%s.mtt:%x(%x)%d\n", __func__, skb->len, mtt, self->txpending); if (mtt) { self->ring->tx[self->txs].len = mtt & 0xfff; @@ -1099,8 +1084,9 @@ dumpbufs(skb->data,skb->len,'>'); if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS) { - IRDA_DEBUG (0, "%s.ful:%x(%x)%x\n", __func__ - ,skb->len, self->ring->tx[self->txs].control, self->txpending); + pr_debug("%s.ful:%x(%x)%x\n", + __func__, skb->len, self->ring->tx[self->txs].control, + self->txpending); toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX); spin_unlock_irqrestore(&self->spinlock, flags); return NETDEV_TX_BUSY; @@ -1177,8 +1163,7 @@ toshoboe_interrupt (int irq, void *dev_id) if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS) self->txpending++; } - IRDA_DEBUG (1, "%s.txd(%x)%x/%x\n", __func__ - ,irqstat,txp,self->txpending); + pr_debug("%s.txd(%x)%x/%x\n", __func__, irqstat, txp, self->txpending); txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK; @@ -1206,8 +1191,8 @@ toshoboe_interrupt (int irq, void *dev_id) if ((!self->txpending) && (self->new_speed)) { self->speed = self->new_speed; - IRDA_DEBUG (1, "%s: Executed TxDone scheduled speed change %d\n", - __func__, self->speed); + pr_debug("%s: Executed TxDone scheduled speed change %d\n", + __func__, self->speed); toshoboe_setbaud (self); } @@ -1222,8 +1207,8 @@ toshoboe_interrupt (int irq, void *dev_id) { int len = self->ring->rx[self->rxs].len; skb = NULL; - IRDA_DEBUG (3, "%s.rcv:%x(%x)\n", __func__ - ,len,self->ring->rx[self->rxs].control); + pr_debug("%s.rcv:%x(%x)\n", __func__ + , len, self->ring->rx[self->rxs].control); #ifdef DUMP_PACKETS dumpbufs(self->rx_bufs[self->rxs],len,'<'); @@ -1244,7 +1229,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<'); len -= 2; else len = 0; - IRDA_DEBUG (1, "%s.SIR:%x(%x)\n", __func__, len,enable); + pr_debug("%s.SIR:%x(%x)\n", __func__, len, enable); } #ifdef USE_MIR @@ -1254,7 +1239,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<'); len -= 2; else len = 0; - IRDA_DEBUG (2, "%s.MIR:%x(%x)\n", __func__, len,enable); + pr_debug("%s.MIR:%x(%x)\n", __func__, len, enable); } #endif else if (enable & OBOE_ENABLEH_FIRON) @@ -1263,10 +1248,10 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<'); len -= 4; /*FIXME: check this */ else len = 0; - IRDA_DEBUG (1, "%s.FIR:%x(%x)\n", __func__, len,enable); + pr_debug("%s.FIR:%x(%x)\n", __func__, len, enable); } else - IRDA_DEBUG (0, "%s.?IR:%x(%x)\n", __func__, len,enable); + pr_debug("%s.?IR:%x(%x)\n", __func__, len, enable); if (len) { @@ -1299,8 +1284,8 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<'); /* (SIR) data is splitted in several slots. 
*/ /* we have to join all the received buffers received */ /*in a large buffer before checking CRC. */ - IRDA_DEBUG (0, "%s.err:%x(%x)\n", __func__ - ,len,self->ring->rx[self->rxs].control); + pr_debug("%s.err:%x(%x)\n", __func__ + , len, self->ring->rx[self->rxs].control); } self->ring->rx[self->rxs].len = 0x0; @@ -1327,8 +1312,8 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<'); if (irqstat & OBOE_INT_SIP) { self->int_sip++; - IRDA_DEBUG (1, "%s.sip:%x(%x)%x\n", __func__ - ,self->int_sip,irqstat,self->txpending); + pr_debug("%s.sip:%x(%x)%x\n", + __func__, self->int_sip, irqstat, self->txpending); } return IRQ_HANDLED; } @@ -1341,8 +1326,6 @@ toshoboe_net_open (struct net_device *dev) unsigned long flags; int rc; - IRDA_DEBUG (4, "%s()\n", __func__); - self = netdev_priv(dev); if (self->async) @@ -1379,8 +1362,6 @@ toshoboe_net_close (struct net_device *dev) { struct toshoboe_cb *self; - IRDA_DEBUG (4, "%s()\n", __func__); - IRDA_ASSERT (dev != NULL, return -1; ); self = netdev_priv(dev); @@ -1424,7 +1405,7 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) IRDA_ASSERT (self != NULL, return -1; ); - IRDA_DEBUG (5, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); + pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); /* Disable interrupts & save flags */ spin_lock_irqsave(&self->spinlock, flags); @@ -1436,8 +1417,8 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) * speed, so we still must allow for speed change within * interrupt context. */ - IRDA_DEBUG (1, "%s(BANDWIDTH), %s, (%X/%ld\n", __func__ - ,dev->name, INB (OBOE_STATUS), irq->ifr_baudrate ); + pr_debug("%s(BANDWIDTH), %s, (%X/%ld\n", + __func__, dev->name, INB(OBOE_STATUS), irq->ifr_baudrate); if (!in_interrupt () && !capable (CAP_NET_ADMIN)) { ret = -EPERM; goto out; @@ -1449,8 +1430,9 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) self->new_speed = irq->ifr_baudrate; break; case SIOCSMEDIABUSY: /* Set media busy */ - IRDA_DEBUG (1, "%s(MEDIABUSY), %s, (%X/%x)\n", __func__ - ,dev->name, INB (OBOE_STATUS), capable (CAP_NET_ADMIN) ); + pr_debug("%s(MEDIABUSY), %s, (%X/%x)\n", + __func__, dev->name, + INB(OBOE_STATUS), capable(CAP_NET_ADMIN)); if (!capable (CAP_NET_ADMIN)) { ret = -EPERM; goto out; @@ -1459,11 +1441,11 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) break; case SIOCGRECEIVING: /* Check if we are receiving right now */ irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 
1 : 0; - IRDA_DEBUG (3, "%s(RECEIVING), %s, (%X/%x)\n", __func__ - ,dev->name, INB (OBOE_STATUS), irq->ifr_receiving ); + pr_debug("%s(RECEIVING), %s, (%X/%x)\n", + __func__, dev->name, INB(OBOE_STATUS), irq->ifr_receiving); break; default: - IRDA_DEBUG (1, "%s(?), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); + pr_debug("%s(?), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); ret = -EOPNOTSUPP; } out: @@ -1490,8 +1472,6 @@ toshoboe_close (struct pci_dev *pci_dev) int i; struct toshoboe_cb *self = pci_get_drvdata(pci_dev); - IRDA_DEBUG (4, "%s()\n", __func__); - IRDA_ASSERT (self != NULL, return; ); if (!self->stopped) @@ -1538,8 +1518,6 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid) int ok = 0; int err; - IRDA_DEBUG (4, "%s()\n", __func__); - if ((err=pci_enable_device(pci_dev))) return err; @@ -1700,8 +1678,6 @@ toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap) unsigned long flags; int i = 10; - IRDA_DEBUG (4, "%s()\n", __func__); - if (!self || self->stopped) return 0; @@ -1728,8 +1704,6 @@ toshoboe_wakeup (struct pci_dev *pci_dev) struct toshoboe_cb *self = pci_get_drvdata(pci_dev); unsigned long flags; - IRDA_DEBUG (4, "%s()\n", __func__); - if (!self || !self->stopped) return 0; diff --git a/drivers/net/irda/girbil-sir.c b/drivers/net/irda/girbil-sir.c index 96cdecff349d..7e0a5b8c6d53 100644 --- a/drivers/net/irda/girbil-sir.c +++ b/drivers/net/irda/girbil-sir.c @@ -86,8 +86,6 @@ static int girbil_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power on dongle */ sirdev_set_dtr_rts(dev, TRUE, TRUE); @@ -102,8 +100,6 @@ static int girbil_open(struct sir_dev *dev) static int girbil_close(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); @@ -126,8 +122,6 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed) u8 control[2]; static int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__); - /* dongle alread reset - port and dongle at default speed */ switch(state) { @@ -179,7 +173,8 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed) break; default: - IRDA_ERROR("%s - undefined state %d\n", __func__, state); + net_err_ratelimited("%s - undefined state %d\n", + __func__, state); ret = -EINVAL; break; } @@ -209,8 +204,6 @@ static int girbil_reset(struct sir_dev *dev) u8 control = GIRBIL_TXEN | GIRBIL_RXEN; int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__); - switch (state) { case SIRDEV_STATE_DONGLE_RESET: /* Reset dongle */ @@ -241,7 +234,8 @@ static int girbil_reset(struct sir_dev *dev) break; default: - IRDA_ERROR("%s(), undefined state %d\n", __func__, state); + net_err_ratelimited("%s(), undefined state %d\n", + __func__, state); ret = -1; break; } diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 925b78cc9797..48b2f9a321b7 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -176,12 +176,13 @@ static void irda_usb_build_header(struct irda_usb_cb *self, (!force) && (self->speed != -1)) { /* No speed and xbofs change here * (we'll do it later in the write callback) */ - IRDA_DEBUG(2, "%s(), not changing speed yet\n", __func__); + pr_debug("%s(), not changing speed yet\n", __func__); *header = 0; return; } - IRDA_DEBUG(2, "%s(), changing speed to %d\n", __func__, self->new_speed); + pr_debug("%s(), changing speed to %d\n", + __func__, self->new_speed); self->speed = self->new_speed; /* We will do ` self->new_speed = -1; ' in the completion * 
handler just in case the current URB fail - Jean II */ @@ -227,7 +228,8 @@ static void irda_usb_build_header(struct irda_usb_cb *self, /* Set the negotiated additional XBOFS */ if (self->new_xbofs != -1) { - IRDA_DEBUG(2, "%s(), changing xbofs to %d\n", __func__, self->new_xbofs); + pr_debug("%s(), changing xbofs to %d\n", + __func__, self->new_xbofs); self->xbofs = self->new_xbofs; /* We will do ` self->new_xbofs = -1; ' in the completion * handler just in case the current URB fail - Jean II */ @@ -301,13 +303,13 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self) struct urb *urb; int ret; - IRDA_DEBUG(2, "%s(), speed=%d, xbofs=%d\n", __func__, - self->new_speed, self->new_xbofs); + pr_debug("%s(), speed=%d, xbofs=%d\n", __func__, + self->new_speed, self->new_xbofs); /* Grab the speed URB */ urb = self->speed_urb; if (urb->status != 0) { - IRDA_WARNING("%s(), URB still in use!\n", __func__); + net_warn_ratelimited("%s(), URB still in use!\n", __func__); return; } @@ -333,7 +335,7 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self) /* Irq disabled -> GFP_ATOMIC */ if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) { - IRDA_WARNING("%s(), failed Speed URB\n", __func__); + net_warn_ratelimited("%s(), failed Speed URB\n", __func__); } } @@ -346,8 +348,6 @@ static void speed_bulk_callback(struct urb *urb) { struct irda_usb_cb *self = urb->context; - IRDA_DEBUG(2, "%s()\n", __func__); - /* We should always have a context */ IRDA_ASSERT(self != NULL, return;); /* We should always be called for the speed URB */ @@ -356,7 +356,8 @@ static void speed_bulk_callback(struct urb *urb) /* Check for timeout and other USB nasties */ if (urb->status != 0) { /* I get a lot of -ECONNABORTED = -103 here - Jean II */ - IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags); + pr_debug("%s(), URB complete status %d, transfer_flags 0x%04X\n", + __func__, urb->status, urb->transfer_flags); /* Don't do anything here, that might confuse the USB layer. 
* Instead, we will wait for irda_usb_net_timeout(), the @@ -391,7 +392,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb, s16 xbofs; int res, mtt; - IRDA_DEBUG(4, "%s() on %s\n", __func__, netdev->name); + pr_debug("%s() on %s\n", __func__, netdev->name); netif_stop_queue(netdev); @@ -402,7 +403,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb, * We need to check self->present under the spinlock because * of irda_usb_disconnect() is synchronous - Jean II */ if (!self->present) { - IRDA_DEBUG(0, "%s(), Device is gone...\n", __func__); + pr_debug("%s(), Device is gone...\n", __func__); goto drop; } @@ -435,7 +436,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb, } if (urb->status != 0) { - IRDA_WARNING("%s(), URB still in use!\n", __func__); + net_warn_ratelimited("%s(), URB still in use!\n", __func__); goto drop; } @@ -522,7 +523,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb, /* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */ if ((res = usb_submit_urb(urb, GFP_ATOMIC))) { - IRDA_WARNING("%s(), failed Tx URB\n", __func__); + net_warn_ratelimited("%s(), failed Tx URB\n", __func__); netdev->stats.tx_errors++; /* Let USB recover : We will catch that in the watchdog */ /*netif_start_queue(netdev);*/ @@ -554,8 +555,6 @@ static void write_bulk_callback(struct urb *urb) struct sk_buff *skb = urb->context; struct irda_usb_cb *self = ((struct irda_skb_cb *) skb->cb)->context; - IRDA_DEBUG(2, "%s()\n", __func__); - /* We should always have a context */ IRDA_ASSERT(self != NULL, return;); /* We should always be called for the speed URB */ @@ -568,7 +567,8 @@ static void write_bulk_callback(struct urb *urb) /* Check for timeout and other USB nasties */ if (urb->status != 0) { /* I get a lot of -ECONNABORTED = -103 here - Jean II */ - IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags); + pr_debug("%s(), URB complete status %d, transfer_flags 0x%04X\n", + __func__, urb->status, urb->transfer_flags); /* Don't do anything here, that might confuse the USB layer, * and we could go in recursion and blow the kernel stack... @@ -587,7 +587,7 @@ static void write_bulk_callback(struct urb *urb) /* If the network is closed, stop everything */ if ((!self->netopen) || (!self->present)) { - IRDA_DEBUG(0, "%s(), Network is gone...\n", __func__); + pr_debug("%s(), Network is gone...\n", __func__); spin_unlock_irqrestore(&self->lock, flags); return; } @@ -598,7 +598,7 @@ static void write_bulk_callback(struct urb *urb) (self->new_xbofs != self->xbofs)) { /* We haven't changed speed yet (because of * IUC_SPEED_BUG), so do it now - Jean II */ - IRDA_DEBUG(1, "%s(), Changing speed now...\n", __func__); + pr_debug("%s(), Changing speed now...\n", __func__); irda_usb_change_speed_xbofs(self); } else { /* New speed and xbof is now committed in hardware */ @@ -630,7 +630,7 @@ static void irda_usb_net_timeout(struct net_device *netdev) struct urb *urb; int done = 0; /* If we have made any progress */ - IRDA_DEBUG(0, "%s(), Network layer thinks we timed out!\n", __func__); + pr_debug("%s(), Network layer thinks we timed out!\n", __func__); IRDA_ASSERT(self != NULL, return;); /* Protect us from USB callbacks, net Tx and else. 
*/ @@ -638,7 +638,7 @@ static void irda_usb_net_timeout(struct net_device *netdev) /* self->present *MUST* be read under spinlock */ if (!self->present) { - IRDA_WARNING("%s(), device not present!\n", __func__); + net_warn_ratelimited("%s(), device not present!\n", __func__); netif_stop_queue(netdev); spin_unlock_irqrestore(&self->lock, flags); return; @@ -647,7 +647,8 @@ static void irda_usb_net_timeout(struct net_device *netdev) /* Check speed URB */ urb = self->speed_urb; if (urb->status != 0) { - IRDA_DEBUG(0, "%s: Speed change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags); + pr_debug("%s: Speed change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", + netdev->name, urb->status, urb->transfer_flags); switch (urb->status) { case -EINPROGRESS: @@ -672,7 +673,8 @@ static void irda_usb_net_timeout(struct net_device *netdev) if (urb->status != 0) { struct sk_buff *skb = urb->context; - IRDA_DEBUG(0, "%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags); + pr_debug("%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", + netdev->name, urb->status, urb->transfer_flags); /* Increase error count */ netdev->stats.tx_errors++; @@ -761,8 +763,6 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc struct irda_skb_cb *cb; int ret; - IRDA_DEBUG(2, "%s()\n", __func__); - /* This should never happen */ IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(urb != NULL, return;); @@ -783,8 +783,8 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc if (ret) { /* If this ever happen, we are in deep s***. * Basically, the Rx path will stop... */ - IRDA_WARNING("%s(), Failed to submit Rx URB %d\n", - __func__, ret); + net_warn_ratelimited("%s(), Failed to submit Rx URB %d\n", + __func__, ret); } } @@ -805,7 +805,7 @@ static void irda_usb_receive(struct urb *urb) struct urb *next_urb; unsigned int len, docopy; - IRDA_DEBUG(2, "%s(), len=%d\n", __func__, urb->actual_length); + pr_debug("%s(), len=%d\n", __func__, urb->actual_length); /* Find ourselves */ cb = (struct irda_skb_cb *) skb->cb; @@ -815,7 +815,7 @@ static void irda_usb_receive(struct urb *urb) /* If the network is closed or the device gone, stop everything */ if ((!self->netopen) || (!self->present)) { - IRDA_DEBUG(0, "%s(), Network is gone!\n", __func__); + pr_debug("%s(), Network is gone!\n", __func__); /* Don't re-submit the URB : will stall the Rx path */ return; } @@ -838,7 +838,8 @@ static void irda_usb_receive(struct urb *urb) /* Usually precursor to a hot-unplug on OHCI. 
*/ default: self->netdev->stats.rx_errors++; - IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags); + pr_debug("%s(), RX status %d, transfer_flags 0x%04X\n", + __func__, urb->status, urb->transfer_flags); break; } /* If we received an error, we don't want to resubmit the @@ -859,7 +860,7 @@ static void irda_usb_receive(struct urb *urb) /* Check for empty frames */ if (urb->actual_length <= self->header_length) { - IRDA_WARNING("%s(), empty frame!\n", __func__); + net_warn_ratelimited("%s(), empty frame!\n", __func__); goto done; } @@ -964,8 +965,6 @@ static void irda_usb_rx_defer_expired(unsigned long data) struct irda_skb_cb *cb; struct urb *next_urb; - IRDA_DEBUG(2, "%s()\n", __func__); - /* Find ourselves */ cb = (struct irda_skb_cb *) skb->cb; IRDA_ASSERT(cb != NULL, return;); @@ -1049,8 +1048,8 @@ static int stir421x_fw_upload(struct irda_usb_cb *self, self->bulk_out_ep), patch_block, block_size, &actual_len, msecs_to_jiffies(500)); - IRDA_DEBUG(3,"%s(): Bulk send %u bytes, ret=%d\n", - __func__, actual_len, ret); + pr_debug("%s(): Bulk send %u bytes, ret=%d\n", + __func__, actual_len, ret); if (ret < 0) break; @@ -1088,8 +1087,8 @@ static int stir421x_patch_device(struct irda_usb_cb *self) return ret; /* We get a patch from userspace */ - IRDA_MESSAGE("%s(): Received firmware %s (%zu bytes)\n", - __func__, stir421x_fw_name, fw->size); + net_info_ratelimited("%s(): Received firmware %s (%zu bytes)\n", + __func__, stir421x_fw_name, fw->size); ret = -EINVAL; @@ -1112,8 +1111,8 @@ static int stir421x_patch_device(struct irda_usb_cb *self) + ((build / 10) << 4) + (build % 10); - IRDA_DEBUG(3, "%s(): Firmware Product version %ld\n", - __func__, fw_version); + pr_debug("%s(): Firmware Product version %ld\n", + __func__, fw_version); } } @@ -1169,8 +1168,6 @@ static int irda_usb_net_open(struct net_device *netdev) char hwname[16]; int i; - IRDA_DEBUG(1, "%s()\n", __func__); - IRDA_ASSERT(netdev != NULL, return -1;); self = netdev_priv(netdev); IRDA_ASSERT(self != NULL, return -1;); @@ -1179,13 +1176,13 @@ static int irda_usb_net_open(struct net_device *netdev) /* Can only open the device if it's there */ if(!self->present) { spin_unlock_irqrestore(&self->lock, flags); - IRDA_WARNING("%s(), device not present!\n", __func__); + net_warn_ratelimited("%s(), device not present!\n", __func__); return -1; } if(self->needspatch) { spin_unlock_irqrestore(&self->lock, flags); - IRDA_WARNING("%s(), device needs patch\n", __func__) ; + net_warn_ratelimited("%s(), device needs patch\n", __func__); return -EIO ; } @@ -1227,8 +1224,6 @@ static int irda_usb_net_open(struct net_device *netdev) if (!skb) { /* If this ever happen, we are in deep s***. * Basically, we can't start the Rx path... 
*/ - IRDA_WARNING("%s(), Failed to allocate Rx skb\n", - __func__); return -1; } //skb_reserve(newskb, USB_IRDA_HEADER - 1); @@ -1251,8 +1246,6 @@ static int irda_usb_net_close(struct net_device *netdev) struct irda_usb_cb *self; int i; - IRDA_DEBUG(1, "%s()\n", __func__); - IRDA_ASSERT(netdev != NULL, return -1;); self = netdev_priv(netdev); IRDA_ASSERT(self != NULL, return -1;); @@ -1306,7 +1299,7 @@ static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) self = netdev_priv(dev); IRDA_ASSERT(self != NULL, return -1;); - IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); + pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); switch (cmd) { case SIOCSBANDWIDTH: /* Set bandwidth */ @@ -1356,7 +1349,6 @@ static inline void irda_usb_init_qos(struct irda_usb_cb *self) { struct irda_class_desc *desc; - IRDA_DEBUG(3, "%s()\n", __func__); desc = self->irda_desc; @@ -1372,8 +1364,10 @@ static inline void irda_usb_init_qos(struct irda_usb_cb *self) self->qos.window_size.bits = desc->bmWindowSize; self->qos.data_size.bits = desc->bmDataSize; - IRDA_DEBUG(0, "%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n", - __func__, self->qos.baud_rate.bits, self->qos.data_size.bits, self->qos.window_size.bits, self->qos.additional_bofs.bits, self->qos.min_turn_time.bits); + pr_debug("%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n", + __func__, self->qos.baud_rate.bits, self->qos.data_size.bits, + self->qos.window_size.bits, self->qos.additional_bofs.bits, + self->qos.min_turn_time.bits); /* Don't always trust what the dongle tell us */ if(self->capability & IUC_SIR_ONLY) @@ -1416,8 +1410,6 @@ static inline int irda_usb_open(struct irda_usb_cb *self) { struct net_device *netdev = self->netdev; - IRDA_DEBUG(1, "%s()\n", __func__); - netdev->netdev_ops = &irda_usb_netdev_ops; irda_usb_init_qos(self); @@ -1432,8 +1424,6 @@ static inline int irda_usb_open(struct irda_usb_cb *self) */ static inline void irda_usb_close(struct irda_usb_cb *self) { - IRDA_DEBUG(1, "%s()\n", __func__); - /* Remove netdevice */ unregister_netdev(self->netdev); @@ -1505,13 +1495,15 @@ static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_ /* This is our interrupt endpoint */ self->bulk_int_ep = ep; } else { - IRDA_ERROR("%s(), Unrecognised endpoint %02X.\n", __func__, ep); + net_err_ratelimited("%s(), Unrecognised endpoint %02X\n", + __func__, ep); } } } - IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n", - __func__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep); + pr_debug("%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n", + __func__, self->bulk_in_ep, self->bulk_out_ep, + self->bulk_out_mtu, self->bulk_int_ep); return (self->bulk_in_ep != 0) && (self->bulk_out_ep != 0); } @@ -1573,13 +1565,13 @@ static inline struct irda_class_desc *irda_usb_find_class_desc(struct usb_interf 0, intf->altsetting->desc.bInterfaceNumber, desc, sizeof(*desc), 500); - IRDA_DEBUG(1, "%s(), ret=%d\n", __func__, ret); + pr_debug("%s(), ret=%d\n", __func__, ret); if (ret < sizeof(*desc)) { - IRDA_WARNING("usb-irda: class_descriptor read %s (%d)\n", - (ret<0) ? "failed" : "too short", ret); + net_warn_ratelimited("usb-irda: class_descriptor read %s (%d)\n", + ret < 0 ? 
"failed" : "too short", ret); } else if (desc->bDescriptorType != USB_DT_IRDA) { - IRDA_WARNING("usb-irda: bad class_descriptor type\n"); + net_warn_ratelimited("usb-irda: bad class_descriptor type\n"); } else { #ifdef IU_DUMP_CLASS_DESC @@ -1622,9 +1614,9 @@ static int irda_usb_probe(struct usb_interface *intf, * don't need to check if the dongle is really ours. * Jean II */ - IRDA_MESSAGE("IRDA-USB found at address %d, Vendor: %x, Product: %x\n", - dev->devnum, le16_to_cpu(dev->descriptor.idVendor), - le16_to_cpu(dev->descriptor.idProduct)); + net_info_ratelimited("IRDA-USB found at address %d, Vendor: %x, Product: %x\n", + dev->devnum, le16_to_cpu(dev->descriptor.idVendor), + le16_to_cpu(dev->descriptor.idProduct)); net = alloc_irdadev(sizeof(*self)); if (!net) @@ -1680,7 +1672,8 @@ static int irda_usb_probe(struct usb_interface *intf, * specify an alternate, but very few driver do like this. * Jean II */ ret = usb_set_interface(dev, intf->altsetting->desc.bInterfaceNumber, 0); - IRDA_DEBUG(1, "usb-irda: set interface %d result %d\n", intf->altsetting->desc.bInterfaceNumber, ret); + pr_debug("usb-irda: set interface %d result %d\n", + intf->altsetting->desc.bInterfaceNumber, ret); switch (ret) { case 0: break; @@ -1688,10 +1681,11 @@ static int irda_usb_probe(struct usb_interface *intf, /* Martin Diehl says if we get a -EPIPE we should * be fine and we don't need to do a usb_clear_halt(). * - Jean II */ - IRDA_DEBUG(0, "%s(), Received -EPIPE, ignoring...\n", __func__); + pr_debug("%s(), Received -EPIPE, ignoring...\n", + __func__); break; default: - IRDA_DEBUG(0, "%s(), Unknown error %d\n", __func__, ret); + pr_debug("%s(), Unknown error %d\n", __func__, ret); ret = -EIO; goto err_out_3; } @@ -1700,7 +1694,7 @@ static int irda_usb_probe(struct usb_interface *intf, interface = intf->cur_altsetting; if(!irda_usb_parse_endpoints(self, interface->endpoint, interface->desc.bNumEndpoints)) { - IRDA_ERROR("%s(), Bogus endpoints...\n", __func__); + net_err_ratelimited("%s(), Bogus endpoints...\n", __func__); ret = -EIO; goto err_out_3; } @@ -1717,7 +1711,7 @@ static int irda_usb_probe(struct usb_interface *intf, ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0), 0x02, 0x40, 0, 0, NULL, 0, 500); if (ret < 0) { - IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret); + pr_debug("usb_control_msg failed %d\n", ret); goto err_out_3; } else { mdelay(10); @@ -1746,7 +1740,7 @@ static int irda_usb_probe(struct usb_interface *intf, if (ret) goto err_out_5; - IRDA_MESSAGE("IrDA: Registered device %s\n", net->name); + net_info_ratelimited("IrDA: Registered device %s\n", net->name); usb_set_intfdata(intf, self); if (self->needspatch) { @@ -1754,7 +1748,7 @@ static int irda_usb_probe(struct usb_interface *intf, ret = stir421x_patch_device(self); self->needspatch = (ret < 0); if (self->needspatch) { - IRDA_ERROR("STIR421X: Couldn't upload patch\n"); + net_err_ratelimited("STIR421X: Couldn't upload patch\n"); goto err_out_6; } @@ -1809,8 +1803,6 @@ static void irda_usb_disconnect(struct usb_interface *intf) struct irda_usb_cb *self = usb_get_intfdata(intf); int i; - IRDA_DEBUG(1, "%s()\n", __func__); - usb_set_intfdata(intf, NULL); if (!self) return; @@ -1859,7 +1851,7 @@ static void irda_usb_disconnect(struct usb_interface *intf) /* Free self and network device */ free_netdev(self->netdev); - IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __func__); + pr_debug("%s(), USB IrDA Disconnected\n", __func__); } #ifdef CONFIG_PM diff --git a/drivers/net/irda/irtty-sir.c 
b/drivers/net/irda/irtty-sir.c index 24b6dddd7f2f..696852eb23c3 100644 --- a/drivers/net/irda/irtty-sir.c +++ b/drivers/net/irda/irtty-sir.c @@ -231,7 +231,7 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp, dev = priv->dev; if (!dev) { - IRDA_WARNING("%s(), not ready yet!\n", __func__); + net_warn_ratelimited("%s(), not ready yet!\n", __func__); return; } @@ -240,7 +240,7 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp, * Characters received with a parity error, etc? */ if (fp && *fp++) { - IRDA_DEBUG(0, "Framing or parity error!\n"); + pr_debug("Framing or parity error!\n"); sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */ return; } @@ -387,7 +387,7 @@ static int irtty_ioctl(struct tty_struct *tty, struct file *file, unsigned int c IRDA_ASSERT(priv != NULL, return -ENODEV;); IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EBADR;); - IRDA_DEBUG(3, "%s(cmd=0x%X)\n", __func__, cmd); + pr_debug("%s(cmd=0x%X)\n", __func__, cmd); dev = priv->dev; IRDA_ASSERT(dev != NULL, return -1;); @@ -477,7 +477,7 @@ static int irtty_open(struct tty_struct *tty) mutex_unlock(&irtty_mutex); - IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __func__, tty->name); + pr_debug("%s - %s: irda line discipline opened\n", __func__, tty->name); return 0; @@ -528,7 +528,7 @@ static void irtty_close(struct tty_struct *tty) kfree(priv); - IRDA_DEBUG(0, "%s - %s: irda line discipline closed\n", __func__, tty->name); + pr_debug("%s - %s: irda line discipline closed\n", __func__, tty->name); } /* ------------------------------------------------------- */ @@ -555,8 +555,8 @@ static int __init irtty_sir_init(void) int err; if ((err = tty_register_ldisc(N_IRDA, &irda_ldisc)) != 0) - IRDA_ERROR("IrDA: can't register line discipline (err = %d)\n", - err); + net_err_ratelimited("IrDA: can't register line discipline (err = %d)\n", + err); return err; } @@ -565,8 +565,8 @@ static void __exit irtty_sir_cleanup(void) int err; if ((err = tty_unregister_ldisc(N_IRDA))) { - IRDA_ERROR("%s(), can't unregister line discipline (err = %d)\n", - __func__, err); + net_err_ratelimited("%s(), can't unregister line discipline (err = %d)\n", + __func__, err); } } diff --git a/drivers/net/irda/litelink-sir.c b/drivers/net/irda/litelink-sir.c index 6827777cbeea..8eefcb44bac3 100644 --- a/drivers/net/irda/litelink-sir.c +++ b/drivers/net/irda/litelink-sir.c @@ -76,8 +76,6 @@ static int litelink_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power up dongle */ sirdev_set_dtr_rts(dev, TRUE, TRUE); @@ -93,8 +91,6 @@ static int litelink_open(struct sir_dev *dev) static int litelink_close(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); @@ -111,8 +107,6 @@ static int litelink_change_speed(struct sir_dev *dev, unsigned speed) { int i; - IRDA_DEBUG(2, "%s()\n", __func__); - /* dongle already reset by irda-thread - current speed (dongle and * port) is the default speed (115200 for litelink!) */ @@ -154,8 +148,6 @@ static int litelink_change_speed(struct sir_dev *dev, unsigned speed) */ static int litelink_reset(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* probably the power-up can be dropped here, but with only * 15 usec delay it's not worth the risk unless somebody with * the hardware confirms it doesn't break anything... 
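[Editor's note] The dongle drivers that follow repeat the two conversions already visible in litelink-sir.c above: IRDA_DEBUG() calls that carry real information become pr_debug() with the numeric verbosity level dropped, and the bare "%s()\n" function-entry traces are deleted outright. The level argument can go away because gating moves from a module-wide integer to the generic kernel mechanism. The sketch below paraphrases the old macro from include/net/irda/irda.h and standard pr_debug() behaviour; it is illustration only, not part of the patch:

/* Old scheme: one run-time verbosity knob for the whole subsystem. */
#ifdef CONFIG_IRDA_DEBUG
#define IRDA_DEBUG(n, args...) \
do { \
	if (irda_debug >= (n)) \
		printk(KERN_DEBUG args); \
} while (0)
#else
#define IRDA_DEBUG(n, args...) do { } while (0)
#endif

/* New scheme: pr_debug() compiles away unless DEBUG is defined for the
 * file, or CONFIG_DYNAMIC_DEBUG is enabled, in which case each call
 * site can be toggled individually at run time, e.g.:
 *   echo 'file ma600-sir.c +p' > /sys/kernel/debug/dynamic_debug/control
 */
pr_debug("%s(), speed=%d (was %d)\n", __func__, speed, dev->speed);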
diff --git a/drivers/net/irda/ma600-sir.c b/drivers/net/irda/ma600-sir.c index a9a81358477b..a764817b47f1 100644 --- a/drivers/net/irda/ma600-sir.c +++ b/drivers/net/irda/ma600-sir.c @@ -65,13 +65,11 @@ static struct dongle_driver ma600 = { static int __init ma600_sir_init(void) { - IRDA_DEBUG(2, "%s()\n", __func__); return irda_register_dongle(&ma600); } static void __exit ma600_sir_cleanup(void) { - IRDA_DEBUG(2, "%s()\n", __func__); irda_unregister_dongle(&ma600); } @@ -86,8 +84,6 @@ static int ma600_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; - IRDA_DEBUG(2, "%s()\n", __func__); - sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Explicitly set the speeds we can accept */ @@ -104,8 +100,6 @@ static int ma600_open(struct sir_dev *dev) static int ma600_close(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); @@ -174,8 +168,8 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed) { u8 byte; - IRDA_DEBUG(2, "%s(), speed=%d (was %d)\n", __func__, - speed, dev->speed); + pr_debug("%s(), speed=%d (was %d)\n", __func__, + speed, dev->speed); /* dongle already reset, dongle and port at default speed (9600) */ @@ -198,13 +192,13 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed) sirdev_raw_read(dev, &byte, sizeof(byte)); if (byte != get_control_byte(speed)) { - IRDA_WARNING("%s(): bad control byte read-back %02x != %02x\n", - __func__, (unsigned) byte, - (unsigned) get_control_byte(speed)); + net_warn_ratelimited("%s(): bad control byte read-back %02x != %02x\n", + __func__, (unsigned)byte, + (unsigned)get_control_byte(speed)); return -1; } else - IRDA_DEBUG(2, "%s() control byte write read OK\n", __func__); + pr_debug("%s() control byte write read OK\n", __func__); #endif /* Set DTR, Set RTS */ @@ -236,8 +230,6 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed) static int ma600_reset(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Reset the dongle : set DTR low for 10 ms */ sirdev_set_dtr_rts(dev, FALSE, TRUE); msleep(10); diff --git a/drivers/net/irda/mcp2120-sir.c b/drivers/net/irda/mcp2120-sir.c index 5e2f4859cee7..2e33f91bfe8f 100644 --- a/drivers/net/irda/mcp2120-sir.c +++ b/drivers/net/irda/mcp2120-sir.c @@ -63,8 +63,6 @@ static int mcp2120_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; - IRDA_DEBUG(2, "%s()\n", __func__); - /* seems no explicit power-on required here and reset switching it on anyway */ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; @@ -76,8 +74,6 @@ static int mcp2120_open(struct sir_dev *dev) static int mcp2120_close(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power off dongle */ /* reset and inhibit mcp2120 */ sirdev_set_dtr_rts(dev, TRUE, TRUE); @@ -102,8 +98,6 @@ static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed) u8 control[2]; static int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__); - switch (state) { case SIRDEV_STATE_DONGLE_SPEED: /* Set DTR to enter command mode */ @@ -155,7 +149,8 @@ static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed) break; default: - IRDA_ERROR("%s(), undefine state %d\n", __func__, state); + net_err_ratelimited("%s(), undefined state %d\n", + __func__, state); ret = -EINVAL; break; } @@ -187,8 +182,6 @@ static int mcp2120_reset(struct sir_dev *dev) unsigned delay = 0; int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__); - switch (state) { case SIRDEV_STATE_DONGLE_RESET: //printk("mcp2120_reset:
dongle_reset\n"); @@ -213,7 +206,8 @@ static int mcp2120_reset(struct sir_dev *dev) break; default: - IRDA_ERROR("%s(), undefined state %d\n", __func__, state); + net_err_ratelimited("%s(), undefined state %d\n", + __func__, state); ret = -EINVAL; break; } diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 16f8ffb50e04..e4d678fbeb2f 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c @@ -197,14 +197,14 @@ error: /* Setup a communication between mcs7780 and agilent chip. */ static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs) { - IRDA_WARNING("This transceiver type is not supported yet.\n"); + net_warn_ratelimited("This transceiver type is not supported yet\n"); return 1; } /* Setup a communication between mcs7780 and sharp chip. */ static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs) { - IRDA_WARNING("This transceiver type is not supported yet.\n"); + net_warn_ratelimited("This transceiver type is not supported yet\n"); return 1; } @@ -213,9 +213,9 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs) { int ret = 0; __u16 rval; - char *msg; + const char *msg; - msg = "Basic transceiver setup error."; + msg = "Basic transceiver setup error"; /* read value of MODE Register, set the DRIVER and RESET bits * and write value back out to MODE Register @@ -261,7 +261,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs) if(unlikely(ret)) goto error; - msg = "transceiver model specific setup error."; + msg = "transceiver model specific setup error"; switch (mcs->transceiver_type) { case MCS_TSC_VISHAY: ret = mcs_setup_transceiver_vishay(mcs); @@ -276,8 +276,8 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs) break; default: - IRDA_WARNING("Unknown transceiver type: %d\n", - mcs->transceiver_type); + net_warn_ratelimited("Unknown transceiver type: %d\n", + mcs->transceiver_type); ret = 1; } if (unlikely(ret)) @@ -300,7 +300,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs) goto error; } - msg = "transceiver reset."; + msg = "transceiver reset"; ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval); if (unlikely(ret != 2)) @@ -315,7 +315,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs) return ret; error: - IRDA_ERROR("%s\n", msg); + net_err_ratelimited("%s\n", msg); return ret; } @@ -399,8 +399,8 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len) new_len = len - 2; if(unlikely(new_len <= 0)) { - IRDA_ERROR("%s short frame length %d\n", - mcs->netdev->name, new_len); + net_err_ratelimited("%s short frame length %d\n", + mcs->netdev->name, new_len); ++mcs->netdev->stats.rx_errors; ++mcs->netdev->stats.rx_length_errors; return; @@ -409,8 +409,8 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len) fcs = irda_calc_crc16(~fcs, buf, len); if(fcs != GOOD_FCS) { - IRDA_ERROR("crc error calc 0x%x len %d\n", - fcs, new_len); + net_err_ratelimited("crc error calc 0x%x len %d\n", + fcs, new_len); mcs->netdev->stats.rx_errors++; mcs->netdev->stats.rx_crc_errors++; return; @@ -452,8 +452,8 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len) new_len = len - 4; if(unlikely(new_len <= 0)) { - IRDA_ERROR("%s short frame length %d\n", - mcs->netdev->name, new_len); + net_err_ratelimited("%s short frame length %d\n", + mcs->netdev->name, new_len); ++mcs->netdev->stats.rx_errors; ++mcs->netdev->stats.rx_length_errors; return; @@ -461,7 +461,8 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len) fcs = ~(crc32_le(~0, buf, 
new_len)); if(fcs != get_unaligned_le32(buf + new_len)) { - IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len); + net_err_ratelimited("crc error calc 0x%x len %d\n", + fcs, new_len); mcs->netdev->stats.rx_errors++; mcs->netdev->stats.rx_crc_errors++; return; @@ -583,7 +584,7 @@ static int mcs_speed_change(struct mcs_cb *mcs) } while(cnt++ < 100 && (rval & MCS_IRINTX)); if (cnt > 100) { - IRDA_ERROR("unable to change speed\n"); + net_err_ratelimited("unable to change speed\n"); ret = -EIO; goto error; } @@ -634,8 +635,8 @@ static int mcs_speed_change(struct mcs_cb *mcs) default: ret = 1; - IRDA_WARNING("Unknown transceiver type: %d\n", - mcs->transceiver_type); + net_warn_ratelimited("Unknown transceiver type: %d\n", + mcs->transceiver_type); } if (unlikely(ret)) goto error; @@ -731,7 +732,7 @@ static int mcs_net_open(struct net_device *netdev) sprintf(hwname, "usb#%d", mcs->usbdev->devnum); mcs->irlap = irlap_open(netdev, &mcs->qos, hwname); if (!mcs->irlap) { - IRDA_ERROR("mcs7780: irlap_open failed\n"); + net_err_ratelimited("mcs7780: irlap_open failed\n"); goto error2; } @@ -851,7 +852,7 @@ static netdev_tx_t mcs_hard_xmit(struct sk_buff *skb, mcs->out_buf, wraplen, mcs_send_irq, mcs); if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) { - IRDA_ERROR("failed tx_urb: %d\n", ret); + net_err_ratelimited("failed tx_urb: %d\n", ret); switch (ret) { case -ENODEV: case -EPIPE: @@ -893,13 +894,13 @@ static int mcs_probe(struct usb_interface *intf, if (!ndev) goto error1; - IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum); + pr_debug("MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum); SET_NETDEV_DEV(ndev, &intf->dev); ret = usb_reset_configuration(udev); if (ret != 0) { - IRDA_ERROR("mcs7780: usb reset configuration failed\n"); + net_err_ratelimited("mcs7780: usb reset configuration failed\n"); goto error2; } @@ -941,8 +942,8 @@ static int mcs_probe(struct usb_interface *intf, if (ret != 0) goto error2; - IRDA_DEBUG(1, "IrDA: Registered MosChip MCS7780 device as %s\n", - ndev->name); + pr_debug("IrDA: Registered MosChip MCS7780 device as %s\n", + ndev->name); mcs->transceiver_type = transceiver_type; mcs->sir_tweak = sir_tweak; @@ -972,7 +973,7 @@ static void mcs_disconnect(struct usb_interface *intf) free_netdev(mcs->netdev); usb_set_intfdata(intf, NULL); - IRDA_DEBUG(0, "MCS7780 now disconnected.\n"); + pr_debug("MCS7780 now disconnected.\n"); } module_usb_driver(mcs_driver); diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index 66bc03bdb138..e7317b104bfb 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -211,7 +211,8 @@ static int __init nsc_ircc_init(void) ret = platform_driver_register(&nsc_ircc_driver); if (ret) { - IRDA_ERROR("%s, Can't register driver!\n", driver_name); + net_err_ratelimited("%s, Can't register driver!\n", + driver_name); return ret; } @@ -225,8 +226,8 @@ static int __init nsc_ircc_init(void) /* Probe for all the NSC chipsets we know about */ for (chip = chips; chip->name ; chip++) { - IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__, - chip->name); + pr_debug("%s(), Probing for %s ...\n", __func__, + chip->name); /* Try all config registers for this chip */ for (cfg = 0; cfg < ARRAY_SIZE(chip->cfg); cfg++) { @@ -237,7 +238,8 @@ static int __init nsc_ircc_init(void) /* Read index register */ reg = inb(cfg_base); if (reg == 0xff) { - IRDA_DEBUG(2, "%s() no chip at 0x%03x\n", __func__, cfg_base); + pr_debug("%s() no chip at 0x%03x\n", + __func__, cfg_base); continue; } @@ -245,8 +247,9 
@@ static int __init nsc_ircc_init(void) outb(chip->cid_index, cfg_base); id = inb(cfg_base+1); if ((id & chip->cid_mask) == chip->cid_value) { - IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n", - __func__, chip->name, id & ~chip->cid_mask); + pr_debug("%s() Found %s chip, revision=%d\n", + __func__, chip->name, + id & ~chip->cid_mask); /* * If we found a correct PnP setting, @@ -260,7 +263,8 @@ static int __init nsc_ircc_init(void) info.irq = pnp_info.irq; if (info.fir_base < 0x2000) { - IRDA_MESSAGE("%s, chip->init\n", driver_name); + net_info_ratelimited("%s, chip->init\n", + driver_name); chip->init(chip, &info); } else chip->probe(chip, &info); @@ -275,7 +279,8 @@ static int __init nsc_ircc_init(void) * the chip. */ if (ret) { - IRDA_DEBUG(2, "%s, PnP init failed\n", driver_name); + pr_debug("%s, PnP init failed\n", + driver_name); memset(&info, 0, sizeof(chipio_t)); info.cfg_base = cfg_base; info.fir_base = io[i]; @@ -297,7 +302,8 @@ static int __init nsc_ircc_init(void) } i++; } else { - IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __func__, id); + pr_debug("%s(), Wrong chip id=0x%02x\n", + __func__, id); } } } @@ -361,31 +367,29 @@ static int __init nsc_ircc_open(chipio_t *info) void *ret; int err, chip_index; - IRDA_DEBUG(2, "%s()\n", __func__); - - for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) { if (!dev_self[chip_index]) break; } if (chip_index == ARRAY_SIZE(dev_self)) { - IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __func__); + net_err_ratelimited("%s(), maximum number of supported chips reached!\n", + __func__); return -ENOMEM; } - IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name, - info->cfg_base); + net_info_ratelimited("%s, Found chip at base=0x%03x\n", + driver_name, info->cfg_base); if ((nsc_ircc_setup(info)) == -1) return -1; - IRDA_MESSAGE("%s, driver loaded (Dag Brattli)\n", driver_name); + net_info_ratelimited("%s, driver loaded (Dag Brattli)\n", driver_name); dev = alloc_irdadev(sizeof(struct nsc_ircc_cb)); if (dev == NULL) { - IRDA_ERROR("%s(), can't allocate memory for " - "control block!\n", __func__); + net_err_ratelimited("%s(), can't allocate memory for control block!\n", + __func__); return -ENOMEM; } @@ -408,8 +412,8 @@ static int __init nsc_ircc_open(chipio_t *info) /* Reserve the ioports that we need */ ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name); if (!ret) { - IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", - __func__, self->io.fir_base); + net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n", + __func__, self->io.fir_base); err = -ENODEV; goto out1; } @@ -460,21 +464,22 @@ static int __init nsc_ircc_open(chipio_t *info) err = register_netdev(dev); if (err) { - IRDA_ERROR("%s(), register_netdev() failed!\n", __func__); + net_err_ratelimited("%s(), register_netdev() failed!\n", + __func__); goto out4; } - IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); + net_info_ratelimited("IrDA: Registered device %s\n", dev->name); /* Check if user has supplied a valid dongle id or not */ if ((dongle_id <= 0) || (dongle_id >= ARRAY_SIZE(dongle_types))) { dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base); - IRDA_MESSAGE("%s, Found dongle: %s\n", driver_name, - dongle_types[dongle_id]); + net_info_ratelimited("%s, Found dongle: %s\n", + driver_name, dongle_types[dongle_id]); } else { - IRDA_MESSAGE("%s, Using dongle: %s\n", driver_name, - dongle_types[dongle_id]); + net_info_ratelimited("%s, Using dongle: %s\n", + driver_name, dongle_types[dongle_id]); } 
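/* [Editor's note] Why the warning/error/info macros in these drivers
 * map to the net_*_ratelimited() helpers instead of plain pr_warn()/
 * pr_err()/pr_info(): many of the call sites sit on interrupt or
 * per-packet error paths, where a misbehaving device could flood the
 * log. The helpers gate each message class on net_ratelimit(); the
 * sketch below paraphrases include/linux/net.h and is illustration
 * only, not part of the patch.
 */
#define net_ratelimited_function(function, ...)			\
do {								\
	if (net_ratelimit())					\
		function(__VA_ARGS__);				\
} while (0)

#define net_warn_ratelimited(fmt, ...)				\
	net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_err_ratelimited(fmt, ...)				\
	net_ratelimited_function(pr_err, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...)				\
	net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)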
self->io.dongle_id = dongle_id; @@ -516,8 +521,6 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self) { int iobase; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); iobase = self->io.fir_base; @@ -528,8 +531,8 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self) unregister_netdev(self->netdev); /* Release the PORT that this driver is using */ - IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", - __func__, self->io.fir_base); + pr_debug("%s(), Releasing Region %03x\n", + __func__, self->io.fir_base); release_region(self->io.fir_base, self->io.fir_ext); if (self->tx_buff.head) @@ -567,7 +570,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info) case 0x2e8: outb(0x15, cfg_base+1); break; case 0x3f8: outb(0x16, cfg_base+1); break; case 0x2f8: outb(0x17, cfg_base+1); break; - default: IRDA_ERROR("%s(), invalid base_address", __func__); + default: net_err_ratelimited("%s(), invalid base_address\n", __func__); } /* Control Signal Routing Register (CSRT) */ @@ -579,7 +582,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info) case 9: temp = 0x05; break; case 11: temp = 0x06; break; case 15: temp = 0x07; break; - default: IRDA_ERROR("%s(), invalid irq", __func__); + default: net_err_ratelimited("%s(), invalid irq\n", __func__); } outb(CFG_108_CSRT, cfg_base); @@ -587,7 +590,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info) case 0: outb(0x08+temp, cfg_base+1); break; case 1: outb(0x10+temp, cfg_base+1); break; case 3: outb(0x18+temp, cfg_base+1); break; - default: IRDA_ERROR("%s(), invalid dma", __func__); + default: net_err_ratelimited("%s(), invalid dma\n", __func__); } outb(CFG_108_MCTL, cfg_base); /* Mode Control Register (MCTL) */ @@ -626,8 +629,8 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info) break; } info->sir_base = info->fir_base; - IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__, - info->fir_base); + pr_debug("%s(), probing fir_base=0x%03x\n", __func__, + info->fir_base); /* Read control signals routing register (CSRT) */ outb(CFG_108_CSRT, cfg_base); @@ -659,7 +662,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info) info->irq = 15; break; } - IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq); + pr_debug("%s(), probing irq=%d\n", __func__, info->irq); /* Currently we only read Rx DMA but it will also be used for Tx */ switch ((reg >> 3) & 0x03) { @@ -676,7 +679,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info) info->dma = 3; break; } - IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma); + pr_debug("%s(), probing dma=%d\n", __func__, info->dma); /* Read mode control register (MCTL) */ outb(CFG_108_MCTL, cfg_base); @@ -727,7 +730,7 @@ static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info) pnp = (reg >> 3) & 0x01; if (pnp) { - IRDA_DEBUG(2, "(), Chip is in PnP mode\n"); + pr_debug("(), Chip is in PnP mode\n"); outb(0x46, cfg_base); reg = (inb(cfg_base+1) & 0xfe) << 2; @@ -831,9 +834,8 @@ static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info) int enabled; /* User is sure about his config... accept it. 
*/ - IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): " - "io=0x%04x, irq=%d, dma=%d\n", - __func__, info->fir_base, info->irq, info->dma); + pr_debug("%s(): nsc_ircc_init_39x (user settings): io=0x%04x, irq=%d, dma=%d\n", + __func__, info->fir_base, info->irq, info->dma); /* Access bank for SP2 */ outb(CFG_39X_LDN, cfg_base); @@ -873,8 +875,8 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info) int reg1, reg2, irq, irqt, dma1, dma2; int enabled, susp; - IRDA_DEBUG(2, "%s(), nsc_ircc_probe_39x, base=%d\n", - __func__, cfg_base); + pr_debug("%s(), nsc_ircc_probe_39x, base=%d\n", + __func__, cfg_base); /* This function should be executed with irq off to avoid * another driver messing with the Super I/O bank - Jean II */ @@ -908,7 +910,8 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info) outb(CFG_39X_SPC, cfg_base); susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1); - IRDA_DEBUG(2, "%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", __func__, reg1,reg2,irq,irqt,dma1,dma2,enabled,susp); + pr_debug("%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", + __func__, reg1, reg2, irq, irqt, dma1, dma2, enabled, susp); /* Configure SP2 */ @@ -959,8 +962,8 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i !(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED)) pnp_info.dma = pnp_dma(dev, 0); - IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n", - __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma); + pr_debug("%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n", + __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma); if((pnp_info.fir_base == 0) || (pnp_info.irq == -1) || (pnp_info.dma == -1)) { @@ -988,13 +991,13 @@ static int nsc_ircc_setup(chipio_t *info) switch_bank(iobase, BANK3); version = inb(iobase+MID); - IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n", - __func__, driver_name, version); + pr_debug("%s() Driver %s Found chip version %02x\n", + __func__, driver_name, version); /* Should be 0x2? 
*/ if (0x20 != (version & 0xf0)) { - IRDA_ERROR("%s, Wrong chip version %02x\n", - driver_name, version); + net_err_ratelimited("%s, Wrong chip version %02x\n", + driver_name, version); return -1; } @@ -1092,39 +1095,39 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id) switch (dongle_id) { case 0x00: /* same as */ case 0x01: /* Differential serial interface */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x02: /* same as */ case 0x03: /* Reserved */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x04: /* Sharp RY5HD01 */ break; case 0x05: /* Reserved, but this is what the Thinkpad reports */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x06: /* Single-ended serial interface */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x07: /* Consumer-IR only */ - IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s is not for IrDA mode\n", + __func__, dongle_types[dongle_id]); break; case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */ - IRDA_DEBUG(0, "%s(), %s\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s\n", + __func__, dongle_types[dongle_id]); break; case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */ outb(0x28, iobase+7); /* Set irsl[0-2] as output */ break; case 0x0A: /* same as */ case 0x0B: /* Reserved */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x0C: /* same as */ case 0x0D: /* HP HSDL-1100/HSDL-2100 */ @@ -1138,15 +1141,15 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id) outb(0x28, iobase+7); /* Set irsl[0-2] as output */ break; case 0x0F: /* No dongle connected */ - IRDA_DEBUG(0, "%s(), %s\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s\n", + __func__, dongle_types[dongle_id]); switch_bank(iobase, BANK0); outb(0x62, iobase+MCR); break; default: - IRDA_DEBUG(0, "%s(), invalid dongle_id %#x", - __func__, dongle_id); + pr_debug("%s(), invalid dongle_id %#x", + __func__, dongle_id); } /* IRCFG1: IRSL1 and 2 are set to IrDA mode */ @@ -1177,31 +1180,31 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id) switch (dongle_id) { case 0x00: /* same as */ case 0x01: /* Differential serial interface */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x02: /* same as */ case 0x03: /* Reserved */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x04: /* Sharp RY5HD01 */ break; case 0x05: /* Reserved */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", 
+ __func__, dongle_types[dongle_id]); break; case 0x06: /* Single-ended serial interface */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x07: /* Consumer-IR only */ - IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s is not for IrDA mode\n", + __func__, dongle_types[dongle_id]); break; case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */ - IRDA_DEBUG(0, "%s(), %s\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s\n", + __func__, dongle_types[dongle_id]); outb(0x00, iobase+4); if (speed > 115200) outb(0x01, iobase+4); @@ -1219,8 +1222,8 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id) break; case 0x0A: /* same as */ case 0x0B: /* Reserved */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x0C: /* same as */ case 0x0D: /* HP HSDL-1100/HSDL-2100 */ @@ -1228,14 +1231,14 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id) case 0x0E: /* Supports SIR Mode only */ break; case 0x0F: /* No dongle connected */ - IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s is not for IrDA mode\n", + __func__, dongle_types[dongle_id]); switch_bank(iobase, BANK0); outb(0x62, iobase+MCR); break; default: - IRDA_DEBUG(0, "%s(), invalid data_rate\n", __func__); + pr_debug("%s(), invalid data_rate\n", __func__); } /* Restore bank register */ outb(bank, iobase+BSR); @@ -1256,7 +1259,7 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed) __u8 bank; __u8 ier; /* Interrupt enable register */ - IRDA_DEBUG(2, "%s(), speed=%d\n", __func__, speed); + pr_debug("%s(), speed=%d\n", __func__, speed); IRDA_ASSERT(self != NULL, return 0;); @@ -1289,20 +1292,20 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed) outb(inb(iobase+4) | 0x04, iobase+4); mcr = MCR_MIR; - IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__); + pr_debug("%s(), handling baud of 576000\n", __func__); break; case 1152000: mcr = MCR_MIR; - IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__); + pr_debug("%s(), handling baud of 1152000\n", __func__); break; case 4000000: mcr = MCR_FIR; - IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__); + pr_debug("%s(), handling baud of 4000000\n", __func__); break; default: mcr = MCR_FIR; - IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", - __func__, speed); + pr_debug("%s(), unknown baud rate of %d\n", + __func__, speed); break; } @@ -1609,15 +1612,13 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size) int actual = 0; __u8 bank; - IRDA_DEBUG(4, "%s()\n", __func__); - /* Save current bank */ bank = inb(iobase+BSR); switch_bank(iobase, BANK0); if (!(inb_p(iobase+LSR) & LSR_TXEMP)) { - IRDA_DEBUG(4, "%s(), warning, FIFO not empty yet!\n", - __func__); + pr_debug("%s(), warning, FIFO not empty yet!\n", + __func__); /* FIFO may still be filled to the Tx interrupt threshold */ fifo_size -= 17; @@ -1629,8 +1630,8 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size) outb(buf[actual++], iobase+TXD); } - IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n", - __func__, fifo_size, actual, len); + pr_debug("%s(), fifo_size %d ; %d sent of 
%d\n", + __func__, fifo_size, actual, len); /* Restore bank */ outb(bank, iobase+BSR); @@ -1651,8 +1652,6 @@ static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self) __u8 bank; int ret = TRUE; - IRDA_DEBUG(2, "%s()\n", __func__); - iobase = self->io.fir_base; /* Save current bank */ @@ -1782,7 +1781,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase) len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8); if (st_fifo->tail >= MAX_RX_WINDOW) { - IRDA_DEBUG(0, "%s(), window is full!\n", __func__); + pr_debug("%s(), window is full!\n", __func__); continue; } @@ -1872,9 +1871,6 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase) skb = dev_alloc_skb(len+1); if (skb == NULL) { - IRDA_WARNING("%s(), memory squeeze, " - "dropping frame.\n", - __func__); self->netdev->stats.rx_dropped++; /* Restore bank register */ @@ -1979,7 +1975,7 @@ static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir) * Need to be after self->io.direction to avoid race with * nsc_ircc_hard_xmit_sir() - Jean II */ if (self->new_speed) { - IRDA_DEBUG(2, "%s(), Changing speed!\n", __func__); + pr_debug("%s(), Changing speed!\n", __func__); self->ier = nsc_ircc_change_speed(self, self->new_speed); self->new_speed = 0; @@ -2063,9 +2059,8 @@ static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase, nsc_ircc_dma_receive(self); self->ier = IER_SFIF_IE; } else - IRDA_WARNING("%s(), potential " - "Tx queue lockup !\n", - __func__); + net_warn_ratelimited("%s(), potential Tx queue lockup !\n", + __func__); } } else { /* Not finished yet, so interrupt on DMA again */ @@ -2174,7 +2169,6 @@ static int nsc_ircc_net_open(struct net_device *dev) char hwname[32]; __u8 bank; - IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); @@ -2184,8 +2178,8 @@ static int nsc_ircc_net_open(struct net_device *dev) iobase = self->io.fir_base; if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) { - IRDA_WARNING("%s, unable to allocate irq=%d\n", - driver_name, self->io.irq); + net_warn_ratelimited("%s, unable to allocate irq=%d\n", + driver_name, self->io.irq); return -EAGAIN; } /* @@ -2193,8 +2187,8 @@ static int nsc_ircc_net_open(struct net_device *dev) * failure. 
*/ if (request_dma(self->io.dma, dev->name)) { - IRDA_WARNING("%s, unable to allocate dma=%d\n", - driver_name, self->io.dma); + net_warn_ratelimited("%s, unable to allocate dma=%d\n", + driver_name, self->io.dma); free_irq(self->io.irq, dev); return -EAGAIN; } @@ -2236,7 +2230,6 @@ static int nsc_ircc_net_close(struct net_device *dev) int iobase; __u8 bank; - IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(dev != NULL, return -1;); @@ -2290,7 +2283,7 @@ static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) IRDA_ASSERT(self != NULL, return -1;); - IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); + pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); switch (cmd) { case SIOCSBANDWIDTH: /* Set bandwidth */ @@ -2329,7 +2322,7 @@ static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state) if (self->io.suspended) return 0; - IRDA_DEBUG(1, "%s, Suspending\n", driver_name); + pr_debug("%s, Suspending\n", driver_name); rtnl_lock(); if (netif_running(self->netdev)) { @@ -2363,7 +2356,7 @@ static int nsc_ircc_resume(struct platform_device *dev) if (!self->io.suspended) return 0; - IRDA_DEBUG(1, "%s, Waking up\n", driver_name); + pr_debug("%s, Waking up\n", driver_name); rtnl_lock(); nsc_ircc_setup(&self->io); @@ -2372,8 +2365,8 @@ static int nsc_ircc_resume(struct platform_device *dev) if (netif_running(self->netdev)) { if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, self->netdev->name, self->netdev)) { - IRDA_WARNING("%s, unable to allocate irq=%d\n", - driver_name, self->io.irq); + net_warn_ratelimited("%s, unable to allocate irq=%d\n", + driver_name, self->io.irq); /* * Don't fail resume process, just kill this diff --git a/drivers/net/irda/old_belkin-sir.c b/drivers/net/irda/old_belkin-sir.c index f237136f3827..a7c2e990ae69 100644 --- a/drivers/net/irda/old_belkin-sir.c +++ b/drivers/net/irda/old_belkin-sir.c @@ -90,8 +90,6 @@ static int old_belkin_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power on dongle */ sirdev_set_dtr_rts(dev, TRUE, TRUE); @@ -108,8 +106,6 @@ static int old_belkin_open(struct sir_dev *dev) static int old_belkin_close(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); @@ -123,8 +119,6 @@ static int old_belkin_close(struct sir_dev *dev) */ static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed) { - IRDA_DEBUG(2, "%s()\n", __func__); - dev->speed = 9600; return (speed==dev->speed) ? 
0 : -EINVAL; } @@ -137,8 +131,6 @@ static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed) */ static int old_belkin_reset(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* This dongles speed "defaults" to 9600 bps ;-) */ dev->speed = 9600; diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index 43e9ab4f4d7e..6af26a7d787c 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c @@ -82,7 +82,7 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev) return 0; default: - IRDA_ERROR("%s - undefined state\n", __func__); + net_err_ratelimited("%s - undefined state\n", __func__); return -EINVAL; } fsm->substate = next_state; @@ -109,11 +109,11 @@ static void sirdev_config_fsm(struct work_struct *work) int ret = -1; unsigned delay; - IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies); + pr_debug("%s(), <%ld>\n", __func__, jiffies); do { - IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n", - __func__, fsm->state, fsm->substate); + pr_debug("%s - state=0x%04x / substate=0x%04x\n", + __func__, fsm->state, fsm->substate); next_state = fsm->state; delay = 0; @@ -251,12 +251,13 @@ static void sirdev_config_fsm(struct work_struct *work) break; default: - IRDA_ERROR("%s - undefined state\n", __func__); + net_err_ratelimited("%s - undefined state\n", __func__); fsm->result = -EINVAL; /* fall thru */ case SIRDEV_STATE_ERROR: - IRDA_ERROR("%s - error: %d\n", __func__, fsm->result); + net_err_ratelimited("%s - error: %d\n", + __func__, fsm->result); #if 0 /* don't enable this before we have netdev->tx_timeout to recover */ netif_stop_queue(dev->netdev); @@ -286,12 +287,12 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par { struct sir_fsm *fsm = &dev->fsm; - IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __func__, - initial_state, param); + pr_debug("%s - state=0x%04x / param=%u\n", __func__, + initial_state, param); if (down_trylock(&fsm->sem)) { if (in_interrupt() || in_atomic() || irqs_disabled()) { - IRDA_DEBUG(1, "%s(), state machine busy!\n", __func__); + pr_debug("%s(), state machine busy!\n", __func__); return -EWOULDBLOCK; } else down(&fsm->sem); @@ -299,7 +300,7 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par if (fsm->state == SIRDEV_STATE_DEAD) { /* race with sirdev_close should never happen */ - IRDA_ERROR("%s(), instance staled!\n", __func__); + net_err_ratelimited("%s(), instance staled!\n", __func__); up(&fsm->sem); return -ESTALE; /* or better EPIPE? 
*/ } @@ -344,7 +345,7 @@ int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type) { int err; - IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __func__, type); + pr_debug("%s : requesting dongle %d.\n", __func__, type); err = sirdev_schedule_dongle_open(dev, type); if (unlikely(err)) @@ -379,7 +380,7 @@ int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len) ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len); if (ret > 0) { - IRDA_DEBUG(3, "%s(), raw-tx started\n", __func__); + pr_debug("%s(), raw-tx started\n", __func__); dev->tx_buff.data += ret; dev->tx_buff.len -= ret; @@ -439,8 +440,8 @@ void sirdev_write_complete(struct sir_dev *dev) spin_lock_irqsave(&dev->tx_lock, flags); - IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n", - __func__, dev->tx_buff.len); + pr_debug("%s() - dev->tx_buff.len = %d\n", + __func__, dev->tx_buff.len); if (likely(dev->tx_buff.len > 0)) { /* Write data left in transmit buffer */ @@ -452,8 +453,8 @@ void sirdev_write_complete(struct sir_dev *dev) } else if (unlikely(actual<0)) { /* could be dropped later when we have tx_timeout to recover */ - IRDA_ERROR("%s: drv->do_write failed (%d)\n", - __func__, actual); + net_err_ratelimited("%s: drv->do_write failed (%d)\n", + __func__, actual); if ((skb=dev->tx_skb) != NULL) { dev->tx_skb = NULL; dev_kfree_skb_any(skb); @@ -474,7 +475,7 @@ void sirdev_write_complete(struct sir_dev *dev) * restarted when the irda-thread has completed the request. */ - IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__); + pr_debug("%s(), raw-tx done\n", __func__); dev->raw_tx = 0; goto done; /* no post-frame handling in raw mode */ } @@ -491,7 +492,7 @@ void sirdev_write_complete(struct sir_dev *dev) * re-activated. */ - IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__); + pr_debug("%s(), finished with frame!\n", __func__); if ((skb=dev->tx_skb) != NULL) { dev->tx_skb = NULL; @@ -501,14 +502,14 @@ void sirdev_write_complete(struct sir_dev *dev) } if (unlikely(dev->new_speed > 0)) { - IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__); + pr_debug("%s(), Changing speed!\n", __func__); err = sirdev_schedule_speed(dev, dev->new_speed); if (unlikely(err)) { /* should never happen * forget the speed change and hope the stack recovers */ - IRDA_ERROR("%s - schedule speed change failed: %d\n", - __func__, err); + net_err_ratelimited("%s - schedule speed change failed: %d\n", + __func__, err); netif_wake_queue(dev->netdev); } /* else: success @@ -535,13 +536,13 @@ EXPORT_SYMBOL(sirdev_write_complete); int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count) { if (!dev || !dev->netdev) { - IRDA_WARNING("%s(), not ready yet!\n", __func__); + net_warn_ratelimited("%s(), not ready yet!\n", __func__); return -1; } if (!dev->irlap) { - IRDA_WARNING("%s - too early: %p / %zd!\n", - __func__, cp, count); + net_warn_ratelimited("%s - too early: %p / %zd!\n", + __func__, cp, count); return -1; } @@ -551,7 +552,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count) */ irda_device_set_media_busy(dev->netdev, TRUE); dev->netdev->stats.rx_dropped++; - IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count); + pr_debug("%s; rx-drop: %zd\n", __func__, count); return 0; } @@ -597,7 +598,7 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb, netif_stop_queue(ndev); - IRDA_DEBUG(3, "%s(), skb->len = %d\n", __func__, skb->len); + pr_debug("%s(), skb->len = %d\n", __func__, skb->len); speed = irda_get_next_speed(skb); if ((speed != dev->speed) && (speed != -1)) { @@ -634,7 +635,7 
@@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb, /* Check problems */ if(spin_is_locked(&dev->tx_lock)) { - IRDA_DEBUG(3, "%s(), write not completed\n", __func__); + pr_debug("%s(), write not completed\n", __func__); } /* serialize with write completion */ @@ -661,8 +662,8 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb, } else if (unlikely(actual < 0)) { /* could be dropped later when we have tx_timeout to recover */ - IRDA_ERROR("%s: drv->do_write failed (%d)\n", - __func__, actual); + net_err_ratelimited("%s: drv->do_write failed (%d)\n", + __func__, actual); dev_kfree_skb_any(skb); dev->netdev->stats.tx_errors++; dev->netdev->stats.tx_dropped++; @@ -683,7 +684,7 @@ static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) IRDA_ASSERT(dev != NULL, return -1;); - IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd); + pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd); switch (cmd) { case SIOCSBANDWIDTH: /* Set bandwidth */ @@ -800,8 +801,6 @@ static int sirdev_open(struct net_device *ndev) if (!try_module_get(drv->owner)) return -ESTALE; - IRDA_DEBUG(2, "%s()\n", __func__); - if (sirdev_alloc_buffers(dev)) goto errout_dec; @@ -818,7 +817,7 @@ static int sirdev_open(struct net_device *ndev) netif_wake_queue(ndev); - IRDA_DEBUG(2, "%s - done, speed = %d\n", __func__, dev->speed); + pr_debug("%s - done, speed = %d\n", __func__, dev->speed); return 0; @@ -838,7 +837,7 @@ static int sirdev_close(struct net_device *ndev) struct sir_dev *dev = netdev_priv(ndev); const struct sir_driver *drv; -// IRDA_DEBUG(0, "%s\n", __func__); +/* pr_debug("%s\n", __func__); */ netif_stop_queue(ndev); @@ -880,7 +879,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n struct net_device *ndev; struct sir_dev *dev; - IRDA_DEBUG(0, "%s - %s\n", __func__, name); + pr_debug("%s - %s\n", __func__, name); /* instead of adding tests to protect against drv->do_write==NULL * at several places we refuse to create a sir_dev instance for @@ -894,7 +893,8 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n */ ndev = alloc_irdadev(sizeof(*dev)); if (ndev == NULL) { - IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__); + net_err_ratelimited("%s - Can't allocate memory for IrDA control block!\n", + __func__); goto out; } dev = netdev_priv(ndev); @@ -919,7 +919,8 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n ndev->netdev_ops = &sirdev_ops; if (register_netdev(ndev)) { - IRDA_ERROR("%s(), register_netdev() failed!\n", __func__); + net_err_ratelimited("%s(), register_netdev() failed!\n", + __func__); goto out_freenetdev; } @@ -936,7 +937,7 @@ int sirdev_put_instance(struct sir_dev *dev) { int err = 0; - IRDA_DEBUG(0, "%s\n", __func__); + pr_debug("%s\n", __func__); atomic_set(&dev->enable_rx, 0); @@ -946,7 +947,7 @@ int sirdev_put_instance(struct sir_dev *dev) if (dev->dongle_drv) err = sirdev_schedule_dongle_close(dev); if (err) - IRDA_ERROR("%s - error %d\n", __func__, err); + net_err_ratelimited("%s - error %d\n", __func__, err); sirdev_close(dev->netdev); diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c index cfbabb63f5cc..7436f73ff1bb 100644 --- a/drivers/net/irda/sir_dongle.c +++ b/drivers/net/irda/sir_dongle.c @@ -34,8 +34,8 @@ int irda_register_dongle(struct dongle_driver *new) struct list_head *entry; struct dongle_driver *drv; - IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n", - __func__, 
new->driver_name, new->type); + pr_debug("%s : registering dongle \"%s\" (%d).\n", + __func__, new->driver_name, new->type); mutex_lock(&dongle_list_lock); list_for_each(entry, &dongle_list) { diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 282120430f12..b455ffe8850c 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -419,13 +419,16 @@ static int __init smsc_ircc_legacy_probe(void) #ifdef CONFIG_PCI if (smsc_ircc_preconfigure_subsystems(ircc_cfg, ircc_fir, ircc_sir, ircc_dma, ircc_irq) < 0) { /* Ignore errors from preconfiguration */ - IRDA_ERROR("%s, Preconfiguration failed !\n", driver_name); + net_err_ratelimited("%s, Preconfiguration failed !\n", + driver_name); } #endif if (ircc_fir > 0 && ircc_sir > 0) { - IRDA_MESSAGE(" Overriding FIR address 0x%04x\n", ircc_fir); - IRDA_MESSAGE(" Overriding SIR address 0x%04x\n", ircc_sir); + net_info_ratelimited(" Overriding FIR address 0x%04x\n", + ircc_fir); + net_info_ratelimited(" Overriding SIR address 0x%04x\n", + ircc_sir); if (smsc_ircc_open(ircc_fir, ircc_sir, ircc_dma, ircc_irq)) ret = -ENODEV; @@ -434,8 +437,8 @@ static int __init smsc_ircc_legacy_probe(void) /* try user provided configuration register base address */ if (ircc_cfg > 0) { - IRDA_MESSAGE(" Overriding configuration address " - "0x%04x\n", ircc_cfg); + net_info_ratelimited(" Overriding configuration address 0x%04x\n", + ircc_cfg); if (!smsc_superio_fdc(ircc_cfg)) ret = 0; if (!smsc_superio_lpc(ircc_cfg)) @@ -458,11 +461,12 @@ static int __init smsc_ircc_init(void) { int ret; - IRDA_DEBUG(1, "%s\n", __func__); + pr_debug("%s\n", __func__); ret = platform_driver_register(&smsc_ircc_driver); if (ret) { - IRDA_ERROR("%s, Can't register driver!\n", driver_name); + net_err_ratelimited("%s, Can't register driver!\n", + driver_name); return ret; } @@ -519,7 +523,7 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, struct net_device *dev; int err; - IRDA_DEBUG(1, "%s\n", __func__); + pr_debug("%s\n", __func__); err = smsc_ircc_present(fir_base, sir_base); if (err) @@ -527,7 +531,7 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, err = -ENOMEM; if (dev_count >= ARRAY_SIZE(dev_self)) { - IRDA_WARNING("%s(), too many devices!\n", __func__); + net_warn_ratelimited("%s(), too many devices!\n", __func__); goto err_out1; } @@ -536,7 +540,8 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, */ dev = alloc_irdadev(sizeof(struct smsc_ircc_cb)); if (!dev) { - IRDA_WARNING("%s() can't allocate net device\n", __func__); + net_warn_ratelimited("%s() can't allocate net device\n", + __func__); goto err_out1; } @@ -588,8 +593,8 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, err = register_netdev(self->netdev); if (err) { - IRDA_ERROR("%s, Network device registration failed!\n", - driver_name); + net_err_ratelimited("%s, Network device registration failed!\n", + driver_name); goto err_out4; } @@ -601,7 +606,7 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, } platform_set_drvdata(self->pldev, self); - IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); + net_info_ratelimited("IrDA: Registered device %s\n", dev->name); dev_count++; return 0; @@ -637,15 +642,15 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base) if (!request_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT, driver_name)) { - IRDA_WARNING("%s: can't get fir_base of 0x%03x\n", - 
__func__, fir_base); + net_warn_ratelimited("%s: can't get fir_base of 0x%03x\n", + __func__, fir_base); goto out1; } if (!request_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT, driver_name)) { - IRDA_WARNING("%s: can't get sir_base of 0x%03x\n", - __func__, sir_base); + net_warn_ratelimited("%s: can't get sir_base of 0x%03x\n", + __func__, sir_base); goto out2; } @@ -660,13 +665,13 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base) irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4; if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) { - IRDA_WARNING("%s(), addr 0x%04x - no device found!\n", - __func__, fir_base); + net_warn_ratelimited("%s(), addr 0x%04x - no device found!\n", + __func__, fir_base); goto out3; } - IRDA_MESSAGE("SMsC IrDA Controller found\n IrCC version %d.%d, " - "firport 0x%03x, sirport 0x%03x dma=%d, irq=%d\n", - chip & 0x0f, version, fir_base, sir_base, dma, irq); + net_info_ratelimited("SMsC IrDA Controller found\n IrCC version %d.%d, firport 0x%03x, sirport 0x%03x dma=%d, irq=%d\n", + chip & 0x0f, version, + fir_base, sir_base, dma, irq); return 0; @@ -704,16 +709,16 @@ static void smsc_ircc_setup_io(struct smsc_ircc_cb *self, if (irq != IRQ_INVAL) { if (irq != chip_irq) - IRDA_MESSAGE("%s, Overriding IRQ - chip says %d, using %d\n", - driver_name, chip_irq, irq); + net_info_ratelimited("%s, Overriding IRQ - chip says %d, using %d\n", + driver_name, chip_irq, irq); self->io.irq = irq; } else self->io.irq = chip_irq; if (dma != DMA_INVAL) { if (dma != chip_dma) - IRDA_MESSAGE("%s, Overriding DMA - chip says %d, using %d\n", - driver_name, chip_dma, dma); + net_info_ratelimited("%s, Overriding DMA - chip says %d, using %d\n", + driver_name, chip_dma, dma); self->io.dma = dma; } else self->io.dma = chip_dma; @@ -798,7 +803,7 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd IRDA_ASSERT(self != NULL, return -1;); - IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); + pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); switch (cmd) { case SIOCSBANDWIDTH: /* Set bandwidth */ @@ -852,8 +857,8 @@ static void smsc_ircc_timeout(struct net_device *dev) struct smsc_ircc_cb *self = netdev_priv(dev); unsigned long flags; - IRDA_WARNING("%s: transmit timed out, changing speed to: %d\n", - dev->name, self->io.speed); + net_warn_ratelimited("%s: transmit timed out, changing speed to: %d\n", + dev->name, self->io.speed); spin_lock_irqsave(&self->lock, flags); smsc_ircc_sir_start(self); smsc_ircc_change_speed(self, self->io.speed); @@ -877,7 +882,7 @@ static netdev_tx_t smsc_ircc_hard_xmit_sir(struct sk_buff *skb, unsigned long flags; s32 speed; - IRDA_DEBUG(1, "%s\n", __func__); + pr_debug("%s\n", __func__); IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;); @@ -952,21 +957,21 @@ static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed) ir_mode = IRCC_CFGA_IRDA_HDLC; ctrl = IRCC_CRC; fast = 0; - IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__); + pr_debug("%s(), handling baud of 576000\n", __func__); break; case 1152000: ir_mode = IRCC_CFGA_IRDA_HDLC; ctrl = IRCC_1152 | IRCC_CRC; fast = IRCC_LCR_A_FAST | IRCC_LCR_A_GP_DATA; - IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", - __func__); + pr_debug("%s(), handling baud of 1152000\n", + __func__); break; case 4000000: ir_mode = IRCC_CFGA_IRDA_4PPM; ctrl = IRCC_CRC; fast = IRCC_LCR_A_FAST; - IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", - __func__); + pr_debug("%s(), handling baud of 4000000\n", + __func__); 
break; } #if 0 @@ -994,7 +999,7 @@ static void smsc_ircc_fir_start(struct smsc_ircc_cb *self) struct net_device *dev; int fir_base; - IRDA_DEBUG(1, "%s\n", __func__); + pr_debug("%s\n", __func__); IRDA_ASSERT(self != NULL, return;); dev = self->netdev; @@ -1039,7 +1044,7 @@ static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self) { int fir_base; - IRDA_DEBUG(1, "%s\n", __func__); + pr_debug("%s\n", __func__); IRDA_ASSERT(self != NULL, return;); @@ -1063,7 +1068,7 @@ static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed) struct net_device *dev; int last_speed_was_sir; - IRDA_DEBUG(0, "%s() changing speed to: %d\n", __func__, speed); + pr_debug("%s() changing speed to: %d\n", __func__, speed); IRDA_ASSERT(self != NULL, return;); dev = self->netdev; @@ -1131,7 +1136,7 @@ static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed) int lcr; /* Line control reg */ int divisor; - IRDA_DEBUG(0, "%s(), Setting speed to: %d\n", __func__, speed); + pr_debug("%s(), Setting speed to: %d\n", __func__, speed); IRDA_ASSERT(self != NULL, return;); iobase = self->io.sir_base; @@ -1166,7 +1171,7 @@ static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed) /* Turn on interrupts */ outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER); - IRDA_DEBUG(2, "%s() speed changed to: %d\n", __func__, speed); + pr_debug("%s() speed changed to: %d\n", __func__, speed); } @@ -1250,7 +1255,7 @@ static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs) int iobase = self->io.fir_base; u8 ctrl; - IRDA_DEBUG(3, "%s\n", __func__); + pr_debug("%s\n", __func__); #if 1 /* Disable Rx */ register_bank(iobase, 0); @@ -1304,7 +1309,7 @@ static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self) { int iobase = self->io.fir_base; - IRDA_DEBUG(3, "%s\n", __func__); + pr_debug("%s\n", __func__); #if 0 /* Disable Tx */ register_bank(iobase, 0); @@ -1408,7 +1413,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self) register_bank(iobase, 0); - IRDA_DEBUG(3, "%s\n", __func__); + pr_debug("%s\n", __func__); #if 0 /* Disable Rx */ register_bank(iobase, 0); @@ -1419,8 +1424,8 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self) lsr= inb(iobase + IRCC_LSR); msgcnt = inb(iobase + IRCC_LCR_B) & 0x08; - IRDA_DEBUG(2, "%s: dma count = %d\n", __func__, - get_dma_residue(self->io.dma)); + pr_debug("%s: dma count = %d\n", __func__, + get_dma_residue(self->io.dma)); len = self->rx_buff.truesize - get_dma_residue(self->io.dma); @@ -1442,17 +1447,15 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self) len -= self->io.speed < 4000000 ? 
2 : 4; if (len < 2 || len > 2050) { - IRDA_WARNING("%s(), bogus len=%d\n", __func__, len); + net_warn_ratelimited("%s(), bogus len=%d\n", __func__, len); return; } - IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __func__, msgcnt, len); + pr_debug("%s: msgcnt = %d, len=%d\n", __func__, msgcnt, len); skb = dev_alloc_skb(len + 1); - if (!skb) { - IRDA_WARNING("%s(), memory squeeze, dropping frame.\n", - __func__); + if (!skb) return; - } + /* Make sure IP header gets aligned */ skb_reserve(skb, 1); @@ -1491,7 +1494,7 @@ static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self) /* Make sure we don't stay here too long */ if (boguscount++ > 32) { - IRDA_DEBUG(2, "%s(), breaking!\n", __func__); + pr_debug("%s(), breaking!\n", __func__); break; } } while (inb(iobase + UART_LSR) & UART_LSR_DR); @@ -1533,7 +1536,7 @@ static irqreturn_t smsc_ircc_interrupt(int dummy, void *dev_id) lcra = inb(iobase + IRCC_LCR_A); lsr = inb(iobase + IRCC_LSR); - IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __func__, iir); + pr_debug("%s(), iir = 0x%02x\n", __func__, iir); if (iir & IRCC_IIR_EOM) { if (self->io.direction == IO_RECV) @@ -1583,12 +1586,12 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev) /* Clear interrupt */ lsr = inb(iobase + UART_LSR); - IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", - __func__, iir, lsr, iobase); + pr_debug("%s(), iir=%02x, lsr=%02x, iobase=%#x\n", + __func__, iir, lsr, iobase); switch (iir) { case UART_IIR_RLSI: - IRDA_DEBUG(2, "%s(), RLSI\n", __func__); + pr_debug("%s(), RLSI\n", __func__); break; case UART_IIR_RDI: /* Receive interrupt */ @@ -1600,8 +1603,8 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev) smsc_ircc_sir_write_wakeup(self); break; default: - IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", - __func__, iir); + pr_debug("%s(), unhandled IIR=%#x\n", + __func__, iir); break; } @@ -1628,12 +1631,12 @@ static int ircc_is_receiving(struct smsc_ircc_cb *self) int status = FALSE; /* int iobase; */ - IRDA_DEBUG(1, "%s\n", __func__); + pr_debug("%s\n", __func__); IRDA_ASSERT(self != NULL, return FALSE;); - IRDA_DEBUG(0, "%s: dma count = %d\n", __func__, - get_dma_residue(self->io.dma)); + pr_debug("%s: dma count = %d\n", __func__, + get_dma_residue(self->io.dma)); status = (self->rx_buff.state != OUTSIDE_FRAME); @@ -1648,8 +1651,8 @@ static int smsc_ircc_request_irq(struct smsc_ircc_cb *self) error = request_irq(self->io.irq, smsc_ircc_interrupt, 0, self->netdev->name, self->netdev); if (error) - IRDA_DEBUG(0, "%s(), unable to allocate irq=%d, err=%d\n", - __func__, self->io.irq, error); + pr_debug("%s(), unable to allocate irq=%d, err=%d\n", + __func__, self->io.irq, error); return error; } @@ -1693,21 +1696,21 @@ static int smsc_ircc_net_open(struct net_device *dev) struct smsc_ircc_cb *self; char hwname[16]; - IRDA_DEBUG(1, "%s\n", __func__); + pr_debug("%s\n", __func__); IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); IRDA_ASSERT(self != NULL, return 0;); if (self->io.suspended) { - IRDA_DEBUG(0, "%s(), device is suspended\n", __func__); + pr_debug("%s(), device is suspended\n", __func__); return -EAGAIN; } if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name, (void *) dev)) { - IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n", - __func__, self->io.irq); + pr_debug("%s(), unable to allocate irq=%d\n", + __func__, self->io.irq); return -EAGAIN; } @@ -1730,8 +1733,8 @@ static int smsc_ircc_net_open(struct net_device *dev) if (request_dma(self->io.dma, dev->name)) { smsc_ircc_net_close(dev); - IRDA_WARNING("%s(), 
unable to allocate DMA=%d\n", - __func__, self->io.dma); + net_warn_ratelimited("%s(), unable to allocate DMA=%d\n", + __func__, self->io.dma); return -EAGAIN; } @@ -1750,7 +1753,7 @@ static int smsc_ircc_net_close(struct net_device *dev) { struct smsc_ircc_cb *self; - IRDA_DEBUG(1, "%s\n", __func__); + pr_debug("%s\n", __func__); IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); @@ -1781,7 +1784,7 @@ static int smsc_ircc_suspend(struct platform_device *dev, pm_message_t state) struct smsc_ircc_cb *self = platform_get_drvdata(dev); if (!self->io.suspended) { - IRDA_DEBUG(1, "%s, Suspending\n", driver_name); + pr_debug("%s, Suspending\n", driver_name); rtnl_lock(); if (netif_running(self->netdev)) { @@ -1802,7 +1805,7 @@ static int smsc_ircc_resume(struct platform_device *dev) struct smsc_ircc_cb *self = platform_get_drvdata(dev); if (self->io.suspended) { - IRDA_DEBUG(1, "%s, Waking up\n", driver_name); + pr_debug("%s, Waking up\n", driver_name); rtnl_lock(); smsc_ircc_init_chip(self); @@ -1833,7 +1836,7 @@ static int smsc_ircc_resume(struct platform_device *dev) */ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self) { - IRDA_DEBUG(1, "%s\n", __func__); + pr_debug("%s\n", __func__); IRDA_ASSERT(self != NULL, return -1;); @@ -1845,13 +1848,13 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self) smsc_ircc_stop_interrupts(self); /* Release the PORTS that this driver is using */ - IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __func__, - self->io.fir_base); + pr_debug("%s(), releasing 0x%03x\n", __func__, + self->io.fir_base); release_region(self->io.fir_base, self->io.fir_ext); - IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __func__, - self->io.sir_base); + pr_debug("%s(), releasing 0x%03x\n", __func__, + self->io.sir_base); release_region(self->io.sir_base, self->io.sir_ext); @@ -1872,7 +1875,7 @@ static void __exit smsc_ircc_cleanup(void) { int i; - IRDA_DEBUG(1, "%s\n", __func__); + pr_debug("%s\n", __func__); for (i = 0; i < 2; i++) { if (dev_self[i]) @@ -1896,7 +1899,7 @@ static void smsc_ircc_sir_start(struct smsc_ircc_cb *self) struct net_device *dev; int fir_base, sir_base; - IRDA_DEBUG(3, "%s\n", __func__); + pr_debug("%s\n", __func__); IRDA_ASSERT(self != NULL, return;); dev = self->netdev; @@ -1922,7 +1925,7 @@ static void smsc_ircc_sir_start(struct smsc_ircc_cb *self) /* Turn on interrupts */ outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base + UART_IER); - IRDA_DEBUG(3, "%s() - exit\n", __func__); + pr_debug("%s() - exit\n", __func__); outb(0x00, fir_base + IRCC_MASTER); } @@ -1932,7 +1935,7 @@ void smsc_ircc_sir_stop(struct smsc_ircc_cb *self) { int iobase; - IRDA_DEBUG(3, "%s\n", __func__); + pr_debug("%s\n", __func__); iobase = self->io.sir_base; /* Reset UART */ @@ -1958,7 +1961,7 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self) IRDA_ASSERT(self != NULL, return;); - IRDA_DEBUG(4, "%s\n", __func__); + pr_debug("%s\n", __func__); iobase = self->io.sir_base; @@ -1979,8 +1982,8 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self) * if we need to change the speed of the hardware */ if (self->new_speed) { - IRDA_DEBUG(5, "%s(), Changing speed to %d.\n", - __func__, self->new_speed); + pr_debug("%s(), Changing speed to %d.\n", - __func__, self->new_speed); smsc_ircc_sir_wait_hw_transmitter_finish(self); smsc_ircc_change_speed(self, self->new_speed); self->new_speed = 0; @@ -2019,7 +2022,8 @@ static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len) /* Tx FIFO should be empty! 
*/ if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) { - IRDA_WARNING("%s(), failed, fifo not empty!\n", __func__); + net_warn_ratelimited("%s(), failed, fifo not empty!\n", + __func__); return 0; } @@ -2058,14 +2062,14 @@ static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self) for (i = 0; smsc_transceivers[i].name != NULL; i++) if (smsc_transceivers[i].probe(self->io.fir_base)) { - IRDA_MESSAGE(" %s transceiver found\n", - smsc_transceivers[i].name); + net_info_ratelimited(" %s transceiver found\n", + smsc_transceivers[i].name); self->transceiver= i + 1; return; } - IRDA_MESSAGE("No transceiver found. Defaulting to %s\n", - smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name); + net_info_ratelimited("No transceiver found. Defaulting to %s\n", + smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name); self->transceiver = SMSC_IRCC2_C_DEFAULT_TRANSCEIVER; } @@ -2119,7 +2123,7 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self) udelay(1); if (count < 0) - IRDA_DEBUG(0, "%s(): stuck transmitter\n", __func__); + pr_debug("%s(): stuck transmitter\n", __func__); } @@ -2180,7 +2184,7 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor u8 mode, dma, irq; int ret = -ENODEV; - IRDA_DEBUG(1, "%s\n", __func__); + pr_debug("%s\n", __func__); if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type) == NULL) return ret; @@ -2191,7 +2195,7 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __func__, mode);*/ if (!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA)) - IRDA_WARNING("%s(): IrDA not enabled\n", __func__); + net_warn_ratelimited("%s(): IrDA not enabled\n", __func__); outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase); sirbase = inb(cfgbase + 1) << 2; @@ -2208,7 +2212,8 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase); irq = inb(cfgbase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK; - IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __func__, firbase, sirbase, dma, irq, mode); + net_info_ratelimited("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", + __func__, firbase, sirbase, dma, irq, mode); if (firbase && smsc_ircc_open(firbase, sirbase, dma, irq) == 0) ret = 0; @@ -2230,7 +2235,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho unsigned short fir_io, sir_io; int ret = -ENODEV; - IRDA_DEBUG(1, "%s\n", __func__); + pr_debug("%s\n", __func__); if (smsc_ircc_probe(cfg_base, 0x20, chips, type) == NULL) return ret; @@ -2264,7 +2269,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho static int __init smsc_access(unsigned short cfg_base, unsigned char reg) { - IRDA_DEBUG(1, "%s\n", __func__); + pr_debug("%s\n", __func__); outb(reg, cfg_base); return inb(cfg_base) != reg ? 
-1 : 0; @@ -2274,7 +2279,7 @@ static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, { u8 devid, xdevid, rev; - IRDA_DEBUG(1, "%s\n", __func__); + pr_debug("%s\n", __func__); /* Leave configuration */ @@ -2329,16 +2334,16 @@ static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, return NULL; } - IRDA_MESSAGE("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n", - devid, rev, cfg_base, type, chip->name); + net_info_ratelimited("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n", + devid, rev, cfg_base, type, chip->name); if (chip->rev > rev) { - IRDA_MESSAGE("Revision higher than expected\n"); + net_info_ratelimited("Revision higher than expected\n"); return NULL; } if (chip->flags & NoIRDA) - IRDA_MESSAGE("chipset does not support IRDA\n"); + net_info_ratelimited("chipset does not support IRDA\n"); return chip; } @@ -2348,8 +2353,8 @@ static int __init smsc_superio_fdc(unsigned short cfg_base) int ret = -1; if (!request_region(cfg_base, 2, driver_name)) { - IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n", - __func__, cfg_base); + net_warn_ratelimited("%s: can't get cfg_base of 0x%03x\n", + __func__, cfg_base); } else { if (!smsc_superio_flat(fdc_chips_flat, cfg_base, "FDC") || !smsc_superio_paged(fdc_chips_paged, cfg_base, "FDC")) @@ -2366,8 +2371,8 @@ static int __init smsc_superio_lpc(unsigned short cfg_base) int ret = -1; if (!request_region(cfg_base, 2, driver_name)) { - IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n", - __func__, cfg_base); + net_warn_ratelimited("%s: can't get cfg_base of 0x%03x\n", + __func__, cfg_base); } else { if (!smsc_superio_flat(lpc_chips_flat, cfg_base, "LPC") || !smsc_superio_paged(lpc_chips_paged, cfg_base, "LPC")) @@ -2529,9 +2534,8 @@ static int __init preconfigure_smsc_chip(struct outb(LPC47N227_CFGACCESSKEY, iobase); // enter configuration state outb(SMSCSIOFLAT_DEVICEID_REG, iobase); // set for device ID tmpbyte = inb(iobase +1); // Read device ID - IRDA_DEBUG(0, - "Detected Chip id: 0x%02x, setting up registers...\n", - tmpbyte); + pr_debug("Detected Chip id: 0x%02x, setting up registers...\n", + tmpbyte); /* Disable UART1 and set up SIR I/O port */ outb(0x24, iobase); // select CR24 - UART1 base addr @@ -2540,8 +2544,8 @@ static int __init preconfigure_smsc_chip(struct outb( (conf->sir_io >> 2), iobase + 1); // bits 2-9 of 0x3f8 tmpbyte = inb(iobase + 1); if (tmpbyte != (conf->sir_io >> 2) ) { - IRDA_WARNING("ERROR: could not configure SIR ioport.\n"); - IRDA_WARNING("Try to supply ircc_cfg argument.\n"); + net_warn_ratelimited("ERROR: could not configure SIR ioport\n"); + net_warn_ratelimited("Try to supply ircc_cfg argument\n"); return -ENXIO; } @@ -2553,7 +2557,7 @@ static int __init preconfigure_smsc_chip(struct outb(tmpbyte, iobase + 1); tmpbyte = inb(iobase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK; if (tmpbyte != conf->fir_irq) { - IRDA_WARNING("ERROR: could not configure FIR IRQ channel.\n"); + net_warn_ratelimited("ERROR: could not configure FIR IRQ channel\n"); return -ENXIO; } @@ -2562,7 +2566,7 @@ static int __init preconfigure_smsc_chip(struct outb((conf->fir_io >> 3), iobase + 1); tmpbyte = inb(iobase + 1); if (tmpbyte != (conf->fir_io >> 3) ) { - IRDA_WARNING("ERROR: could not configure FIR I/O port.\n"); + net_warn_ratelimited("ERROR: could not configure FIR I/O port\n"); return -ENXIO; } @@ -2571,7 +2575,7 @@ static int __init preconfigure_smsc_chip(struct outb((conf->fir_dma & LPC47N227_FIRDMASELECT_MASK), iobase + 1); // DMA tmpbyte = inb(iobase + 
1) & LPC47N227_FIRDMASELECT_MASK; if (tmpbyte != (conf->fir_dma & LPC47N227_FIRDMASELECT_MASK)) { - IRDA_WARNING("ERROR: could not configure FIR DMA channel.\n"); + net_warn_ratelimited("ERROR: could not configure FIR DMA channel\n"); return -ENXIO; } @@ -2628,7 +2632,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev, unsigned short tmpword; unsigned char tmpbyte; - IRDA_MESSAGE("Setting up Intel 82801 controller and SMSC device\n"); + net_info_ratelimited("Setting up Intel 82801 controller and SMSC device\n"); /* * Select the range for the COMA COM port (SIR) * Register COM_DEC: @@ -2677,7 +2681,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev, default: tmpbyte |= 0x01; /* COM2 default */ } - IRDA_DEBUG(1, "COM_DEC (write): 0x%02x\n", tmpbyte); + pr_debug("COM_DEC (write): 0x%02x\n", tmpbyte); pci_write_config_byte(dev, COM_DEC, tmpbyte); /* Enable Low Pin Count interface */ @@ -2699,13 +2703,13 @@ static int __init preconfigure_through_82801(struct pci_dev *dev, tmpword |= 0x0400; break; default: - IRDA_WARNING("Uncommon I/O base address: 0x%04x\n", - conf->cfg_base); + net_warn_ratelimited("Uncommon I/O base address: 0x%04x\n", + conf->cfg_base); break; } tmpword &= 0xfffd; /* disable LPC COMB */ tmpword |= 0x0001; /* set bit 0 : enable LPC COMA addr range (GEN2) */ - IRDA_DEBUG(1, "LPC_EN (write): 0x%04x\n", tmpword); + pr_debug("LPC_EN (write): 0x%04x\n", tmpword); pci_write_config_word(dev, LPC_EN, tmpword); /* @@ -2750,7 +2754,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev, default: break; /* do not change settings */ } - IRDA_DEBUG(1, "PCI_DMA_C (write): 0x%04x\n", tmpword); + pr_debug("PCI_DMA_C (write): 0x%04x\n", tmpword); pci_write_config_word(dev, PCI_DMA_C, tmpword); /* @@ -2761,7 +2765,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev, */ tmpword = conf->fir_io & 0xfff8; tmpword |= 0x0001; - IRDA_DEBUG(1, "GEN2_DEC (write): 0x%04x\n", tmpword); + pr_debug("GEN2_DEC (write): 0x%04x\n", tmpword); pci_write_config_word(dev, GEN2_DEC, tmpword); /* Pre-configure chip */ @@ -2800,7 +2804,8 @@ static void __init preconfigure_ali_port(struct pci_dev *dev, mask = 0x08; break; default: - IRDA_ERROR("Failed to configure unsupported port on ALi 1533 bridge: 0x%04x\n", port); + net_err_ratelimited("Failed to configure unsupported port on ALi 1533 bridge: 0x%04x\n", + port); return; } @@ -2808,7 +2813,8 @@ static void __init preconfigure_ali_port(struct pci_dev *dev, /* Turn on the right bits */ tmpbyte |= mask; pci_write_config_byte(dev, reg, tmpbyte); - IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port); + net_info_ratelimited("Activated ALi 1533 ISA bridge port 0x%04x\n", + port); } static int __init preconfigure_through_ali(struct pci_dev *dev, @@ -2877,7 +2883,8 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, if (ircc_irq != IRQ_INVAL) tmpconf.fir_irq = ircc_irq; - IRDA_MESSAGE("Detected unconfigured %s SMSC IrDA chip, pre-configuring device.\n", conf->name); + net_info_ratelimited("Detected unconfigured %s SMSC IrDA chip, pre-configuring device\n", + conf->name); if (conf->preconfigure) ret = conf->preconfigure(dev, &tmpconf); else @@ -2922,8 +2929,8 @@ static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed) /* empty */; if (val) - IRDA_WARNING("%s(): ATC: 0x%02x\n", __func__, - inb(fir_base + IRCC_ATC)); + net_warn_ratelimited("%s(): ATC: 0x%02x\n", + __func__, inb(fir_base + IRCC_ATC)); } /* diff --git 
a/drivers/net/irda/tekram-sir.c b/drivers/net/irda/tekram-sir.c index 048a15422844..9dcf0c103b9d 100644 --- a/drivers/net/irda/tekram-sir.c +++ b/drivers/net/irda/tekram-sir.c @@ -63,8 +63,8 @@ static int __init tekram_sir_init(void) { if (tekram_delay < 1 || tekram_delay > 500) tekram_delay = 200; - IRDA_DEBUG(1, "%s - using %d ms delay\n", - tekram.driver_name, tekram_delay); + pr_debug("%s - using %d ms delay\n", + tekram.driver_name, tekram_delay); return irda_register_dongle(&tekram); } @@ -77,8 +77,6 @@ static int tekram_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; - IRDA_DEBUG(2, "%s()\n", __func__); - sirdev_set_dtr_rts(dev, TRUE, TRUE); qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; @@ -92,8 +90,6 @@ static int tekram_open(struct sir_dev *dev) static int tekram_close(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); @@ -130,8 +126,6 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed) u8 byte; static int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__); - switch(state) { case SIRDEV_STATE_DONGLE_SPEED: @@ -179,7 +173,8 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed) break; default: - IRDA_ERROR("%s - undefined state %d\n", __func__, state); + net_err_ratelimited("%s - undefined state %d\n", + __func__, state); ret = -EINVAL; break; } @@ -204,8 +199,6 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed) static int tekram_reset(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Clear DTR, Set RTS */ sirdev_set_dtr_rts(dev, FALSE, TRUE); diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c index 19ad4606b799..6d2f55959c49 100644 --- a/drivers/net/irda/toim3232-sir.c +++ b/drivers/net/irda/toim3232-sir.c @@ -168,8 +168,8 @@ static int __init toim3232_sir_init(void) { if (toim3232delay < 1 || toim3232delay > 500) toim3232delay = 200; - IRDA_DEBUG(1, "%s - using %d ms delay\n", - toim3232.driver_name, toim3232delay); + pr_debug("%s - using %d ms delay\n", + toim3232.driver_name, toim3232delay); return irda_register_dongle(&toim3232); } @@ -182,8 +182,6 @@ static int toim3232_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; - IRDA_DEBUG(2, "%s()\n", __func__); - /* Pull the lines high to start with. 
* * For the IR320ST-2, we need to charge the main supply capacitor to @@ -210,8 +208,6 @@ static int toim3232_open(struct sir_dev *dev) static int toim3232_close(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); @@ -242,8 +238,6 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed) u8 byte; static int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__); - switch(state) { case SIRDEV_STATE_DONGLE_SPEED: @@ -345,8 +339,6 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed) static int toim3232_reset(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Switch off both DTR and RTS to switch off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index 36e004288ea7..6960d4cd3cae 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c @@ -144,12 +144,10 @@ static int __init via_ircc_init(void) { int rc; - IRDA_DEBUG(3, "%s()\n", __func__); - rc = pci_register_driver(&via_driver); if (rc < 0) { - IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n", - __func__, rc); + pr_debug("%s(): error rc = %d, returning -ENODEV...\n", + __func__, rc); return -ENODEV; } return 0; @@ -162,11 +160,11 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase; chipio_t info; - IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device); + pr_debug("%s(): Device ID=(0X%X)\n", __func__, id->device); rc = pci_enable_device (pcidev); if (rc) { - IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc); + pr_debug("%s(): error rc = %d\n", __func__, rc); return -ENODEV; } @@ -177,7 +175,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) Chipset=0x3076; if (Chipset==0x3076) { - IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__); + pr_debug("%s(): Chipset = 3076\n", __func__); WriteLPCReg(7,0x0c ); temp=ReadLPCReg(0x30);//check if BIOS Enable Fir @@ -213,7 +211,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) } else rc = -ENODEV; //IR not turn on } else { //Not VT1211 - IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__); + pr_debug("%s(): Chipset = 3096\n", __func__); pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir if((bTmp&0x01)==1) { // BIOS enable FIR @@ -252,14 +250,12 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) rc = -ENODEV; //IR not turn on !!!!! 
}//Not VT1211 - IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc); + pr_debug("%s(): End - rc = %d\n", __func__, rc); return rc; } static void __exit via_ircc_cleanup(void) { - IRDA_DEBUG(3, "%s()\n", __func__); - /* Cleanup all instances of the driver */ pci_unregister_driver (&via_driver); } @@ -289,8 +285,6 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id) struct via_ircc_cb *self; int err; - IRDA_DEBUG(3, "%s()\n", __func__); - /* Allocate new instance of the driver */ dev = alloc_irdadev(sizeof(struct via_ircc_cb)); if (dev == NULL) @@ -316,8 +310,8 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id) /* Reserve the ioports that we need */ if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) { - IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n", - __func__, self->io.fir_base); + pr_debug("%s(), can't get iobase of 0x%03x\n", + __func__, self->io.fir_base); err = -ENODEV; goto err_out1; } @@ -391,7 +385,8 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id) if (err) goto err_out4; - IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name); + net_info_ratelimited("IrDA: Registered device %s (via-ircc)\n", + dev->name); /* Initialise the hardware.. */ @@ -422,8 +417,6 @@ static void via_remove_one(struct pci_dev *pdev) struct via_ircc_cb *self = pci_get_drvdata(pdev); int iobase; - IRDA_DEBUG(3, "%s()\n", __func__); - iobase = self->io.fir_base; ResetChip(iobase, 5); //hardware reset. @@ -431,8 +424,8 @@ static void via_remove_one(struct pci_dev *pdev) unregister_netdev(self->netdev); /* Release the PORT that this driver is using */ - IRDA_DEBUG(2, "%s(), Releasing Region %03x\n", - __func__, self->io.fir_base); + pr_debug("%s(), Releasing Region %03x\n", + __func__, self->io.fir_base); release_region(self->io.fir_base, self->io.fir_ext); if (self->tx_buff.head) dma_free_coherent(&pdev->dev, self->tx_buff.truesize, @@ -457,8 +450,6 @@ static void via_hw_init(struct via_ircc_cb *self) { int iobase = self->io.fir_base; - IRDA_DEBUG(3, "%s()\n", __func__); - SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095 // FIFO Init EnRXFIFOReadyInt(iobase, OFF); @@ -510,7 +501,7 @@ static void via_hw_init(struct via_ircc_cb *self) */ static int via_ircc_read_dongle_id(int iobase) { - IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n"); + net_err_ratelimited("via-ircc: dongle probing not supported, please specify dongle_id module parameter\n"); return 9; /* Default to IBM */ } @@ -527,8 +518,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed, /* speed is unused, as we use IsSIROn()/IsMIROn() */ speed = speed; - IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n", - __func__, speed, iobase, dongle_id); + pr_debug("%s(): change_dongle_speed to %d for 0x%x, %d\n", + __func__, speed, iobase, dongle_id); switch (dongle_id) { @@ -617,7 +608,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed, case 0x11: /* Temic TFDS4500 */ - IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__); + pr_debug("%s: Temic TFDS4500: One RX pin, TX normal, RX inverted\n", + __func__); UseOneRX(iobase, ON); //use ONE RX....RX1 InvertTX(iobase, OFF); @@ -635,7 +627,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed, SlowIRRXLowActive(iobase, OFF); } else{ - IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__); + pr_debug("%s: Warning: TFDS4500 not 
running in SIR mode !\n", + __func__); } break; @@ -652,8 +645,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed, break; default: - IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n", - __func__, dongle_id); + net_err_ratelimited("%s: Error: dongle_id %d unsupported !\n", + __func__, dongle_id); } } @@ -672,7 +665,7 @@ static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed) iobase = self->io.fir_base; /* Update accounting for new speed */ self->io.speed = speed; - IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed); + pr_debug("%s: change_speed to %d bps.\n", __func__, speed); WriteReg(iobase, I_ST_CT_0, 0x0); @@ -902,10 +895,10 @@ static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase) ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start - self->tx_buff.head) + self->tx_buff_dma, self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE); - IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n", - __func__, self->tx_fifo.ptr, - self->tx_fifo.queue[self->tx_fifo.ptr].len, - self->tx_fifo.len); + pr_debug("%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n", + __func__, self->tx_fifo.ptr, + self->tx_fifo.queue[self->tx_fifo.ptr].len, + self->tx_fifo.len); SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len); RXStart(iobase, OFF); @@ -926,8 +919,6 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self) int iobase; u8 Tx_status; - IRDA_DEBUG(3, "%s()\n", __func__); - iobase = self->io.fir_base; /* Disable DMA */ // DisableDmaChannel(self->io.dma); @@ -957,10 +948,9 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self) self->tx_fifo.ptr++; } } - IRDA_DEBUG(1, - "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n", - __func__, - self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free); + pr_debug("%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n", + __func__, + self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free); /* F01_S // Any frames to be sent back-to-back? 
if (self->tx_fifo.len) { @@ -995,8 +985,6 @@ static int via_ircc_dma_receive(struct via_ircc_cb *self) iobase = self->io.fir_base; - IRDA_DEBUG(3, "%s()\n", __func__); - self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; self->tx_fifo.tail = self->tx_buff.head; self->RxDataReady = 0; @@ -1078,15 +1066,15 @@ static int via_ircc_dma_receive_complete(struct via_ircc_cb *self, if (len == 0) return TRUE; //interrupt only, data maybe move by RxT if (((len - 4) < 2) || ((len - 4) > 2048)) { - IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n", - __func__, len, RxCurCount(iobase, self), - self->RxLastCount); + pr_debug("%s(): Trouble:len=%x,CurCount=%x,LastCount=%x\n", + __func__, len, RxCurCount(iobase, self), + self->RxLastCount); hwreset(self); return FALSE; } - IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n", - __func__, - st_fifo->len, len - 4, RxCurCount(iobase, self)); + pr_debug("%s(): fifo.len=%x,len=%x,CurCount=%x..\n", + __func__, + st_fifo->len, len - 4, RxCurCount(iobase, self)); st_fifo->entries[st_fifo->tail].status = status; st_fifo->entries[st_fifo->tail].len = len; @@ -1133,8 +1121,8 @@ F01_E */ skb_put(skb, len - 4); skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); - IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__, - len - 4, self->rx_buff.data); + pr_debug("%s(): len=%x.rx_buff=%p\n", __func__, + len - 4, self->rx_buff.data); // Move to next frame self->rx_buff.data += len; @@ -1163,7 +1151,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase) len = GetRecvByte(iobase, self); - IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len); + pr_debug("%s(): len=%x\n", __func__, len); if ((len - 4) < 2) { self->netdev->stats.rx_dropped++; @@ -1248,8 +1236,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase) skb_put(skb, len - 4); skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); - IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__, - len - 4, st_fifo->head); + pr_debug("%s(): len=%x.head=%x\n", __func__, + len - 4, st_fifo->head); // Move to next frame self->rx_buff.data += len; @@ -1262,10 +1250,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase) } //while self->RetryCount = 0; - IRDA_DEBUG(2, - "%s(): End of upload HostStatus=%x,RxStatus=%x\n", - __func__, - GetHostStatus(iobase), GetRXStatus(iobase)); + pr_debug("%s(): End of upload HostStatus=%x,RxStatus=%x\n", + __func__, GetHostStatus(iobase), GetRXStatus(iobase)); /* * if frame is receive complete at this routine ,then upload @@ -1303,12 +1289,12 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id) spin_lock(&self->lock); iHostIntType = GetHostStatus(iobase); - IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n", - __func__, iHostIntType, - (iHostIntType & 0x40) ? "Timer" : "", - (iHostIntType & 0x20) ? "Tx" : "", - (iHostIntType & 0x10) ? "Rx" : "", - (iHostIntType & 0x0e) >> 1); + pr_debug("%s(): iHostIntType %02x: %s %s %s %02x\n", + __func__, iHostIntType, + (iHostIntType & 0x40) ? "Timer" : "", + (iHostIntType & 0x20) ? "Tx" : "", + (iHostIntType & 0x10) ? "Rx" : "", + (iHostIntType & 0x0e) >> 1); if ((iHostIntType & 0x40) != 0) { //Timer Event self->EventFlag.TimeOut++; @@ -1333,12 +1319,12 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id) if ((iHostIntType & 0x20) != 0) { //Tx Event iTxIntType = GetTXStatus(iobase); - IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n", - __func__, iTxIntType, - (iTxIntType & 0x08) ? "FIFO underr." : "", - (iTxIntType & 0x04) ? 
"EOM" : "", - (iTxIntType & 0x02) ? "FIFO ready" : "", - (iTxIntType & 0x01) ? "Early EOM" : ""); + pr_debug("%s(): iTxIntType %02x: %s %s %s %s\n", + __func__, iTxIntType, + (iTxIntType & 0x08) ? "FIFO underr." : "", + (iTxIntType & 0x04) ? "EOM" : "", + (iTxIntType & 0x02) ? "FIFO ready" : "", + (iTxIntType & 0x01) ? "Early EOM" : ""); if (iTxIntType & 0x4) { self->EventFlag.EOMessage++; // read and will auto clean @@ -1357,17 +1343,17 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id) /* Check if DMA has finished */ iRxIntType = GetRXStatus(iobase); - IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n", - __func__, iRxIntType, - (iRxIntType & 0x80) ? "PHY err." : "", - (iRxIntType & 0x40) ? "CRC err" : "", - (iRxIntType & 0x20) ? "FIFO overr." : "", - (iRxIntType & 0x10) ? "EOF" : "", - (iRxIntType & 0x08) ? "RxData" : "", - (iRxIntType & 0x02) ? "RxMaxLen" : "", - (iRxIntType & 0x01) ? "SIR bad" : ""); + pr_debug("%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n", + __func__, iRxIntType, + (iRxIntType & 0x80) ? "PHY err." : "", + (iRxIntType & 0x40) ? "CRC err" : "", + (iRxIntType & 0x20) ? "FIFO overr." : "", + (iRxIntType & 0x10) ? "EOF" : "", + (iRxIntType & 0x08) ? "RxData" : "", + (iRxIntType & 0x02) ? "RxMaxLen" : "", + (iRxIntType & 0x01) ? "SIR bad" : ""); if (!iRxIntType) - IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__); + pr_debug("%s(): RxIRQ =0\n", __func__); if (iRxIntType & 0x10) { if (via_ircc_dma_receive_complete(self, iobase)) { @@ -1376,10 +1362,9 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id) } } // No ERR else { //ERR - IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n", - __func__, iRxIntType, iHostIntType, - RxCurCount(iobase, self), - self->RxLastCount); + pr_debug("%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n", + __func__, iRxIntType, iHostIntType, + RxCurCount(iobase, self), self->RxLastCount); if (iRxIntType & 0x20) { //FIFO OverRun ERR ResetChip(iobase, 0); @@ -1402,8 +1387,6 @@ static void hwreset(struct via_ircc_cb *self) int iobase; iobase = self->io.fir_base; - IRDA_DEBUG(3, "%s()\n", __func__); - ResetChip(iobase, 5); EnableDMA(iobase, OFF); EnableTX(iobase, OFF); @@ -1447,7 +1430,7 @@ static int via_ircc_is_receiving(struct via_ircc_cb *self) if (CkRxRecv(iobase, self)) status = TRUE; - IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status); + pr_debug("%s(): status=%x....\n", __func__, status); return status; } @@ -1465,16 +1448,14 @@ static int via_ircc_net_open(struct net_device *dev) int iobase; char hwname[32]; - IRDA_DEBUG(3, "%s()\n", __func__); - IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); dev->stats.rx_packets = 0; IRDA_ASSERT(self != NULL, return 0;); iobase = self->io.fir_base; if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) { - IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name, - self->io.irq); + net_warn_ratelimited("%s, unable to allocate irq=%d\n", + driver_name, self->io.irq); return -EAGAIN; } /* @@ -1482,15 +1463,15 @@ static int via_ircc_net_open(struct net_device *dev) * failure. 
*/ if (request_dma(self->io.dma, dev->name)) { - IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name, - self->io.dma); + net_warn_ratelimited("%s, unable to allocate dma=%d\n", + driver_name, self->io.dma); free_irq(self->io.irq, dev); return -EAGAIN; } if (self->io.dma2 != self->io.dma) { if (request_dma(self->io.dma2, dev->name)) { - IRDA_WARNING("%s, unable to allocate dma2=%d\n", - driver_name, self->io.dma2); + net_warn_ratelimited("%s, unable to allocate dma2=%d\n", + driver_name, self->io.dma2); free_irq(self->io.irq, dev); free_dma(self->io.dma); return -EAGAIN; @@ -1532,8 +1513,6 @@ static int via_ircc_net_close(struct net_device *dev) struct via_ircc_cb *self; int iobase; - IRDA_DEBUG(3, "%s()\n", __func__); - IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); IRDA_ASSERT(self != NULL, return 0;); @@ -1576,8 +1555,8 @@ static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); IRDA_ASSERT(self != NULL, return -1;); - IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, - cmd); + pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, + cmd); /* Disable interrupts & save flags */ spin_lock_irqsave(&self->lock, flags); switch (cmd) { diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index a2e556168286..ac39d9f33d5f 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -429,8 +429,8 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr if (rd->buf == NULL || !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { if (rd->buf) { - IRDA_ERROR("%s: failed to create PCI-MAP for %p", - __func__, rd->buf); + net_err_ratelimited("%s: failed to create PCI-MAP for %p\n", + __func__, rd->buf); kfree(rd->buf); rd->buf = NULL; } @@ -483,11 +483,8 @@ static int vlsi_create_hwif(vlsi_irda_dev_t *idev) ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr); - if (!ringarea) { - IRDA_ERROR("%s: insufficient memory for descriptor rings\n", - __func__); + if (!ringarea) goto out; - } hwmap = (struct ring_descr_hw *)ringarea; idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1], @@ -559,7 +556,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd) crclen = (idev->mode==IFF_FIR) ? 
sizeof(u32) : sizeof(u16); len -= crclen; /* remove trailing CRC */ if (len <= 0) { - IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __func__, len); + pr_debug("%s: strange frame (len=%d)\n", __func__, len); ret |= VLSI_RX_DROP; goto done; } @@ -574,14 +571,14 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd) */ le16_to_cpus(rd->buf+len); if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) { - IRDA_DEBUG(0, "%s: crc error\n", __func__); + pr_debug("%s: crc error\n", __func__); ret |= VLSI_RX_CRC; goto done; } } if (!rd->skb) { - IRDA_WARNING("%s: rx packet lost\n", __func__); + net_warn_ratelimited("%s: rx packet lost\n", __func__); ret |= VLSI_RX_DROP; goto done; } @@ -610,8 +607,8 @@ static void vlsi_fill_rx(struct vlsi_ring *r) for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) { if (rd_is_active(rd)) { - IRDA_WARNING("%s: driver bug: rx descr race with hw\n", - __func__); + net_warn_ratelimited("%s: driver bug: rx descr race with hw\n", + __func__); vlsi_ring_debug(r); break; } @@ -670,7 +667,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev) if (ring_first(r) == NULL) { /* we are in big trouble, if this should ever happen */ - IRDA_ERROR("%s: rx ring exhausted!\n", __func__); + net_err_ratelimited("%s: rx ring exhausted!\n", __func__); vlsi_ring_debug(r); } else @@ -692,7 +689,7 @@ static void vlsi_unarm_rx(vlsi_irda_dev_t *idev) if (rd_is_active(rd)) { rd_set_status(rd, 0); if (rd_get_count(rd)) { - IRDA_DEBUG(0, "%s - dropping rx packet\n", __func__); + pr_debug("%s - dropping rx packet\n", __func__); ret = -VLSI_RX_DROP; } rd_set_count(rd, 0); @@ -767,7 +764,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase) int fifocnt; baudrate = idev->new_baud; - IRDA_DEBUG(2, "%s: %d -> %d\n", __func__, idev->baud, idev->new_baud); + pr_debug("%s: %d -> %d\n", __func__, idev->baud, idev->new_baud); if (baudrate == 4000000) { mode = IFF_FIR; config = IRCFG_FIR; @@ -783,8 +780,8 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase) config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY; switch(baudrate) { default: - IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n", - __func__, baudrate); + net_warn_ratelimited("%s: undefined baudrate %d - fallback to 9600!\n", + __func__, baudrate); baudrate = 9600; /* fallthru */ case 2400: @@ -801,7 +798,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase) fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; if (fifocnt != 0) { - IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt); + pr_debug("%s: rx fifo not empty(%d)\n", __func__, fifocnt); } outw(0, iobase+VLSI_PIO_IRENABLE); @@ -825,14 +822,16 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase) config ^= IRENABLE_SIR_ON; if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) { - IRDA_WARNING("%s: failed to set %s mode!\n", __func__, - (mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR")); + net_warn_ratelimited("%s: failed to set %s mode!\n", + __func__, + mode == IFF_SIR ? "SIR" : + mode == IFF_MIR ? 
"MIR" : "FIR"); ret = -1; } else { if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) { - IRDA_WARNING("%s: failed to apply baudrate %d\n", - __func__, baudrate); + net_warn_ratelimited("%s: failed to apply baudrate %d\n", + __func__, baudrate); ret = -1; } else { @@ -977,8 +976,8 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, */ if (len >= r->len-5) - IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n", - __func__); + net_warn_ratelimited("%s: possible buffer overflow with SIR wrapping!\n", + __func__); } else { /* hw deals with MIR/FIR mode wrapping */ @@ -1023,7 +1022,8 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; if (fifocnt != 0) { - IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt); + pr_debug("%s: rx fifo not empty(%d)\n", + __func__, fifocnt); } config = inw(iobase+VLSI_PIO_IRCFG); @@ -1035,7 +1035,7 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, if (ring_put(r) == NULL) { netif_stop_queue(ndev); - IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __func__); + pr_debug("%s: tx ring full - queue stopped\n", __func__); } spin_unlock_irqrestore(&idev->lock, flags); @@ -1044,7 +1044,7 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, drop_unlock: spin_unlock_irqrestore(&idev->lock, flags); drop: - IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg); + net_warn_ratelimited("%s: dropping packet - %s\n", __func__, msg); dev_kfree_skb_any(skb); ndev->stats.tx_errors++; ndev->stats.tx_dropped++; @@ -1100,8 +1100,8 @@ static void vlsi_tx_interrupt(struct net_device *ndev) fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; if (fifocnt != 0) { - IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", - __func__, fifocnt); + pr_debug("%s: rx fifo not empty(%d)\n", + __func__, fifocnt); } outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG); } @@ -1110,7 +1110,7 @@ static void vlsi_tx_interrupt(struct net_device *ndev) if (netif_queue_stopped(ndev) && !idev->new_baud) { netif_wake_queue(ndev); - IRDA_DEBUG(3, "%s: queue awoken\n", __func__); + pr_debug("%s: queue awoken\n", __func__); } } @@ -1134,7 +1134,7 @@ static void vlsi_unarm_tx(vlsi_irda_dev_t *idev) dev_kfree_skb_any(rd->skb); rd->skb = NULL; } - IRDA_DEBUG(0, "%s - dropping tx packet\n", __func__); + pr_debug("%s - dropping tx packet\n", __func__); ret = -VLSI_TX_DROP; } else @@ -1183,8 +1183,8 @@ static int vlsi_start_clock(struct pci_dev *pdev) } if (count < 3) { if (clksrc == 1) { /* explicitly asked for PLL hence bail out */ - IRDA_ERROR("%s: no PLL or failed to lock!\n", - __func__); + net_err_ratelimited("%s: no PLL or failed to lock!\n", + __func__); clkctl = CLKCTL_CLKSTP; pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); return -1; @@ -1192,8 +1192,8 @@ static int vlsi_start_clock(struct pci_dev *pdev) else /* was: clksrc=0(auto) */ clksrc = 3; /* fallback to 40MHz XCLK (OB800) */ - IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n", - __func__, clksrc); + pr_debug("%s: PLL not locked, fallback to clksrc=%d\n", + __func__, clksrc); } else clksrc = 1; /* got successful PLL lock */ @@ -1265,7 +1265,7 @@ static int vlsi_init_chip(struct pci_dev *pdev) /* start the clock and clean the registers */ if (vlsi_start_clock(pdev)) { - IRDA_ERROR("%s: no valid clock source\n", __func__); + net_err_ratelimited("%s: no valid clock source\n", __func__); return -1; } iobase = ndev->base_addr; @@ -1389,8 +1389,8 @@ static void vlsi_tx_timeout(struct net_device *ndev) idev->new_baud = 
idev->baud; /* keep current baudrate */ if (vlsi_start_hw(idev)) - IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n", - __func__, pci_name(idev->pdev), ndev->name); + net_err_ratelimited("%s: failed to restart hw - %s(%s) unusable!\n", + __func__, pci_name(idev->pdev), ndev->name); else netif_start_queue(ndev); } @@ -1434,8 +1434,8 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) irq->ifr_receiving = (fifocnt!=0) ? 1 : 0; break; default: - IRDA_WARNING("%s: notsupp - cmd=%04x\n", - __func__, cmd); + net_warn_ratelimited("%s: notsupp - cmd=%04x\n", + __func__, cmd); ret = -EOPNOTSUPP; } @@ -1479,8 +1479,8 @@ static irqreturn_t vlsi_interrupt(int irq, void *dev_instance) spin_unlock_irqrestore(&idev->lock,flags); if (boguscount <= 0) - IRDA_MESSAGE("%s: too much work in interrupt!\n", - __func__); + net_info_ratelimited("%s: too much work in interrupt!\n", + __func__); return IRQ_RETVAL(handled); } @@ -1493,7 +1493,7 @@ static int vlsi_open(struct net_device *ndev) char hwname[32]; if (pci_request_regions(idev->pdev, drivername)) { - IRDA_WARNING("%s: io resource busy\n", __func__); + net_warn_ratelimited("%s: io resource busy\n", __func__); goto errout; } ndev->base_addr = pci_resource_start(idev->pdev,0); @@ -1507,8 +1507,8 @@ static int vlsi_open(struct net_device *ndev) if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED, drivername, ndev)) { - IRDA_WARNING("%s: couldn't get IRQ: %d\n", - __func__, ndev->irq); + net_warn_ratelimited("%s: couldn't get IRQ: %d\n", + __func__, ndev->irq); goto errout_io; } @@ -1529,7 +1529,8 @@ static int vlsi_open(struct net_device *ndev) netif_start_queue(ndev); - IRDA_MESSAGE("%s: device %s operational\n", __func__, ndev->name); + net_info_ratelimited("%s: device %s operational\n", + __func__, ndev->name); return 0; @@ -1563,7 +1564,7 @@ static int vlsi_close(struct net_device *ndev) pci_release_regions(idev->pdev); - IRDA_MESSAGE("%s: device %s stopped\n", __func__, ndev->name); + net_info_ratelimited("%s: device %s stopped\n", __func__, ndev->name); return 0; } @@ -1590,7 +1591,8 @@ static int vlsi_irda_init(struct net_device *ndev) if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) || pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) { - IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __func__); + net_err_ratelimited("%s: aborting due to PCI BM-DMA address limitations\n", + __func__); return -1; } @@ -1632,19 +1634,19 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id) else pdev->current_state = 0; /* hw must be running now */ - IRDA_MESSAGE("%s: IrDA PCI controller %s detected\n", - drivername, pci_name(pdev)); + net_info_ratelimited("%s: IrDA PCI controller %s detected\n", + drivername, pci_name(pdev)); if ( !pci_resource_start(pdev,0) || !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) { - IRDA_ERROR("%s: bar 0 invalid", __func__); + net_err_ratelimited("%s: bar 0 invalid", __func__); goto out_disable; } ndev = alloc_irdadev(sizeof(*idev)); if (ndev==NULL) { - IRDA_ERROR("%s: Unable to allocate device memory.\n", - __func__); + net_err_ratelimited("%s: Unable to allocate device memory.\n", + __func__); goto out_disable; } @@ -1659,7 +1661,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_freedev; if (register_netdev(ndev) < 0) { - IRDA_ERROR("%s: register_netdev failed\n", __func__); + net_err_ratelimited("%s: register_netdev failed\n", __func__); goto out_freedev; } @@ -1669,14 +1671,15 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct 
pci_device_id *id) ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO, vlsi_proc_root, VLSI_PROC_FOPS, ndev); if (!ent) { - IRDA_WARNING("%s: failed to create proc entry\n", - __func__); + net_warn_ratelimited("%s: failed to create proc entry\n", + __func__); } else { proc_set_size(ent, 0); } idev->proc_entry = ent; } - IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name); + net_info_ratelimited("%s: registered device %s\n", + drivername, ndev->name); pci_set_drvdata(pdev, ndev); mutex_unlock(&idev->mtx); @@ -1698,7 +1701,7 @@ static void vlsi_irda_remove(struct pci_dev *pdev) vlsi_irda_dev_t *idev; if (!ndev) { - IRDA_ERROR("%s: lost netdevice?\n", drivername); + net_err_ratelimited("%s: lost netdevice?\n", drivername); return; } @@ -1714,7 +1717,7 @@ static void vlsi_irda_remove(struct pci_dev *pdev) free_netdev(ndev); - IRDA_MESSAGE("%s: %s removed\n", drivername, pci_name(pdev)); + net_info_ratelimited("%s: %s removed\n", drivername, pci_name(pdev)); } #ifdef CONFIG_PM @@ -1733,8 +1736,8 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state) vlsi_irda_dev_t *idev; if (!ndev) { - IRDA_ERROR("%s - %s: no netdevice\n", - __func__, pci_name(pdev)); + net_err_ratelimited("%s - %s: no netdevice\n", + __func__, pci_name(pdev)); return 0; } idev = netdev_priv(ndev); @@ -1745,7 +1748,9 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state) pdev->current_state = state.event; } else - IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __func__, pci_name(pdev), pdev->current_state, state.event); + net_err_ratelimited("%s - %s: invalid suspend request %u -> %u\n", + __func__, pci_name(pdev), + pdev->current_state, state.event); mutex_unlock(&idev->mtx); return 0; } @@ -1772,16 +1777,16 @@ static int vlsi_irda_resume(struct pci_dev *pdev) vlsi_irda_dev_t *idev; if (!ndev) { - IRDA_ERROR("%s - %s: no netdevice\n", - __func__, pci_name(pdev)); + net_err_ratelimited("%s - %s: no netdevice\n", + __func__, pci_name(pdev)); return 0; } idev = netdev_priv(ndev); mutex_lock(&idev->mtx); if (pdev->current_state == 0) { mutex_unlock(&idev->mtx); - IRDA_WARNING("%s - %s: already resumed\n", - __func__, pci_name(pdev)); + net_warn_ratelimited("%s - %s: already resumed\n", + __func__, pci_name(pdev)); return 0; } @@ -1800,7 +1805,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev) * now we explicitly set pdev->current_state = 0 after enabling the * device and independently resume_ok should catch any garbage config. */ - IRDA_WARNING("%s - hm, nothing to resume?\n", __func__); + net_warn_ratelimited("%s - hm, nothing to resume?\n", __func__); mutex_unlock(&idev->mtx); return 0; } @@ -1837,7 +1842,8 @@ static int __init vlsi_mod_init(void) int i, ret; if (clksrc < 0 || clksrc > 3) { - IRDA_ERROR("%s: invalid clksrc=%d\n", drivername, clksrc); + net_err_ratelimited("%s: invalid clksrc=%d\n", + drivername, clksrc); return -1; } @@ -1850,7 +1856,10 @@ static int __init vlsi_mod_init(void) case 64: break; default: - IRDA_WARNING("%s: invalid %s ringsize %d, using default=8", drivername, (i)?"rx":"tx", ringsize[i]); + net_warn_ratelimited("%s: invalid %s ringsize %d, using default=8\n", + drivername, + i ? 
"rx" : "tx", + ringsize[i]); ringsize[i] = 8; break; } diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h index 56399204e68c..f9119c6d2a09 100644 --- a/drivers/net/irda/vlsi_ir.h +++ b/drivers/net/irda/vlsi_ir.h @@ -615,7 +615,8 @@ static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s) */ if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) { - IRDA_ERROR("%s: pci busaddr inconsistency!\n", __func__); + net_err_ratelimited("%s: pci busaddr inconsistency!\n", + __func__); dump_stack(); return; } diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index 11dbdf36d9c1..4e3d2e7c697c 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c @@ -110,8 +110,6 @@ static int __init w83977af_init(void) { int i; - IRDA_DEBUG(0, "%s()\n", __func__ ); - for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) { if (w83977af_open(i, io[i], irq[i], dma[i]) == 0) return 0; @@ -129,8 +127,6 @@ static void __exit w83977af_cleanup(void) { int i; - IRDA_DEBUG(4, "%s()\n", __func__ ); - for (i=0; i < ARRAY_SIZE(dev_self); i++) { if (dev_self[i]) w83977af_close(dev_self[i]); @@ -157,12 +153,10 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq, struct w83977af_ir *self; int err; - IRDA_DEBUG(0, "%s()\n", __func__ ); - /* Lock the port that we need */ if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) { - IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n", - __func__ , iobase); + pr_debug("%s(), can't get iobase of 0x%03x\n", + __func__ , iobase); return -ENODEV; } @@ -236,10 +230,11 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq, err = register_netdev(dev); if (err) { - IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__); + net_err_ratelimited("%s(), register_netdevice() failed!\n", + __func__); goto err_out3; } - IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); + net_info_ratelimited("IrDA: Registered device %s\n", dev->name); /* Need to store self somewhere */ dev_self[i] = self; @@ -268,8 +263,6 @@ static int w83977af_close(struct w83977af_ir *self) { int iobase; - IRDA_DEBUG(0, "%s()\n", __func__ ); - iobase = self->io.fir_base; #ifdef CONFIG_USE_W977_PNP @@ -288,8 +281,8 @@ static int w83977af_close(struct w83977af_ir *self) unregister_netdev(self->netdev); /* Release the PORT that this driver is using */ - IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n", - __func__ , self->io.fir_base); + pr_debug("%s(), Releasing Region %03x\n", + __func__ , self->io.fir_base); release_region(self->io.fir_base, self->io.fir_ext); if (self->tx_buff.head) @@ -311,7 +304,6 @@ static int w83977af_probe(int iobase, int irq, int dma) int i; for (i=0; i < 2; i++) { - IRDA_DEBUG( 0, "%s()\n", __func__ ); #ifdef CONFIG_USE_W977_PNP /* Enter PnP configuration mode */ w977_efm_enter(efbase[i]); @@ -392,13 +384,13 @@ static int w83977af_probe(int iobase, int irq, int dma) switch_bank(iobase, SET7); outb(0x40, iobase+7); - IRDA_MESSAGE("W83977AF (IR) driver loaded. " - "Version: 0x%02x\n", version); + net_info_ratelimited("W83977AF (IR) driver loaded. 
Version: 0x%02x\n", + version); return 0; } else { /* Try next extented function register address */ - IRDA_DEBUG( 0, "%s(), Wrong chip version", __func__ ); + pr_debug("%s(), Wrong chip version", __func__); } } return -1; @@ -434,19 +426,19 @@ static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed) case 115200: outb(0x01, iobase+ABLL); break; case 576000: ir_mode = HCR_MIR_576; - IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__ ); + pr_debug("%s(), handling baud of 576000\n", __func__); break; case 1152000: ir_mode = HCR_MIR_1152; - IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__ ); + pr_debug("%s(), handling baud of 1152000\n", __func__); break; case 4000000: ir_mode = HCR_FIR; - IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__ ); + pr_debug("%s(), handling baud of 4000000\n", __func__); break; default: ir_mode = HCR_FIR; - IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__ , speed); + pr_debug("%s(), unknown baud rate of %d\n", __func__ , speed); break; } @@ -497,8 +489,8 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb, iobase = self->io.fir_base; - IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__ , jiffies, - (int) skb->len); + pr_debug("%s(%ld), skb->len=%d\n", __func__ , jiffies, + (int)skb->len); /* Lock transmit buffer */ netif_stop_queue(dev); @@ -525,7 +517,7 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb, self->tx_buff.len = skb->len; mtt = irda_get_mtt(skb); - IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt); + pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt); if (mtt) udelay(mtt); @@ -559,7 +551,7 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb, static void w83977af_dma_write(struct w83977af_ir *self, int iobase) { __u8 set; - IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len); + pr_debug("%s(), len=%d\n", __func__ , self->tx_buff.len); /* Save current set */ set = inb(iobase+SSR); @@ -594,19 +586,16 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size) int actual = 0; __u8 set; - IRDA_DEBUG(4, "%s()\n", __func__ ); - /* Save current bank */ set = inb(iobase+SSR); switch_bank(iobase, SET0); if (!(inb_p(iobase+USR) & USR_TSRE)) { - IRDA_DEBUG(4, - "%s(), warning, FIFO not empty yet!\n", __func__ ); + pr_debug("%s(), warning, FIFO not empty yet!\n", __func__); fifo_size -= 17; - IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n", - __func__ , fifo_size); + pr_debug("%s(), %d bytes left in tx fifo\n", + __func__ , fifo_size); } /* Fill FIFO with current frame */ @@ -615,8 +604,8 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size) outb(buf[actual++], iobase+TBR); } - IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n", - __func__ , fifo_size, actual, len); + pr_debug("%s(), fifo_size %d ; %d sent of %d\n", + __func__ , fifo_size, actual, len); /* Restore bank */ outb(set, iobase+SSR); @@ -636,7 +625,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self) int iobase; __u8 set; - IRDA_DEBUG(4, "%s(%ld)\n", __func__ , jiffies); + pr_debug("%s(%ld)\n", __func__ , jiffies); IRDA_ASSERT(self != NULL, return;); @@ -651,7 +640,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self) /* Check for underrun! 
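/* The IRDA_DEBUG() calls removed or converted above become pr_debug(),
 * which compiles to nothing unless DEBUG is defined for the file or
 * CONFIG_DYNAMIC_DEBUG enables the callsite at run time -- hence the
 * trace-style "%s()" entry logs can simply be dropped.  Sketch of the
 * usual compile-out, assuming standard printk.h semantics (demo_dbg is
 * a stand-in, not an API in this tree):
 */
#ifdef DEBUG
#define demo_dbg(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define demo_dbg(fmt, ...) no_printk(fmt, ##__VA_ARGS__) /* type-checks only */
#endif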
*/ if (inb(iobase+AUDR) & AUDR_UNDR) { - IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ ); + pr_debug("%s(), Transmit underrun!\n", __func__); self->netdev->stats.tx_errors++; self->netdev->stats.tx_fifo_errors++; @@ -692,7 +681,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self) #endif IRDA_ASSERT(self != NULL, return -1;); - IRDA_DEBUG(4, "%s\n", __func__ ); + pr_debug("%s\n", __func__); iobase= self->io.fir_base; @@ -763,7 +752,7 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self) __u8 set; __u8 status; - IRDA_DEBUG(4, "%s\n", __func__ ); + pr_debug("%s\n", __func__); st_fifo = &self->st_fifo; @@ -880,8 +869,6 @@ static void w83977af_pio_receive(struct w83977af_ir *self) __u8 byte = 0x00; int iobase; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); iobase = self->io.fir_base; @@ -907,7 +894,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr) __u8 set; int iobase; - IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__ , isr); + pr_debug("%s(), isr=%#x\n", __func__ , isr); iobase = self->io.fir_base; /* Transmit FIFO low on data */ @@ -943,8 +930,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr) if (isr & ISR_TXEMP_I) { /* Check if we need to change the speed? */ if (self->new_speed) { - IRDA_DEBUG(2, - "%s(), Changing speed!\n", __func__ ); + pr_debug("%s(), Changing speed!\n", __func__); w83977af_change_speed(self, self->new_speed); self->new_speed = 0; } @@ -1126,7 +1112,6 @@ static int w83977af_net_open(struct net_device *dev) char hwname[32]; __u8 set; - IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); @@ -1189,8 +1174,6 @@ static int w83977af_net_close(struct net_device *dev) int iobase; __u8 set; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); @@ -1244,7 +1227,7 @@ static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) IRDA_ASSERT(self != NULL, return -1;); - IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd); + pr_debug("%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd); spin_lock_irqsave(&self->lock, flags); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 29b3bb410781..612e0731142d 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -272,7 +272,7 @@ static void macvlan_process_broadcast(struct work_struct *w) struct sk_buff *skb; struct sk_buff_head list; - skb_queue_head_init(&list); + __skb_queue_head_init(&list); spin_lock_bh(&port->bc_queue.lock); skb_queue_splice_tail_init(&port->bc_queue, &list); @@ -742,11 +742,12 @@ static struct lock_class_key macvlan_netdev_xmit_lock_key; static struct lock_class_key macvlan_netdev_addr_lock_key; #define ALWAYS_ON_FEATURES \ - (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX) + (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX | \ + NETIF_F_GSO_ROBUST) #define MACVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ - NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ + NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) @@ -783,6 +784,7 @@ static int macvlan_init(struct net_device *dev) (lowerdev->state & MACVLAN_STATE_MASK); dev->features = lowerdev->features & MACVLAN_FEATURES; dev->features |= ALWAYS_ON_FEATURES; + dev->hw_features |= 
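/* The __skb_queue_head_init() change in macvlan_process_broadcast()
 * above relies on the splice-to-private-list pattern: only the splice
 * runs under the queue lock, and the on-stack list needs no locking at
 * all, so the lockless __-variants are the right initializer and
 * dequeue.  A minimal sketch, assuming some producer feeds `shared`
 * from another context:
 */
static void demo_drain(struct sk_buff_head *shared)
{
        struct sk_buff_head list;
        struct sk_buff *skb;

        __skb_queue_head_init(&list);   /* local list: no lock to set up */

        spin_lock_bh(&shared->lock);
        skb_queue_splice_tail_init(shared, &list);
        spin_unlock_bh(&shared->lock);

        while ((skb = __skb_dequeue(&list)) != NULL)
                consume_skb(skb);       /* stand-in for real processing */
}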
NETIF_F_LRO; dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; dev->gso_max_size = lowerdev->gso_max_size; dev->iflink = lowerdev->ifindex; @@ -872,7 +874,7 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev, static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, + const unsigned char *addr, u16 vid, u16 flags) { struct macvlan_dev *vlan = netdev_priv(dev); @@ -897,7 +899,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr) + const unsigned char *addr, u16 vid) { struct macvlan_dev *vlan = netdev_priv(dev); int err = -EINVAL; @@ -935,15 +937,15 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, netdev_features_t features) { struct macvlan_dev *vlan = netdev_priv(dev); + netdev_features_t lowerdev_features = vlan->lowerdev->features; netdev_features_t mask; features |= NETIF_F_ALL_FOR_ALL; features &= (vlan->set_features | ~MACVLAN_FEATURES); mask = features; - features = netdev_increment_features(vlan->lowerdev->features, - features, - mask); + lowerdev_features &= (features | ~NETIF_F_LRO); + features = netdev_increment_features(lowerdev_features, features, mask); features |= ALWAYS_ON_FEATURES; features &= ~NETIF_F_NETNS_LOCAL; @@ -1055,6 +1057,9 @@ static int macvlan_port_create(struct net_device *dev) if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) return -EINVAL; + if (netif_is_ipvlan_port(dev)) + return -EBUSY; + port = kzalloc(sizeof(*port), GFP_KERNEL); if (port == NULL) return -ENOMEM; @@ -1082,9 +1087,15 @@ static void macvlan_port_destroy(struct net_device *dev) { struct macvlan_port *port = macvlan_port_get_rtnl(dev); - cancel_work_sync(&port->bc_work); dev->priv_flags &= ~IFF_MACVLAN_PORT; netdev_rx_handler_unregister(dev); + + /* After this point, no packet can schedule bc_work anymore, + * but we need to cancel it and purge left skbs if any. 
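/* The macvlan_fix_features() hunk above changes how LRO propagates:
 * the lower device's LRO bit is masked out unless the macvlan itself
 * has LRO enabled, and only then does the usual feature merge run.
 * A simplified sketch of that masking, assuming the
 * netdev_increment_features() semantics used above:
 */
static netdev_features_t demo_fix_features(netdev_features_t lowerdev_features,
                                           netdev_features_t wanted)
{
        /* let lower-device LRO through only if requested on top */
        lowerdev_features &= (wanted | ~NETIF_F_LRO);
        return netdev_increment_features(lowerdev_features, wanted, wanted);
}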
+ */ + cancel_work_sync(&port->bc_work); + __skb_queue_purge(&port->bc_queue); + kfree_rcu(port, rcu); } diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 65e2892342bd..ba1e5db2152e 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -15,7 +15,9 @@ #include <linux/cdev.h> #include <linux/idr.h> #include <linux/fs.h> +#include <linux/uio.h> +#include <net/ipv6.h> #include <net/net_namespace.h> #include <net/rtnetlink.h> #include <net/sock.h> @@ -65,7 +67,7 @@ static struct cdev macvtap_cdev; static const struct proto_ops macvtap_socket_ops; #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ - NETIF_F_TSO6 | NETIF_F_UFO) + NETIF_F_TSO6) #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) @@ -569,7 +571,11 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb, gso_type = SKB_GSO_TCPV6; break; case VIRTIO_NET_HDR_GSO_UDP: + pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n", + current->comm); gso_type = SKB_GSO_UDP; + if (skb->protocol == htons(ETH_P_IPV6)) + ipv6_proxy_select_ident(skb); break; default: return -EINVAL; @@ -614,8 +620,6 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb, vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (sinfo->gso_type & SKB_GSO_TCPV6) vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; - else if (sinfo->gso_type & SKB_GSO_UDP) - vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if (sinfo->gso_type & SKB_GSO_TCP_ECN) @@ -626,6 +630,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_PARTIAL) { vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; vnet_hdr->csum_start = skb_checksum_start_offset(skb); + if (vlan_tx_tag_present(skb)) + vnet_hdr->csum_start += VLAN_HLEN; vnet_hdr->csum_offset = skb->csum_offset; } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; @@ -634,12 +640,12 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb, /* Get packet from user space buffer */ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, - const struct iovec *iv, unsigned long total_len, - size_t count, int noblock) + struct iov_iter *from, int noblock) { int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); struct sk_buff *skb; struct macvlan_dev *vlan; + unsigned long total_len = iov_iter_count(from); unsigned long len = total_len; int err; struct virtio_net_hdr vnet_hdr = { 0 }; @@ -647,6 +653,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, int copylen = 0; bool zerocopy = false; size_t linear; + ssize_t n; if (q->flags & IFF_VNET_HDR) { vnet_hdr_len = q->vnet_hdr_sz; @@ -656,10 +663,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, goto err; len -= vnet_hdr_len; - err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, - sizeof(vnet_hdr)); - if (err < 0) + err = -EFAULT; + n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from); + if (n != sizeof(vnet_hdr)) goto err; + iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr)); if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 > vnet_hdr.hdr_len) @@ -674,17 +682,16 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (unlikely(len < ETH_HLEN)) goto err; - err = -EMSGSIZE; - if (unlikely(count > UIO_MAXIOV)) - goto err; - if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { + struct iov_iter i; + copylen = 
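/* The macvtap_get_user() rework above replaces offset-based
 * memcpy_fromiovecend() with a consuming iov_iter: the virtio-net
 * header is copied out, then the iterator is advanced past whatever
 * padding the negotiated header size adds, so all later copies start
 * at the packet payload.  A trimmed sketch of just that step, assuming
 * vnet_hdr_sz >= sizeof(struct virtio_net_hdr) as the caller checks:
 */
static int demo_pull_vnet_hdr(struct iov_iter *from,
                              struct virtio_net_hdr *hdr, int vnet_hdr_sz)
{
        if (copy_from_iter(hdr, sizeof(*hdr), from) != sizeof(*hdr))
                return -EFAULT;
        /* skip the tail of the (possibly larger) header area */
        iov_iter_advance(from, vnet_hdr_sz - sizeof(*hdr));
        return 0;
}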
vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN; if (copylen > good_linear) copylen = good_linear; linear = copylen; - if (iov_pages(iv, vnet_hdr_len + copylen, count) - <= MAX_SKB_FRAGS) + i = *from; + iov_iter_advance(&i, copylen); + if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) zerocopy = true; } @@ -702,10 +709,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, goto err; if (zerocopy) - err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count); + err = zerocopy_sg_from_iter(skb, from); else { - err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, - len); + err = skb_copy_datagram_from_iter(skb, 0, from, len); if (!err && m && m->msg_control) { struct ubuf_info *uarg = m->msg_control; uarg->callback(uarg, false); @@ -758,46 +764,42 @@ err: return err; } -static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv, - unsigned long count, loff_t pos) +static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; - ssize_t result = -ENOLINK; struct macvtap_queue *q = file->private_data; - result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count, - file->f_flags & O_NONBLOCK); - return result; + return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK); } /* Put packet to the user space buffer */ static ssize_t macvtap_put_user(struct macvtap_queue *q, const struct sk_buff *skb, - const struct iovec *iv, int len) + struct iov_iter *iter) { int ret; int vnet_hdr_len = 0; int vlan_offset = 0; - int copied, total; + int total; if (q->flags & IFF_VNET_HDR) { struct virtio_net_hdr vnet_hdr; vnet_hdr_len = q->vnet_hdr_sz; - if ((len -= vnet_hdr_len) < 0) + if (iov_iter_count(iter) < vnet_hdr_len) return -EINVAL; macvtap_skb_to_vnet_hdr(skb, &vnet_hdr); - if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr))) + if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != + sizeof(vnet_hdr)) return -EFAULT; + + iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr)); } - total = copied = vnet_hdr_len; + total = vnet_hdr_len; total += skb->len; - if (!vlan_tx_tag_present(skb)) - len = min_t(int, skb->len, len); - else { - int copy; + if (vlan_tx_tag_present(skb)) { struct { __be16 h_vlan_proto; __be16 h_vlan_TCI; @@ -806,86 +808,77 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); - len = min_t(int, skb->len + VLAN_HLEN, len); total += VLAN_HLEN; - copy = min_t(int, vlan_offset, len); - ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); - len -= copy; - copied += copy; - if (ret || !len) + ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); + if (ret || !iov_iter_count(iter)) goto done; - copy = min_t(int, sizeof(veth), len); - ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy); - len -= copy; - copied += copy; - if (ret || !len) + ret = copy_to_iter(&veth, sizeof(veth), iter); + if (ret != sizeof(veth) || !iov_iter_count(iter)) goto done; } - ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); + ret = skb_copy_datagram_iter(skb, vlan_offset, iter, + skb->len - vlan_offset); done: return ret ? 
ret : total; } static ssize_t macvtap_do_read(struct macvtap_queue *q, - const struct iovec *iv, unsigned long len, + struct iov_iter *to, int noblock) { DEFINE_WAIT(wait); struct sk_buff *skb; ssize_t ret = 0; - while (len) { + if (!iov_iter_count(to)) + return 0; + + while (1) { if (!noblock) prepare_to_wait(sk_sleep(&q->sk), &wait, TASK_INTERRUPTIBLE); /* Read frames from the queue */ skb = skb_dequeue(&q->sk.sk_receive_queue); - if (!skb) { - if (noblock) { - ret = -EAGAIN; - break; - } - if (signal_pending(current)) { - ret = -ERESTARTSYS; - break; - } - /* Nothing to read, let's sleep */ - schedule(); - continue; + if (skb) + break; + if (noblock) { + ret = -EAGAIN; + break; } - ret = macvtap_put_user(q, skb, iv, len); - kfree_skb(skb); - break; + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + /* Nothing to read, let's sleep */ + schedule(); + } + if (skb) { + ret = macvtap_put_user(q, skb, to); + if (unlikely(ret < 0)) + kfree_skb(skb); + else + consume_skb(skb); } - if (!noblock) finish_wait(sk_sleep(&q->sk), &wait); return ret; } -static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv, - unsigned long count, loff_t pos) +static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct macvtap_queue *q = file->private_data; - ssize_t len, ret = 0; + ssize_t len = iov_iter_count(to), ret; - len = iov_length(iv, count); - if (len < 0) { - ret = -EINVAL; - goto out; - } - - ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK); + ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK); ret = min_t(ssize_t, ret, len); if (ret > 0) iocb->ki_pos = ret; -out: return ret; } @@ -950,9 +943,6 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) if (arg & TUN_F_TSO6) feature_mask |= NETIF_F_TSO6; } - - if (arg & TUN_F_UFO) - feature_mask |= NETIF_F_UFO; } /* tun/tap driver inverts the usage for TSO offloads, where @@ -963,7 +953,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) * When user space turns off TSO, we turn off GSO/LRO so that * user-space will not receive TSO frames. 
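/* The macvtap_do_read() rewrite above restructures the canonical
 * blocking-read loop: register on the socket's wait queue before each
 * queue check so a wakeup arriving between "queue empty" and "sleep"
 * cannot be lost, and use consume_skb() on success so dropped-packet
 * accounting stays honest.  Skeleton of the loop with the macvtap
 * details stripped (noblock handling condensed):
 */
DEFINE_WAIT(wait);
struct sk_buff *skb;
ssize_t ret = 0;

for (;;) {
        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        skb = skb_dequeue(&sk->sk_receive_queue);
        if (skb)
                break;                          /* got a frame */
        if (noblock) {
                ret = -EAGAIN;
                break;
        }
        if (signal_pending(current)) {
                ret = -ERESTARTSYS;
                break;
        }
        schedule();                             /* nothing queued: sleep */
}
finish_wait(sk_sleep(sk), &wait);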
*/ - if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) + if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6)) features |= RX_OFFLOADS; else features &= ~RX_OFFLOADS; @@ -1064,7 +1054,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, case TUNSETOFFLOAD: /* let the user check for future flags */ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | - TUN_F_TSO_ECN | TUN_F_UFO)) + TUN_F_TSO_ECN)) return -EINVAL; rtnl_lock(); @@ -1089,8 +1079,10 @@ static const struct file_operations macvtap_fops = { .owner = THIS_MODULE, .open = macvtap_open, .release = macvtap_release, - .aio_read = macvtap_aio_read, - .aio_write = macvtap_aio_write, + .read = new_sync_read, + .write = new_sync_write, + .read_iter = macvtap_read_iter, + .write_iter = macvtap_write_iter, .poll = macvtap_poll, .llseek = no_llseek, .unlocked_ioctl = macvtap_ioctl, @@ -1103,8 +1095,9 @@ static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); - return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen, - m->msg_flags & MSG_DONTWAIT); + struct iov_iter from; + iov_iter_init(&from, WRITE, m->msg_iov, m->msg_iovlen, total_len); + return macvtap_get_user(q, m, &from, m->msg_flags & MSG_DONTWAIT); } static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock, @@ -1112,11 +1105,12 @@ static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock, int flags) { struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); + struct iov_iter to; int ret; if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) return -EINVAL; - ret = macvtap_do_read(q, m->msg_iov, total_len, - flags & MSG_DONTWAIT); + iov_iter_init(&to, READ, m->msg_iov, m->msg_iovlen, total_len); + ret = macvtap_do_read(q, &to, flags & MSG_DONTWAIT); if (ret > total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? 
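/* With ->read_iter/->write_iter wired up, the fops hunk above services
 * the classic read(2)/write(2) entry points through the generic
 * new_sync_read/new_sync_write shims, which build an iov_iter over the
 * single user buffer and call back into the _iter methods.  Sketch of
 * the resulting table, assuming the shim signatures of this tree:
 */
static const struct file_operations demo_fops = {
        .owner      = THIS_MODULE,
        .read       = new_sync_read,    /* wraps ->read_iter */
        .write      = new_sync_write,   /* wraps ->write_iter */
        .read_iter  = macvtap_read_iter,
        .write_iter = macvtap_write_iter,
        .llseek     = no_llseek,
};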
ret : total_len; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 75472cf734de..b4b0f804e84c 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -26,7 +26,7 @@ config AMD_PHY config AMD_XGBE_PHY tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs" - depends on OF + depends on OF && HAS_IOMEM ---help--- Currently supports the AMD 10GbE PHY diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c index c456559f6e7f..903dc3dc9ea7 100644 --- a/drivers/net/phy/amd-xgbe-phy.c +++ b/drivers/net/phy/amd-xgbe-phy.c @@ -992,7 +992,8 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev) if (ret & MDIO_CTRL1_RESET) return -ETIMEDOUT; - return 0; + /* Make sure the XPCS and SerDes are in compatible states */ + return amd_xgbe_phy_xgmii_mode(phydev); } static int amd_xgbe_phy_config_init(struct phy_device *phydev) @@ -1467,20 +1468,7 @@ static struct phy_driver amd_xgbe_phy_driver[] = { }, }; -static int __init amd_xgbe_phy_init(void) -{ - return phy_drivers_register(amd_xgbe_phy_driver, - ARRAY_SIZE(amd_xgbe_phy_driver)); -} - -static void __exit amd_xgbe_phy_exit(void) -{ - phy_drivers_unregister(amd_xgbe_phy_driver, - ARRAY_SIZE(amd_xgbe_phy_driver)); -} - -module_init(amd_xgbe_phy_init); -module_exit(amd_xgbe_phy_exit); +module_phy_driver(amd_xgbe_phy_driver); static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = { { XGBE_PHY_ID, XGBE_PHY_MASK }, diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c index a3fb5ceb6487..65a488f82eb8 100644 --- a/drivers/net/phy/amd.c +++ b/drivers/net/phy/amd.c @@ -61,7 +61,7 @@ static int am79c_config_intr(struct phy_device *phydev) return err; } -static struct phy_driver am79c_driver = { +static struct phy_driver am79c_driver[] = { { .phy_id = PHY_ID_AM79C874, .name = "AM79C874", .phy_id_mask = 0xfffffff0, @@ -73,20 +73,9 @@ static struct phy_driver am79c_driver = { .ack_interrupt = am79c_ack_interrupt, .config_intr = am79c_config_intr, .driver = { .owner = THIS_MODULE,}, -}; - -static int __init am79c_init(void) -{ - return phy_driver_register(&am79c_driver); -} - -static void __exit am79c_exit(void) -{ - phy_driver_unregister(&am79c_driver); -} +} }; -module_init(am79c_init); -module_exit(am79c_exit); +module_phy_driver(am79c_driver); static struct mdio_device_id __maybe_unused amd_tbl[] = { { PHY_ID_AM79C874, 0xfffffff0 }, diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index fdc1b418fa6a..f80e19ac6704 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -352,19 +352,7 @@ static struct phy_driver at803x_driver[] = { }, } }; -static int __init atheros_init(void) -{ - return phy_drivers_register(at803x_driver, - ARRAY_SIZE(at803x_driver)); -} - -static void __exit atheros_exit(void) -{ - phy_drivers_unregister(at803x_driver, ARRAY_SIZE(at803x_driver)); -} - -module_init(atheros_init); -module_exit(atheros_exit); +module_phy_driver(at803x_driver); static struct mdio_device_id __maybe_unused atheros_tbl[] = { { ATH8030_PHY_ID, 0xffffffef }, diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index ac55b0807853..830ec31f952f 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -100,20 +100,7 @@ static struct phy_driver bcm63xx_driver[] = { .driver = { .owner = THIS_MODULE }, } }; -static int __init bcm63xx_phy_init(void) -{ - return phy_drivers_register(bcm63xx_driver, - ARRAY_SIZE(bcm63xx_driver)); -} - -static void __exit bcm63xx_phy_exit(void) -{ - phy_drivers_unregister(bcm63xx_driver, - 
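/* The repetitive init/exit pairs deleted above and below all collapse
 * into module_phy_driver(), introduced alongside this series in
 * include/linux/phy.h.  By analogy with the module_driver() family it
 * presumably expands to roughly the following -- which is also why the
 * single-entry drivers (am79c, et1011c, qs6612, dp83865) are first
 * turned into one-element arrays, so ARRAY_SIZE() works:
 */
#define demo_module_phy_driver(__phy_drivers)                           \
static int __init phy_module_init(void)                                 \
{                                                                       \
        return phy_drivers_register(__phy_drivers,                      \
                                    ARRAY_SIZE(__phy_drivers));         \
}                                                                       \
module_init(phy_module_init);                                           \
static void __exit phy_module_exit(void)                                \
{                                                                       \
        phy_drivers_unregister(__phy_drivers,                           \
                               ARRAY_SIZE(__phy_drivers));              \
}                                                                       \
module_exit(phy_module_exit)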
ARRAY_SIZE(bcm63xx_driver)); -} - -module_init(bcm63xx_phy_init); -module_exit(bcm63xx_phy_exit); +module_phy_driver(bcm63xx_driver); static struct mdio_device_id __maybe_unused bcm63xx_tbl[] = { { 0x00406000, 0xfffffc00 }, diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 1d211d369039..974ec4515269 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -39,45 +39,15 @@ #define AFE_RXCONFIG_0 MISC_ADDR(0x38, 0) #define AFE_RXCONFIG_1 MISC_ADDR(0x38, 1) +#define AFE_RXCONFIG_2 MISC_ADDR(0x38, 2) #define AFE_RX_LP_COUNTER MISC_ADDR(0x38, 3) #define AFE_TX_CONFIG MISC_ADDR(0x39, 0) +#define AFE_VDCA_ICTRL_0 MISC_ADDR(0x39, 1) +#define AFE_VDAC_OTHERS_0 MISC_ADDR(0x39, 3) #define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0) #define CORE_EXPB0 0xb0 -static int bcm7445_config_init(struct phy_device *phydev) -{ - int ret; - const struct bcm7445_regs { - int reg; - u16 value; - } bcm7445_regs_cfg[] = { - /* increases ADC latency by 24ns */ - { MII_BCM54XX_EXP_SEL, 0x0038 }, - { MII_BCM54XX_EXP_DATA, 0xAB95 }, - /* increases internal 1V LDO voltage by 5% */ - { MII_BCM54XX_EXP_SEL, 0x2038 }, - { MII_BCM54XX_EXP_DATA, 0xBB22 }, - /* reduce RX low pass filter corner frequency */ - { MII_BCM54XX_EXP_SEL, 0x6038 }, - { MII_BCM54XX_EXP_DATA, 0xFFC5 }, - /* reduce RX high pass filter corner frequency */ - { MII_BCM54XX_EXP_SEL, 0x003a }, - { MII_BCM54XX_EXP_DATA, 0x2002 }, - }; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(bcm7445_regs_cfg); i++) { - ret = phy_write(phydev, - bcm7445_regs_cfg[i].reg, - bcm7445_regs_cfg[i].value); - if (ret) - return ret; - } - - return 0; -} - static void phy_write_exp(struct phy_device *phydev, u16 reg, u16 value) { @@ -102,7 +72,16 @@ static void phy_write_misc(struct phy_device *phydev, phy_write(phydev, MII_BCM54XX_EXP_DATA, value); } -static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev) +static void r_rc_cal_reset(struct phy_device *phydev) +{ + /* Reset R_CAL/RC_CAL Engine */ + phy_write_exp(phydev, 0x00b0, 0x0010); + + /* Disable Reset R_AL/RC_CAL Engine */ + phy_write_exp(phydev, 0x00b0, 0x0000); +} + +static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev) { /* Increase VCO range to prevent unlocking problem of PLL at low * temp @@ -123,11 +102,7 @@ static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev) /* Switch to CORE_BASE1E */ phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0xd); - /* Reset R_CAL/RC_CAL Engine */ - phy_write_exp(phydev, CORE_EXPB0, 0x0010); - - /* Disable Reset R_CAL/RC_CAL Engine */ - phy_write_exp(phydev, CORE_EXPB0, 0x0000); + r_rc_cal_reset(phydev); /* write AFE_RXCONFIG_0 */ phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19); @@ -147,6 +122,71 @@ static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev) return 0; } +static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev) +{ + /* AFE_RXCONFIG_0 */ + phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb15); + + /* AFE_RXCONFIG_1 */ + phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f); + + /* AFE_RXCONFIG_2, set rCal offset for HT=0 code and LT=-2 code */ + phy_write_misc(phydev, AFE_RXCONFIG_2, 0x2003); + + /* AFE_RX_LP_COUNTER, set RX bandwidth to maximum */ + phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0); + + /* AFE_TX_CONFIG, set 1000BT Cfeed=110 for all ports */ + phy_write_misc(phydev, AFE_TX_CONFIG, 0x0061); + + /* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */ + phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da); + + /* AFE_VDAC_OTHERS_0, set 1000BT Cidac=010 for all ports */ + 
phy_write_misc(phydev, AFE_VDAC_OTHERS_0, 0xa020); + + /* AFE_HPF_TRIM_OTHERS, set 100Tx/10BT to -4.5% swing and set rCal + * offset for HT=0 code + */ + phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3); + + /* CORE_BASE1E, force trim to overwrite and set I_ext trim to 0000 */ + phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0x0010); + + /* DSP_TAP10, adjust bias current trim (+0% swing, +0 tick) */ + phy_write_misc(phydev, DSP_TAP10, 0x011b); + + /* Reset R_CAL/RC_CAL engine */ + r_rc_cal_reset(phydev); + + return 0; +} + +static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev) +{ + /* AFE_RXCONFIG_1, provide more margin for INL/DNL measurement */ + phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f); + + /* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */ + phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da); + + /* AFE_HPF_TRIM_OTHERS, set 100Tx/10BT to -4.5% swing and set rCal + * offset for HT=0 code + */ + phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3); + + /* CORE_BASE1E, force trim to overwrite and set I_ext trim to 0000 */ + phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0x0010); + + /* DSP_TAP10, adjust bias current trim (+0% swing, +0 tick) */ + phy_write_misc(phydev, DSP_TAP10, 0x011b); + + /* Reset R_CAL/RC_CAL engine */ + r_rc_cal_reset(phydev); + + return 0; +} + static int bcm7xxx_apd_enable(struct phy_device *phydev) { int val; @@ -200,15 +240,23 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) u8 patch = PHY_BRCM_7XXX_PATCH(phydev->dev_flags); int ret = 0; - dev_info(&phydev->dev, "PHY revision: 0x%02x, patch: %d\n", rev, patch); + pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n", + dev_name(&phydev->dev), phydev->drv->name, rev, patch); switch (rev) { - case 0xa0: case 0xb0: - ret = bcm7445_config_init(phydev); + ret = bcm7xxx_28nm_b0_afe_config_init(phydev); + break; + case 0xd0: + ret = bcm7xxx_28nm_d0_afe_config_init(phydev); + break; + case 0xe0: + case 0xf0: + /* Rev G0 introduces a roll over */ + case 0x10: + ret = bcm7xxx_28nm_e0_plus_afe_config_init(phydev); break; default: - ret = bcm7xxx_28nm_afe_config_init(phydev); break; } @@ -336,7 +384,7 @@ static int bcm7xxx_dummy_config_init(struct phy_device *phydev) .features = PHY_GBIT_FEATURES | \ SUPPORTED_Pause | SUPPORTED_Asym_Pause, \ .flags = PHY_IS_INTERNAL, \ - .config_init = bcm7xxx_28nm_afe_config_init, \ + .config_init = bcm7xxx_28nm_config_init, \ .config_aneg = genphy_config_aneg, \ .read_status = genphy_read_status, \ .resume = bcm7xxx_28nm_resume, \ @@ -416,20 +464,7 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { { } }; -static int __init bcm7xxx_phy_init(void) -{ - return phy_drivers_register(bcm7xxx_driver, - ARRAY_SIZE(bcm7xxx_driver)); -} - -static void __exit bcm7xxx_phy_exit(void) -{ - phy_drivers_unregister(bcm7xxx_driver, - ARRAY_SIZE(bcm7xxx_driver)); -} - -module_init(bcm7xxx_phy_init); -module_exit(bcm7xxx_phy_exit); +module_phy_driver(bcm7xxx_driver); MODULE_DEVICE_TABLE(mdio, bcm7xxx_tbl); diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index 799789518e87..1eca20452f03 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c @@ -216,18 +216,6 @@ static struct phy_driver bcm87xx_driver[] = { .driver = { .owner = THIS_MODULE }, } }; -static int __init bcm87xx_init(void) -{ - return phy_drivers_register(bcm87xx_driver, - ARRAY_SIZE(bcm87xx_driver)); -} -module_init(bcm87xx_init); - -static void __exit bcm87xx_exit(void) -{ - phy_drivers_unregister(bcm87xx_driver, - ARRAY_SIZE(bcm87xx_driver)); 
-} -module_exit(bcm87xx_exit); +module_phy_driver(bcm87xx_driver); MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 854f2c9a7b2b..a52afb26421b 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -654,20 +654,7 @@ static struct phy_driver broadcom_drivers[] = { .driver = { .owner = THIS_MODULE }, } }; -static int __init broadcom_init(void) -{ - return phy_drivers_register(broadcom_drivers, - ARRAY_SIZE(broadcom_drivers)); -} - -static void __exit broadcom_exit(void) -{ - phy_drivers_unregister(broadcom_drivers, - ARRAY_SIZE(broadcom_drivers)); -} - -module_init(broadcom_init); -module_exit(broadcom_exit); +module_phy_driver(broadcom_drivers); static struct mdio_device_id __maybe_unused broadcom_tbl[] = { { PHY_ID_BCM5411, 0xfffffff0 }, diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c index b57ce0cc9657..27f5464899d4 100644 --- a/drivers/net/phy/cicada.c +++ b/drivers/net/phy/cicada.c @@ -129,20 +129,7 @@ static struct phy_driver cis820x_driver[] = { .driver = { .owner = THIS_MODULE,}, } }; -static int __init cicada_init(void) -{ - return phy_drivers_register(cis820x_driver, - ARRAY_SIZE(cis820x_driver)); -} - -static void __exit cicada_exit(void) -{ - phy_drivers_unregister(cis820x_driver, - ARRAY_SIZE(cis820x_driver)); -} - -module_init(cicada_init); -module_exit(cicada_exit); +module_phy_driver(cis820x_driver); static struct mdio_device_id __maybe_unused cicada_tbl[] = { { 0x000fc410, 0x000ffff0 }, diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c index d2c08f625a41..0d16c7d9e1bf 100644 --- a/drivers/net/phy/davicom.c +++ b/drivers/net/phy/davicom.c @@ -182,20 +182,7 @@ static struct phy_driver dm91xx_driver[] = { .driver = { .owner = THIS_MODULE,}, } }; -static int __init davicom_init(void) -{ - return phy_drivers_register(dm91xx_driver, - ARRAY_SIZE(dm91xx_driver)); -} - -static void __exit davicom_exit(void) -{ - phy_drivers_unregister(dm91xx_driver, - ARRAY_SIZE(dm91xx_driver)); -} - -module_init(davicom_init); -module_exit(davicom_exit); +module_phy_driver(dm91xx_driver); static struct mdio_device_id __maybe_unused davicom_tbl[] = { { 0x0181b880, 0x0ffffff0 }, diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 2954052706e8..e22e602beef3 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -791,7 +791,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts) switch (type & PTP_CLASS_PMASK) { case PTP_CLASS_IPV4: - offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; + offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; break; case PTP_CLASS_IPV6: offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; @@ -934,7 +934,7 @@ static int is_sync(struct sk_buff *skb, int type) switch (type & PTP_CLASS_PMASK) { case PTP_CLASS_IPV4: - offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; + offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; break; case PTP_CLASS_IPV6: offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c index a8eb19ec3183..a907743816a8 100644 --- a/drivers/net/phy/et1011c.c +++ b/drivers/net/phy/et1011c.c @@ -87,7 +87,7 @@ static int et1011c_read_status(struct phy_device *phydev) return ret; } -static struct phy_driver et1011c_driver = { +static struct phy_driver et1011c_driver[] = { { .phy_id = 0x0282f014, .name = "ET1011C", .phy_id_mask = 0xfffffff0, @@ -96,20 +96,9 @@ static struct phy_driver et1011c_driver = { .config_aneg = et1011c_config_aneg, .read_status = 
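/* The dp83640 hunks above fix PTP timestamp matching on VLAN-tagged
 * frames: IPV4_HLEN(p) dereferences the IP header's IHL field at a
 * fixed distance from p, so it must be handed the shifted base once a
 * tag sits between the Ethernet and IP headers.  Illustrative sketch
 * of the offset arithmetic (variable names as in match()/is_sync()):
 */
u8 *data = skb_mac_header(skb);
unsigned int offset = 0;

if (type & PTP_CLASS_VLAN)      /* tag shifts everything by 4 bytes */
        offset += VLAN_HLEN;
/* old code read IPV4_HLEN(data) -- i.e. the IHL of the VLAN tag bytes */
offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;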
et1011c_read_status, .driver = { .owner = THIS_MODULE,}, -}; - -static int __init et1011c_init(void) -{ - return phy_driver_register(&et1011c_driver); -} - -static void __exit et1011c_exit(void) -{ - phy_driver_unregister(&et1011c_driver); -} +} }; -module_init(et1011c_init); -module_exit(et1011c_exit); +module_phy_driver(et1011c_driver); static struct mdio_device_id __maybe_unused et1011c_tbl[] = { { 0x0282f014, 0xfffffff0 }, diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index 97bf58bf4939..8644f039d922 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -253,20 +253,7 @@ static struct phy_driver icplus_driver[] = { .driver = { .owner = THIS_MODULE,}, } }; -static int __init icplus_init(void) -{ - return phy_drivers_register(icplus_driver, - ARRAY_SIZE(icplus_driver)); -} - -static void __exit icplus_exit(void) -{ - phy_drivers_unregister(icplus_driver, - ARRAY_SIZE(icplus_driver)); -} - -module_init(icplus_init); -module_exit(icplus_exit); +module_phy_driver(icplus_driver); static struct mdio_device_id __maybe_unused icplus_tbl[] = { { 0x02430d80, 0x0ffffff0 }, diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index 9108f3191701..a3a5a703635b 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c @@ -312,20 +312,7 @@ static struct phy_driver lxt97x_driver[] = { .driver = { .owner = THIS_MODULE,}, } }; -static int __init lxt_init(void) -{ - return phy_drivers_register(lxt97x_driver, - ARRAY_SIZE(lxt97x_driver)); -} - -static void __exit lxt_exit(void) -{ - phy_drivers_unregister(lxt97x_driver, - ARRAY_SIZE(lxt97x_driver)); -} - -module_init(lxt_init); -module_exit(lxt_exit); +module_phy_driver(lxt97x_driver); static struct mdio_device_id __maybe_unused lxt_tbl[] = { { 0x78100000, 0xfffffff0 }, diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index bd37e45c89c0..1b1698f98818 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -50,9 +50,17 @@ #define MII_M1011_PHY_SCR 0x10 #define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 +#define MII_M1145_PHY_EXT_SR 0x1b #define MII_M1145_PHY_EXT_CR 0x14 #define MII_M1145_RGMII_RX_DELAY 0x0080 #define MII_M1145_RGMII_TX_DELAY 0x0002 +#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4 +#define MII_M1145_HWCFG_MODE_MASK 0xf +#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000 + +#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4 +#define MII_M1145_HWCFG_MODE_MASK 0xf +#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000 #define MII_M1111_PHY_LED_CONTROL 0x18 #define MII_M1111_PHY_LED_DIRECT 0x4100 @@ -118,6 +126,9 @@ #define MII_M1116R_CONTROL_REG_MAC 21 +#define MII_88E3016_PHY_SPEC_CTRL 0x10 +#define MII_88E3016_DISABLE_SCRAMBLER 0x0200 +#define MII_88E3016_AUTO_MDIX_CROSSOVER 0x0030 MODULE_DESCRIPTION("Marvell PHY driver"); MODULE_AUTHOR("Andy Fleming"); @@ -434,6 +445,25 @@ static int m88e1116r_config_init(struct phy_device *phydev) return 0; } +static int m88e3016_config_init(struct phy_device *phydev) +{ + int reg; + + /* Enable Scrambler and Auto-Crossover */ + reg = phy_read(phydev, MII_88E3016_PHY_SPEC_CTRL); + if (reg < 0) + return reg; + + reg &= ~MII_88E3016_DISABLE_SCRAMBLER; + reg |= MII_88E3016_AUTO_MDIX_CROSSOVER; + + reg = phy_write(phydev, MII_88E3016_PHY_SPEC_CTRL, reg); + if (reg < 0) + return reg; + + return 0; +} + static int m88e1111_config_init(struct phy_device *phydev) { int err; @@ -620,6 +650,7 @@ static int m88e1149_config_init(struct phy_device *phydev) static int m88e1145_config_init(struct phy_device *phydev) { int err; + int temp; /* Take care of errata 
E0 & E1 */ err = phy_write(phydev, 0x1d, 0x001b); @@ -676,6 +707,20 @@ static int m88e1145_config_init(struct phy_device *phydev) } } + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + temp = phy_read(phydev, MII_M1145_PHY_EXT_SR); + if (temp < 0) + return temp; + + temp &= ~MII_M1145_HWCFG_MODE_MASK; + temp |= MII_M1145_HWCFG_MODE_SGMII_NO_CLK; + temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO; + + err = phy_write(phydev, MII_M1145_PHY_EXT_SR, temp); + if (err < 0) + return err; + } + err = marvell_of_reg_init(phydev); if (err < 0) return err; @@ -770,6 +815,12 @@ static int marvell_read_status(struct phy_device *phydev) return 0; } +static int marvell_aneg_done(struct phy_device *phydev) +{ + int retval = phy_read(phydev, MII_M1011_PHY_STATUS); + return (retval < 0) ? retval : (retval & MII_M1011_PHY_STATUS_RESOLVED); +} + static int m88e1121_did_interrupt(struct phy_device *phydev) { int imask; @@ -1050,22 +1101,26 @@ static struct phy_driver marvell_drivers[] = { .suspend = &genphy_suspend, .driver = { .owner = THIS_MODULE }, }, + { + .phy_id = MARVELL_PHY_ID_88E3016, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E3016", + .features = PHY_BASIC_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_aneg = &genphy_config_aneg, + .config_init = &m88e3016_config_init, + .aneg_done = &marvell_aneg_done, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, + .did_interrupt = &m88e1121_did_interrupt, + .resume = &genphy_resume, + .suspend = &genphy_suspend, + .driver = { .owner = THIS_MODULE }, + }, }; -static int __init marvell_init(void) -{ - return phy_drivers_register(marvell_drivers, - ARRAY_SIZE(marvell_drivers)); -} - -static void __exit marvell_exit(void) -{ - phy_drivers_unregister(marvell_drivers, - ARRAY_SIZE(marvell_drivers)); -} - -module_init(marvell_init); -module_exit(marvell_exit); +module_phy_driver(marvell_drivers); static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK }, @@ -1079,6 +1134,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, { } }; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 8c2a29a9bd7f..c530de1e63f5 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -6,6 +6,7 @@ * Author: David J. Choi * * Copyright (c) 2010-2013 Micrel, Inc. + * Copyright (c) 2014 Johan Hovold <johan@kernel.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -30,30 +31,32 @@ /* Operation Mode Strap Override */ #define MII_KSZPHY_OMSO 0x16 -#define KSZPHY_OMSO_B_CAST_OFF (1 << 9) -#define KSZPHY_OMSO_RMII_OVERRIDE (1 << 1) -#define KSZPHY_OMSO_MII_OVERRIDE (1 << 0) +#define KSZPHY_OMSO_B_CAST_OFF BIT(9) +#define KSZPHY_OMSO_RMII_OVERRIDE BIT(1) +#define KSZPHY_OMSO_MII_OVERRIDE BIT(0) /* general Interrupt control/status reg in vendor specific block. 
*/ #define MII_KSZPHY_INTCS 0x1B -#define KSZPHY_INTCS_JABBER (1 << 15) -#define KSZPHY_INTCS_RECEIVE_ERR (1 << 14) -#define KSZPHY_INTCS_PAGE_RECEIVE (1 << 13) -#define KSZPHY_INTCS_PARELLEL (1 << 12) -#define KSZPHY_INTCS_LINK_PARTNER_ACK (1 << 11) -#define KSZPHY_INTCS_LINK_DOWN (1 << 10) -#define KSZPHY_INTCS_REMOTE_FAULT (1 << 9) -#define KSZPHY_INTCS_LINK_UP (1 << 8) +#define KSZPHY_INTCS_JABBER BIT(15) +#define KSZPHY_INTCS_RECEIVE_ERR BIT(14) +#define KSZPHY_INTCS_PAGE_RECEIVE BIT(13) +#define KSZPHY_INTCS_PARELLEL BIT(12) +#define KSZPHY_INTCS_LINK_PARTNER_ACK BIT(11) +#define KSZPHY_INTCS_LINK_DOWN BIT(10) +#define KSZPHY_INTCS_REMOTE_FAULT BIT(9) +#define KSZPHY_INTCS_LINK_UP BIT(8) #define KSZPHY_INTCS_ALL (KSZPHY_INTCS_LINK_UP |\ KSZPHY_INTCS_LINK_DOWN) -/* general PHY control reg in vendor specific block. */ -#define MII_KSZPHY_CTRL 0x1F +/* PHY Control 1 */ +#define MII_KSZPHY_CTRL_1 0x1e + +/* PHY Control 2 / PHY Control (if no PHY Control 1) */ +#define MII_KSZPHY_CTRL_2 0x1f +#define MII_KSZPHY_CTRL MII_KSZPHY_CTRL_2 /* bitmap of PHY register to set interrupt mode */ -#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9) -#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14) -#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14) -#define KSZ8051_RMII_50MHZ_CLK (1 << 7) +#define KSZPHY_CTRL_INT_ACTIVE_HIGH BIT(9) +#define KSZPHY_RMII_REF_CLK_SEL BIT(7) /* Write/read to/from extended registers */ #define MII_KSZPHY_EXTREG 0x0b @@ -69,20 +72,46 @@ #define PS_TO_REG 200 -static int ksz_config_flags(struct phy_device *phydev) -{ - int regval; +struct kszphy_type { + u32 led_mode_reg; + u16 interrupt_level_mask; + bool has_broadcast_disable; + bool has_rmii_ref_clk_sel; +}; - if (phydev->dev_flags & (MICREL_PHY_50MHZ_CLK | MICREL_PHY_25MHZ_CLK)) { - regval = phy_read(phydev, MII_KSZPHY_CTRL); - if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) - regval |= KSZ8051_RMII_50MHZ_CLK; - else - regval &= ~KSZ8051_RMII_50MHZ_CLK; - return phy_write(phydev, MII_KSZPHY_CTRL, regval); - } - return 0; -} +struct kszphy_priv { + const struct kszphy_type *type; + int led_mode; + bool rmii_ref_clk_sel; + bool rmii_ref_clk_sel_val; +}; + +static const struct kszphy_type ksz8021_type = { + .led_mode_reg = MII_KSZPHY_CTRL_2, + .has_rmii_ref_clk_sel = true, +}; + +static const struct kszphy_type ksz8041_type = { + .led_mode_reg = MII_KSZPHY_CTRL_1, +}; + +static const struct kszphy_type ksz8051_type = { + .led_mode_reg = MII_KSZPHY_CTRL_2, +}; + +static const struct kszphy_type ksz8081_type = { + .led_mode_reg = MII_KSZPHY_CTRL_2, + .has_broadcast_disable = true, + .has_rmii_ref_clk_sel = true, +}; + +static const struct kszphy_type ks8737_type = { + .interrupt_level_mask = BIT(14), +}; + +static const struct kszphy_type ksz9021_type = { + .interrupt_level_mask = BIT(14), +}; static int kszphy_extended_write(struct phy_device *phydev, u32 regnum, u16 val) @@ -108,112 +137,137 @@ static int kszphy_ack_interrupt(struct phy_device *phydev) return (rc < 0) ? rc : 0; } -static int kszphy_set_interrupt(struct phy_device *phydev) +static int kszphy_config_intr(struct phy_device *phydev) { + const struct kszphy_type *type = phydev->drv->driver_data; int temp; - temp = (PHY_INTERRUPT_ENABLED == phydev->interrupts) ? 
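/* The micrel rework above replaces three near-identical config_intr
 * callbacks (ks8737/ksz9021/kszphy) with one shared implementation
 * that looks up per-chip quirks through .driver_data.  Minimal sketch
 * of the idiom -- a const description table paired with its driver
 * entry, retrieved via phydev->drv (demo_* names are illustrative):
 */
static const struct kszphy_type demo_type = {
        .led_mode_reg         = MII_KSZPHY_CTRL_2,
        .has_rmii_ref_clk_sel = true,
};

/* in the matching phy_driver entry: .driver_data = &demo_type */
static int demo_config_intr(struct phy_device *phydev)
{
        const struct kszphy_type *type = phydev->drv->driver_data;
        u16 mask = (type && type->interrupt_level_mask) ?
                   type->interrupt_level_mask : KSZPHY_CTRL_INT_ACTIVE_HIGH;

        /* ... clear `mask` in MII_KSZPHY_CTRL, program MII_KSZPHY_INTCS ... */
        return 0;
}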
- KSZPHY_INTCS_ALL : 0; - return phy_write(phydev, MII_KSZPHY_INTCS, temp); -} + u16 mask; -static int kszphy_config_intr(struct phy_device *phydev) -{ - int temp, rc; + if (type && type->interrupt_level_mask) + mask = type->interrupt_level_mask; + else + mask = KSZPHY_CTRL_INT_ACTIVE_HIGH; /* set the interrupt pin active low */ temp = phy_read(phydev, MII_KSZPHY_CTRL); - temp &= ~KSZPHY_CTRL_INT_ACTIVE_HIGH; + if (temp < 0) + return temp; + temp &= ~mask; phy_write(phydev, MII_KSZPHY_CTRL, temp); - rc = kszphy_set_interrupt(phydev); - return rc < 0 ? rc : 0; -} -static int ksz9021_config_intr(struct phy_device *phydev) -{ - int temp, rc; + /* enable / disable interrupts */ + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + temp = KSZPHY_INTCS_ALL; + else + temp = 0; - /* set the interrupt pin active low */ - temp = phy_read(phydev, MII_KSZPHY_CTRL); - temp &= ~KSZ9021_CTRL_INT_ACTIVE_HIGH; - phy_write(phydev, MII_KSZPHY_CTRL, temp); - rc = kszphy_set_interrupt(phydev); - return rc < 0 ? rc : 0; + return phy_write(phydev, MII_KSZPHY_INTCS, temp); } -static int ks8737_config_intr(struct phy_device *phydev) +static int kszphy_rmii_clk_sel(struct phy_device *phydev, bool val) { - int temp, rc; + int ctrl; - /* set the interrupt pin active low */ - temp = phy_read(phydev, MII_KSZPHY_CTRL); - temp &= ~KS8737_CTRL_INT_ACTIVE_HIGH; - phy_write(phydev, MII_KSZPHY_CTRL, temp); - rc = kszphy_set_interrupt(phydev); - return rc < 0 ? rc : 0; -} - -static int kszphy_setup_led(struct phy_device *phydev, - unsigned int reg, unsigned int shift) -{ + ctrl = phy_read(phydev, MII_KSZPHY_CTRL); + if (ctrl < 0) + return ctrl; - struct device *dev = &phydev->dev; - struct device_node *of_node = dev->of_node; - int rc, temp; - u32 val; + if (val) + ctrl |= KSZPHY_RMII_REF_CLK_SEL; + else + ctrl &= ~KSZPHY_RMII_REF_CLK_SEL; - if (!of_node && dev->parent->of_node) - of_node = dev->parent->of_node; + return phy_write(phydev, MII_KSZPHY_CTRL, ctrl); +} - if (of_property_read_u32(of_node, "micrel,led-mode", &val)) - return 0; +static int kszphy_setup_led(struct phy_device *phydev, u32 reg, int val) +{ + int rc, temp, shift; + + switch (reg) { + case MII_KSZPHY_CTRL_1: + shift = 14; + break; + case MII_KSZPHY_CTRL_2: + shift = 4; + break; + default: + return -EINVAL; + } temp = phy_read(phydev, reg); - if (temp < 0) - return temp; + if (temp < 0) { + rc = temp; + goto out; + } temp &= ~(3 << shift); temp |= val << shift; rc = phy_write(phydev, reg, temp); +out: + if (rc < 0) + dev_err(&phydev->dev, "failed to set led mode\n"); - return rc < 0 ? rc : 0; + return rc; } -static int kszphy_config_init(struct phy_device *phydev) +/* Disable PHY address 0 as the broadcast address, so that it can be used as a + * unique (non-broadcast) address on a shared bus. 
+ */ +static int kszphy_broadcast_disable(struct phy_device *phydev) { - return 0; -} + int ret; -static int kszphy_config_init_led8041(struct phy_device *phydev) -{ - /* single led control, register 0x1e bits 15..14 */ - return kszphy_setup_led(phydev, 0x1e, 14); + ret = phy_read(phydev, MII_KSZPHY_OMSO); + if (ret < 0) + goto out; + + ret = phy_write(phydev, MII_KSZPHY_OMSO, ret | KSZPHY_OMSO_B_CAST_OFF); +out: + if (ret) + dev_err(&phydev->dev, "failed to disable broadcast address\n"); + + return ret; } -static int ksz8021_config_init(struct phy_device *phydev) +static int kszphy_config_init(struct phy_device *phydev) { - const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE; - int rc; + struct kszphy_priv *priv = phydev->priv; + const struct kszphy_type *type; + int ret; - rc = kszphy_setup_led(phydev, 0x1f, 4); - if (rc) - dev_err(&phydev->dev, "failed to set led mode\n"); + if (!priv) + return 0; - rc = ksz_config_flags(phydev); - if (rc < 0) - return rc; - rc = phy_write(phydev, MII_KSZPHY_OMSO, val); - return rc < 0 ? rc : 0; + type = priv->type; + + if (type->has_broadcast_disable) + kszphy_broadcast_disable(phydev); + + if (priv->rmii_ref_clk_sel) { + ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val); + if (ret) { + dev_err(&phydev->dev, "failed to set rmii reference clock\n"); + return ret; + } + } + + if (priv->led_mode >= 0) + kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); + + return 0; } -static int ks8051_config_init(struct phy_device *phydev) +static int ksz8021_config_init(struct phy_device *phydev) { int rc; - rc = kszphy_setup_led(phydev, 0x1f, 4); + rc = kszphy_config_init(phydev); if (rc) - dev_err(&phydev->dev, "failed to set led mode\n"); + return rc; + + rc = kszphy_broadcast_disable(phydev); - rc = ksz_config_flags(phydev); return rc < 0 ? 
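/* kszphy_rmii_clk_sel() and kszphy_broadcast_disable() above are both
 * instances of the same read-modify-write idiom on a PHY register,
 * with MDIO errors propagated from the read.  Generic sketch
 * (demo_phy_rmw is a hypothetical helper, not an API in this tree):
 */
static int demo_phy_rmw(struct phy_device *phydev, u32 reg,
                        u16 clear, u16 set)
{
        int val = phy_read(phydev, reg);

        if (val < 0)
                return val;     /* MDIO read failed */
        return phy_write(phydev, reg, (val & ~clear) | set);
}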
rc : 0; } @@ -394,8 +448,8 @@ static int ksz9031_config_init(struct phy_device *phydev) } #define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 -#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6) -#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4) +#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX BIT(6) +#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED BIT(4) static int ksz8873mll_read_status(struct phy_device *phydev) { int regval; @@ -446,24 +500,62 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum, { } -static int ksz8021_probe(struct phy_device *phydev) +static int kszphy_probe(struct phy_device *phydev) { + const struct kszphy_type *type = phydev->drv->driver_data; + struct device_node *np = phydev->dev.of_node; + struct kszphy_priv *priv; struct clk *clk; + int ret; + + priv = devm_kzalloc(&phydev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + priv->type = type; + + if (type->led_mode_reg) { + ret = of_property_read_u32(np, "micrel,led-mode", + &priv->led_mode); + if (ret) + priv->led_mode = -1; + + if (priv->led_mode > 3) { + dev_err(&phydev->dev, "invalid led mode: 0x%02x\n", + priv->led_mode); + priv->led_mode = -1; + } + } else { + priv->led_mode = -1; + } clk = devm_clk_get(&phydev->dev, "rmii-ref"); if (!IS_ERR(clk)) { unsigned long rate = clk_get_rate(clk); + bool rmii_ref_clk_sel_25_mhz; + + priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel; + rmii_ref_clk_sel_25_mhz = of_property_read_bool(np, + "micrel,rmii-reference-clock-select-25-mhz"); if (rate > 24500000 && rate < 25500000) { - phydev->dev_flags |= MICREL_PHY_25MHZ_CLK; + priv->rmii_ref_clk_sel_val = rmii_ref_clk_sel_25_mhz; } else if (rate > 49500000 && rate < 50500000) { - phydev->dev_flags |= MICREL_PHY_50MHZ_CLK; + priv->rmii_ref_clk_sel_val = !rmii_ref_clk_sel_25_mhz; } else { dev_err(&phydev->dev, "Clock rate out of range: %ld\n", rate); return -EINVAL; } } + /* Support legacy board-file configuration */ + if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) { + priv->rmii_ref_clk_sel = true; + priv->rmii_ref_clk_sel_val = true; + } + return 0; } @@ -474,11 +566,12 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KS8737", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .driver_data = &ks8737_type, .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, - .config_intr = ks8737_config_intr, + .config_intr = kszphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, .driver = { .owner = THIS_MODULE,}, @@ -489,7 +582,8 @@ static struct phy_driver ksphy_driver[] = { .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, - .probe = ksz8021_probe, + .driver_data = &ksz8021_type, + .probe = kszphy_probe, .config_init = ksz8021_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, @@ -505,7 +599,8 @@ static struct phy_driver ksphy_driver[] = { .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, - .probe = ksz8021_probe, + .driver_data = &ksz8021_type, + .probe = kszphy_probe, .config_init = ksz8021_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, @@ -521,7 +616,9 @@ static struct phy_driver ksphy_driver[] = { .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_MAGICANEG | 
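/* kszphy_probe() above infers the RMII reference-clock configuration
 * from the optional "rmii-ref" clock, accepting ~25 MHz or ~50 MHz
 * inside a +/-0.5 MHz window and refusing to guess otherwise; the
 * MICREL_PHY_50MHZ_CLK dev_flag is kept as a legacy board-file
 * override.  Condensed sketch of the rate check:
 */
struct clk *clk = devm_clk_get(&phydev->dev, "rmii-ref");

if (!IS_ERR(clk)) {
        unsigned long rate = clk_get_rate(clk);

        if (rate > 24500000 && rate < 25500000)
                ;       /* 25 MHz reference */
        else if (rate > 49500000 && rate < 50500000)
                ;       /* 50 MHz reference */
        else
                return -EINVAL; /* out of range: refuse to guess */
}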
PHY_HAS_INTERRUPT, - .config_init = kszphy_config_init_led8041, + .driver_data = &ksz8041_type, + .probe = kszphy_probe, + .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, @@ -536,7 +633,9 @@ static struct phy_driver ksphy_driver[] = { .features = PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, - .config_init = kszphy_config_init_led8041, + .driver_data = &ksz8041_type, + .probe = kszphy_probe, + .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, @@ -551,7 +650,9 @@ static struct phy_driver ksphy_driver[] = { .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, - .config_init = ks8051_config_init, + .driver_data = &ksz8051_type, + .probe = kszphy_probe, + .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, @@ -565,7 +666,9 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = 0x00ffffff, .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, - .config_init = kszphy_config_init_led8041, + .driver_data = &ksz8041_type, + .probe = kszphy_probe, + .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, @@ -579,6 +682,8 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = 0x00fffff0, .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .driver_data = &ksz8081_type, + .probe = kszphy_probe, .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, @@ -607,11 +712,12 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ9021 Gigabit PHY", .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .driver_data = &ksz9021_type, .config_init = ksz9021_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, - .config_intr = ksz9021_config_intr, + .config_intr = kszphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, .read_mmd_indirect = ksz9021_rd_mmd_phyreg, @@ -623,11 +729,12 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ9031 Gigabit PHY", .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .driver_data = &ksz9021_type, .config_init = ksz9031_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, - .config_intr = ksz9021_config_intr, + .config_intr = kszphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, .driver = { .owner = THIS_MODULE, }, @@ -657,20 +764,7 @@ static struct phy_driver ksphy_driver[] = { .driver = { .owner = THIS_MODULE, }, } }; -static int __init ksphy_init(void) -{ - return phy_drivers_register(ksphy_driver, - ARRAY_SIZE(ksphy_driver)); -} - -static void __exit ksphy_exit(void) -{ - phy_drivers_unregister(ksphy_driver, - ARRAY_SIZE(ksphy_driver)); -} - -module_init(ksphy_init); -module_exit(ksphy_exit); +module_phy_driver(ksphy_driver); MODULE_DESCRIPTION("Micrel PHY driver"); MODULE_AUTHOR("David J. 
Choi"); diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 9a5f234d95b0..0a7b9c7f09a2 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -129,7 +129,7 @@ static int ns_config_init(struct phy_device *phydev) return ns_ack_interrupt(phydev); } -static struct phy_driver dp83865_driver = { +static struct phy_driver dp83865_driver[] = { { .phy_id = DP83865_PHY_ID, .phy_id_mask = 0xfffffff0, .name = "NatSemi DP83865", @@ -141,25 +141,14 @@ static struct phy_driver dp83865_driver = { .ack_interrupt = ns_ack_interrupt, .config_intr = ns_config_intr, .driver = {.owner = THIS_MODULE,} -}; +} }; -static int __init ns_init(void) -{ - return phy_driver_register(&dp83865_driver); -} - -static void __exit ns_exit(void) -{ - phy_driver_unregister(&dp83865_driver); -} +module_phy_driver(dp83865_driver); MODULE_DESCRIPTION("NatSemi PHY driver"); MODULE_AUTHOR("Stuart Menefy"); MODULE_LICENSE("GPL"); -module_init(ns_init); -module_exit(ns_exit); - static struct mdio_device_id __maybe_unused ns_tbl[] = { { DP83865_PHY_ID, 0xfffffff0 }, { } diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1dfffdc9dfc3..767cd110f496 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -352,6 +352,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) { struct mii_ioctl_data *mii_data = if_mii(ifr); u16 val = mii_data->val_in; + bool change_autoneg = false; switch (cmd) { case SIOCGMIIPHY: @@ -367,22 +368,29 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) if (mii_data->phy_id == phydev->addr) { switch (mii_data->reg_num) { case MII_BMCR: - if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) + if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) { + if (phydev->autoneg == AUTONEG_ENABLE) + change_autoneg = true; phydev->autoneg = AUTONEG_DISABLE; - else + if (val & BMCR_FULLDPLX) + phydev->duplex = DUPLEX_FULL; + else + phydev->duplex = DUPLEX_HALF; + if (val & BMCR_SPEED1000) + phydev->speed = SPEED_1000; + else if (val & BMCR_SPEED100) + phydev->speed = SPEED_100; + else phydev->speed = SPEED_10; + } + else { + if (phydev->autoneg == AUTONEG_DISABLE) + change_autoneg = true; phydev->autoneg = AUTONEG_ENABLE; - if (!phydev->autoneg && (val & BMCR_FULLDPLX)) - phydev->duplex = DUPLEX_FULL; - else - phydev->duplex = DUPLEX_HALF; - if (!phydev->autoneg && (val & BMCR_SPEED1000)) - phydev->speed = SPEED_1000; - else if (!phydev->autoneg && - (val & BMCR_SPEED100)) - phydev->speed = SPEED_100; + } break; case MII_ADVERTISE: - phydev->advertising = val; + phydev->advertising = mii_adv_to_ethtool_adv_t(val); + change_autoneg = true; break; default: /* do nothing */ @@ -396,6 +404,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) if (mii_data->reg_num == MII_BMCR && val & BMCR_RESET) return phy_init_hw(phydev); + + if (change_autoneg) + return phy_start_aneg(phydev); + return 0; case SIOCSHWTSTAMP: diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c index fe0d0a15d5e1..be4c6f7c3645 100644 --- a/drivers/net/phy/qsemi.c +++ b/drivers/net/phy/qsemi.c @@ -111,7 +111,7 @@ static int qs6612_config_intr(struct phy_device *phydev) } -static struct phy_driver qs6612_driver = { +static struct phy_driver qs6612_driver[] = { { .phy_id = 0x00181440, .name = "QS6612", .phy_id_mask = 0xfffffff0, @@ -123,20 +123,9 @@ static struct phy_driver qs6612_driver = { .ack_interrupt = qs6612_ack_interrupt, .config_intr = qs6612_config_intr, .driver = { .owner = THIS_MODULE,}, -}; - -static int __init 
qs6612_init(void) -{ - return phy_driver_register(&qs6612_driver); -} - -static void __exit qs6612_exit(void) -{ - phy_driver_unregister(&qs6612_driver); -} +} }; -module_init(qs6612_init); -module_exit(qs6612_exit); +module_phy_driver(qs6612_driver); static struct mdio_device_id __maybe_unused qs6612_tbl[] = { { 0x00181440, 0xfffffff0 }, diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 45483fdfbe06..96a0f0fab3ca 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -101,18 +101,7 @@ static struct phy_driver realtek_drvs[] = { }, }; -static int __init realtek_init(void) -{ - return phy_drivers_register(realtek_drvs, ARRAY_SIZE(realtek_drvs)); -} - -static void __exit realtek_exit(void) -{ - phy_drivers_unregister(realtek_drvs, ARRAY_SIZE(realtek_drvs)); -} - -module_init(realtek_init); -module_exit(realtek_exit); +module_phy_driver(realtek_drvs); static struct mdio_device_id __maybe_unused realtek_tbl[] = { { 0x001cc912, 0x001fffff }, diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index a4b08198fb9f..c0f6479e19d4 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -250,24 +250,12 @@ static struct phy_driver smsc_phy_driver[] = { .driver = { .owner = THIS_MODULE, } } }; -static int __init smsc_init(void) -{ - return phy_drivers_register(smsc_phy_driver, - ARRAY_SIZE(smsc_phy_driver)); -} - -static void __exit smsc_exit(void) -{ - phy_drivers_unregister(smsc_phy_driver, ARRAY_SIZE(smsc_phy_driver)); -} +module_phy_driver(smsc_phy_driver); MODULE_DESCRIPTION("SMSC PHY driver"); MODULE_AUTHOR("Herbert Valerio Riedel"); MODULE_LICENSE("GPL"); -module_init(smsc_init); -module_exit(smsc_exit); - static struct mdio_device_id __maybe_unused smsc_tbl[] = { { 0x0007c0a0, 0xfffffff0 }, { 0x0007c0b0, 0xfffffff0 }, diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index eab57fc5b967..46530159256b 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -353,7 +353,9 @@ static int ks8995_probe(struct spi_device *spi) static int ks8995_remove(struct spi_device *spi) { - sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr); + struct ks8995_switch *ks = spi_get_drvdata(spi); + + sysfs_remove_bin_file(&spi->dev.kobj, &ks->regs_attr); return 0; } diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c index 5e1eb138916f..3fc199b773e6 100644 --- a/drivers/net/phy/ste10Xp.c +++ b/drivers/net/phy/ste10Xp.c @@ -112,20 +112,7 @@ static struct phy_driver ste10xp_pdriver[] = { .driver = {.owner = THIS_MODULE,} } }; -static int __init ste10Xp_init(void) -{ - return phy_drivers_register(ste10xp_pdriver, - ARRAY_SIZE(ste10xp_pdriver)); -} - -static void __exit ste10Xp_exit(void) -{ - phy_drivers_unregister(ste10xp_pdriver, - ARRAY_SIZE(ste10xp_pdriver)); -} - -module_init(ste10Xp_init); -module_exit(ste10Xp_exit); +module_phy_driver(ste10xp_pdriver); static struct mdio_device_id __maybe_unused ste10Xp_tbl[] = { { STE101P_PHY_ID, 0xfffffff0 }, diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 5dc0935da99c..76cad712ddb2 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -311,19 +311,7 @@ static struct phy_driver vsc82xx_driver[] = { .driver = { .owner = THIS_MODULE,}, } }; -static int __init vsc82xx_init(void) -{ - return phy_drivers_register(vsc82xx_driver, - ARRAY_SIZE(vsc82xx_driver)); -} - -static void __exit vsc82xx_exit(void) -{ - phy_drivers_unregister(vsc82xx_driver, ARRAY_SIZE(vsc82xx_driver)); -} - -module_init(vsc82xx_init); 
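/*
 * Editorial sketch, not part of the patch: every PHY-driver hunk in this
 * stretch of the series deletes the same module_init()/module_exit()
 * boilerplate in favour of module_phy_driver().  The following is a sketch
 * of what that helper expands to, mirroring the macro added to
 * <linux/phy.h> alongside these conversions (exact formatting in the
 * header may differ):
 */
#define phy_module_driver(__phy_drivers, __count)			\
static int __init phy_module_init(void)				\
{									\
	return phy_drivers_register(__phy_drivers, __count);		\
}									\
module_init(phy_module_init);						\
static void __exit phy_module_exit(void)				\
{									\
	phy_drivers_unregister(__phy_drivers, __count);			\
}									\
module_exit(phy_module_exit)

#define module_phy_driver(__phy_drivers)				\
	phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))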
-module_exit(vsc82xx_exit); +module_phy_driver(vsc82xx_driver); static struct mdio_device_id __maybe_unused vitesse_tbl[] = { { PHY_ID_VSC8234, 0x000ffff0 }, diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 68c3a3f4e0ab..794a47329368 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -755,23 +755,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) err = get_filter(argp, &code); if (err >= 0) { + struct bpf_prog *pass_filter = NULL; struct sock_fprog_kern fprog = { .len = err, .filter = code, }; - ppp_lock(ppp); - if (ppp->pass_filter) { - bpf_prog_destroy(ppp->pass_filter); - ppp->pass_filter = NULL; + err = 0; + if (fprog.filter) + err = bpf_prog_create(&pass_filter, &fprog); + if (!err) { + ppp_lock(ppp); + if (ppp->pass_filter) + bpf_prog_destroy(ppp->pass_filter); + ppp->pass_filter = pass_filter; + ppp_unlock(ppp); } - if (fprog.filter != NULL) - err = bpf_prog_create(&ppp->pass_filter, - &fprog); - else - err = 0; kfree(code); - ppp_unlock(ppp); } break; } @@ -781,23 +781,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) err = get_filter(argp, &code); if (err >= 0) { + struct bpf_prog *active_filter = NULL; struct sock_fprog_kern fprog = { .len = err, .filter = code, }; - ppp_lock(ppp); - if (ppp->active_filter) { - bpf_prog_destroy(ppp->active_filter); - ppp->active_filter = NULL; + err = 0; + if (fprog.filter) + err = bpf_prog_create(&active_filter, &fprog); + if (!err) { + ppp_lock(ppp); + if (ppp->active_filter) + bpf_prog_destroy(ppp->active_filter); + ppp->active_filter = active_filter; + ppp_unlock(ppp); } - if (fprog.filter != NULL) - err = bpf_prog_create(&ppp->active_filter, - &fprog); - else - err = 0; kfree(code); - ppp_unlock(ppp); } break; } diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 6c9c16d76935..d2408a5e43a6 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -869,7 +869,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr)); start = (char *)&ph->tag[0]; - error = memcpy_fromiovec(start, m->msg_iov, total_len); + error = memcpy_from_msg(start, m, total_len); if (error < 0) { kfree_skb(skb); goto end; @@ -981,7 +981,7 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, if (skb) { total_len = min_t(size_t, total_len, skb->len); - error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); + error = skb_copy_datagram_msg(skb, 0, m, total_len); if (error == 0) { consume_skb(skb); return total_len; diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 1aff970be33e..1dc628ffce2b 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr, int len = sizeof(struct sockaddr_pppox); struct sockaddr_pppox sp; - sp.sa_family = AF_PPPOX; + memset(&sp.sa_addr, 0, sizeof(sp.sa_addr)); + + sp.sa_family = AF_PPPOX; sp.sa_protocol = PX_PROTO_PPTP; sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 2368395d8ae5..93e224217e24 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1179,6 +1179,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_enable_netpoll; } + if (!(dev->features & NETIF_F_LRO)) + dev_disable_lro(port_dev); + err = netdev_rx_handler_register(port_dev, 
team_handle_frame, port); if (err) { diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 186ce541c657..9c58286b8a42 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -65,11 +65,13 @@ #include <linux/nsproxy.h> #include <linux/virtio_net.h> #include <linux/rcupdate.h> +#include <net/ipv6.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> #include <net/sock.h> #include <linux/seq_file.h> +#include <linux/uio.h> #include <asm/uaccess.h> @@ -174,7 +176,7 @@ struct tun_struct { struct net_device *dev; netdev_features_t set_features; #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ - NETIF_F_TSO6|NETIF_F_UFO) + NETIF_F_TSO6) int vnet_hdr_sz; int sndbuf; @@ -817,7 +819,7 @@ drop: skb_tx_error(skb); kfree_skb(skb); rcu_read_unlock(); - return NETDEV_TX_OK; + return NET_XMIT_DROP; } static void tun_net_mclist(struct net_device *dev) @@ -1010,28 +1012,29 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, - void *msg_control, const struct iovec *iv, - size_t total_len, size_t count, int noblock) + void *msg_control, struct iov_iter *from, + int noblock) { struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; + size_t total_len = iov_iter_count(from); size_t len = total_len, align = NET_SKB_PAD, linear; struct virtio_net_hdr gso = { 0 }; int good_linear; - int offset = 0; int copylen; bool zerocopy = false; int err; u32 rxhash; + ssize_t n; if (!(tun->flags & TUN_NO_PI)) { if (len < sizeof(pi)) return -EINVAL; len -= sizeof(pi); - if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) + n = copy_from_iter(&pi, sizeof(pi), from); + if (n != sizeof(pi)) return -EFAULT; - offset += sizeof(pi); } if (tun->flags & TUN_VNET_HDR) { @@ -1039,7 +1042,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, return -EINVAL; len -= tun->vnet_hdr_sz; - if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) + n = copy_from_iter(&gso, sizeof(gso), from); + if (n != sizeof(gso)) return -EFAULT; if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && @@ -1048,7 +1052,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (gso.hdr_len > len) return -EINVAL; - offset += tun->vnet_hdr_sz; + iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso)); } if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { @@ -1061,6 +1065,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, good_linear = SKB_MAX_HEAD(align); if (msg_control) { + struct iov_iter i = *from; + /* There are 256 bytes to be copied in skb, so there is * enough room for skb expand head in case it is used. * The rest of the buffer is mapped from userspace. 
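/*
 * Editorial sketch, not part of the patch: the tun_get_user() hunks above
 * and below replace the offset-based memcpy_fromiovecend() calls with the
 * iov_iter API, which tracks the read position inside the iterator itself.
 * A minimal illustration of the consumption pattern follows;
 * demo_pull_header() is an invented name, while the three iov_iter calls
 * are the real <linux/uio.h> interfaces the patch uses:
 */
#include <linux/uio.h>
#include <linux/errno.h>

/* Consume a fixed-size header, then skip padding the caller reserved. */
static int demo_pull_header(struct iov_iter *from, void *hdr,
			    size_t hdr_len, size_t reserved_len)
{
	if (iov_iter_count(from) < reserved_len)
		return -EINVAL;
	if (copy_from_iter(hdr, hdr_len, from) != hdr_len)
		return -EFAULT;	/* short copy: user pages went away */
	/* skip the reserved padding, as tun does when vnet_hdr_sz > sizeof(gso) */
	iov_iter_advance(from, reserved_len - hdr_len);
	return 0;
}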
@@ -1069,7 +1075,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (copylen > good_linear) copylen = good_linear; linear = copylen; - if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS) + iov_iter_advance(&i, copylen); + if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) zerocopy = true; } @@ -1089,9 +1096,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, } if (zerocopy) - err = zerocopy_sg_from_iovec(skb, iv, offset, count); + err = zerocopy_sg_from_iter(skb, from); else { - err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len); + err = skb_copy_datagram_from_iter(skb, 0, from, len); if (!err && msg_control) { struct ubuf_info *uarg = msg_control; uarg->callback(uarg, false); @@ -1139,6 +1146,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, break; } + skb_reset_network_header(skb); + if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { @@ -1149,8 +1158,20 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; break; case VIRTIO_NET_HDR_GSO_UDP: + { + static bool warned; + + if (!warned) { + warned = true; + netdev_warn(tun->dev, + "%s: using disabled UFO feature; please fix this program\n", + current->comm); + } skb_shinfo(skb)->gso_type = SKB_GSO_UDP; + if (skb->protocol == htons(ETH_P_IPV6)) + ipv6_proxy_select_ident(skb); break; + } default: tun->dev->stats.rx_frame_errors++; kfree_skb(skb); @@ -1179,7 +1200,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; } - skb_reset_network_header(skb); skb_probe_transport_header(skb, 0); rxhash = skb_get_hash(skb); @@ -1192,8 +1212,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, return total_len; } -static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv, - unsigned long count, loff_t pos) +static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct tun_struct *tun = tun_get(file); @@ -1203,10 +1222,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv, if (!tun) return -EBADFD; - tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count); - - result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count), - count, file->f_flags & O_NONBLOCK); + result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK); tun_put(tun); return result; @@ -1216,29 +1232,39 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv, static ssize_t tun_put_user(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb, - const struct iovec *iv, int len) + struct iov_iter *iter) { struct tun_pi pi = { 0, skb->protocol }; - ssize_t total = 0; - int vlan_offset = 0, copied; + ssize_t total; + int vlan_offset = 0; + int vlan_hlen = 0; + int vnet_hdr_sz = 0; + + if (vlan_tx_tag_present(skb)) + vlan_hlen = VLAN_HLEN; + + if (tun->flags & TUN_VNET_HDR) + vnet_hdr_sz = tun->vnet_hdr_sz; + + total = skb->len + vlan_hlen + vnet_hdr_sz; if (!(tun->flags & TUN_NO_PI)) { - if ((len -= sizeof(pi)) < 0) + if (iov_iter_count(iter) < sizeof(pi)) return -EINVAL; - if (len < skb->len) { + total += sizeof(pi); + if (iov_iter_count(iter) < total) { /* Packet will be striped */ pi.flags |= TUN_PKT_STRIP; } - if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi))) + if (copy_to_iter(&pi, 
sizeof(pi), iter) != sizeof(pi)) return -EFAULT; - total += sizeof(pi); } - if (tun->flags & TUN_VNET_HDR) { + if (vnet_hdr_sz) { struct virtio_net_hdr gso = { 0 }; /* no info leak */ - if ((len -= tun->vnet_hdr_sz) < 0) + if (iov_iter_count(iter) < vnet_hdr_sz) return -EINVAL; if (skb_is_gso(skb)) { @@ -1251,8 +1277,6 @@ static ssize_t tun_put_user(struct tun_struct *tun, gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (sinfo->gso_type & SKB_GSO_TCPV6) gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; - else if (sinfo->gso_type & SKB_GSO_UDP) - gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; else { pr_err("unexpected GSO type: " "0x%x, gso_size %d, hdr_len %d\n", @@ -1272,24 +1296,21 @@ static ssize_t tun_put_user(struct tun_struct *tun, if (skb->ip_summed == CHECKSUM_PARTIAL) { gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; - gso.csum_start = skb_checksum_start_offset(skb); + gso.csum_start = skb_checksum_start_offset(skb) + + vlan_hlen; gso.csum_offset = skb->csum_offset; } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { gso.flags = VIRTIO_NET_HDR_F_DATA_VALID; } /* else everything is zero */ - if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total, - sizeof(gso)))) + if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) return -EFAULT; - total += tun->vnet_hdr_sz; + + iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); } - copied = total; - total += skb->len; - if (!vlan_tx_tag_present(skb)) { - len = min_t(int, skb->len, len); - } else { - int copy, ret; + if (vlan_hlen) { + int ret; struct { __be16 h_vlan_proto; __be16 h_vlan_TCI; @@ -1299,44 +1320,37 @@ static ssize_t tun_put_user(struct tun_struct *tun, veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); - len = min_t(int, skb->len + VLAN_HLEN, len); - total += VLAN_HLEN; - - copy = min_t(int, vlan_offset, len); - ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); - len -= copy; - copied += copy; - if (ret || !len) + + ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); + if (ret || !iov_iter_count(iter)) goto done; - copy = min_t(int, sizeof(veth), len); - ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy); - len -= copy; - copied += copy; - if (ret || !len) + ret = copy_to_iter(&veth, sizeof(veth), iter); + if (ret != sizeof(veth) || !iov_iter_count(iter)) goto done; } - skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); + skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); done: tun->dev->stats.tx_packets++; - tun->dev->stats.tx_bytes += len; + tun->dev->stats.tx_bytes += skb->len + vlan_hlen; return total; } static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, - const struct iovec *iv, ssize_t len, int noblock) + struct iov_iter *to, + int noblock) { struct sk_buff *skb; - ssize_t ret = 0; + ssize_t ret; int peeked, err, off = 0; tun_debug(KERN_INFO, tun, "tun_do_read\n"); - if (!len) - return ret; + if (!iov_iter_count(to)) + return 0; if (tun->dev->reg_state != NETREG_REGISTERED) return -EIO; @@ -1344,37 +1358,31 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, /* Read frames from queue */ skb = __skb_recv_datagram(tfile->socket.sk, noblock ? 
MSG_DONTWAIT : 0, &peeked, &off, &err); - if (skb) { - ret = tun_put_user(tun, tfile, skb, iv, len); + if (!skb) + return 0; + + ret = tun_put_user(tun, tfile, skb, to); + if (unlikely(ret < 0)) kfree_skb(skb); - } else - ret = err; + else + consume_skb(skb); return ret; } -static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, - unsigned long count, loff_t pos) +static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct tun_file *tfile = file->private_data; struct tun_struct *tun = __tun_get(tfile); - ssize_t len, ret; + ssize_t len = iov_iter_count(to), ret; if (!tun) return -EBADFD; - len = iov_length(iv, count); - if (len < 0) { - ret = -EINVAL; - goto out; - } - - ret = tun_do_read(tun, tfile, iv, len, - file->f_flags & O_NONBLOCK); + ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK); ret = min_t(ssize_t, ret, len); if (ret > 0) iocb->ki_pos = ret; -out: tun_put(tun); return ret; } @@ -1441,11 +1449,14 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock, int ret; struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = __tun_get(tfile); + struct iov_iter from; if (!tun) return -EBADFD; - ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len, - m->msg_iovlen, m->msg_flags & MSG_DONTWAIT); + + iov_iter_init(&from, WRITE, m->msg_iov, m->msg_iovlen, total_len); + ret = tun_get_user(tun, tfile, m->msg_control, &from, + m->msg_flags & MSG_DONTWAIT); tun_put(tun); return ret; } @@ -1456,6 +1467,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock, { struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = __tun_get(tfile); + struct iov_iter to; int ret; if (!tun) @@ -1470,8 +1482,8 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock, SOL_PACKET, TUN_TX_TIMESTAMP); goto out; } - ret = tun_do_read(tun, tfile, m->msg_iov, total_len, - flags & MSG_DONTWAIT); + iov_iter_init(&to, READ, m->msg_iov, m->msg_iovlen, total_len); + ret = tun_do_read(tun, tfile, &to, flags & MSG_DONTWAIT); if (ret > total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? 
ret : total_len; @@ -1762,11 +1774,6 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) features |= NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } - - if (arg & TUN_F_UFO) { - features |= NETIF_F_UFO; - arg &= ~TUN_F_UFO; - } } /* This gives the user a way to test for new features in future by @@ -2232,10 +2239,10 @@ static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f) static const struct file_operations tun_fops = { .owner = THIS_MODULE, .llseek = no_llseek, - .read = do_sync_read, - .aio_read = tun_chr_aio_read, - .write = do_sync_write, - .aio_write = tun_chr_aio_write, + .read = new_sync_read, + .write = new_sync_write, + .read_iter = tun_chr_read_iter, + .write_iter = tun_chr_write_iter, .poll = tun_chr_poll, .unlocked_ioctl = tun_chr_ioctl, #ifdef CONFIG_COMPAT diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 2c05f6cdb12f..bf49792062a2 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -465,19 +465,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) return ret; } - ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL); - if (ret < 0) - return ret; - - msleep(150); - - ret = asix_sw_reset(dev, AX_SWRESET_CLEAR); - if (ret < 0) - return ret; - - msleep(150); - - ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE); + ax88772_reset(dev); /* Read PHYID register *AFTER* the PHY was reset properly */ phyid = asix_get_phyid(dev); @@ -499,8 +487,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) { - if (dev->driver_priv) - kfree(dev->driver_priv); + kfree(dev->driver_priv); } static const struct ethtool_ops ax88178_ethtool_ops = { diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index be4275721039..e6338c16081a 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -937,6 +937,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p) { struct usbnet *dev = netdev_priv(net); struct sockaddr *addr = p; + int ret; if (netif_running(net)) return -EBUSY; @@ -946,8 +947,12 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p) memcpy(net->dev_addr, addr->sa_data, ETH_ALEN); /* Set the MAC address */ - return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, + ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN, net->dev_addr); + if (ret < 0) + return ret; + + return 0; } static const struct net_device_ops ax88179_netdev_ops = { diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 2ec1500d0077..415ce8b882c6 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags) struct page *page; int err; - page = __skb_alloc_page(gfp_flags | __GFP_NOMEMALLOC, NULL); + page = __dev_alloc_page(gfp_flags | __GFP_NOMEMALLOC); if (!page) return -ENOMEM; @@ -212,7 +212,7 @@ resubmit: if (page) put_page(page); if (req) - rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD); + rx_submit(pnd, req, GFP_ATOMIC); } static int usbpn_close(struct net_device *dev); @@ -231,7 +231,7 @@ static int usbpn_open(struct net_device *dev) for (i = 0; i < rxq_size; i++) { struct urb *req = usb_alloc_urb(0, GFP_KERNEL); - if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) { + if (!req || rx_submit(pnd, req, GFP_KERNEL)) { 
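/*
 * Editorial note, not part of the patch: the cdc-phonet hunks in this file
 * swap __skb_alloc_page(gfp | __GFP_NOMEMALLOC, NULL) for __dev_alloc_page()
 * and drop the explicit __GFP_COLD from both rx_submit() call sites.
 * Sketched from memory, the replacement helper folds the
 * networking-appropriate flags in itself, roughly:
 *
 *	static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
 *						     unsigned int order)
 *	{
 *		gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
 *		return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
 *	}
 *
 * so callers no longer pass __GFP_COLD by hand; see <linux/skbuff.h> from
 * this series for the exact flag set.
 */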
usb_free_urb(req); usbpn_close(dev); return -ENOMEM; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 2a32d9167d3b..9311a08565be 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -67,6 +67,36 @@ static const u8 mbm_guid[16] = { 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a, }; +static void usbnet_cdc_update_filter(struct usbnet *dev) +{ + struct cdc_state *info = (void *) &dev->data; + struct usb_interface *intf = info->control; + struct net_device *net = dev->net; + + u16 cdc_filter = USB_CDC_PACKET_TYPE_DIRECTED + | USB_CDC_PACKET_TYPE_BROADCAST; + + /* filtering on the device is an optional feature and not worth + * the hassle so we just roughly care about snooping and if any + * multicast is requested, we take every multicast + */ + if (net->flags & IFF_PROMISC) + cdc_filter |= USB_CDC_PACKET_TYPE_PROMISCUOUS; + if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) + cdc_filter |= USB_CDC_PACKET_TYPE_ALL_MULTICAST; + + usb_control_msg(dev->udev, + usb_sndctrlpipe(dev->udev, 0), + USB_CDC_SET_ETHERNET_PACKET_FILTER, + USB_TYPE_CLASS | USB_RECIP_INTERFACE, + cdc_filter, + intf->cur_altsetting->desc.bInterfaceNumber, + NULL, + 0, + USB_CTRL_SET_TIMEOUT + ); +} + /* probes control interface, claims data interface, collects the bulk * endpoints, activates data interface (if needed), maybe sets MTU. * all pure cdc, except for certain firmware workarounds, and knowing @@ -347,16 +377,8 @@ next_desc: * don't do reset all the way. So the packet filter should * be set to a sane initial value. */ - usb_control_msg(dev->udev, - usb_sndctrlpipe(dev->udev, 0), - USB_CDC_SET_ETHERNET_PACKET_FILTER, - USB_TYPE_CLASS | USB_RECIP_INTERFACE, - USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST, - intf->cur_altsetting->desc.bInterfaceNumber, - NULL, - 0, - USB_CTRL_SET_TIMEOUT - ); + usbnet_cdc_update_filter(dev); + return 0; bad_desc: @@ -468,10 +490,6 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf) return status; } - /* FIXME cdc-ether has some multicast code too, though it complains - * in routine cases. info->ether describes the multicast support. - * Implement that here, manipulating the cdc filter as needed. 
- */ return 0; } EXPORT_SYMBOL_GPL(usbnet_cdc_bind); @@ -482,6 +500,7 @@ static const struct driver_info cdc_info = { .bind = usbnet_cdc_bind, .unbind = usbnet_cdc_unbind, .status = usbnet_cdc_status, + .set_rx_mode = usbnet_cdc_update_filter, .manage_power = usbnet_manage_power, }; @@ -491,6 +510,7 @@ static const struct driver_info wwan_info = { .bind = usbnet_cdc_bind, .unbind = usbnet_cdc_unbind, .status = usbnet_cdc_status, + .set_rx_mode = usbnet_cdc_update_filter, .manage_power = usbnet_manage_power, }; diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 5ee7a1dbc023..96fc8a5bde84 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -402,7 +402,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_ /* map MBIM session to VLAN */ if (tci) - vlan_put_tag(skb, htons(ETH_P_8021Q), tci); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci); err: return skb; } diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index babda7d8693e..9c5aa922a9f4 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2746,8 +2746,7 @@ exit: tty_unregister_device(tty_drv, serial->minor); kfree(serial); } - if (hso_dev) - kfree(hso_dev); + kfree(hso_dev); return NULL; } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 22756db53dca..b8a82b86f909 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -780,6 +780,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ /* 4. 
Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index e3d84c322e4e..2d1c77e81836 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -24,6 +24,7 @@ #include <net/ip6_checksum.h> #include <uapi/linux/mdio.h> #include <linux/mdio.h> +#include <linux/usb/cdc.h> /* Version Information */ #define DRIVER_VERSION "v1.07.0 (2014/10/09)" @@ -461,18 +462,11 @@ enum rtl8152_flags { /* Define these values to match your device */ #define VENDOR_ID_REALTEK 0x0bda -#define PRODUCT_ID_RTL8152 0x8152 -#define PRODUCT_ID_RTL8153 0x8153 - #define VENDOR_ID_SAMSUNG 0x04e8 -#define PRODUCT_ID_SAMSUNG 0xa101 #define MCU_TYPE_PLA 0x0100 #define MCU_TYPE_USB 0x0000 -#define REALTEK_USB_DEVICE(vend, prod) \ - USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC) - struct tally_counter { __le64 tx_packets; __le64 rx_packets; @@ -486,7 +480,7 @@ struct tally_counter { __le64 rx_broadcast; __le32 rx_multicast; __le16 tx_aborted; - __le16 tx_underun; + __le16 tx_underrun; }; struct rx_desc { @@ -690,6 +684,9 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, } } + if (ret == -ENODEV) + set_bit(RTL8152_UNPLUG, &tp->flags); + return ret; } @@ -757,6 +754,9 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, } error1: + if (ret == -ENODEV) + set_bit(RTL8152_UNPLUG, &tp->flags); + return ret; } @@ -1030,7 +1030,6 @@ static void read_bulk_callback(struct urb *urb) int status = urb->status; struct rx_agg *agg; struct r8152 *tp; - int result; agg = urb->context; if (!agg) @@ -1081,15 +1080,7 @@ static void read_bulk_callback(struct urb *urb) break; } - result = r8152_submit_rx(tp, agg, GFP_ATOMIC); - if (result == -ENODEV) { - netif_device_detach(tp->netdev); - } else if (result) { - spin_lock(&tp->rx_lock); - list_add_tail(&agg->list, &tp->rx_done); - spin_unlock(&tp->rx_lock); - tasklet_schedule(&tp->tl); - } + r8152_submit_rx(tp, agg, GFP_ATOMIC); } static void write_bulk_callback(struct urb *urb) @@ -1162,6 +1153,9 @@ static void intr_callback(struct urb *urb) case -ESHUTDOWN: netif_device_detach(tp->netdev); case -ENOENT: + case -EPROTO: + netif_info(tp, intr, tp->netdev, + "Stop submitting intr, status %d\n", status); return; case -EOVERFLOW: netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n"); @@ -1187,11 +1181,13 @@ static void intr_callback(struct urb *urb) resubmit: res = usb_submit_urb(urb, GFP_ATOMIC); - if (res == -ENODEV) + if (res == -ENODEV) { + set_bit(RTL8152_UNPLUG, &tp->flags); netif_device_detach(tp->netdev); - else if (res) + } else if (res) { netif_err(tp, intr, tp->netdev, "can't resubmit intr, status %d\n", res); + } } static inline void *rx_agg_align(void *data) @@ -1247,7 +1243,6 @@ static int alloc_all_mem(struct r8152 *tp) spin_lock_init(&tp->rx_lock); spin_lock_init(&tp->tx_lock); - INIT_LIST_HEAD(&tp->rx_done); INIT_LIST_HEAD(&tp->tx_free); skb_queue_head_init(&tp->tx_queue); @@ -1673,7 +1668,6 @@ static void rx_bottom(struct r8152 *tp) int len_used = 0; struct urb *urb; u8 *rx_data; - int ret; list_del_init(cursor); @@ -1726,13 +1720,7 @@ find_next_rx: } submit: - ret = r8152_submit_rx(tp, agg, GFP_ATOMIC); - if (ret && ret != -ENODEV) { - spin_lock_irqsave(&tp->rx_lock, flags); - list_add_tail(&agg->list, &tp->rx_done); - spin_unlock_irqrestore(&tp->rx_lock, flags); - tasklet_schedule(&tp->tl); - } + r8152_submit_rx(tp, agg, GFP_ATOMIC); } } @@ -1755,6 +1743,7 @@ static void tx_bottom(struct r8152 *tp) struct 
net_device *netdev = tp->netdev; if (res == -ENODEV) { + set_bit(RTL8152_UNPLUG, &tp->flags); netif_device_detach(netdev); } else { struct net_device_stats *stats = &netdev->stats; @@ -1789,6 +1778,8 @@ static void bottom_half(unsigned long data) if (!netif_carrier_ok(tp->netdev)) return; + clear_bit(SCHEDULE_TASKLET, &tp->flags); + rx_bottom(tp); tx_bottom(tp); } @@ -1796,11 +1787,28 @@ static void bottom_half(unsigned long data) static int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) { + int ret; + usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), agg->head, agg_buf_sz, (usb_complete_t)read_bulk_callback, agg); - return usb_submit_urb(agg->urb, mem_flags); + ret = usb_submit_urb(agg->urb, mem_flags); + if (ret == -ENODEV) { + set_bit(RTL8152_UNPLUG, &tp->flags); + netif_device_detach(tp->netdev); + } else if (ret) { + struct urb *urb = agg->urb; + unsigned long flags; + + urb->actual_length = 0; + spin_lock_irqsave(&tp->rx_lock, flags); + list_add_tail(&agg->list, &tp->rx_done); + spin_unlock_irqrestore(&tp->rx_lock, flags); + tasklet_schedule(&tp->tl); + } + + return ret; } static void rtl_drop_queued_tx(struct r8152 *tp) @@ -1991,6 +1999,25 @@ static int rtl_start_rx(struct r8152 *tp) break; } + if (ret && ++i < RTL8152_MAX_RX) { + struct list_head rx_queue; + unsigned long flags; + + INIT_LIST_HEAD(&rx_queue); + + do { + struct rx_agg *agg = &tp->rx_info[i++]; + struct urb *urb = agg->urb; + + urb->actual_length = 0; + list_add_tail(&agg->list, &rx_queue); + } while (i < RTL8152_MAX_RX); + + spin_lock_irqsave(&tp->rx_lock, flags); + list_splice_tail(&rx_queue, &tp->rx_done); + spin_unlock_irqrestore(&tp->rx_lock, flags); + } + return ret; } @@ -2847,15 +2874,18 @@ static void rtl_work_func_t(struct work_struct *work) { struct r8152 *tp = container_of(work, struct r8152, schedule.work); + /* If the device is unplugged or !netif_running(), the workqueue + * doesn't need to wake the device, and could return directly. + */ + if (test_bit(RTL8152_UNPLUG, &tp->flags) || !netif_running(tp->netdev)) + return; + if (usb_autopm_get_interface(tp->intf) < 0) return; if (!test_bit(WORK_ENABLE, &tp->flags)) goto out1; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) - goto out1; - if (!mutex_trylock(&tp->control)) { schedule_delayed_work(&tp->schedule, 0); goto out1; @@ -2891,6 +2921,9 @@ static int rtl8152_open(struct net_device *netdev) if (res) goto out; + /* set speed to 0 to avoid autoresume try to submit rx */ + tp->speed = 0; + res = usb_autopm_get_interface(tp->intf); if (res < 0) { free_all_mem(tp); @@ -2904,6 +2937,8 @@ static int rtl8152_open(struct net_device *netdev) clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); + + /* disable the tx/rx, if the workqueue has enabled them. */ if (tp->speed & LINK_STATUS) tp->rtl_ops.disable(tp); } @@ -2925,6 +2960,8 @@ static int rtl8152_open(struct net_device *netdev) netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", res); free_all_mem(tp); + } else { + tasklet_enable(&tp->tl); } mutex_unlock(&tp->control); @@ -2940,6 +2977,7 @@ static int rtl8152_close(struct net_device *netdev) struct r8152 *tp = netdev_priv(netdev); int res = 0; + tasklet_disable(&tp->tl); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); @@ -2955,14 +2993,9 @@ static int rtl8152_close(struct net_device *netdev) * be disable when autoresume occurs, because the * netif_running() would be false. 
*/ - if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { - rtl_runtime_suspend_enable(tp, false); - clear_bit(SELECTIVE_SUSPEND, &tp->flags); - } + rtl_runtime_suspend_enable(tp, false); - tasklet_disable(&tp->tl); tp->rtl_ops.down(tp); - tasklet_enable(&tp->tl); mutex_unlock(&tp->control); @@ -3205,7 +3238,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) netif_device_detach(netdev); } - if (netif_running(netdev)) { + if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); tasklet_disable(&tp->tl); @@ -3253,6 +3286,8 @@ static int rtl8152_resume(struct usb_interface *intf) set_bit(WORK_ENABLE, &tp->flags); } usb_submit_urb(tp->intr_urb, GFP_KERNEL); + } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { + clear_bit(SELECTIVE_SUSPEND, &tp->flags); } mutex_unlock(&tp->control); @@ -3420,7 +3455,7 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev, data[9] = le64_to_cpu(tally.rx_broadcast); data[10] = le32_to_cpu(tally.rx_multicast); data[11] = le16_to_cpu(tally.tx_aborted); - data[12] = le16_to_cpu(tally.tx_underun); + data[12] = le16_to_cpu(tally.tx_underrun); } static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -3558,11 +3593,33 @@ out: return ret; } +static int rtl8152_nway_reset(struct net_device *dev) +{ + struct r8152 *tp = netdev_priv(dev); + int ret; + + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + goto out; + + mutex_lock(&tp->control); + + ret = mii_nway_restart(&tp->mii); + + mutex_unlock(&tp->control); + + usb_autopm_put_interface(tp->intf); + +out: + return ret; +} + static struct ethtool_ops ops = { .get_drvinfo = rtl8152_get_drvinfo, .get_settings = rtl8152_get_settings, .set_settings = rtl8152_set_settings, .get_link = ethtool_op_get_link, + .nway_reset = rtl8152_nway_reset, .get_msglevel = rtl8152_get_msglevel, .set_msglevel = rtl8152_set_msglevel, .get_wol = rtl8152_get_wol, @@ -3702,66 +3759,43 @@ static void rtl8153_unload(struct r8152 *tp) r8153_power_cut_en(tp, false); } -static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) +static int rtl_ops_init(struct r8152 *tp) { struct rtl_ops *ops = &tp->rtl_ops; - int ret = -ENODEV; - - switch (id->idVendor) { - case VENDOR_ID_REALTEK: - switch (id->idProduct) { - case PRODUCT_ID_RTL8152: - ops->init = r8152b_init; - ops->enable = rtl8152_enable; - ops->disable = rtl8152_disable; - ops->up = rtl8152_up; - ops->down = rtl8152_down; - ops->unload = rtl8152_unload; - ops->eee_get = r8152_get_eee; - ops->eee_set = r8152_set_eee; - ret = 0; - break; - case PRODUCT_ID_RTL8153: - ops->init = r8153_init; - ops->enable = rtl8153_enable; - ops->disable = rtl8153_disable; - ops->up = rtl8153_up; - ops->down = rtl8153_down; - ops->unload = rtl8153_unload; - ops->eee_get = r8153_get_eee; - ops->eee_set = r8153_set_eee; - ret = 0; - break; - default: - break; - } + int ret = 0; + + switch (tp->version) { + case RTL_VER_01: + case RTL_VER_02: + ops->init = r8152b_init; + ops->enable = rtl8152_enable; + ops->disable = rtl8152_disable; + ops->up = rtl8152_up; + ops->down = rtl8152_down; + ops->unload = rtl8152_unload; + ops->eee_get = r8152_get_eee; + ops->eee_set = r8152_set_eee; break; - case VENDOR_ID_SAMSUNG: - switch (id->idProduct) { - case PRODUCT_ID_SAMSUNG: - ops->init = r8153_init; - ops->enable = rtl8153_enable; - ops->disable = rtl8153_disable; - ops->up = rtl8153_up; - ops->down = rtl8153_down; - ops->unload = rtl8153_unload; - ops->eee_get = 
r8153_get_eee; - ops->eee_set = r8153_set_eee; - ret = 0; - break; - default: - break; - } + case RTL_VER_03: + case RTL_VER_04: + case RTL_VER_05: + ops->init = r8153_init; + ops->enable = rtl8153_enable; + ops->disable = rtl8153_disable; + ops->up = rtl8153_up; + ops->down = rtl8153_down; + ops->unload = rtl8153_unload; + ops->eee_get = r8153_get_eee; + ops->eee_set = r8153_set_eee; break; default: + ret = -ENODEV; + netif_err(tp, probe, tp->netdev, "Unknown Device\n"); break; } - if (ret) - netif_err(tp, probe, tp->netdev, "Unknown Device\n"); - return ret; } @@ -3793,7 +3827,8 @@ static int rtl8152_probe(struct usb_interface *intf, tp->netdev = netdev; tp->intf = intf; - ret = rtl_ops_init(tp, id); + r8152b_get_version(tp); + ret = rtl_ops_init(tp); if (ret) goto out; @@ -3826,11 +3861,9 @@ static int rtl8152_probe(struct usb_interface *intf, tp->mii.phy_id_mask = 0x3f; tp->mii.reg_num_mask = 0x1f; tp->mii.phy_id = R8152_PHY_ID; - tp->mii.supports_gmii = 0; intf->needs_remote_wakeup = 1; - r8152b_get_version(tp); tp->rtl_ops.init(tp); set_ethernet_addr(tp); @@ -3848,12 +3881,15 @@ static int rtl8152_probe(struct usb_interface *intf, else device_set_wakeup_enable(&udev->dev, false); + tasklet_disable(&tp->tl); + netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION); return 0; out1: usb_set_intfdata(intf, NULL); + tasklet_kill(&tp->tl); out: free_netdev(netdev); return ret; @@ -3877,11 +3913,27 @@ static void rtl8152_disconnect(struct usb_interface *intf) } } +#define REALTEK_USB_DEVICE(vend, prod) \ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ + USB_DEVICE_ID_MATCH_INT_CLASS, \ + .idVendor = (vend), \ + .idProduct = (prod), \ + .bInterfaceClass = USB_CLASS_VENDOR_SPEC \ +}, \ +{ \ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | \ + USB_DEVICE_ID_MATCH_DEVICE, \ + .idVendor = (vend), \ + .idProduct = (prod), \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + /* table of devices that work with this driver */ static struct usb_device_id rtl8152_table[] = { - {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)}, - {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)}, - {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)}, + {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, + {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, + {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, {} }; diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 6e87e5710048..d37b7dce2d40 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -753,14 +753,13 @@ static int rtl8150_open(struct net_device *netdev) static int rtl8150_close(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); - int res = 0; netif_stop_queue(netdev); if (!test_bit(RTL8150_UNPLUG, &dev->flags)) disable_net_traffic(dev); unlink_all_urbs(dev); - return res; + return 0; } static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index d07bf4cb893f..26423adc35ee 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1670,12 +1670,14 @@ done: static int smsc95xx_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); - struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); - u8 suspend_flags = pdata->suspend_flags; + struct smsc95xx_priv *pdata; + u8 suspend_flags; int ret; u32 val; BUG_ON(!dev); + pdata = (struct smsc95xx_priv *)(dev->data[0]); + 
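/*
 * Editorial note, not part of the patch: in the r8152 device-table hunk
 * above, the new REALTEK_USB_DEVICE() macro is deliberately written with
 * unbalanced braces so that each use inside rtl8152_table expands to two
 * struct usb_device_id initializers, one matching the vendor-specific
 * interface class and one matching the CDC Ethernet interface triple.
 * A single table entry therefore covers both ways the device can
 * enumerate.  Schematically (field values taken from the macro body shown
 * above):
 *
 *	{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
 *
 * becomes:
 *
 *	{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
 *			 USB_DEVICE_ID_MATCH_INT_CLASS,
 *	  .idVendor = 0x0bda, .idProduct = 0x8152,
 *	  .bInterfaceClass = USB_CLASS_VENDOR_SPEC },
 *	{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO |
 *			 USB_DEVICE_ID_MATCH_DEVICE,
 *	  .idVendor = 0x0bda, .idProduct = 0x8152,
 *	  .bInterfaceClass = USB_CLASS_COMM,
 *	  .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
 *	  .bInterfaceProtocol = USB_CDC_PROTO_NONE },
 */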
suspend_flags = pdata->suspend_flags; netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 20615bbd693b..3a6770a65d78 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1052,6 +1052,21 @@ static void __handle_link_change(struct usbnet *dev) clear_bit(EVENT_LINK_CHANGE, &dev->flags); } +static void usbnet_set_rx_mode(struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + + usbnet_defer_kevent(dev, EVENT_SET_RX_MODE); +} + +static void __handle_set_rx_mode(struct usbnet *dev) +{ + if (dev->driver_info->set_rx_mode) + (dev->driver_info->set_rx_mode)(dev); + + clear_bit(EVENT_SET_RX_MODE, &dev->flags); +} + /* work that cannot be done in interrupt context uses keventd. * * NOTE: with 2.5 we could do more of this using completion callbacks, @@ -1157,6 +1172,10 @@ skip_reset: if (test_bit (EVENT_LINK_CHANGE, &dev->flags)) __handle_link_change(dev); + if (test_bit (EVENT_SET_RX_MODE, &dev->flags)) + __handle_set_rx_mode(dev); + + if (dev->flags) netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags); } @@ -1525,6 +1544,7 @@ static const struct net_device_ops usbnet_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_set_rx_mode = usbnet_set_rx_mode, .ndo_change_mtu = usbnet_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d75256bd1a6a..b0bc8ead47de 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -491,8 +491,17 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; break; case VIRTIO_NET_HDR_GSO_UDP: + { + static bool warned; + + if (!warned) { + warned = true; + netdev_warn(dev, + "host using disabled UFO feature; please fix it\n"); + } skb_shinfo(skb)->gso_type = SKB_GSO_UDP; break; + } case VIRTIO_NET_HDR_GSO_TCPV6: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; break; @@ -881,8 +890,6 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; - else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) - hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) @@ -1666,6 +1673,40 @@ static const struct attribute_group virtio_net_mrg_rx_group = { }; #endif +static bool virtnet_fail_on_feature(struct virtio_device *vdev, + unsigned int fbit, + const char *fname, const char *dname) +{ + if (!virtio_has_feature(vdev, fbit)) + return false; + + dev_err(&vdev->dev, "device advertises feature %s but not %s", + fname, dname); + + return true; +} + +#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ + virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) + +static bool virtnet_validate_features(struct virtio_device *vdev) +{ + if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && + (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, + "VIRTIO_NET_F_CTRL_VQ"))) { + return false; + } + + return true; +} + static int 
virtnet_probe(struct virtio_device *vdev) { int i, err; @@ -1673,6 +1714,9 @@ static int virtnet_probe(struct virtio_device *vdev) struct virtnet_info *vi; u16 max_queue_pairs; + if (!virtnet_validate_features(vdev)) + return -EINVAL; + /* Find if host supports multiqueue virtio_net device */ err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, struct virtio_net_config, @@ -1705,7 +1749,7 @@ static int virtnet_probe(struct virtio_device *vdev) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { - dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO + dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ @@ -1715,11 +1759,9 @@ static int virtnet_probe(struct virtio_device *vdev) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; - if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) - dev->hw_features |= NETIF_F_UFO; if (gso) - dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); + dev->features |= dev->hw_features & NETIF_F_ALL_TSO; /* (!csum && gso) case will be fixed by register_netdev() */ } if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) @@ -1757,8 +1799,7 @@ static int virtnet_probe(struct virtio_device *vdev) /* If we can receive ANY GSO packets, we must allocate large ones. */ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || - virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || - virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) vi->big_packets = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) @@ -1952,9 +1993,9 @@ static struct virtio_device_id id_table[] = { static unsigned int features[] = { VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, - VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, + VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, - VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, + VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 6dfcbf523936..afd295348ddb 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2199,13 +2199,6 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) if (adapter->rss) { struct UPT1_RSSConf *rssConf = adapter->rss_conf; - static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = { - 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac, - 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28, - 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70, - 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3, - 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9, - }; devRead->misc.uptFeatures |= UPT1_F_RSS; devRead->misc.numRxQueues = adapter->num_rx_queues; @@ -2216,7 +2209,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ; rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE; rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE; - memcpy(rssConf->hashKey, rss_key, sizeof(rss_key)); + netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey)); for (i = 0; i < rssConf->indTableSize; i++) rssConf->indTable[i] = 
ethtool_rxfh_indir_default( diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index b725fd9e7803..b7b53329d575 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -583,12 +583,16 @@ vmxnet3_get_rss_indir_size(struct net_device *netdev) } static int -vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key) +vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; unsigned int n = rssConf->indTableSize; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!p) + return 0; while (n--) p[n] = rssConf->indTable[n]; return 0; @@ -596,13 +600,20 @@ vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key) } static int -vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key) +vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key, + const u8 hfunc) { unsigned int i; unsigned long flags; struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!p) + return 0; for (i = 0; i < rssConf->indTableSize; i++) rssConf->indTable[i] = p[i]; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ca309820d39e..31ecb03368c6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -67,12 +67,6 @@ #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */ -/* VXLAN protocol header */ -struct vxlanhdr { - __be32 vx_flags; - __be32 vx_vni; -}; - /* UDP port for VXLAN traffic. * The IANA assigned port is 4789, but the Linux default is 8472 * for compatibility with early adopters. 
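/*
 * Editorial note, not part of the patch: the vxlan.c hunks below change
 * the socket lookup key from (netns, UDP port) to (netns, address family,
 * UDP port).  IPv4 and IPv6 VXLAN sockets bound to the same port hash into
 * the same per-port bucket, so a port-only walk could hand back the
 * AF_INET socket when the caller wanted the AF_INET6 one, or vice versa.
 * Hence the extra sk_family test in vxlan_find_sock(), with every caller
 * (vxlan_find_vni(), vxlan_init(), vxlan_sock_add(), vxlan_newlink())
 * updated to pass the family it actually uses, e.g.:
 *
 *	vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
 */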
@@ -275,13 +269,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb) return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); } -/* Find VXLAN socket based on network namespace and UDP port */ -static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port) +/* Find VXLAN socket based on network namespace, address family and UDP port */ +static struct vxlan_sock *vxlan_find_sock(struct net *net, + sa_family_t family, __be16 port) { struct vxlan_sock *vs; hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { - if (inet_sk(vs->sock->sk)->inet_sport == port) + if (inet_sk(vs->sock->sk)->inet_sport == port && + inet_sk(vs->sock->sk)->sk.sk_family == family) return vs; } return NULL; @@ -300,11 +296,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id) } /* Look up VNI in a per net namespace table */ -static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port) +static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, + sa_family_t family, __be16 port) { struct vxlan_sock *vs; - vs = vxlan_find_sock(net, port); + vs = vxlan_find_sock(net, family, port); if (!vs) return NULL; @@ -621,6 +618,8 @@ static int vxlan_gro_complete(struct sk_buff *skb, int nhoff) int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr); int err = -ENOSYS; + udp_tunnel_gro_complete(skb, nhoff); + eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr)); type = eh->h_proto; @@ -850,7 +849,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, /* Add static entry (via netlink) */ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, u16 flags) + const unsigned char *addr, u16 vid, u16 flags) { struct vxlan_dev *vxlan = netdev_priv(dev); /* struct net *net = dev_net(vxlan->dev); */ @@ -886,7 +885,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], /* Delete entry (via netlink) */ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr) + const unsigned char *addr, u16 vid) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb *f; @@ -1594,14 +1593,9 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs, if (unlikely(err)) return err; - if (vlan_tx_tag_present(skb)) { - if (WARN_ON(!__vlan_put_tag(skb, - skb->vlan_proto, - vlan_tx_tag_get(skb)))) - return -ENOMEM; - - skb->vlan_tci = 0; - } + skb = vlan_hwaccel_push_inside(skb); + if (WARN_ON(!skb)) + return -ENOMEM; vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh->vx_flags = htonl(VXLAN_FLAGS); @@ -1638,14 +1632,9 @@ int vxlan_xmit_skb(struct vxlan_sock *vs, if (unlikely(err)) return err; - if (vlan_tx_tag_present(skb)) { - if (WARN_ON(!__vlan_put_tag(skb, - skb->vlan_proto, - vlan_tx_tag_get(skb)))) - return -ENOMEM; - - skb->vlan_tci = 0; - } + skb = vlan_hwaccel_push_inside(skb); + if (WARN_ON(!skb)) + return -ENOMEM; vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh->vx_flags = htonl(VXLAN_FLAGS); @@ -1771,7 +1760,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_dev *dst_vxlan; ip_rt_put(rt); - dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port); + dst_vxlan = vxlan_find_vni(vxlan->net, vni, + dst->sa.sa_family, dst_port); if (!dst_vxlan) goto tx_error; vxlan_encap_bypass(skb, vxlan, dst_vxlan); @@ -1825,7 +1815,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_dev *dst_vxlan; 
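/*
 * Editorial sketch, not part of the patch: the vxlan6_xmit_skb() and
 * vxlan_xmit_skb() hunks above collapse the open-coded "insert the
 * hardware-accelerated VLAN tag into the payload" sequence into
 * vlan_hwaccel_push_inside().  The helper's behaviour, sketched from the
 * <linux/if_vlan.h> of this era (the exact body may differ):
 *
 *	static inline struct sk_buff *
 *	vlan_hwaccel_push_inside(struct sk_buff *skb)
 *	{
 *		if (vlan_tx_tag_present(skb)) {
 *			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
 *							vlan_tx_tag_get(skb));
 *			if (likely(skb))
 *				skb->vlan_tci = 0;
 *		}
 *		return skb;
 *	}
 *
 * On allocation failure the tag-insertion helper frees the skb and returns
 * NULL, which is why the callers now test WARN_ON(!skb) instead of the old
 * __vlan_put_tag() return value.
 */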
dst_release(ndst); - dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port); + dst_vxlan = vxlan_find_vni(vxlan->net, vni, + dst->sa.sa_family, dst_port); if (!dst_vxlan) goto tx_error; vxlan_encap_bypass(skb, vxlan, dst_vxlan); @@ -1985,13 +1976,15 @@ static int vxlan_init(struct net_device *dev) struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); struct vxlan_sock *vs; + bool ipv6 = vxlan->flags & VXLAN_F_IPV6; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; spin_lock(&vn->sock_lock); - vs = vxlan_find_sock(vxlan->net, vxlan->dst_port); + vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET, + vxlan->dst_port); if (vs) { /* If we have a socket with same port already, reuse it */ atomic_inc(&vs->refcnt); @@ -2233,6 +2226,9 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 }, [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 }, [IFLA_VXLAN_PORT] = { .type = NLA_U16 }, + [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 }, + [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, + [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, }; static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) @@ -2303,9 +2299,9 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6, if (ipv6) { udp_conf.family = AF_INET6; udp_conf.use_udp6_tx_checksums = - !!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); + !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); udp_conf.use_udp6_rx_checksums = - !!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); + !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); } else { udp_conf.family = AF_INET; udp_conf.local_ip.s_addr = INADDR_ANY; @@ -2382,6 +2378,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; + bool ipv6 = flags & VXLAN_F_IPV6; vs = vxlan_socket_create(net, port, rcv, data, flags); if (!IS_ERR(vs)) @@ -2391,7 +2388,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, return vs; spin_lock(&vn->sock_lock); - vs = vxlan_find_sock(net, port); + vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port); if (vs) { if (vs->rcv == rcv) atomic_inc(&vs->refcnt); @@ -2550,7 +2547,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; - if (vxlan_find_vni(net, vni, vxlan->dst_port)) { + if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET, + vxlan->dst_port)) { pr_info("duplicate VNI %u\n", vni); return -EEXIST; } diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index d4eb8d2e9cb7..083ecc93fe5e 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -176,10 +176,11 @@ struct xenvif_queue { /* Per-queue data for xenvif */ char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */ struct xen_netif_rx_back_ring rx; struct sk_buff_head rx_queue; - RING_IDX rx_last_skb_slots; - unsigned long status; - struct timer_list rx_stalled; + unsigned int rx_queue_max; + unsigned int rx_queue_len; + unsigned long last_rx_time; + bool stalled; struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS]; @@ -199,18 +200,14 @@ struct xenvif_queue { /* Per-queue data for xenvif */ struct xenvif_stats stats; }; +/* Maximum number of Rx slots a to-guest packet may use, including the + * slot needed for GSO meta-data. 
+ */ +#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1) + enum state_bit_shift { /* This bit marks that the vif is connected */ VIF_STATUS_CONNECTED, - /* This bit signals the RX thread that queuing was stopped (in - * start_xmit), and either the timer fired or an RX interrupt came - */ - QUEUE_STATUS_RX_PURGE_EVENT, - /* This bit tells the interrupt handler that this queue was the reason - * for the carrier off, so it should kick the thread. Only queues which - * brought it down can turn on the carrier. - */ - QUEUE_STATUS_RX_STALLED }; struct xenvif { @@ -228,9 +225,6 @@ struct xenvif { u8 ip_csum:1; u8 ipv6_csum:1; - /* Internal feature information. */ - u8 can_queue:1; /* can queue packets for receiver? */ - /* Is this interface disabled? True when backend discovers * frontend is rogue. */ @@ -240,6 +234,9 @@ struct xenvif { /* Queues */ struct xenvif_queue *queues; unsigned int num_queues; /* active queues, resource allocated */ + unsigned int stalled_queues; + + spinlock_t lock; #ifdef CONFIG_DEBUG_FS struct dentry *xenvif_dbg_root; @@ -249,6 +246,14 @@ struct xenvif { struct net_device *dev; }; +struct xenvif_rx_cb { + unsigned long expires; + int meta_slots_used; + bool full_coalesce; +}; + +#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb) + static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif) { return to_xenbus_device(vif->dev->dev.parent); @@ -272,8 +277,6 @@ void xenvif_xenbus_fini(void); int xenvif_schedulable(struct xenvif *vif); -int xenvif_must_stop_queue(struct xenvif_queue *queue); - int xenvif_queue_stopped(struct xenvif_queue *queue); void xenvif_wake_queue(struct xenvif_queue *queue); @@ -296,6 +299,8 @@ void xenvif_kick_thread(struct xenvif_queue *queue); int xenvif_dealloc_kthread(void *data); +void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); + /* Determine whether the needed number of slots (req) are available, * and set req_event if not. */ diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f379689dde30..a6a32d337bbb 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -43,6 +43,9 @@ #define XENVIF_QUEUE_LENGTH 32 #define XENVIF_NAPI_WEIGHT 64 +/* Number of bytes allowed on the internal guest Rx queue. */ +#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE) + /* This function is used to set SKBTX_DEV_ZEROCOPY as well as * increasing the inflight counter. 
We need to increase the inflight * counter because core driver calls into xenvif_zerocopy_callback @@ -60,20 +63,11 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) atomic_dec(&queue->inflight_packets); } -static inline void xenvif_stop_queue(struct xenvif_queue *queue) -{ - struct net_device *dev = queue->vif->dev; - - if (!queue->vif->can_queue) - return; - - netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); -} - int xenvif_schedulable(struct xenvif *vif) { return netif_running(vif->dev) && - test_bit(VIF_STATUS_CONNECTED, &vif->status); + test_bit(VIF_STATUS_CONNECTED, &vif->status) && + !vif->disabled; } static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) @@ -114,16 +108,7 @@ int xenvif_poll(struct napi_struct *napi, int budget) static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) { struct xenvif_queue *queue = dev_id; - struct netdev_queue *net_queue = - netdev_get_tx_queue(queue->vif->dev, queue->id); - /* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR - * the carrier went down and this queue was previously blocked - */ - if (unlikely(netif_tx_queue_stopped(net_queue) || - (!netif_carrier_ok(queue->vif->dev) && - test_bit(QUEUE_STATUS_RX_STALLED, &queue->status)))) - set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status); xenvif_kick_thread(queue); return IRQ_HANDLED; @@ -151,24 +136,13 @@ void xenvif_wake_queue(struct xenvif_queue *queue) netif_tx_wake_queue(netdev_get_tx_queue(dev, id)); } -/* Callback to wake the queue's thread and turn the carrier off on timeout */ -static void xenvif_rx_stalled(unsigned long data) -{ - struct xenvif_queue *queue = (struct xenvif_queue *)data; - - if (xenvif_queue_stopped(queue)) { - set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status); - xenvif_kick_thread(queue); - } -} - static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); struct xenvif_queue *queue = NULL; unsigned int num_queues = vif->num_queues; u16 index; - int min_slots_needed; + struct xenvif_rx_cb *cb; BUG_ON(skb->dev != dev); @@ -191,30 +165,10 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) !xenvif_schedulable(vif)) goto drop; - /* At best we'll need one slot for the header and one for each - * frag. - */ - min_slots_needed = 1 + skb_shinfo(skb)->nr_frags; - - /* If the skb is GSO then we'll also need an extra slot for the - * metadata. - */ - if (skb_is_gso(skb)) - min_slots_needed++; - - /* If the skb can't possibly fit in the remaining slots - * then turn off the queue to give the ring a chance to - * drain. 
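
For reference, the per-packet estimate being deleted here counted one slot for the linear header, one per page fragment, and one more for GSO metadata; the rework stops estimating per skb and instead waits for the fixed worst case XEN_NETBK_RX_SLOTS_MAX before dequeuing anything. The old arithmetic, restated as a sketch:

#include <linux/skbuff.h>

/* Cheap lower bound on the ring slots one skb may consume. */
static int min_slots_needed(struct sk_buff *skb)
{
	int slots = 1 + skb_shinfo(skb)->nr_frags;  /* header + frags */

	if (skb_is_gso(skb))
		slots++;                /* extra slot for GSO metadata */
	return slots;
}
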
- */ - if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) { - queue->rx_stalled.function = xenvif_rx_stalled; - queue->rx_stalled.data = (unsigned long)queue; - xenvif_stop_queue(queue); - mod_timer(&queue->rx_stalled, - jiffies + rx_drain_timeout_jiffies); - } + cb = XENVIF_RX_CB(skb); + cb->expires = jiffies + rx_drain_timeout_jiffies; - skb_queue_tail(&queue->rx_queue, skb); + xenvif_rx_queue_tail(queue, skb); xenvif_kick_thread(queue); return NETDEV_TX_OK; @@ -281,10 +235,10 @@ static void xenvif_down(struct xenvif *vif) for (queue_index = 0; queue_index < num_queues; ++queue_index) { queue = &vif->queues[queue_index]; - napi_disable(&queue->napi); disable_irq(queue->tx_irq); if (queue->tx_irq != queue->rx_irq) disable_irq(queue->rx_irq); + napi_disable(&queue->napi); del_timer_sync(&queue->credit_timeout); } } @@ -465,6 +419,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, vif->queues = NULL; vif->num_queues = 0; + spin_lock_init(&vif->lock); + dev->netdev_ops = &xenvif_netdev_ops; dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -508,6 +464,8 @@ int xenvif_init_queue(struct xenvif_queue *queue) init_timer(&queue->credit_timeout); queue->credit_window_start = get_jiffies_64(); + queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES; + skb_queue_head_init(&queue->rx_queue); skb_queue_head_init(&queue->tx_queue); @@ -539,8 +497,6 @@ int xenvif_init_queue(struct xenvif_queue *queue) queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; } - init_timer(&queue->rx_stalled); - return 0; } @@ -551,7 +507,6 @@ void xenvif_carrier_on(struct xenvif *vif) dev_set_mtu(vif->dev, ETH_DATA_LEN); netdev_update_features(vif->dev); set_bit(VIF_STATUS_CONNECTED, &vif->status); - netif_carrier_on(vif->dev); if (netif_running(vif->dev)) xenvif_up(vif); rtnl_unlock(); @@ -611,6 +566,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, disable_irq(queue->rx_irq); } + queue->stalled = true; + task = kthread_create(xenvif_kthread_guest_rx, (void *)queue, "%s-guest-rx", queue->name); if (IS_ERR(task)) { @@ -674,7 +631,6 @@ void xenvif_disconnect(struct xenvif *vif) netif_napi_del(&queue->napi); if (queue->task) { - del_timer_sync(&queue->rx_stalled); kthread_stop(queue->task); queue->task = NULL; } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 08f65996534c..4a509f715fe8 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -55,13 +55,20 @@ bool separate_tx_rx_irq = 1; module_param(separate_tx_rx_irq, bool, 0644); -/* When guest ring is filled up, qdisc queues the packets for us, but we have - * to timeout them, otherwise other guests' packets can get stuck there +/* The time that packets can stay on the guest Rx internal queue + * before they are dropped. */ unsigned int rx_drain_timeout_msecs = 10000; module_param(rx_drain_timeout_msecs, uint, 0444); unsigned int rx_drain_timeout_jiffies; +/* The length of time before the frontend is considered unresponsive + * because it isn't providing Rx slots. 
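
The stall timeout introduced just below follows the usual kernel pattern for time-valued module parameters: the user-visible knob is in milliseconds and is converted to jiffies once at module init, so the hot paths can use cheap jiffies comparisons. A generic sketch of the pattern, with hypothetical names:

#include <linux/module.h>
#include <linux/jiffies.h>

static unsigned int stall_timeout_msecs = 60000;  /* user-tunable knob */
module_param(stall_timeout_msecs, uint, 0444);    /* read-only at runtime */

static unsigned int stall_timeout_jiffies;        /* derived value */

static int __init example_init(void)
{
	/* Convert once; time_after() etc. then work directly in jiffies. */
	stall_timeout_jiffies = msecs_to_jiffies(stall_timeout_msecs);
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");
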
+ */ +static unsigned int rx_stall_timeout_msecs = 60000; +module_param(rx_stall_timeout_msecs, uint, 0444); +static unsigned int rx_stall_timeout_jiffies; + unsigned int xenvif_max_queues; module_param_named(max_queues, xenvif_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, @@ -75,6 +82,16 @@ MODULE_PARM_DESC(max_queues, static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; module_param(fatal_skb_slots, uint, 0444); +/* The amount to copy out of the first guest Tx slot into the skb's + * linear area. If the first slot has more data, it will be mapped + * and put into the first frag. + * + * This is sized to avoid pulling headers from the frags for most + * TCP/IP packets. + */ +#define XEN_NETBACK_TX_COPY_LEN 128 + + static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, u8 status); @@ -83,7 +100,6 @@ static void make_tx_response(struct xenvif_queue *queue, s8 st); static inline int tx_work_todo(struct xenvif_queue *queue); -static inline int rx_work_todo(struct xenvif_queue *queue); static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, u16 id, @@ -119,13 +135,6 @@ static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf) pending_tx_info[0]); } -/* This is a miniumum size for the linear area to avoid lots of - * calls to __pskb_pull_tail() as we set up checksum offsets. The - * value 128 was chosen as it covers all IPv4 and most likely - * IPv6 headers. - */ -#define PKT_PROT_LEN 128 - static u16 frag_get_pending_idx(skb_frag_t *frag) { return (u16)frag->page_offset; @@ -163,6 +172,69 @@ bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed) return false; } +void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) +{ + unsigned long flags; + + spin_lock_irqsave(&queue->rx_queue.lock, flags); + + __skb_queue_tail(&queue->rx_queue, skb); + + queue->rx_queue_len += skb->len; + if (queue->rx_queue_len > queue->rx_queue_max) + netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); + + spin_unlock_irqrestore(&queue->rx_queue.lock, flags); +} + +static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) +{ + struct sk_buff *skb; + + spin_lock_irq(&queue->rx_queue.lock); + + skb = __skb_dequeue(&queue->rx_queue); + if (skb) + queue->rx_queue_len -= skb->len; + + spin_unlock_irq(&queue->rx_queue.lock); + + return skb; +} + +static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue) +{ + spin_lock_irq(&queue->rx_queue.lock); + + if (queue->rx_queue_len < queue->rx_queue_max) + netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); + + spin_unlock_irq(&queue->rx_queue.lock); +} + + +static void xenvif_rx_queue_purge(struct xenvif_queue *queue) +{ + struct sk_buff *skb; + while ((skb = xenvif_rx_dequeue(queue)) != NULL) + kfree_skb(skb); +} + +static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) +{ + struct sk_buff *skb; + + for(;;) { + skb = skb_peek(&queue->rx_queue); + if (!skb) + break; + if (time_before(jiffies, XENVIF_RX_CB(skb)->expires)) + break; + xenvif_rx_dequeue(queue); + kfree_skb(skb); + } +} + /* * Returns true if we should start a new receive buffer instead of * adding 'size' bytes to a buffer which currently contains 'offset' @@ -237,13 +309,6 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, return meta; } -struct xenvif_rx_cb { - int meta_slots_used; - bool full_coalesce; -}; - -#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb) - /* * Set up the grant 
operations for this fragment. If it's a flipping * interface, we also set up the unmap request from here. @@ -587,12 +652,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue) skb_queue_head_init(&rxq); - while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) { + while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX) + && (skb = xenvif_rx_dequeue(queue)) != NULL) { RING_IDX max_slots_needed; RING_IDX old_req_cons; RING_IDX ring_slots_used; int i; + queue->last_rx_time = jiffies; + /* We need a cheap worse case estimate for the number of * slots we'll use. */ @@ -634,15 +702,6 @@ static void xenvif_rx_action(struct xenvif_queue *queue) skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) max_slots_needed++; - /* If the skb may not fit then bail out now */ - if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) { - skb_queue_head(&queue->rx_queue, skb); - need_to_notify = true; - queue->rx_last_skb_slots = max_slots_needed; - break; - } else - queue->rx_last_skb_slots = 0; - old_req_cons = queue->rx.req_cons; XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue); ring_slots_used = queue->rx.req_cons - old_req_cons; @@ -1390,9 +1449,9 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, index = pending_index(queue->pending_cons); pending_idx = queue->pending_ring[index]; - data_len = (txreq.size > PKT_PROT_LEN && + data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN && ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? - PKT_PROT_LEN : txreq.size; + XEN_NETBACK_TX_COPY_LEN : txreq.size; skb = xenvif_alloc_skb(data_len); if (unlikely(skb == NULL)) { @@ -1494,7 +1553,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s unsigned int len; BUG_ON(i >= MAX_SKB_FRAGS); - page = alloc_page(GFP_ATOMIC|__GFP_COLD); + page = alloc_page(GFP_ATOMIC); if (!page) { int j; skb->truesize += skb->data_len; @@ -1597,11 +1656,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) } } - if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) { - int target = min_t(int, skb->len, PKT_PROT_LEN); - __pskb_pull_tail(skb, target - skb_headlen(skb)); - } - skb->dev = queue->vif->dev; skb->protocol = eth_type_trans(skb, skb->dev); skb_reset_network_header(skb); @@ -1869,12 +1923,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) } } -static inline int rx_work_todo(struct xenvif_queue *queue) -{ - return (!skb_queue_empty(&queue->rx_queue) && - xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)); -} - static inline int tx_work_todo(struct xenvif_queue *queue) { if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) @@ -1931,92 +1979,121 @@ err: return err; } -static void xenvif_start_queue(struct xenvif_queue *queue) +static void xenvif_queue_carrier_off(struct xenvif_queue *queue) +{ + struct xenvif *vif = queue->vif; + + queue->stalled = true; + + /* At least one queue has stalled? Disable the carrier. */ + spin_lock(&vif->lock); + if (vif->stalled_queues++ == 0) { + netdev_info(vif->dev, "Guest Rx stalled"); + netif_carrier_off(vif->dev); + } + spin_unlock(&vif->lock); +} + +static void xenvif_queue_carrier_on(struct xenvif_queue *queue) { - if (xenvif_schedulable(queue->vif)) - xenvif_wake_queue(queue); + struct xenvif *vif = queue->vif; + + queue->last_rx_time = jiffies; /* Reset Rx stall detection. */ + queue->stalled = false; + + /* All queues are ready? Enable the carrier. 
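
xenvif_queue_carrier_off() above, together with the _on() path completing below, implements a counted carrier across queues: the first queue to stall takes the link down, and it comes back only when the last stalled queue recovers, with vif->lock serializing the counter. The same pattern in miniature (hypothetical struct):

#include <linux/spinlock.h>
#include <linux/netdevice.h>

struct vif_state {                      /* hypothetical per-device state */
	spinlock_t lock;
	unsigned int stalled_queues;
	struct net_device *dev;
};

static void queue_stalled(struct vif_state *v)
{
	spin_lock(&v->lock);
	if (v->stalled_queues++ == 0)   /* 0 -> 1: first queue to stall */
		netif_carrier_off(v->dev);
	spin_unlock(&v->lock);
}

static void queue_recovered(struct vif_state *v)
{
	spin_lock(&v->lock);
	if (--v->stalled_queues == 0)   /* 1 -> 0: last stalled queue */
		netif_carrier_on(v->dev);
	spin_unlock(&v->lock);
}

Note that connect time initializes every queue as stalled (queue->stalled = true above, and stalled_queues set to the full queue count in the xenbus.c hunk below), so the carrier first comes up only once each queue has seen Rx slots from the frontend.
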
*/ + spin_lock(&vif->lock); + if (--vif->stalled_queues == 0) { + netdev_info(vif->dev, "Guest Rx ready"); + netif_carrier_on(vif->dev); + } + spin_unlock(&vif->lock); } -/* Only called from the queue's thread, it handles the situation when the guest - * doesn't post enough requests on the receiving ring. - * First xenvif_start_xmit disables QDisc and start a timer, and then either the - * timer fires, or the guest send an interrupt after posting new request. If it - * is the timer, the carrier is turned off here. - * */ -static void xenvif_rx_purge_event(struct xenvif_queue *queue) +static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue) { - /* Either the last unsuccesful skb or at least 1 slot should fit */ - int needed = queue->rx_last_skb_slots ? - queue->rx_last_skb_slots : 1; + RING_IDX prod, cons; - /* It is assumed that if the guest post new slots after this, the RX - * interrupt will set the QUEUE_STATUS_RX_PURGE_EVENT bit and wake up - * the thread again - */ - set_bit(QUEUE_STATUS_RX_STALLED, &queue->status); - if (!xenvif_rx_ring_slots_available(queue, needed)) { - rtnl_lock(); - if (netif_carrier_ok(queue->vif->dev)) { - /* Timer fired and there are still no slots. Turn off - * everything except the interrupts - */ - netif_carrier_off(queue->vif->dev); - skb_queue_purge(&queue->rx_queue); - queue->rx_last_skb_slots = 0; - if (net_ratelimit()) - netdev_err(queue->vif->dev, "Carrier off due to lack of guest response on queue %d\n", queue->id); - } else { - /* Probably an another queue already turned the carrier - * off, make sure nothing is stucked in the internal - * queue of this queue - */ - skb_queue_purge(&queue->rx_queue); - queue->rx_last_skb_slots = 0; - } - rtnl_unlock(); - } else if (!netif_carrier_ok(queue->vif->dev)) { - unsigned int num_queues = queue->vif->num_queues; - unsigned int i; - /* The carrier was down, but an interrupt kicked - * the thread again after new requests were - * posted - */ - clear_bit(QUEUE_STATUS_RX_STALLED, - &queue->status); - rtnl_lock(); - netif_carrier_on(queue->vif->dev); - netif_tx_wake_all_queues(queue->vif->dev); - rtnl_unlock(); + prod = queue->rx.sring->req_prod; + cons = queue->rx.req_cons; - for (i = 0; i < num_queues; i++) { - struct xenvif_queue *temp = &queue->vif->queues[i]; + return !queue->stalled + && prod - cons < XEN_NETBK_RX_SLOTS_MAX + && time_after(jiffies, + queue->last_rx_time + rx_stall_timeout_jiffies); +} - xenvif_napi_schedule_or_enable_events(temp); - } - if (net_ratelimit()) - netdev_err(queue->vif->dev, "Carrier on again\n"); - } else { - /* Queuing were stopped, but the guest posted - * new requests and sent an interrupt - */ - clear_bit(QUEUE_STATUS_RX_STALLED, - &queue->status); - del_timer_sync(&queue->rx_stalled); - xenvif_start_queue(queue); +static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) +{ + RING_IDX prod, cons; + + prod = queue->rx.sring->req_prod; + cons = queue->rx.req_cons; + + return queue->stalled + && prod - cons >= XEN_NETBK_RX_SLOTS_MAX; +} + +static bool xenvif_have_rx_work(struct xenvif_queue *queue) +{ + return (!skb_queue_empty(&queue->rx_queue) + && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)) + || xenvif_rx_queue_stalled(queue) + || xenvif_rx_queue_ready(queue) + || kthread_should_stop() + || queue->vif->disabled; +} + +static long xenvif_rx_queue_timeout(struct xenvif_queue *queue) +{ + struct sk_buff *skb; + long timeout; + + skb = skb_peek(&queue->rx_queue); + if (!skb) + return MAX_SCHEDULE_TIMEOUT; + + timeout = XENVIF_RX_CB(skb)->expires - 
jiffies; + return timeout < 0 ? 0 : timeout; +} + +/* Wait until the guest Rx thread has work. + * + * The timeout needs to be adjusted based on the current head of the + * queue (and not just the head at the beginning). In particular, if + * the queue is initially empty an infinite timeout is used and this + * needs to be reduced when a skb is queued. + * + * This cannot be done with wait_event_timeout() because it only + * calculates the timeout once. + */ +static void xenvif_wait_for_rx_work(struct xenvif_queue *queue) +{ + DEFINE_WAIT(wait); + + if (xenvif_have_rx_work(queue)) + return; + + for (;;) { + long ret; + + prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); + if (xenvif_have_rx_work(queue)) + break; + ret = schedule_timeout(xenvif_rx_queue_timeout(queue)); + if (!ret) + break; } + finish_wait(&queue->wq, &wait); } int xenvif_kthread_guest_rx(void *data) { struct xenvif_queue *queue = data; - struct sk_buff *skb; + struct xenvif *vif = queue->vif; - while (!kthread_should_stop()) { - wait_event_interruptible(queue->wq, - rx_work_todo(queue) || - queue->vif->disabled || - test_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status) || - kthread_should_stop()); + for (;;) { + xenvif_wait_for_rx_work(queue); if (kthread_should_stop()) break; @@ -2028,35 +2105,38 @@ int xenvif_kthread_guest_rx(void *data) * context so we defer it here, if this thread is * associated with queue 0. */ - if (unlikely(queue->vif->disabled && queue->id == 0)) { - xenvif_carrier_off(queue->vif); - } else if (unlikely(queue->vif->disabled)) { - /* kthread_stop() would be called upon this thread soon, - * be a bit proactive - */ - skb_queue_purge(&queue->rx_queue); - queue->rx_last_skb_slots = 0; - } else if (unlikely(test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT, - &queue->status))) { - xenvif_rx_purge_event(queue); - } else if (!netif_carrier_ok(queue->vif->dev)) { - /* Another queue stalled and turned the carrier off, so - * purge the internal queue of queues which were not - * blocked - */ - skb_queue_purge(&queue->rx_queue); - queue->rx_last_skb_slots = 0; + if (unlikely(vif->disabled && queue->id == 0)) { + xenvif_carrier_off(vif); + xenvif_rx_queue_purge(queue); + continue; } if (!skb_queue_empty(&queue->rx_queue)) xenvif_rx_action(queue); + /* If the guest hasn't provided any Rx slots for a + * while it's probably not responsive, drop the + * carrier so packets are dropped earlier. + */ + if (xenvif_rx_queue_stalled(queue)) + xenvif_queue_carrier_off(queue); + else if (xenvif_rx_queue_ready(queue)) + xenvif_queue_carrier_on(queue); + + /* Queued packets may have foreign pages from other + * domains. These cannot be queued indefinitely as + * this would starve guests of grant refs and transmit + * slots. 
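
xenvif_wait_for_rx_work() above is open-coded because wait_event_timeout() evaluates its timeout expression only once, while here the deadline must track whichever skb is currently at the head of the queue (and be infinite, MAX_SCHEDULE_TIMEOUT, while the queue is empty). The reusable shape of that idiom, assuming have_work() and next_timeout() callbacks:

#include <linux/wait.h>
#include <linux/sched.h>

static void wait_for_work(wait_queue_head_t *wq,
			  bool (*have_work)(void *),
			  long (*next_timeout)(void *), void *arg)
{
	DEFINE_WAIT(wait);

	if (have_work(arg))
		return;

	for (;;) {
		prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
		if (have_work(arg))
			break;
		/* Timeout is recomputed each pass; 0 means it expired. */
		if (!schedule_timeout(next_timeout(arg)))
			break;
	}
	finish_wait(wq, &wait);
}
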
+ */ + xenvif_rx_queue_drop_expired(queue); + + xenvif_rx_queue_maybe_wake(queue); + cond_resched(); } /* Bin any remaining skbs */ - while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) - dev_kfree_skb(skb); + xenvif_rx_queue_purge(queue); return 0; } @@ -2113,6 +2193,7 @@ static int __init netback_init(void) goto failed_init; rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs); + rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs); #ifdef CONFIG_DEBUG_FS xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL); diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 8079c31ac5e6..fab0d4b42f58 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -39,7 +39,7 @@ struct backend_info { static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); static void connect(struct backend_info *be); static int read_xenbus_vif_flags(struct backend_info *be); -static void backend_create_xenvif(struct backend_info *be); +static int backend_create_xenvif(struct backend_info *be); static void unregister_hotplug_status_watch(struct backend_info *be); static void set_backend_state(struct backend_info *be, enum xenbus_state state); @@ -52,6 +52,7 @@ static int xenvif_read_io_ring(struct seq_file *m, void *v) struct xenvif_queue *queue = m->private; struct xen_netif_tx_back_ring *tx_ring = &queue->tx; struct xen_netif_rx_back_ring *rx_ring = &queue->rx; + struct netdev_queue *dev_queue; if (tx_ring->sring) { struct xen_netif_tx_sring *sring = tx_ring->sring; @@ -112,6 +113,13 @@ static int xenvif_read_io_ring(struct seq_file *m, void *v) queue->credit_timeout.expires, jiffies); + dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id); + + seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n", + queue->rx_queue_len, queue->rx_queue_max, + skb_queue_len(&queue->rx_queue), + netif_tx_queue_stopped(dev_queue) ? "stopped" : "running"); + return 0; } @@ -344,7 +352,9 @@ static int netback_probe(struct xenbus_device *dev, be->state = XenbusStateInitWait; /* This kicks hotplug scripts, so do it immediately. */ - backend_create_xenvif(be); + err = backend_create_xenvif(be); + if (err) + goto fail; return 0; @@ -389,19 +399,19 @@ static int netback_uevent(struct xenbus_device *xdev, } -static void backend_create_xenvif(struct backend_info *be) +static int backend_create_xenvif(struct backend_info *be) { int err; long handle; struct xenbus_device *dev = be->dev; if (be->vif != NULL) - return; + return 0; err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle); if (err != 1) { xenbus_dev_fatal(dev, err, "reading handle"); - return; + return (err < 0) ? 
err : -EINVAL; } be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle); @@ -409,10 +419,11 @@ static void backend_create_xenvif(struct backend_info *be) err = PTR_ERR(be->vif); be->vif = NULL; xenbus_dev_fatal(dev, err, "creating interface"); - return; + return err; } kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); + return 0; } static void backend_disconnect(struct backend_info *be) @@ -703,6 +714,7 @@ static void connect(struct backend_info *be) be->vif->queues = vzalloc(requested_num_queues * sizeof(struct xenvif_queue)); be->vif->num_queues = requested_num_queues; + be->vif->stalled_queues = requested_num_queues; for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) { queue = &be->vif->queues[queue_index]; @@ -873,15 +885,10 @@ static int read_xenbus_vif_flags(struct backend_info *be) if (!rx_copy) return -EOPNOTSUPP; - if (vif->dev->tx_queue_len != 0) { - if (xenbus_scanf(XBT_NIL, dev->otherend, - "feature-rx-notify", "%d", &val) < 0) - val = 0; - if (val) - vif->can_queue = 1; - else - /* Must be non-zero for pfifo_fast to work. */ - vif->dev->tx_queue_len = 1; + if (xenbus_scanf(XBT_NIL, dev->otherend, + "feature-rx-notify", "%d", &val) < 0 || val == 0) { + xenbus_dev_fatal(dev, -EINVAL, "feature-rx-notify is mandatory"); + return -EINVAL; } if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index cca871346a0f..88a70f5ed594 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -77,7 +77,9 @@ struct netfront_cb { #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) -#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256) + +/* Minimum number of Rx slots (includes slot for GSO metadata). */ +#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1) /* Queue name is interface name with "-qNNN" appended */ #define QUEUE_NAME_SIZE (IFNAMSIZ + 6) @@ -137,13 +139,6 @@ struct netfront_queue { struct xen_netif_rx_front_ring rx; int rx_ring_ref; - /* Receive-ring batched refills. 
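
One consequence of the queuing rework, visible in the xenbus.c hunk above: feature-rx-notify stops being optional. The backend no longer has a mode that parks packets in the qdisc, so a frontend that cannot raise Rx notifications is refused outright. A hedged sketch of reading a mandatory boolean feature over xenbus (hypothetical helper):

#include <xen/xenbus.h>

/* Return 0 if the frontend advertises the feature, -EINVAL otherwise. */
static int require_feature(struct xenbus_device *dev, const char *node)
{
	int val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, node, "%d", &val) < 0)
		val = 0;                /* absent counts as unsupported */
	if (!val) {
		xenbus_dev_fatal(dev, -EINVAL, "%s is mandatory", node);
		return -EINVAL;
	}
	return 0;
}
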
*/ -#define RX_MIN_TARGET 8 -#define RX_DFL_MIN_TARGET 64 -#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) - unsigned rx_min_target, rx_max_target, rx_target; - struct sk_buff_head rx_batch; - struct timer_list rx_refill_timer; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; @@ -251,7 +246,7 @@ static void rx_refill_timeout(unsigned long data) static int netfront_tx_slot_available(struct netfront_queue *queue) { return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < - (TX_MAX_TARGET - MAX_SKB_FRAGS - 2); + (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2); } static void xennet_maybe_wake_tx(struct netfront_queue *queue) @@ -265,77 +260,55 @@ static void xennet_maybe_wake_tx(struct netfront_queue *queue) netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); } -static void xennet_alloc_rx_buffers(struct netfront_queue *queue) + +static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) { - unsigned short id; struct sk_buff *skb; struct page *page; - int i, batch_target, notify; - RING_IDX req_prod = queue->rx.req_prod_pvt; - grant_ref_t ref; - unsigned long pfn; - void *vaddr; - struct xen_netif_rx_request *req; - if (unlikely(!netif_carrier_ok(queue->info->netdev))) - return; + skb = __netdev_alloc_skb(queue->info->netdev, + RX_COPY_THRESHOLD + NET_IP_ALIGN, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; - /* - * Allocate skbuffs greedily, even though we batch updates to the - * receive ring. This creates a less bursty demand on the memory - * allocator, so should reduce the chance of failed allocation requests - * both for ourself and for other kernel subsystems. - */ - batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons); - for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) { - skb = __netdev_alloc_skb(queue->info->netdev, - RX_COPY_THRESHOLD + NET_IP_ALIGN, - GFP_ATOMIC | __GFP_NOWARN); - if (unlikely(!skb)) - goto no_skb; - - /* Align ip header to a 16 bytes boundary */ - skb_reserve(skb, NET_IP_ALIGN); - - page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); - if (!page) { - kfree_skb(skb); -no_skb: - /* Could not allocate any skbuffs. Try again later. */ - mod_timer(&queue->rx_refill_timer, - jiffies + (HZ/10)); - - /* Any skbuffs queued for refill? Force them out. */ - if (i != 0) - goto refill; - break; - } - - skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); - __skb_queue_tail(&queue->rx_batch, skb); + page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); + if (!page) { + kfree_skb(skb); + return NULL; } + skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); + + /* Align ip header to a 16 bytes boundary */ + skb_reserve(skb, NET_IP_ALIGN); + skb->dev = queue->info->netdev; + + return skb; +} + - /* Is the batch large enough to be worthwhile? */ - if (i < (queue->rx_target/2)) { - if (req_prod > queue->rx.sring->req_prod) - goto push; +static void xennet_alloc_rx_buffers(struct netfront_queue *queue) +{ + RING_IDX req_prod = queue->rx.req_prod_pvt; + int notify; + + if (unlikely(!netif_carrier_ok(queue->info->netdev))) return; - } - /* Adjust our fill target if we risked running out of buffers. 
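
The loop continuing below replaces all of the fill-target tuning with a simple policy: keep the ring as full as allocation allows, and if fewer than a minimum number of requests could be posted, retry from a timer. Condensed, with hypothetical helpers standing in for the real ring bookkeeping:

#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

#define MIN_SLOTS 2                     /* illustrative minimum */

struct ring_state;                      /* hypothetical ring bookkeeping */
bool ring_full(struct ring_state *r);
unsigned int outstanding_reqs(struct ring_state *r);
struct sk_buff *alloc_one_buffer(struct ring_state *r);
void post_rx_request(struct ring_state *r, struct sk_buff *skb);

static void refill(struct ring_state *r, struct timer_list *retry)
{
	while (!ring_full(r)) {
		struct sk_buff *skb = alloc_one_buffer(r);

		if (!skb)
			break;          /* allocation failed: stop early */
		post_rx_request(r, skb);
	}
	/* Too few requests outstanding? Try again in 100 ms. */
	if (outstanding_reqs(r) < MIN_SLOTS)
		mod_timer(retry, jiffies + HZ / 10);
}
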
*/ - if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) && - ((queue->rx_target *= 2) > queue->rx_max_target)) - queue->rx_target = queue->rx_max_target; + for (req_prod = queue->rx.req_prod_pvt; + req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE; + req_prod++) { + struct sk_buff *skb; + unsigned short id; + grant_ref_t ref; + unsigned long pfn; + struct xen_netif_rx_request *req; - refill: - for (i = 0; ; i++) { - skb = __skb_dequeue(&queue->rx_batch); - if (skb == NULL) + skb = xennet_alloc_one_rx_buffer(queue); + if (!skb) break; - skb->dev = queue->info->netdev; - - id = xennet_rxidx(req_prod + i); + id = xennet_rxidx(req_prod); BUG_ON(queue->rx_skbs[id]); queue->rx_skbs[id] = skb; @@ -345,9 +318,8 @@ no_skb: queue->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); - vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0])); - req = RING_GET_REQUEST(&queue->rx, req_prod + i); + req = RING_GET_REQUEST(&queue->rx, req_prod); gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, pfn_to_mfn(pfn), @@ -357,11 +329,16 @@ no_skb: req->gref = ref; } + queue->rx.req_prod_pvt = req_prod; + + /* Not enough requests? Try again later. */ + if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { + mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); + return; + } + wmb(); /* barrier so backend seens requests */ - /* Above is a suitable barrier to ensure backend will see requests. */ - queue->rx.req_prod_pvt = req_prod + i; - push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); if (notify) notify_remote_via_irq(queue->rx_irq); @@ -1070,13 +1047,6 @@ err: work_done -= handle_incoming_queue(queue, &rxq); - /* If we get a callback with very few responses, reduce fill target. */ - /* NB. Note exponential increase, linear decrease. 
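
With the fill targets gone, the gnttab_alloc_grant_references() calls above simply reserve one grant per ring slot. As a sanity check on the sizes involved (this relies on an assumption about the standard shared-ring layout, not on anything in this patch): a 4 KiB page holds a 64-byte ring header plus 8-byte request/response unions, and the entry count is rounded down to a power of two:

#include <linux/log2.h>
#include <linux/bug.h>

/* Back-of-envelope for PAGE_SIZE == 4096:
 *   usable bytes = 4096 - 64  = 4032
 *   raw entries  = 4032 / 8   = 504
 *   ring entries = rounddown_pow_of_two(504) = 256
 * so each of the Tx and Rx rings needs 256 grant references.
 */
static void ring_size_check(void)
{
	BUILD_BUG_ON(rounddown_pow_of_two((4096 - 64) / 8) != 256);
}
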
*/ - if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) > - ((3*queue->rx_target) / 4)) && - (--queue->rx_target < queue->rx_min_target)) - queue->rx_target = queue->rx_min_target; - xennet_alloc_rx_buffers(queue); if (work_done < budget) { @@ -1643,11 +1613,6 @@ static int xennet_init_queue(struct netfront_queue *queue) spin_lock_init(&queue->tx_lock); spin_lock_init(&queue->rx_lock); - skb_queue_head_init(&queue->rx_batch); - queue->rx_target = RX_DFL_MIN_TARGET; - queue->rx_min_target = RX_DFL_MIN_TARGET; - queue->rx_max_target = RX_MAX_TARGET; - init_timer(&queue->rx_refill_timer); queue->rx_refill_timer.data = (unsigned long)queue; queue->rx_refill_timer.function = rx_refill_timeout; @@ -1670,7 +1635,7 @@ static int xennet_init_queue(struct netfront_queue *queue) } /* A grant for every tx ring slot */ - if (gnttab_alloc_grant_references(TX_MAX_TARGET, + if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, &queue->gref_tx_head) < 0) { pr_alert("can't alloc tx grant refs\n"); err = -ENOMEM; @@ -1678,7 +1643,7 @@ static int xennet_init_queue(struct netfront_queue *queue) } /* A grant for every rx ring slot */ - if (gnttab_alloc_grant_references(RX_MAX_TARGET, + if (gnttab_alloc_grant_references(NET_RX_RING_SIZE, &queue->gref_rx_head) < 0) { pr_alert("can't alloc rx grant refs\n"); err = -ENOMEM; @@ -2146,83 +2111,18 @@ static const struct ethtool_ops xennet_ethtool_ops = }; #ifdef CONFIG_SYSFS -static ssize_t show_rxbuf_min(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct net_device *netdev = to_net_dev(dev); - struct netfront_info *info = netdev_priv(netdev); - unsigned int num_queues = netdev->real_num_tx_queues; - - if (num_queues) - return sprintf(buf, "%u\n", info->queues[0].rx_min_target); - else - return sprintf(buf, "%u\n", RX_MIN_TARGET); -} - -static ssize_t store_rxbuf_min(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) +static ssize_t show_rxbuf(struct device *dev, + struct device_attribute *attr, char *buf) { - struct net_device *netdev = to_net_dev(dev); - struct netfront_info *np = netdev_priv(netdev); - unsigned int num_queues = netdev->real_num_tx_queues; - char *endp; - unsigned long target; - unsigned int i; - struct netfront_queue *queue; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - target = simple_strtoul(buf, &endp, 0); - if (endp == buf) - return -EBADMSG; - - if (target < RX_MIN_TARGET) - target = RX_MIN_TARGET; - if (target > RX_MAX_TARGET) - target = RX_MAX_TARGET; - - for (i = 0; i < num_queues; ++i) { - queue = &np->queues[i]; - spin_lock_bh(&queue->rx_lock); - if (target > queue->rx_max_target) - queue->rx_max_target = target; - queue->rx_min_target = target; - if (target > queue->rx_target) - queue->rx_target = target; - - xennet_alloc_rx_buffers(queue); - - spin_unlock_bh(&queue->rx_lock); - } - return len; -} - -static ssize_t show_rxbuf_max(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct net_device *netdev = to_net_dev(dev); - struct netfront_info *info = netdev_priv(netdev); - unsigned int num_queues = netdev->real_num_tx_queues; - - if (num_queues) - return sprintf(buf, "%u\n", info->queues[0].rx_max_target); - else - return sprintf(buf, "%u\n", RX_MAX_TARGET); + return sprintf(buf, "%lu\n", NET_RX_RING_SIZE); } -static ssize_t store_rxbuf_max(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) +static ssize_t store_rxbuf(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) { - struct 
net_device *netdev = to_net_dev(dev); - struct netfront_info *np = netdev_priv(netdev); - unsigned int num_queues = netdev->real_num_tx_queues; char *endp; unsigned long target; - unsigned int i = 0; - struct netfront_queue *queue = NULL; if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -2231,44 +2131,15 @@ static ssize_t store_rxbuf_max(struct device *dev, if (endp == buf) return -EBADMSG; - if (target < RX_MIN_TARGET) - target = RX_MIN_TARGET; - if (target > RX_MAX_TARGET) - target = RX_MAX_TARGET; - - for (i = 0; i < num_queues; ++i) { - queue = &np->queues[i]; - spin_lock_bh(&queue->rx_lock); - if (target < queue->rx_min_target) - queue->rx_min_target = target; - queue->rx_max_target = target; - if (target < queue->rx_target) - queue->rx_target = target; - - xennet_alloc_rx_buffers(queue); + /* rxbuf_min and rxbuf_max are no longer configurable. */ - spin_unlock_bh(&queue->rx_lock); - } return len; } -static ssize_t show_rxbuf_cur(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct net_device *netdev = to_net_dev(dev); - struct netfront_info *info = netdev_priv(netdev); - unsigned int num_queues = netdev->real_num_tx_queues; - - if (num_queues) - return sprintf(buf, "%u\n", info->queues[0].rx_target); - else - return sprintf(buf, "0\n"); -} - static struct device_attribute xennet_attrs[] = { - __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), - __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), - __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), + __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf), + __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf), + __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev)
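
The sysfs rework above keeps rxbuf_min, rxbuf_max and rxbuf_cur visible so existing tooling doesn't break, but they now all report the fixed ring size, and writes are parsed for sanity and then discarded. A minimal sketch of that keep-the-ABI pattern (hypothetical attribute; kstrtoul is used here in place of the older simple_strtoul):

#include <linux/device.h>
#include <linux/capability.h>
#include <linux/kernel.h>

static ssize_t show_fixed(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%u\n", 256);  /* constant: the knob is gone */
}

static ssize_t store_ignored(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	unsigned long val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (kstrtoul(buf, 0, &val))        /* still reject garbage input */
		return -EBADMSG;
	return len;                        /* accepted, deliberately ignored */
}
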