Diffstat (limited to 'drivers/infiniband/core')
23 files changed, 217 insertions, 219 deletions
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 5c9fac7cf420..d77959089c38 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -121,7 +121,7 @@ struct ib_gid_table { u32 default_gid_indices; }; -static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port) +static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port) { struct ib_event event; @@ -197,7 +197,7 @@ int ib_cache_gid_parse_type_str(const char *buf) } EXPORT_SYMBOL(ib_cache_gid_parse_type_str); -static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port) +static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port) { return device->port_data[port].cache.gid; } @@ -237,10 +237,10 @@ static void put_gid_ndev(struct rcu_head *head) static void free_gid_entry_locked(struct ib_gid_table_entry *entry) { struct ib_device *device = entry->attr.device; - u8 port_num = entry->attr.port_num; + u32 port_num = entry->attr.port_num; struct ib_gid_table *table = rdma_gid_table(device, port_num); - dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__, + dev_dbg(&device->dev, "%s port=%u index=%d gid %pI6\n", __func__, port_num, entry->attr.index, entry->attr.gid.raw); write_lock_irq(&table->rwlock); @@ -282,7 +282,7 @@ static void free_gid_work(struct work_struct *work) struct ib_gid_table_entry *entry = container_of(work, struct ib_gid_table_entry, del_work); struct ib_device *device = entry->attr.device; - u8 port_num = entry->attr.port_num; + u32 port_num = entry->attr.port_num; struct ib_gid_table *table = rdma_gid_table(device, port_num); mutex_lock(&table->lock); @@ -379,7 +379,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry) * @ix: GID entry index to delete * */ -static void del_gid(struct ib_device *ib_dev, u8 port, +static void del_gid(struct ib_device *ib_dev, u32 port, struct ib_gid_table *table, int ix) { struct roce_gid_ndev_storage *ndev_storage; @@ -387,7 +387,7 @@ static void del_gid(struct ib_device *ib_dev, u8 port, lockdep_assert_held(&table->lock); - dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port, + dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port, ix, table->data_vec[ix]->attr.gid.raw); write_lock_irq(&table->rwlock); @@ -543,7 +543,7 @@ static void make_default_gid(struct net_device *dev, union ib_gid *gid) addrconf_ifid_eui48(&gid->raw[8], dev); } -static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port, +static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr, unsigned long mask, bool default_gid) { @@ -587,7 +587,7 @@ out_unlock: return ret; } -int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, +int ib_cache_gid_add(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr) { unsigned long mask = GID_ATTR_FIND_MASK_GID | @@ -598,7 +598,7 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, } static int -_ib_cache_gid_del(struct ib_device *ib_dev, u8 port, +_ib_cache_gid_del(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr, unsigned long mask, bool default_gid) { @@ -627,7 +627,7 @@ out_unlock: return ret; } -int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, +int ib_cache_gid_del(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr) { unsigned long mask = GID_ATTR_FIND_MASK_GID | @@ -638,7 +638,7 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, return 
_ib_cache_gid_del(ib_dev, port, gid, attr, mask, false); } -int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, +int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port, struct net_device *ndev) { struct ib_gid_table *table; @@ -683,7 +683,7 @@ const struct ib_gid_attr * rdma_find_gid_by_port(struct ib_device *ib_dev, const union ib_gid *gid, enum ib_gid_type gid_type, - u8 port, struct net_device *ndev) + u32 port, struct net_device *ndev) { int local_index; struct ib_gid_table *table; @@ -734,7 +734,7 @@ EXPORT_SYMBOL(rdma_find_gid_by_port); * */ const struct ib_gid_attr *rdma_find_gid_by_filter( - struct ib_device *ib_dev, const union ib_gid *gid, u8 port, + struct ib_device *ib_dev, const union ib_gid *gid, u32 port, bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *, void *), void *context) @@ -818,7 +818,7 @@ static void release_gid_table(struct ib_device *device, kfree(table); } -static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port, +static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port, struct ib_gid_table *table) { int i; @@ -834,7 +834,7 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port, mutex_unlock(&table->lock); } -void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, +void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port, struct net_device *ndev, unsigned long gid_type_mask, enum ib_cache_gid_default_mode mode) @@ -867,7 +867,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, } } -static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port, +static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port, struct ib_gid_table *table) { unsigned int i; @@ -884,7 +884,7 @@ static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port, static void gid_table_release_one(struct ib_device *ib_dev) { - unsigned int p; + u32 p; rdma_for_each_port (ib_dev, p) { release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid); @@ -895,7 +895,7 @@ static void gid_table_release_one(struct ib_device *ib_dev) static int _gid_table_setup_one(struct ib_device *ib_dev) { struct ib_gid_table *table; - unsigned int rdma_port; + u32 rdma_port; rdma_for_each_port (ib_dev, rdma_port) { table = alloc_gid_table( @@ -915,7 +915,7 @@ rollback_table_setup: static void gid_table_cleanup_one(struct ib_device *ib_dev) { - unsigned int p; + u32 p; rdma_for_each_port (ib_dev, p) cleanup_gid_table_port(ib_dev, p, @@ -950,7 +950,7 @@ static int gid_table_setup_one(struct ib_device *ib_dev) * Returns 0 on success or appropriate error code. 
* */ -int rdma_query_gid(struct ib_device *device, u8 port_num, +int rdma_query_gid(struct ib_device *device, u32 port_num, int index, union ib_gid *gid) { struct ib_gid_table *table; @@ -1014,7 +1014,7 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device, unsigned long mask = GID_ATTR_FIND_MASK_GID | GID_ATTR_FIND_MASK_GID_TYPE; struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type}; - unsigned int p; + u32 p; if (ndev) mask |= GID_ATTR_FIND_MASK_NETDEV; @@ -1043,7 +1043,7 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device, EXPORT_SYMBOL(rdma_find_gid); int ib_get_cached_pkey(struct ib_device *device, - u8 port_num, + u32 port_num, int index, u16 *pkey) { @@ -1069,9 +1069,8 @@ int ib_get_cached_pkey(struct ib_device *device, } EXPORT_SYMBOL(ib_get_cached_pkey); -int ib_get_cached_subnet_prefix(struct ib_device *device, - u8 port_num, - u64 *sn_pfx) +int ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num, + u64 *sn_pfx) { unsigned long flags; @@ -1086,10 +1085,8 @@ int ib_get_cached_subnet_prefix(struct ib_device *device, } EXPORT_SYMBOL(ib_get_cached_subnet_prefix); -int ib_find_cached_pkey(struct ib_device *device, - u8 port_num, - u16 pkey, - u16 *index) +int ib_find_cached_pkey(struct ib_device *device, u32 port_num, + u16 pkey, u16 *index) { struct ib_pkey_cache *cache; unsigned long flags; @@ -1132,10 +1129,8 @@ err: } EXPORT_SYMBOL(ib_find_cached_pkey); -int ib_find_exact_cached_pkey(struct ib_device *device, - u8 port_num, - u16 pkey, - u16 *index) +int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num, + u16 pkey, u16 *index) { struct ib_pkey_cache *cache; unsigned long flags; @@ -1169,9 +1164,7 @@ err: } EXPORT_SYMBOL(ib_find_exact_cached_pkey); -int ib_get_cached_lmc(struct ib_device *device, - u8 port_num, - u8 *lmc) +int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc) { unsigned long flags; int ret = 0; @@ -1187,8 +1180,7 @@ int ib_get_cached_lmc(struct ib_device *device, } EXPORT_SYMBOL(ib_get_cached_lmc); -int ib_get_cached_port_state(struct ib_device *device, - u8 port_num, +int ib_get_cached_port_state(struct ib_device *device, u32 port_num, enum ib_port_state *port_state) { unsigned long flags; @@ -1222,7 +1214,7 @@ EXPORT_SYMBOL(ib_get_cached_port_state); * code. 
*/ const struct ib_gid_attr * -rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index) +rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index) { const struct ib_gid_attr *attr = ERR_PTR(-ENODATA); struct ib_gid_table *table; @@ -1263,7 +1255,7 @@ ssize_t rdma_query_gid_table(struct ib_device *device, const struct ib_gid_attr *gid_attr; ssize_t num_entries = 0, ret; struct ib_gid_table *table; - unsigned int port_num, i; + u32 port_num, i; struct net_device *ndev; unsigned long flags; @@ -1361,7 +1353,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr) container_of(attr, struct ib_gid_table_entry, attr); struct ib_device *device = entry->attr.device; struct net_device *ndev = ERR_PTR(-EINVAL); - u8 port_num = entry->attr.port_num; + u32 port_num = entry->attr.port_num; struct ib_gid_table *table; unsigned long flags; bool valid; @@ -1441,7 +1433,7 @@ int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr, EXPORT_SYMBOL(rdma_read_gid_l2_fields); static int config_non_roce_gid_cache(struct ib_device *device, - u8 port, int gid_tbl_len) + u32 port, int gid_tbl_len) { struct ib_gid_attr gid_attr = {}; struct ib_gid_table *table; @@ -1472,7 +1464,7 @@ err: } static int -ib_cache_update(struct ib_device *device, u8 port, bool enforce_security) +ib_cache_update(struct ib_device *device, u32 port, bool enforce_security) { struct ib_port_attr *tprops = NULL; struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache; @@ -1621,7 +1613,7 @@ EXPORT_SYMBOL(ib_dispatch_event); int ib_cache_setup_one(struct ib_device *device) { - unsigned int p; + u32 p; int err; rwlock_init(&device->cache_lock); @@ -1641,7 +1633,7 @@ int ib_cache_setup_one(struct ib_device *device) void ib_cache_release_one(struct ib_device *device) { - unsigned int p; + u32 p; /* * The release function frees all the cache elements. 
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 3d194bb60840..74ca5e67a113 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -202,7 +202,7 @@ static struct attribute *cm_counter_default_attrs[] = { struct cm_port { struct cm_device *cm_dev; struct ib_mad_agent *mad_agent; - u8 port_num; + u32 port_num; struct list_head cm_priv_prim_list; struct list_head cm_priv_altr_list; struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; @@ -1631,7 +1631,7 @@ static bool cm_req_has_alt_path(struct cm_req_msg *req_msg) req_msg)))); } -static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num, +static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num, struct sa_path_rec *path, union ib_gid *gid) { if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num)) @@ -1750,7 +1750,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg, static u16 cm_get_bth_pkey(struct cm_work *work) { struct ib_device *ib_dev = work->port->cm_dev->ib_device; - u8 port_num = work->port->port_num; + u32 port_num = work->port->port_num; u16 pkey_index = work->mad_recv_wc->wc->pkey_index; u16 pkey; int ret; @@ -1778,7 +1778,7 @@ static void cm_opa_to_ib_sgid(struct cm_work *work, struct sa_path_rec *path) { struct ib_device *dev = work->port->cm_dev->ib_device; - u8 port_num = work->port->port_num; + u32 port_num = work->port->port_num; if (rdma_cap_opa_ah(dev, port_num) && (ib_is_opa_gid(&path->sgid))) { @@ -4334,7 +4334,7 @@ static int cm_add_one(struct ib_device *ib_device) unsigned long flags; int ret; int count = 0; - unsigned int i; + u32 i; cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt), GFP_KERNEL); @@ -4432,7 +4432,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) .clr_port_cap_mask = IB_PORT_CM_SUP }; unsigned long flags; - unsigned int i; + u32 i; write_lock_irqsave(&cm.device_lock, flags); list_del(&cm_dev->list); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 532f2c09a53d..5ad22b261b2b 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -278,7 +278,7 @@ struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, } int cma_get_default_gid_type(struct cma_device *cma_dev, - unsigned int port) + u32 port) { if (!rdma_is_port_valid(cma_dev->device, port)) return -EINVAL; @@ -287,7 +287,7 @@ int cma_get_default_gid_type(struct cma_device *cma_dev, } int cma_set_default_gid_type(struct cma_device *cma_dev, - unsigned int port, + u32 port, enum ib_gid_type default_gid_type) { unsigned long supported_gids; @@ -310,7 +310,7 @@ int cma_set_default_gid_type(struct cma_device *cma_dev, return 0; } -int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port) +int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port) { if (!rdma_is_port_valid(cma_dev->device, port)) return -EINVAL; @@ -318,7 +318,7 @@ int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port) return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)]; } -int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port, +int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port, u8 default_roce_tos) { if (!rdma_is_port_valid(cma_dev->device, port)) @@ -553,7 +553,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a } static const struct ib_gid_attr * -cma_validate_port(struct ib_device *device, u8 port, 
+cma_validate_port(struct ib_device *device, u32 port, enum ib_gid_type gid_type, union ib_gid *gid, struct rdma_id_private *id_priv) @@ -611,7 +611,7 @@ static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv) struct cma_device *cma_dev; enum ib_gid_type gid_type; int ret = -ENODEV; - unsigned int port; + u32 port; if (dev_addr->dev_type != ARPHRD_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) @@ -702,8 +702,8 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev; enum ib_gid_type gid_type; int ret = -ENODEV; - unsigned int port; union ib_gid gid; + u32 port; if (dev_addr->dev_type != ARPHRD_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) @@ -1572,7 +1572,7 @@ static bool cma_match_private_data(struct rdma_id_private *id_priv, static bool cma_protocol_roce(const struct rdma_cm_id *id) { struct ib_device *device = id->device; - const int port_num = id->port_num ?: rdma_start_port(device); + const u32 port_num = id->port_num ?: rdma_start_port(device); return rdma_protocol_roce(device, port_num); } @@ -4861,9 +4861,9 @@ static int cma_add_one(struct ib_device *device) struct rdma_id_private *to_destroy; struct cma_device *cma_dev; struct rdma_id_private *id_priv; - unsigned int i; unsigned long supported_gids = 0; int ret; + u32 i; cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL); if (!cma_dev) diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c index e0d5e3bae458..9ac16e0db761 100644 --- a/drivers/infiniband/core/cma_configfs.c +++ b/drivers/infiniband/core/cma_configfs.c @@ -43,7 +43,7 @@ struct cma_device; struct cma_dev_group; struct cma_dev_port_group { - unsigned int port_num; + u32 port_num; struct cma_dev_group *cma_dev_group; struct config_group group; }; @@ -200,10 +200,10 @@ static const struct config_item_type cma_port_group_type = { static int make_cma_ports(struct cma_dev_group *cma_dev_group, struct cma_device *cma_dev) { - struct ib_device *ibdev; - unsigned int i; - unsigned int ports_num; struct cma_dev_port_group *ports; + struct ib_device *ibdev; + u32 ports_num; + u32 i; ibdev = cma_get_ib_dev(cma_dev); diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h index caece96ebcf5..8af6af4bbf6e 100644 --- a/drivers/infiniband/core/cma_priv.h +++ b/drivers/infiniband/core/cma_priv.h @@ -117,11 +117,11 @@ void cma_dev_put(struct cma_device *dev); typedef bool (*cma_device_filter)(struct ib_device *, void *); struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, void *cookie); -int cma_get_default_gid_type(struct cma_device *dev, unsigned int port); -int cma_set_default_gid_type(struct cma_device *dev, unsigned int port, +int cma_get_default_gid_type(struct cma_device *dev, u32 port); +int cma_set_default_gid_type(struct cma_device *dev, u32 port, enum ib_gid_type default_gid_type); -int cma_get_default_roce_tos(struct cma_device *dev, unsigned int port); -int cma_set_default_roce_tos(struct cma_device *dev, unsigned int port, +int cma_get_default_roce_tos(struct cma_device *dev, u32 port); +int cma_set_default_roce_tos(struct cma_device *dev, u32 port, u8 default_roce_tos); struct ib_device *cma_get_ib_dev(struct cma_device *dev); diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 315f7a297eee..29809dd30041 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -83,14 +83,14 @@ void ib_device_unregister_sysfs(struct ib_device *device); int 
ib_device_rename(struct ib_device *ibdev, const char *name); int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim); -typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port, +typedef void (*roce_netdev_callback)(struct ib_device *device, u32 port, struct net_device *idev, void *cookie); -typedef bool (*roce_netdev_filter)(struct ib_device *device, u8 port, +typedef bool (*roce_netdev_filter)(struct ib_device *device, u32 port, struct net_device *idev, void *cookie); struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, - unsigned int port); + u32 port); void ib_enum_roce_netdev(struct ib_device *ib_dev, roce_netdev_filter filter, @@ -113,7 +113,7 @@ int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb, struct ib_client_nl_info { struct sk_buff *nl_msg; struct device *cdev; - unsigned int port; + u32 port; u64 abi; }; int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name, @@ -128,24 +128,24 @@ int ib_cache_gid_parse_type_str(const char *buf); const char *ib_cache_gid_type_str(enum ib_gid_type gid_type); -void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, +void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port, struct net_device *ndev, unsigned long gid_type_mask, enum ib_cache_gid_default_mode mode); -int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, +int ib_cache_gid_add(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr); -int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, +int ib_cache_gid_del(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr); -int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, +int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port, struct net_device *ndev); int roce_gid_mgmt_init(void); void roce_gid_mgmt_cleanup(void); -unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port); +unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port); int ib_cache_setup_one(struct ib_device *device); void ib_cache_cleanup_one(struct ib_device *device); @@ -215,14 +215,14 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb, struct netlink_ext_ack *extack); int ib_get_cached_subnet_prefix(struct ib_device *device, - u8 port_num, - u64 *sn_pfx); + u32 port_num, + u64 *sn_pfx); #ifdef CONFIG_SECURITY_INFINIBAND void ib_security_release_port_pkey_list(struct ib_device *device); void ib_security_cache_change(struct ib_device *device, - u8 port_num, + u32 port_num, u64 subnet_prefix); int ib_security_modify_qp(struct ib_qp *qp, @@ -247,7 +247,7 @@ static inline void ib_security_release_port_pkey_list(struct ib_device *device) } static inline void ib_security_cache_change(struct ib_device *device, - u8 port_num, + u32 port_num, u64 subnet_prefix) { } @@ -381,7 +381,7 @@ int ib_setup_port_attrs(struct ib_core_device *coredev); int rdma_compatdev_set(u8 enable); -int ib_port_register_module_stat(struct ib_device *device, u8 port_num, +int ib_port_register_module_stat(struct ib_device *device, u32 port_num, struct kobject *kobj, struct kobj_type *ktype, const char *name); void ib_port_unregister_module_stat(struct kobject *kobj); diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index f3a7c1f404af..a284a358de51 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -34,7 +34,7 @@ static int __counter_set_mode(struct rdma_port_counter *port_counter, * * Return 0 on success. 
*/ -int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port, +int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port, enum rdma_nl_counter_mask mask, struct netlink_ext_ack *extack) { @@ -100,7 +100,7 @@ static int __rdma_counter_bind_qp(struct rdma_counter *counter, return ret; } -static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u8 port, +static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port, struct ib_qp *qp, enum rdma_nl_counter_mode mode) { @@ -238,7 +238,7 @@ static void counter_history_stat_update(struct rdma_counter *counter) * Return: The counter (with ref-count increased) if found */ static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp, - u8 port) + u32 port) { struct rdma_port_counter *port_counter; struct rdma_counter *counter = NULL; @@ -282,7 +282,7 @@ static void counter_release(struct kref *kref) * rdma_counter_bind_qp_auto - Check and bind the QP to a counter base on * the auto-mode rule */ -int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port) +int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port) { struct rdma_port_counter *port_counter; struct ib_device *dev = qp->device; @@ -352,7 +352,7 @@ int rdma_counter_query_stats(struct rdma_counter *counter) } static u64 get_running_counters_hwstat_sum(struct ib_device *dev, - u8 port, u32 index) + u32 port, u32 index) { struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; @@ -388,7 +388,7 @@ next: * rdma_counter_get_hwstat_value() - Get the sum value of all counters on a * specific port, including the running ones and history data */ -u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index) +u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index) { struct rdma_port_counter *port_counter; u64 sum; @@ -443,7 +443,7 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev, /* * rdma_counter_bind_qpn() - Bind QP @qp_num to counter @counter_id */ -int rdma_counter_bind_qpn(struct ib_device *dev, u8 port, +int rdma_counter_bind_qpn(struct ib_device *dev, u32 port, u32 qp_num, u32 counter_id) { struct rdma_port_counter *port_counter; @@ -493,7 +493,7 @@ err: * rdma_counter_bind_qpn_alloc() - Alloc a counter and bind QP @qp_num to it * The id of new counter is returned in @counter_id */ -int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port, +int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port, u32 qp_num, u32 *counter_id) { struct rdma_port_counter *port_counter; @@ -540,7 +540,7 @@ err: /* * rdma_counter_unbind_qpn() - Unbind QP @qp_num from a counter */ -int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port, +int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port, u32 qp_num, u32 counter_id) { struct rdma_port_counter *port_counter; @@ -573,7 +573,7 @@ out: return ret; } -int rdma_counter_get_mode(struct ib_device *dev, u8 port, +int rdma_counter_get_mode(struct ib_device *dev, u32 port, enum rdma_nl_counter_mode *mode, enum rdma_nl_counter_mask *mask) { diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 08f4844abbe3..c660cef66ac6 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -779,7 +779,7 @@ static void remove_client_context(struct ib_device *device, static int alloc_port_data(struct ib_device *device) { struct ib_port_data_rcu *pdata_rcu; - unsigned int port; + u32 port; if (device->port_data) return 0; @@ -788,6 +788,10 @@ static int alloc_port_data(struct ib_device *device) 
if (WARN_ON(!device->phys_port_cnt)) return -EINVAL; + /* Reserve U32_MAX so the logic to go over all the ports is sane */ + if (WARN_ON(device->phys_port_cnt == U32_MAX)) + return -EINVAL; + /* * device->port_data is indexed directly by the port number to make * access to this data as efficient as possible. @@ -819,7 +823,7 @@ static int alloc_port_data(struct ib_device *device) return 0; } -static int verify_immutable(const struct ib_device *dev, u8 port) +static int verify_immutable(const struct ib_device *dev, u32 port) { return WARN_ON(!rdma_cap_ib_mad(dev, port) && rdma_max_mad_size(dev, port) != 0); @@ -827,7 +831,7 @@ static int verify_immutable(const struct ib_device *dev, u8 port) static int setup_port_data(struct ib_device *device) { - unsigned int port; + u32 port; int ret; ret = alloc_port_data(device); @@ -2005,7 +2009,7 @@ void ib_dispatch_event_clients(struct ib_event *event) } static int iw_query_port(struct ib_device *device, - u8 port_num, + u32 port_num, struct ib_port_attr *port_attr) { struct in_device *inetdev; @@ -2044,7 +2048,7 @@ static int iw_query_port(struct ib_device *device, } static int __ib_query_port(struct ib_device *device, - u8 port_num, + u32 port_num, struct ib_port_attr *port_attr) { union ib_gid gid = {}; @@ -2078,7 +2082,7 @@ static int __ib_query_port(struct ib_device *device, * @port_attr pointer. */ int ib_query_port(struct ib_device *device, - u8 port_num, + u32 port_num, struct ib_port_attr *port_attr) { if (!rdma_is_port_valid(device, port_num)) @@ -2130,7 +2134,7 @@ static void add_ndev_hash(struct ib_port_data *pdata) * NETDEV_UNREGISTER event. */ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, - unsigned int port) + u32 port) { struct net_device *old_ndev; struct ib_port_data *pdata; @@ -2173,7 +2177,7 @@ EXPORT_SYMBOL(ib_device_set_netdev); static void free_netdevs(struct ib_device *ib_dev) { unsigned long flags; - unsigned int port; + u32 port; if (!ib_dev->port_data) return; @@ -2204,7 +2208,7 @@ static void free_netdevs(struct ib_device *ib_dev) } struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, - unsigned int port) + u32 port) { struct ib_port_data *pdata; struct net_device *res; @@ -2291,7 +2295,7 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev, roce_netdev_callback cb, void *cookie) { - unsigned int port; + u32 port; rdma_for_each_port (ib_dev, port) if (rdma_protocol_roce(ib_dev, port)) { @@ -2369,7 +2373,7 @@ int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb, * ib_query_pkey() fetches the specified P_Key table entry. */ int ib_query_pkey(struct ib_device *device, - u8 port_num, u16 index, u16 *pkey) + u32 port_num, u16 index, u16 *pkey) { if (!rdma_is_port_valid(device, port_num)) return -EINVAL; @@ -2414,7 +2418,7 @@ EXPORT_SYMBOL(ib_modify_device); * @port_modify_mask and @port_modify structure. */ int ib_modify_port(struct ib_device *device, - u8 port_num, int port_modify_mask, + u32 port_num, int port_modify_mask, struct ib_port_modify *port_modify) { int rc; @@ -2446,10 +2450,10 @@ EXPORT_SYMBOL(ib_modify_port); * parameter may be NULL. */ int ib_find_gid(struct ib_device *device, union ib_gid *gid, - u8 *port_num, u16 *index) + u32 *port_num, u16 *index) { union ib_gid tmp_gid; - unsigned int port; + u32 port; int ret, i; rdma_for_each_port (device, port) { @@ -2483,7 +2487,7 @@ EXPORT_SYMBOL(ib_find_gid); * @index: The index into the PKey table where the PKey was found. 
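/*
 * Illustration (not kernel code): why the alloc_port_data() hunk above
 * rejects phys_port_cnt == U32_MAX once port numbers are u32.  The port
 * walk is roughly "for (port = start; port <= end_port; port++)"; if
 * end_port could be U32_MAX, "port <= end_port" would always hold and the
 * counter would wrap to 0, so the walk would never terminate.  A minimal
 * userspace sketch, assuming a simplified 1-based port space (walk_ports()
 * is a stand-in, not the kernel's rdma_for_each_port()):
 */
#include <stdint.h>
#include <stdio.h>

static int walk_ports(uint32_t phys_port_cnt)
{
	/* Mirrors the WARN_ON() added in alloc_port_data(). */
	if (phys_port_cnt == UINT32_MAX)
		return -1;

	/* end_port == phys_port_cnt for a 1-based IB-style port space. */
	for (uint32_t port = 1; port <= phys_port_cnt; port++)
		printf("visit port %u\n", port);
	return 0;
}

int main(void)
{
	walk_ports(3);			/* visits ports 1..3 and stops */
	if (walk_ports(UINT32_MAX))	/* rejected: loop could never exit */
		puts("U32_MAX port count rejected");
	return 0;
}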
*/ int ib_find_pkey(struct ib_device *device, - u8 port_num, u16 pkey, u16 *index) + u32 port_num, u16 pkey, u16 *index) { int ret, i; u16 tmp_pkey; @@ -2526,7 +2530,7 @@ EXPORT_SYMBOL(ib_find_pkey); * */ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, - u8 port, + u32 port, u16 pkey, const union ib_gid *gid, const struct sockaddr *addr) diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 9355e521d9f4..ce0397fd4b7d 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -61,7 +61,7 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr, { u16 pkey; struct ib_device *dev = qp_info->port_priv->device; - u8 pnum = qp_info->port_priv->port_num; + u32 pnum = qp_info->port_priv->port_num; struct ib_ud_wr *wr = &mad_send_wr->send_wr; struct rdma_ah_attr attr = {}; @@ -118,7 +118,7 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc); * Assumes ib_mad_port_list_lock is being held */ static inline struct ib_mad_port_private * -__ib_get_mad_port(struct ib_device *device, int port_num) +__ib_get_mad_port(struct ib_device *device, u32 port_num) { struct ib_mad_port_private *entry; @@ -134,7 +134,7 @@ __ib_get_mad_port(struct ib_device *device, int port_num) * for a device/port */ static inline struct ib_mad_port_private * -ib_get_mad_port(struct ib_device *device, int port_num) +ib_get_mad_port(struct ib_device *device, u32 port_num) { struct ib_mad_port_private *entry; unsigned long flags; @@ -222,7 +222,7 @@ EXPORT_SYMBOL(ib_response_mad); * Context: Process context. */ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, - u8 port_num, + u32 port_num, enum ib_qp_type qp_type, struct ib_mad_reg_req *mad_reg_req, u8 rmpp_version, @@ -549,7 +549,7 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list) } static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid, - u16 pkey_index, u8 port_num, struct ib_wc *wc) + u16 pkey_index, u32 port_num, struct ib_wc *wc) { memset(wc, 0, sizeof *wc); wc->wr_cqe = cqe; @@ -608,7 +608,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_port_private *port_priv; struct ib_mad_agent_private *recv_mad_agent = NULL; struct ib_device *device = mad_agent_priv->agent.device; - u8 port_num; + u32 port_num; struct ib_wc mad_wc; struct ib_ud_wr *send_wr = &mad_send_wr->send_wr; size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); @@ -1613,7 +1613,7 @@ out: if (mad_agent && !mad_agent->agent.recv_handler) { dev_notice(&port_priv->device->dev, - "No receive handler for client %p on port %d\n", + "No receive handler for client %p on port %u\n", &mad_agent->agent, port_priv->port_num); deref_mad_agent(mad_agent); mad_agent = NULL; @@ -1685,7 +1685,7 @@ static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_ u8 send_resp, rcv_resp; union ib_gid sgid; struct ib_device *device = mad_agent_priv->agent.device; - u8 port_num = mad_agent_priv->agent.port_num; + u32 port_num = mad_agent_priv->agent.port_num; u8 lmc; bool has_grh; @@ -1867,7 +1867,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, const struct ib_mad_qp_info *qp_info, const struct ib_wc *wc, - int port_num, + u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response) { @@ -1954,7 +1954,7 @@ static enum smi_action handle_opa_smi(struct ib_mad_port_private 
*port_priv, struct ib_mad_qp_info *qp_info, struct ib_wc *wc, - int port_num, + u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response) { @@ -2010,7 +2010,7 @@ static enum smi_action handle_smi(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info, struct ib_wc *wc, - int port_num, + u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response, bool opa) @@ -2034,7 +2034,7 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc) struct ib_mad_private_header *mad_priv_hdr; struct ib_mad_private *recv, *response = NULL; struct ib_mad_agent_private *mad_agent; - int port_num; + u32 port_num; int ret = IB_MAD_RESULT_SUCCESS; size_t mad_size; u16 resp_mad_pkey_index = 0; @@ -2947,7 +2947,7 @@ static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) * Create the QP, PD, MR, and CQ if needed */ static int ib_mad_port_open(struct ib_device *device, - int port_num) + u32 port_num) { int ret, cq_size; struct ib_mad_port_private *port_priv; @@ -3002,7 +3002,7 @@ static int ib_mad_port_open(struct ib_device *device, if (ret) goto error7; - snprintf(name, sizeof name, "ib_mad%d", port_num); + snprintf(name, sizeof(name), "ib_mad%u", port_num); port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); if (!port_priv->wq) { ret = -ENOMEM; @@ -3048,7 +3048,7 @@ error3: * If there are no classes using the port, free the port * resources (CQ, MR, PD, QP) and remove the port's info structure */ -static int ib_mad_port_close(struct ib_device *device, int port_num) +static int ib_mad_port_close(struct ib_device *device, u32 port_num) { struct ib_mad_port_private *port_priv; unsigned long flags; @@ -3057,7 +3057,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num) port_priv = __ib_get_mad_port(device, port_num); if (port_priv == NULL) { spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); - dev_err(&device->dev, "Port %d not found\n", port_num); + dev_err(&device->dev, "Port %u not found\n", port_num); return -ENODEV; } list_del_init(&port_priv->port_list); diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index 57519ca6cd2c..a5dd4b7a74bc 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -63,7 +63,7 @@ struct mcast_port { struct rb_root table; atomic_t refcount; struct completion comp; - u8 port_num; + u32 port_num; }; struct mcast_device { @@ -605,7 +605,7 @@ found: */ struct ib_sa_multicast * ib_sa_join_multicast(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, + struct ib_device *device, u32 port_num, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, gfp_t gfp_mask, int (*callback)(int status, @@ -690,7 +690,7 @@ void ib_sa_free_multicast(struct ib_sa_multicast *multicast) } EXPORT_SYMBOL(ib_sa_free_multicast); -int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num, +int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num, union ib_gid *mgid, struct ib_sa_mcmember_rec *rec) { struct mcast_device *dev; @@ -732,7 +732,7 @@ EXPORT_SYMBOL(ib_sa_get_mcmember_rec); * success or appropriate error code. 
* */ -int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num, +int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num, struct ib_sa_mcmember_rec *rec, struct net_device *ndev, enum ib_gid_type gid_type, diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index d306049c22a2..b8dc002a2478 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -242,7 +242,7 @@ static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) { char fw[IB_FW_VERSION_NAME_MAX]; int ret = 0; - u8 port; + u32 port; if (fill_nldev_handle(msg, device)) return -EMSGSIZE; diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h index af4879bdf3d6..64e2822af70f 100644 --- a/drivers/infiniband/core/opa_smi.h +++ b/drivers/infiniband/core/opa_smi.h @@ -40,11 +40,11 @@ #include "smi.h" enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch, - int port_num, int phys_port_cnt); + u32 port_num, int phys_port_cnt); int opa_smi_get_fwd_port(struct opa_smp *smp); extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp); extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, - bool is_switch, int port_num); + bool is_switch, u32 port_num); /* * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c index 34fff94eaa38..7b638d91a4ec 100644 --- a/drivers/infiniband/core/roce_gid_mgmt.c +++ b/drivers/infiniband/core/roce_gid_mgmt.c @@ -70,7 +70,7 @@ struct netdev_event_work { }; static const struct { - bool (*is_supported)(const struct ib_device *device, u8 port_num); + bool (*is_supported)(const struct ib_device *device, u32 port_num); enum ib_gid_type gid_type; } PORT_CAP_TO_GID_TYPE[] = { {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE}, @@ -79,7 +79,7 @@ static const struct { #define CAP_TO_GID_TABLE_SIZE ARRAY_SIZE(PORT_CAP_TO_GID_TYPE) -unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port) +unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port) { int i; unsigned int ret_flags = 0; @@ -96,7 +96,7 @@ unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port) EXPORT_SYMBOL(roce_gid_type_mask_support); static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev, - u8 port, union ib_gid *gid, + u32 port, union ib_gid *gid, struct ib_gid_attr *gid_attr) { int i; @@ -144,7 +144,7 @@ static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_de #define REQUIRED_BOND_STATES (BONDING_SLAVE_STATE_ACTIVE | \ BONDING_SLAVE_STATE_NA) static bool -is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port, +is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *real_dev; @@ -168,7 +168,7 @@ is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port, } static bool -is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port, +is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *master_dev; @@ -197,7 +197,7 @@ is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port, * considered for deriving default RoCE GID, returns false otherwise. 
*/ static bool -is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port, +is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *cookie_ndev = cookie; @@ -223,13 +223,13 @@ is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port, return res; } -static bool pass_all_filter(struct ib_device *ib_dev, u8 port, +static bool pass_all_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { return true; } -static bool upper_device_filter(struct ib_device *ib_dev, u8 port, +static bool upper_device_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { bool res; @@ -260,7 +260,7 @@ static bool upper_device_filter(struct ib_device *ib_dev, u8 port, * not have been established as slave device yet. */ static bool -is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port, +is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { @@ -280,7 +280,7 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port, static void update_gid_ip(enum gid_op_type gid_op, struct ib_device *ib_dev, - u8 port, struct net_device *ndev, + u32 port, struct net_device *ndev, struct sockaddr *addr) { union ib_gid gid; @@ -294,7 +294,7 @@ static void update_gid_ip(enum gid_op_type gid_op, } static void bond_delete_netdev_default_gids(struct ib_device *ib_dev, - u8 port, + u32 port, struct net_device *rdma_ndev, struct net_device *event_ndev) { @@ -328,7 +328,7 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev, } static void enum_netdev_ipv4_ips(struct ib_device *ib_dev, - u8 port, struct net_device *ndev) + u32 port, struct net_device *ndev) { const struct in_ifaddr *ifa; struct in_device *in_dev; @@ -372,7 +372,7 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev, } static void enum_netdev_ipv6_ips(struct ib_device *ib_dev, - u8 port, struct net_device *ndev) + u32 port, struct net_device *ndev) { struct inet6_ifaddr *ifp; struct inet6_dev *in6_dev; @@ -417,7 +417,7 @@ static void enum_netdev_ipv6_ips(struct ib_device *ib_dev, } } -static void _add_netdev_ips(struct ib_device *ib_dev, u8 port, +static void _add_netdev_ips(struct ib_device *ib_dev, u32 port, struct net_device *ndev) { enum_netdev_ipv4_ips(ib_dev, port, ndev); @@ -425,13 +425,13 @@ static void _add_netdev_ips(struct ib_device *ib_dev, u8 port, enum_netdev_ipv6_ips(ib_dev, port, ndev); } -static void add_netdev_ips(struct ib_device *ib_dev, u8 port, +static void add_netdev_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { _add_netdev_ips(ib_dev, port, cookie); } -static void del_netdev_ips(struct ib_device *ib_dev, u8 port, +static void del_netdev_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie); @@ -446,7 +446,7 @@ static void del_netdev_ips(struct ib_device *ib_dev, u8 port, * * del_default_gids() deletes the default GIDs of the event/cookie netdevice. 
*/ -static void del_default_gids(struct ib_device *ib_dev, u8 port, +static void del_default_gids(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *cookie_ndev = cookie; @@ -458,7 +458,7 @@ static void del_default_gids(struct ib_device *ib_dev, u8 port, IB_CACHE_GID_DEFAULT_MODE_DELETE); } -static void add_default_gids(struct ib_device *ib_dev, u8 port, +static void add_default_gids(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *event_ndev = cookie; @@ -470,7 +470,7 @@ static void add_default_gids(struct ib_device *ib_dev, u8 port, } static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev, - u8 port, + u32 port, struct net_device *rdma_ndev, void *cookie) { @@ -515,7 +515,7 @@ void rdma_roce_rescan_device(struct ib_device *ib_dev) EXPORT_SYMBOL(rdma_roce_rescan_device); static void callback_for_addr_gid_device_scan(struct ib_device *device, - u8 port, + u32 port, struct net_device *rdma_ndev, void *cookie) { @@ -547,10 +547,10 @@ static int netdev_upper_walk(struct net_device *upper, return 0; } -static void handle_netdev_upper(struct ib_device *ib_dev, u8 port, +static void handle_netdev_upper(struct ib_device *ib_dev, u32 port, void *cookie, void (*handle_netdev)(struct ib_device *ib_dev, - u8 port, + u32 port, struct net_device *ndev)) { struct net_device *ndev = cookie; @@ -574,25 +574,25 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port, } } -static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, +static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u32 port, struct net_device *event_ndev) { ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev); } -static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port, +static void del_netdev_upper_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids); } -static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port, +static void add_netdev_upper_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips); } -static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port, +static void del_netdev_default_ips_join(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index 31156e22d3e7..a588c2038479 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c @@ -25,7 +25,7 @@ MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations"); * registration is also enabled if registering memory might yield better * performance than using multiple SGE entries, see rdma_rw_io_needs_mr() */ -static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num) +static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u32 port_num) { if (rdma_protocol_iwarp(dev, port_num)) return true; @@ -42,7 +42,7 @@ static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num) * optimization otherwise. Additionally we have a debug option to force usage * of MRs to help testing this code path. 
*/ -static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num, +static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u32 port_num, enum dma_data_direction dir, int dma_nents) { if (dir == DMA_FROM_DEVICE) { @@ -87,7 +87,7 @@ static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg) } /* Caller must have zero-initialized *reg. */ -static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num, +static int rdma_rw_init_one_mr(struct ib_qp *qp, u32 port_num, struct rdma_rw_reg_ctx *reg, struct scatterlist *sg, u32 sg_cnt, u32 offset) { @@ -121,7 +121,7 @@ static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num, } static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, - u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset, + u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset, u64 remote_addr, u32 rkey, enum dma_data_direction dir) { struct rdma_rw_reg_ctx *prev = NULL; @@ -308,7 +308,7 @@ static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg, * Returns the number of WQEs that will be needed on the workqueue if * successful, or a negative error code. */ -int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, +int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 sg_offset, u64 remote_addr, u32 rkey, enum dma_data_direction dir) { @@ -377,7 +377,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_init); * successful, or a negative error code. */ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, - u8 port_num, struct scatterlist *sg, u32 sg_cnt, + u32 port_num, struct scatterlist *sg, u32 sg_cnt, struct scatterlist *prot_sg, u32 prot_sg_cnt, struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey, enum dma_data_direction dir) @@ -505,7 +505,7 @@ static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval) * completion notification. */ struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, - u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr) + u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr) { struct ib_send_wr *first_wr, *last_wr; int i; @@ -562,7 +562,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_wrs); * is not set @cqe must be set so that the caller gets a completion * notification. 
*/ -int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, +int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr) { struct ib_send_wr *first_wr; @@ -581,8 +581,9 @@ EXPORT_SYMBOL(rdma_rw_ctx_post); * @sg_cnt: number of entries in @sg * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ */ -void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, - struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir) +void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, + u32 port_num, struct scatterlist *sg, u32 sg_cnt, + enum dma_data_direction dir) { int i; @@ -620,7 +621,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_destroy); * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ */ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp, - u8 port_num, struct scatterlist *sg, u32 sg_cnt, + u32 port_num, struct scatterlist *sg, u32 sg_cnt, struct scatterlist *prot_sg, u32 prot_sg_cnt, enum dma_data_direction dir) { @@ -647,7 +648,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature); * compute max_rdma_ctxts and the size of the transport's Send and * Send Completion Queues. */ -unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num, +unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num, unsigned int maxpages) { unsigned int mr_pages; diff --git a/drivers/infiniband/core/sa.h b/drivers/infiniband/core/sa.h index cbaaaa92fff3..143de37ae598 100644 --- a/drivers/infiniband/core/sa.h +++ b/drivers/infiniband/core/sa.h @@ -49,7 +49,7 @@ static inline void ib_sa_client_put(struct ib_sa_client *client) } int ib_sa_mcmember_rec_query(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, u8 method, + struct ib_device *device, u32 port_num, u8 method, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, unsigned long timeout_ms, gfp_t gfp_mask, diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 9ef1a355131b..8f1705c403b4 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -95,7 +95,7 @@ struct ib_sa_port { struct delayed_work ib_cpi_work; spinlock_t classport_lock; /* protects class port info set */ spinlock_t ah_lock; - u8 port_num; + u32 port_num; }; struct ib_sa_device { @@ -1194,7 +1194,7 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query) } EXPORT_SYMBOL(ib_sa_cancel_query); -static u8 get_src_path_mask(struct ib_device *device, u8 port_num) +static u8 get_src_path_mask(struct ib_device *device, u32 port_num) { struct ib_sa_device *sa_dev; struct ib_sa_port *port; @@ -1213,7 +1213,7 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num) return src_path_mask; } -static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num, +static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num, struct sa_path_rec *rec, struct rdma_ah_attr *ah_attr, const struct ib_gid_attr *gid_attr) @@ -1251,7 +1251,7 @@ static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num, * User must invoke rdma_destroy_ah_attr() to release reference to SGID * attributes which are initialized using ib_init_ah_attr_from_path(). 
*/ -int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num, +int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num, struct sa_path_rec *rec, struct rdma_ah_attr *ah_attr, const struct ib_gid_attr *gid_attr) @@ -1409,7 +1409,7 @@ EXPORT_SYMBOL(ib_sa_pack_path); static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client, struct ib_sa_device *sa_dev, - u8 port_num) + u32 port_num) { struct ib_sa_port *port; unsigned long flags; @@ -1444,7 +1444,7 @@ enum opa_pr_supported { */ static int opa_pr_query_possible(struct ib_sa_client *client, struct ib_sa_device *sa_dev, - struct ib_device *device, u8 port_num, + struct ib_device *device, u32 port_num, struct sa_path_rec *rec) { struct ib_port_attr port_attr; @@ -1533,7 +1533,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) * the query. */ int ib_sa_path_rec_get(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, + struct ib_device *device, u32 port_num, struct sa_path_rec *rec, ib_sa_comp_mask comp_mask, unsigned long timeout_ms, gfp_t gfp_mask, @@ -1688,7 +1688,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query) * the query. */ int ib_sa_service_rec_query(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, u8 method, + struct ib_device *device, u32 port_num, u8 method, struct ib_sa_service_rec *rec, ib_sa_comp_mask comp_mask, unsigned long timeout_ms, gfp_t gfp_mask, @@ -1784,7 +1784,7 @@ static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query) } int ib_sa_mcmember_rec_query(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, + struct ib_device *device, u32 port_num, u8 method, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, @@ -1876,7 +1876,7 @@ static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query) } int ib_sa_guid_info_rec_query(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, + struct ib_device *device, u32 port_num, struct ib_sa_guidinfo_rec *rec, ib_sa_comp_mask comp_mask, u8 method, unsigned long timeout_ms, gfp_t gfp_mask, @@ -2265,7 +2265,7 @@ static void ib_sa_event(struct ib_event_handler *handler, unsigned long flags; struct ib_sa_device *sa_dev = container_of(handler, typeof(*sa_dev), event_handler); - u8 port_num = event->element.port_num - sa_dev->start_port; + u32 port_num = event->element.port_num - sa_dev->start_port; struct ib_sa_port *port = &sa_dev->port[port_num]; if (!rdma_cap_ib_sa(handler->device, port->port_num)) diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 75e7ec017836..e5a78d1a63c9 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -193,7 +193,7 @@ static void qp_to_error(struct ib_qp_security *sec) static inline void check_pkey_qps(struct pkey_index_qp_list *pkey, struct ib_device *device, - u8 port_num, + u32 port_num, u64 subnet_prefix) { struct ib_port_pkey *pp, *tmp_pp; @@ -245,7 +245,7 @@ static int port_pkey_list_insert(struct ib_port_pkey *pp) struct pkey_index_qp_list *tmp_pkey; struct pkey_index_qp_list *pkey; struct ib_device *dev; - u8 port_num = pp->port_num; + u32 port_num = pp->port_num; int ret = 0; if (pp->state != IB_PORT_PKEY_VALID) @@ -538,7 +538,7 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec) } void ib_security_cache_change(struct ib_device *device, - u8 port_num, + u32 port_num, u64 subnet_prefix) { struct pkey_index_qp_list *pkey; @@ -649,7 +649,7 @@ int ib_security_modify_qp(struct ib_qp *qp, } static int 
ib_security_pkey_access(struct ib_device *dev, - u8 port_num, + u32 port_num, u16 pkey_index, void *sec) { diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c index f19b23817c2b..45f09b75c893 100644 --- a/drivers/infiniband/core/smi.c +++ b/drivers/infiniband/core/smi.c @@ -41,7 +41,7 @@ #include "smi.h" #include "opa_smi.h" -static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num, +static enum smi_action __smi_handle_dr_smp_send(bool is_switch, u32 port_num, u8 *hop_ptr, u8 hop_cnt, const u8 *initial_path, const u8 *return_path, @@ -127,7 +127,7 @@ static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num, * Return IB_SMI_DISCARD if the SMP should be discarded */ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, - bool is_switch, int port_num) + bool is_switch, u32 port_num) { return __smi_handle_dr_smp_send(is_switch, port_num, &smp->hop_ptr, smp->hop_cnt, @@ -139,7 +139,7 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, } enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, - bool is_switch, int port_num) + bool is_switch, u32 port_num) { return __smi_handle_dr_smp_send(is_switch, port_num, &smp->hop_ptr, smp->hop_cnt, @@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, OPA_LID_PERMISSIVE); } -static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num, +static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, u32 port_num, int phys_port_cnt, u8 *hop_ptr, u8 hop_cnt, const u8 *initial_path, @@ -238,7 +238,7 @@ static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num, * Return IB_SMI_DISCARD if the SMP should be dropped */ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch, - int port_num, int phys_port_cnt) + u32 port_num, int phys_port_cnt) { return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt, &smp->hop_ptr, smp->hop_cnt, @@ -254,7 +254,7 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch, * Return IB_SMI_DISCARD if the SMP should be dropped */ enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch, - int port_num, int phys_port_cnt) + u32 port_num, int phys_port_cnt) { return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt, &smp->hop_ptr, smp->hop_cnt, diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h index 91d9b353ab85..e350ed623c45 100644 --- a/drivers/infiniband/core/smi.h +++ b/drivers/infiniband/core/smi.h @@ -52,11 +52,11 @@ enum smi_forward_action { }; enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch, - int port_num, int phys_port_cnt); + u32 port_num, int phys_port_cnt); int smi_get_fwd_port(struct ib_smp *smp); extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp); extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, - bool is_switch, int port_num); + bool is_switch, u32 port_num); /* * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index b8abb30f80df..91a53b2b4718 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -62,7 +62,7 @@ struct ib_port { const struct attribute_group *pma_table; struct attribute_group *hw_stats_ag; struct rdma_hw_stats *hw_stats; - u8 port_num; + u32 port_num; }; struct port_attribute { @@ -94,7 +94,7 @@ struct hw_stats_attribute { const char *buf, size_t count); 
 	int			index;
-	u8			port_num;
+	u32			port_num;
 };
 
 static ssize_t port_attr_show(struct kobject *kobj,
@@ -812,7 +812,7 @@ static const struct attribute_group *get_counter_table(struct ib_device *dev,
 }
 
 static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
-			   u8 port_num, int index)
+			   u32 port_num, int index)
 {
 	int ret;
 
@@ -938,7 +938,7 @@ static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group)
 	kfree(attr_group);
 }
 
-static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
+static struct attribute *alloc_hsa(int index, u32 port_num, const char *name)
 {
 	struct hw_stats_attribute *hsa;
 
@@ -956,7 +956,7 @@ static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
 	return &hsa->attr;
 }
 
-static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
+static struct attribute *alloc_hsa_lifespan(char *name, u32 port_num)
 {
 	struct hw_stats_attribute *hsa;
 
@@ -975,7 +975,7 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
 }
 
 static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
-			   u8 port_num)
+			   u32 port_num)
 {
 	struct attribute_group *hsag;
 	struct rdma_hw_stats *stats;
@@ -1383,7 +1383,7 @@ void ib_free_port_attrs(struct ib_core_device *coredev)
 int ib_setup_port_attrs(struct ib_core_device *coredev)
 {
 	struct ib_device *device = rdma_device_to_ibdev(&coredev->dev);
-	unsigned int port;
+	u32 port;
 	int ret;
 
 	coredev->ports_kobj = kobject_create_and_add("ports",
@@ -1437,7 +1437,7 @@ void ib_device_unregister_sysfs(struct ib_device *device)
  * @ktype: pointer to the ktype for this kobject.
  * @name: the name of the kobject
 */
-int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
+int ib_port_register_module_stat(struct ib_device *device, u32 port_num,
 				 struct kobject *kobj, struct kobj_type *ktype,
 				 const char *name)
 {
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index dd7f3b437c6b..a183b2747c3e 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -101,7 +101,7 @@ struct ib_umad_port {
 	struct ib_device      *ib_dev;
 	struct ib_umad_device *umad_dev;
 	int                    dev_num;
-	u8                     port_num;
+	u32                    port_num;
 };
 
 struct ib_umad_device {
@@ -1145,7 +1145,7 @@ static const struct file_operations umad_sm_fops = {
 
 static struct ib_umad_port *get_port(struct ib_device *ibdev,
 				     struct ib_umad_device *umad_dev,
-				     unsigned int port)
+				     u32 port)
 {
 	if (!umad_dev)
 		return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index f5b8be3bedde..9e070ffa3520 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -364,7 +364,7 @@ static void copy_query_dev_fields(struct ib_ucontext *ucontext,
 	resp->max_srq_sge		= attr->max_srq_sge;
 	resp->max_pkeys			= attr->max_pkeys;
 	resp->local_ca_ack_delay	= attr->local_ca_ack_delay;
-	resp->phys_port_cnt		= ib_dev->phys_port_cnt;
+	resp->phys_port_cnt = min_t(u32, ib_dev->phys_port_cnt, U8_MAX);
 }
 
 static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs)
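
For context on the min_t() hunk just above: the uverbs query_device response keeps a u8 phys_port_cnt field for ABI compatibility, so a device that can now report more than 255 ports has to be clamped rather than silently truncated when copied to userspace. A hedged sketch of the same idea in isolation follows; the helper name is invented, while min_t() and U8_MAX are standard kernel macros.

#include <linux/kernel.h>
#include <linux/limits.h>

/* illustrative helper, not part of the patch */
static u8 example_abi_port_cnt(u32 phys_port_cnt)
{
	/* compare in u32 first, then narrow to the ABI's u8 */
	return min_t(u32, phys_port_cnt, U8_MAX);
}
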
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 28464c58738c..c576e2bc39c6 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -227,7 +227,8 @@ rdma_node_get_transport(unsigned int node_type)
 }
 EXPORT_SYMBOL(rdma_node_get_transport);
 
-enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
+enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
+					      u32 port_num)
 {
 	enum rdma_transport_type lt;
 	if (device->ops.get_link_layer)
@@ -658,7 +659,7 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
 EXPORT_SYMBOL(ib_get_rdma_header_version);
 
 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
-						     u8 port_num,
+						     u32 port_num,
 						     const struct ib_grh *grh)
 {
 	int grh_version;
@@ -701,7 +702,7 @@ static bool find_gid_index(const union ib_gid *gid,
 }
 
 static const struct ib_gid_attr *
-get_sgid_attr_from_eth(struct ib_device *device, u8 port_num,
+get_sgid_attr_from_eth(struct ib_device *device, u32 port_num,
 		       u16 vlan_id, const union ib_gid *sgid,
 		       enum ib_gid_type gid_type)
 {
@@ -788,7 +789,7 @@ static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
  * On success the caller is responsible to call rdma_destroy_ah_attr on the
  * attr.
  */
-int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
 			    const struct ib_wc *wc, const struct ib_grh *grh,
 			    struct rdma_ah_attr *ah_attr)
 {
@@ -919,7 +920,7 @@ void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
 EXPORT_SYMBOL(rdma_destroy_ah_attr);
 
 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
-				   const struct ib_grh *grh, u8 port_num)
+				   const struct ib_grh *grh, u32 port_num)
 {
 	struct rdma_ah_attr ah_attr;
 	struct ib_ah *ah;
@@ -1673,7 +1674,7 @@ static bool is_qp_type_connected(const struct ib_qp *qp)
 static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
 			 int attr_mask, struct ib_udata *udata)
 {
-	u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+	u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
 	const struct ib_gid_attr *old_sgid_attr_av;
 	const struct ib_gid_attr *old_sgid_attr_alt_av;
 	int ret;
@@ -1801,7 +1802,7 @@ int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
 }
 EXPORT_SYMBOL(ib_modify_qp_with_udata);
 
-int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width)
+int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
 {
 	int rc;
 	u32 netdev_speed;
@@ -2467,7 +2468,7 @@ int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
 }
 EXPORT_SYMBOL(ib_check_mr_status);
 
-int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
 			 int state)
 {
 	if (!device->ops.set_vf_link_state)
@@ -2477,7 +2478,7 @@ int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
 }
 EXPORT_SYMBOL(ib_set_vf_link_state);
 
-int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
 		     struct ifla_vf_info *info)
 {
 	if (!device->ops.get_vf_config)
@@ -2487,7 +2488,7 @@ int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
 }
 EXPORT_SYMBOL(ib_get_vf_config);
 
-int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
 		    struct ifla_vf_stats *stats)
 {
 	if (!device->ops.get_vf_stats)
@@ -2497,7 +2498,7 @@ int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
 }
 EXPORT_SYMBOL(ib_get_vf_stats);
 
-int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
 		   int type)
 {
 	if (!device->ops.set_vf_guid)
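
The ib_init_ah_attr_from_wc() and ib_create_ah_from_wc() hunks above change only the port type. For orientation, the exported helper is roughly equivalent to the following sketch, which is illustrative only: the wrapper name is invented and error handling is abbreviated.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static struct ib_ah *example_ah_from_wc(struct ib_pd *pd,
					 const struct ib_wc *wc,
					 const struct ib_grh *grh,
					 u32 port_num)
{
	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;
	int ret;

	/* port_num is now u32, matching the hunk above */
	ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);

	/* ib_init_ah_attr_from_wc() may take a GID reference; drop it here */
	rdma_destroy_ah_attr(&ah_attr);
	return ah;
}
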
@@ -2507,7 +2508,7 @@ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
 }
 EXPORT_SYMBOL(ib_set_vf_guid);
 
-int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
 		   struct ifla_vf_guid *node_guid,
 		   struct ifla_vf_guid *port_guid)
 {
@@ -2849,7 +2850,7 @@ void ib_drain_qp(struct ib_qp *qp)
 }
 EXPORT_SYMBOL(ib_drain_qp);
 
-struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
+struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
 				     enum rdma_netdev_t type, const char *name,
 				     unsigned char name_assign_type,
 				     void (*setup)(struct net_device *))
@@ -2875,7 +2876,7 @@ struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
 }
 EXPORT_SYMBOL(rdma_alloc_netdev);
 
-int rdma_init_netdev(struct ib_device *device, u8 port_num,
+int rdma_init_netdev(struct ib_device *device, u32 port_num,
 		     enum rdma_netdev_t type, const char *name,
 		     unsigned char name_assign_type,
 		     void (*setup)(struct net_device *),
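
Taken together, the verbs.c conversions mean port iterators and local port variables must now be declared u32 rather than u8. A hedged sketch of a caller walking all ports with the updated helpers follows; rdma_for_each_port(), rdma_port_get_link_layer() and ib_get_eth_speed() are existing core APIs, while the dumping function itself is illustrative only.

#include <rdma/ib_verbs.h>

static void example_dump_ports(struct ib_device *device)
{
	u32 port;	/* must be u32/unsigned int for rdma_for_each_port() */

	rdma_for_each_port(device, port) {
		u16 speed;
		u8 width;

		if (rdma_port_get_link_layer(device, port) !=
		    IB_LINK_LAYER_ETHERNET)
			continue;

		/* ib_get_eth_speed() also takes the widened u32 port now */
		if (!ib_get_eth_speed(device, port, &speed, &width))
			dev_dbg(&device->dev, "port %u: speed %u width %u\n",
				port, speed, width);
	}
}
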