Round one of 4.7 rc fixes

- Multiple minor fixes to the rdma core
- Multiple minor fixes to hfi1
- Multiple minor fixes to mlx5
- A very few other minor fixes (SRP, IPoIB, usNIC, mlx4)
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJXVt8TAAoJELgmozMOVy/dqkUQAIBZXo6VqyykcO1hAvT8kvNG
 bi5wuTDe7SVU7dHE4DBjCHbxEYRwWXEqlWpywSHd84pv7awvUTXHiHppdYxkor5V
 1Yo+gz/WHFaE8dHNRuiWpzqNxd7+D16mx3Xaq2xVGdKSG0kQln3Wo0s8g3yAKyNS
 KTDSIN5ik4WwK043REWPjyPLtmy3sssmC6OnqF9arFqq1W1xy4I6CudRHrCmGD9b
 f7Q9lrFk30nAtOyoyFJNMXpHtrgiIbbvpa6yF5FxY/Aoa7bOAJfI2oTZZOL0wHqH
 GhHw/uLxDotBckDUCOT90JT4Bkq7Qd9nzqNgpvfsCSVr3z64PNYYM5+pJc5kYZOB
 0VwxCXrKIuo+YWF6/L3SWef6rfHltFlhjaAobbuNsze/cm9kR9pFqm7y/6cMrDWS
 fNSi50ljhOcrw9aAvC8zUbHCkP4Mw3A4L9btZlOJsfZ/heHDVJ6b90fcV/976xQQ
 B5ml/RcIs3afH03Mn4OD52YEFV0h7JgXq9YPYugjSbL73sKwHFDHSJ6NaK1DFu1V
 j8NYiIP+vTmcgoUfe6g0Qd9Vnz1+zw0wqLvKV9GPD+tflPDy13+2VMhvvn3VPlSc
 ndK+JnQYuzJdWitrY3a1RUB2BziDnzQ/Hu6QZ09zGvuEIABf3xfrvJX5dgRYYBR7
 t2CeyZu0Q9wp54+LzLIE
 =LjUT
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:
 "This is the first -rc pull for the RDMA subsystem.  The patch count is
  high, but they are all smallish patches fixing simple things for the
  most part, and the overall line count of changes here is smaller than
  the patch count would lead a person to believe.

  Code is up and running in my labs, including direct testing of cxgb4,
  mlx4, mlx5, ocrdma, and qib.

  Summary:

   - Multiple minor fixes to the rdma core
   - Multiple minor fixes to hfi1
   - Multiple minor fixes to mlx5
   - A very few other minor fixes (SRP, IPoIB, usNIC, mlx4)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (35 commits)
  IB/IPoIB: Don't update neigh validity for unresolved entries
  IB/mlx5: Fix alternate path code
  IB/mlx5: Fix pkey_index length in the QP path record
  IB/mlx5: Fix entries check in mlx5_ib_resize_cq
  IB/mlx5: Fix entries checks in mlx5_ib_create_cq
  IB/mlx5: Check BlueFlame HCA support
  IB/mlx5: Fix returned values of query QP
  IB/mlx5: Limit query HCA clock
  IB/mlx5: Fix FW version display in sysfs
  IB/mlx5: Return PORT_ERR in Active to Initializing transition
  IB/mlx5: Set flow steering capability bit
  IB/core: Make all casts in ib_device_cap_flags enum consistent
  IB/core: Fix bit corruption in ib_device_cap_flags structure
  IB/core: Initialize sysfs attributes before sysfs create group
  IB/IPoIB: Disable bottom half when dealing with device address
  IB/core: Fix removal of default GID cache entry
  IB/IPoIB: Fix race between ipoib_remove_one to sysfs functions
  IB/core: Fix query port failure in RoCE
  IB/core: fix error unwind in sysfs hw counters code
  IB/core: Fix array length allocation
  ...
Linus Torvalds 2016-06-09 14:36:12 -07:00
commit 147d9e7bca
26 changed files with 148 additions and 110 deletions

View File

@@ -178,6 +178,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
{
int ret = 0;
struct net_device *old_net_dev;
enum ib_gid_type old_gid_type;
/* in rdma_cap_roce_gid_table, this function should be protected by a
* sleep-able lock.
@@ -199,6 +200,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
}
old_net_dev = table->data_vec[ix].attr.ndev;
old_gid_type = table->data_vec[ix].attr.gid_type;
if (old_net_dev && old_net_dev != attr->ndev)
dev_put(old_net_dev);
/* if modify_gid failed, just delete the old gid */
@@ -207,10 +209,14 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
attr = &zattr;
table->data_vec[ix].context = NULL;
}
if (default_gid)
table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
if (default_gid) {
table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
if (action == GID_TABLE_WRITE_ACTION_DEL)
table->data_vec[ix].attr.gid_type = old_gid_type;
}
if (table->data_vec[ix].attr.ndev &&
table->data_vec[ix].attr.ndev != old_net_dev)
dev_hold(table->data_vec[ix].attr.ndev);

View File

@@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id)
work->cm_event.event = IB_CM_USER_ESTABLISHED;
/* Check if the device started its remove_one */
spin_lock_irq(&cm.lock);
spin_lock_irqsave(&cm.lock, flags);
if (!cm_dev->going_down) {
queue_delayed_work(cm.wq, &work->work, 0);
} else {
kfree(work);
ret = -ENODEV;
}
spin_unlock_irq(&cm.lock);
spin_unlock_irqrestore(&cm.lock, flags);
out:
return ret;
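
The cm_establish() hunk above swaps the spin_lock_irq()/spin_unlock_irq() pair for the irqsave variants, presumably because this path can be reached with local interrupts already disabled: spin_unlock_irq() unconditionally re-enables interrupts, while spin_unlock_irqrestore() puts back exactly the state the caller had. A minimal kernel-style sketch of the idiom (struct demo_ctx and its fields are hypothetical, not from this patch):

        /* hypothetical context type, for illustration only */
        struct demo_ctx {
                spinlock_t lock;
                u64 counter;
        };

        static void demo_update(struct demo_ctx *ctx)
        {
                unsigned long flags;

                spin_lock_irqsave(&ctx->lock, flags);      /* save IRQ state, then disable */
                ctx->counter++;                            /* critical section */
                spin_unlock_irqrestore(&ctx->lock, flags); /* restore the saved state */
        }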

View File

@@ -661,6 +661,9 @@ int ib_query_port(struct ib_device *device,
if (err || port_attr->subnet_prefix)
return err;
if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
return 0;
err = ib_query_gid(device, port_num, 0, &gid, NULL);
if (err)
return err;
@@ -1024,7 +1027,8 @@ static int __init ib_core_init(void)
goto err_mad;
}
if (ib_add_ibnl_clients()) {
ret = ib_add_ibnl_clients();
if (ret) {
pr_warn("Couldn't register ibnl clients\n");
goto err_sa;
}

View File

@@ -506,7 +506,7 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
if (!nlmsg_request) {
pr_info("%s: Could not find a matching request (seq = %u)\n",
__func__, msg_seq);
return -EINVAL;
return -EINVAL;
}
pm_msg = nlmsg_request->req_buffer;
local_sockaddr = (struct sockaddr_storage *)

View File

@@ -1638,9 +1638,9 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
/* Now, check to see if there are any methods still in use */
if (!check_method_table(method)) {
/* If not, release management method table */
kfree(method);
class->method_table[mgmt_class] = NULL;
/* Any management classes left ? */
kfree(method);
class->method_table[mgmt_class] = NULL;
/* Any management classes left ? */
if (!check_class_table(class)) {
/* If not, release management class table */
kfree(class);

View File

@@ -889,9 +889,9 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
u8 port_num)
{
struct attribute_group *hsag = NULL;
struct attribute_group *hsag;
struct rdma_hw_stats *stats;
int i = 0, ret;
int i, ret;
stats = device->alloc_hw_stats(device, port_num);
@@ -899,19 +899,22 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
return;
if (!stats->names || stats->num_counters <= 0)
goto err;
goto err_free_stats;
/*
* Two extra attribute elements here, one for the lifespan entry and
* one to NULL terminate the list for the sysfs core code
*/
hsag = kzalloc(sizeof(*hsag) +
// 1 extra for the lifespan config entry
sizeof(void *) * (stats->num_counters + 1),
sizeof(void *) * (stats->num_counters + 2),
GFP_KERNEL);
if (!hsag)
return;
goto err_free_stats;
ret = device->get_hw_stats(device, stats, port_num,
stats->num_counters);
if (ret != stats->num_counters)
goto err;
goto err_free_hsag;
stats->timestamp = jiffies;
@@ -922,10 +925,13 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
if (!hsag->attrs[i])
goto err;
sysfs_attr_init(hsag->attrs[i]);
}
/* treat an error here as non-fatal */
hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
if (hsag->attrs[i])
sysfs_attr_init(hsag->attrs[i]);
if (port) {
struct kobject *kobj = &port->kobj;
@@ -946,10 +952,12 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
return;
err:
kfree(stats);
for (; i >= 0; i--)
kfree(hsag->attrs[i]);
err_free_hsag:
kfree(hsag);
err_free_stats:
kfree(stats);
return;
}
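
The setup_hw_stats() rework above is the classic kernel goto-unwind pattern: each failure jumps to a label that releases only what was successfully allocated so far, in reverse order, so nothing is leaked and nothing is freed twice. A minimal user-space sketch of the same shape (all names are illustrative, not the kernel's):

        #include <stdlib.h>

        struct stats { int n; };
        struct group { void **attrs; };

        static int setup(struct stats **sp, struct group **gp)
        {
                struct stats *stats = malloc(sizeof(*stats));
                struct group *group;

                if (!stats)
                        return -1;
                group = malloc(sizeof(*group));
                if (!group)
                        goto err_free_stats;        /* only stats exists yet */
                group->attrs = calloc(4, sizeof(void *));
                if (!group->attrs)
                        goto err_free_group;        /* stats and group both exist */

                *sp = stats;
                *gp = group;
                return 0;

        err_free_group:
                free(group);
        err_free_stats:
                free(stats);
                return -1;
        }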

View File

@@ -300,16 +300,15 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
const struct cpumask *node_mask,
*proc_mask = tsk_cpus_allowed(current);
struct cpu_mask_set *set = &dd->affinity->proc;
char buf[1024];
/*
* check whether process/context affinity has already
* been set
*/
if (cpumask_weight(proc_mask) == 1) {
scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s",
current->pid, current->comm, buf);
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
/*
* Mark the pre-set CPU as used. This is atomic so we don't
* need the lock
@@ -318,9 +317,9 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
cpumask_set_cpu(cpu, &set->used);
goto done;
} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s",
current->pid, current->comm, buf);
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
goto done;
}
@@ -356,8 +355,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
&dd->affinity->rcv_intr.mask :
&dd->affinity->rcv_intr.used));
scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs));
hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf);
hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
cpumask_pr_args(intrs));
/*
* If we don't have a NUMA node requested, preference is towards
@@ -366,18 +365,16 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
if (node == -1)
node = dd->node;
node_mask = cpumask_of_node(node);
scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask));
hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf);
hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
cpumask_pr_args(node_mask));
/* diff will hold all unused cpus */
cpumask_andnot(diff, &set->mask, &set->used);
scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff));
hfi1_cdbg(PROC, "unused CPUs (all) %s", buf);
hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));
/* get cpumask of available CPUs on preferred NUMA */
cpumask_and(mask, diff, node_mask);
scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
hfi1_cdbg(PROC, "available cpus on NUMA %s", buf);
hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));
/*
* At first, we don't want to place processes on the same
@@ -395,8 +392,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
cpumask_andnot(diff, &set->mask, &set->used);
cpumask_andnot(mask, diff, node_mask);
}
scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
hfi1_cdbg(PROC, "possible CPUs for process %s", buf);
hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
cpumask_pr_args(mask));
cpu = cpumask_first(mask);
if (cpu >= nr_cpu_ids) /* empty */
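
Every hfi1 affinity hunk above deletes the same pattern: a 1024-byte on-stack buffer filled by scnprintf() just to print a cpumask. printk's "%*pbl" extension formats a bitmap as a CPU list directly, and cpumask_pr_args(mask) expands to the width/pointer pair that "%*pbl" consumes, so no temporary buffer is needed. A kernel-style one-liner showing the usage (not standalone code):

        /* no scratch buffer required */
        pr_info("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));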

View File

@@ -7832,8 +7832,8 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
* save first 2 flits in the packet that caused
* the error
*/
dd->err_info_rcvport.packet_flit1 = hdr0;
dd->err_info_rcvport.packet_flit2 = hdr1;
dd->err_info_rcvport.packet_flit1 = hdr0;
dd->err_info_rcvport.packet_flit2 = hdr1;
}
switch (info) {
case 1:
@@ -11906,7 +11906,7 @@ static void update_synth_timer(unsigned long opaque)
hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
}
mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}
#define C_MAX_NAME 13 /* 12 chars + one for \0 */

View File

@@ -1337,7 +1337,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
(void *)dd->rcvhdrtail_dummy_kvaddr,
dd->rcvhdrtail_dummy_physaddr);
dd->rcvhdrtail_dummy_kvaddr = NULL;
dd->rcvhdrtail_dummy_kvaddr = NULL;
}
for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {

View File

@@ -214,19 +214,6 @@ const char *print_u32_array(
return ret;
}
const char *print_u64_array(
struct trace_seq *p,
u64 *arr, int len)
{
int i;
const char *ret = trace_seq_buffer_ptr(p);
for (i = 0; i < len; i++)
trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]);
trace_seq_putc(p, 0);
return ret;
}
__hfi1_trace_fn(PKT);
__hfi1_trace_fn(PROC);
__hfi1_trace_fn(SDMA);

View File

@@ -183,7 +183,7 @@ struct user_sdma_iovec {
struct sdma_mmu_node *node;
};
#define SDMA_CACHE_NODE_EVICT BIT(0)
#define SDMA_CACHE_NODE_EVICT 0
struct sdma_mmu_node {
struct mmu_rb_node rb;
@@ -1355,11 +1355,11 @@ static int set_txreq_header(struct user_sdma_request *req,
*/
SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
req->tidoffset, req->tidoffset / req->omfactor,
!!(req->omfactor - KDETH_OM_SMALL));
req->omfactor != KDETH_OM_SMALL);
KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
req->tidoffset / req->omfactor);
KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
!!(req->omfactor - KDETH_OM_SMALL));
req->omfactor != KDETH_OM_SMALL);
}
done:
trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
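
In the set_txreq_header() hunk, !!(req->omfactor - KDETH_OM_SMALL) and req->omfactor != KDETH_OM_SMALL produce the same 0/1 value; the comparison simply states the intent. A tiny user-space check of the equivalence (the constant's value here is illustrative):

        #include <assert.h>

        #define KDETH_OM_SMALL 4        /* illustrative value */

        int main(void)
        {
                for (int om = 0; om < 128; om++)
                        assert(!!(om - KDETH_OM_SMALL) == (om != KDETH_OM_SMALL));
                return 0;
        }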

View File

@@ -505,9 +505,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
else
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
}
if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

View File

@@ -822,7 +822,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
int eqn;
int err;
if (entries < 0)
if (entries < 0 ||
(entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
return ERR_PTR(-EINVAL);
if (check_cq_create_flags(attr->flags))
@@ -1168,11 +1169,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
return -ENOSYS;
}
if (entries < 1)
if (entries < 1 ||
entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
entries,
1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
return -EINVAL;
}
entries = roundup_pow_of_two(entries + 1);
if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
return -EINVAL;
if (entries == ibcq->cqe + 1)
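
The resize path above now rejects an oversized request before the roundup, so the warning can report the device limit; the post-roundup check is still needed because roundup_pow_of_two(entries + 1) can jump past the limit even when the raw count was in range. A user-space sketch of the bounds logic (helper and names are illustrative, not the driver's):

        #include <stdio.h>

        static unsigned int round_up_pow2(unsigned int n)
        {
                unsigned int p = 1;
                while (p < n)
                        p <<= 1;
                return p;
        }

        static int check_cq_entries(unsigned int entries, int log_max_cq_sz)
        {
                unsigned int max = 1u << log_max_cq_sz;

                if (entries < 1 || entries > max) {
                        fprintf(stderr, "wrong entries number %u, max %u\n",
                                entries, max);
                        return -1;      /* -EINVAL in the driver */
                }
                entries = round_up_pow2(entries + 1);
                if (entries > max + 1)
                        return -1;      /* rounding overshot the device limit */
                return 0;
        }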

View File

@@ -524,6 +524,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_CAP_ETH(dev->mdev, scatter_fcs))
props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
props->vendor_part_id = mdev->pdev->device;
props->hw_ver = mdev->pdev->revision;
@@ -915,7 +918,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
resp.cache_line_size = L1_CACHE_BYTES;
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
@@ -988,7 +992,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (field_avail(typeof(resp), cqe_version, udata->outlen))
resp.response_length += sizeof(resp.cqe_version);
if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
/*
* We don't want to expose information from the PCI bar that is located
* after 4096 bytes, so if the arch only supports larger pages, let's
* pretend we don't support reading the HCA's core clock. This is also
* forced by mmap function.
*/
if (PAGE_SIZE <= 4096 &&
field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
resp.comp_mask |=
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
resp.hca_core_clock_offset =
@@ -1798,7 +1809,7 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}
@@ -1866,14 +1877,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
break;
case MLX5_DEV_EVENT_PORT_DOWN:
case MLX5_DEV_EVENT_PORT_INITIALIZED:
ibev.event = IB_EVENT_PORT_ERR;
port = (u8)param;
break;
case MLX5_DEV_EVENT_PORT_INITIALIZED:
/* not used by ULPs */
return;
case MLX5_DEV_EVENT_LID_CHANGE:
ibev.event = IB_EVENT_LID_CHANGE;
port = (u8)param;

View File

@@ -235,6 +235,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
qp->rq.max_gs = 0;
qp->rq.wqe_cnt = 0;
qp->rq.wqe_shift = 0;
cap->max_recv_wr = 0;
cap->max_recv_sge = 0;
} else {
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
@@ -1851,13 +1853,15 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
const struct ib_ah_attr *ah,
struct mlx5_qp_path *path, u8 port, int attr_mask,
u32 path_flags, const struct ib_qp_attr *attr)
u32 path_flags, const struct ib_qp_attr *attr,
bool alt)
{
enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
int err;
if (attr_mask & IB_QP_PKEY_INDEX)
path->pkey_index = attr->pkey_index;
path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
attr->pkey_index);
if (ah->ah_flags & IB_AH_GRH) {
if (ah->grh.sgid_index >=
@@ -1877,9 +1881,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
ah->grh.sgid_index);
path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
} else {
path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
0;
path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
path->fl_free_ar |=
(path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
path->rlid = cpu_to_be16(ah->dlid);
path->grh_mlid = ah->src_path_bits & 0x7f;
if (ah->ah_flags & IB_AH_GRH)
@@ -1903,7 +1907,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
path->port = port;
if (attr_mask & IB_QP_TIMEOUT)
path->ackto_lt = attr->timeout << 3;
path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
return modify_raw_packet_eth_prio(dev->mdev,
@@ -2264,7 +2268,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
if (attr_mask & IB_QP_PKEY_INDEX)
context->pri_path.pkey_index = attr->pkey_index;
context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
/* todo implement counter_index functionality */
@@ -2277,7 +2281,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_AV) {
err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
attr_mask, 0, attr);
attr_mask, 0, attr, false);
if (err)
goto out;
}
@@ -2288,7 +2292,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_ALT_PATH) {
err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
&context->alt_path,
attr->alt_port_num, attr_mask, 0, attr);
attr->alt_port_num,
attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
0, attr, true);
if (err)
goto out;
}
@@ -4013,11 +4019,12 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
qp_attr->alt_pkey_index =
be16_to_cpu(context->alt_path.pkey_index);
qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
}
qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
qp_attr->port_num = context->pri_path.port;
/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
@@ -4079,17 +4086,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
if (!ibqp->uobject) {
qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
qp_attr->cap.max_send_wr = qp->sq.max_post;
qp_attr->cap.max_send_sge = qp->sq.max_gs;
qp_init_attr->qp_context = ibqp->qp_context;
} else {
qp_attr->cap.max_send_wr = 0;
qp_attr->cap.max_send_sge = 0;
}
/* We don't support inline sends for kernel QPs (yet), and we
* don't know what userspace's value should be.
*/
qp_attr->cap.max_inline_data = 0;
qp_init_attr->qp_type = ibqp->qp_type;
qp_init_attr->recv_cq = ibqp->recv_cq;
qp_init_attr->send_cq = ibqp->send_cq;
qp_init_attr->srq = ibqp->srq;
qp_attr->cap.max_inline_data = qp->max_inline_data;
qp_init_attr->cap = qp_attr->cap;

View File

@@ -36,7 +36,6 @@
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/dma-attrs.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
@@ -112,10 +111,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int i;
int flags;
dma_addr_t pa;
DEFINE_DMA_ATTRS(attrs);
if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
if (!can_do_mlock())
return -EPERM;

View File

@@ -502,6 +502,12 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
*/
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
enum ib_qp_type type)
__releases(&qp->s_lock)
__releases(&qp->s_hlock)
__releases(&qp->r_lock)
__acquires(&qp->r_lock)
__acquires(&qp->s_hlock)
__acquires(&qp->s_lock)
{
if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET;

View File

@@ -94,6 +94,7 @@ enum {
IPOIB_NEIGH_TBL_FLUSH = 12,
IPOIB_FLAG_DEV_ADDR_SET = 13,
IPOIB_FLAG_DEV_ADDR_CTRL = 14,
IPOIB_FLAG_GOING_DOWN = 15,
IPOIB_MAX_BACKOFF_SECONDS = 16,

View File

@@ -1486,6 +1486,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
{
struct net_device *dev = to_net_dev(d);
int ret;
struct ipoib_dev_priv *priv = netdev_priv(dev);
if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
return -EPERM;
if (!rtnl_trylock())
return restart_syscall();

View File

@@ -1015,7 +1015,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
return false;
netif_addr_lock(priv->dev);
netif_addr_lock_bh(priv->dev);
/* The subnet prefix may have changed, update it now so we won't have
* to do it later
@@ -1026,12 +1026,12 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
search_gid.global.interface_id = priv->local_gid.global.interface_id;
netif_addr_unlock(priv->dev);
netif_addr_unlock_bh(priv->dev);
err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
priv->dev, &port, &index);
netif_addr_lock(priv->dev);
netif_addr_lock_bh(priv->dev);
if (search_gid.global.interface_id !=
priv->local_gid.global.interface_id)
@@ -1092,7 +1092,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
}
out:
netif_addr_unlock(priv->dev);
netif_addr_unlock_bh(priv->dev);
return ret;
}

View File

@@ -1206,7 +1206,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
neigh = NULL;
goto out_unlock;
}
neigh->alive = jiffies;
if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
neigh->alive = jiffies;
goto out_unlock;
}
}
@@ -1851,7 +1853,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
struct ipoib_dev_priv *child_priv;
struct net_device *netdev = priv->dev;
netif_addr_lock(netdev);
netif_addr_lock_bh(netdev);
memcpy(&priv->local_gid.global.interface_id,
&gid->global.interface_id,
@@ -1859,7 +1861,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
netif_addr_unlock(netdev);
netif_addr_unlock_bh(netdev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
down_read(&priv->vlan_rwsem);
@@ -1875,7 +1877,7 @@ static int ipoib_check_lladdr(struct net_device *dev,
union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
int ret = 0;
netif_addr_lock(dev);
netif_addr_lock_bh(dev);
/* Make sure the QPN, reserved and subnet prefix match the current
* lladdr, it also makes sure the lladdr is unicast.
@@ -1885,7 +1887,7 @@ static int ipoib_check_lladdr(struct net_device *dev,
gid->global.interface_id == 0)
ret = -EINVAL;
netif_addr_unlock(dev);
netif_addr_unlock_bh(dev);
return ret;
}
@@ -2141,6 +2143,9 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
ib_unregister_event_handler(&priv->event_handler);
flush_workqueue(ipoib_workqueue);
/* mark interface in the middle of destruction */
set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
rtnl_lock();
dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
rtnl_unlock();

View File

@@ -582,13 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
return;
}
priv->local_lid = port_attr.lid;
netif_addr_lock(dev);
netif_addr_lock_bh(dev);
if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
netif_addr_unlock(dev);
netif_addr_unlock_bh(dev);
return;
}
netif_addr_unlock(dev);
netif_addr_unlock_bh(dev);
spin_lock_irq(&priv->lock);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))

View File

@@ -131,6 +131,9 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
ppriv = netdev_priv(pdev);
if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
return -EPERM;
snprintf(intf_name, sizeof intf_name, "%s.%04x",
ppriv->dev->name, pkey);
priv = ipoib_intf_alloc(intf_name);
@@ -183,6 +186,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
ppriv = netdev_priv(pdev);
if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
return -EPERM;
if (!rtnl_trylock())
return restart_syscall();

View File

@@ -1457,7 +1457,6 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
{
unsigned int sg_offset = 0;
state->desc = req->indirect_desc;
state->fr.next = req->fr_list;
state->fr.end = req->fr_list + ch->target->mr_per_cmd;
state->sg = scat;
@@ -1489,7 +1488,6 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct scatterlist *sg;
int i;
state->desc = req->indirect_desc;
for_each_sg(scat, sg, count, i) {
srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
ib_sg_dma_len(dev->dev, sg),
@@ -1655,6 +1653,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
target->indirect_size, DMA_TO_DEVICE);
memset(&state, 0, sizeof(state));
state.desc = req->indirect_desc;
if (dev->use_fast_reg)
ret = srp_map_sg_fr(&state, ch, req, scat, count);
else if (dev->use_fmr)
@@ -3526,7 +3525,7 @@ static void srp_add_one(struct ib_device *device)
int mr_page_shift, p;
u64 max_pages_per_mr;
srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
if (!srp_dev)
return;
@@ -3586,8 +3585,6 @@ static void srp_add_one(struct ib_device *device)
IB_ACCESS_REMOTE_WRITE);
if (IS_ERR(srp_dev->global_mr))
goto err_pd;
} else {
srp_dev->global_mr = NULL;
}
for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {

View File

@@ -460,10 +460,9 @@ struct mlx5_core_qp {
};
struct mlx5_qp_path {
u8 fl;
u8 fl_free_ar;
u8 rsvd3;
u8 free_ar;
u8 pkey_index;
__be16 pkey_index;
u8 rsvd0;
u8 grh_mlid;
__be16 rlid;
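
The struct change above widens pkey_index from u8 to a big-endian __be16 and folds fl/free_ar into a single fl_free_ar byte, matching the hardware layout. With the old fields, the QP code stored a host-order index into one byte and masked the read-back with 0x7f, silently corrupting any P_Key index above 127. A user-space illustration of that truncation (htons/ntohs stand in for the kernel's cpu_to_be16/be16_to_cpu):

        #include <stdio.h>
        #include <stdint.h>
        #include <arpa/inet.h>

        int main(void)
        {
                uint16_t pkey_index = 200;          /* any index > 0x7f */
                uint16_t wire = htons(pkey_index);  /* big-endian hardware field */

                printf("16-bit read-back: %u\n", (unsigned)ntohs(wire));      /* 200 */
                printf("old 7-bit read:   %u\n", (unsigned)(pkey_index & 0x7f)); /* 72: corrupted */
                return 0;
        }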

View File

@@ -217,10 +217,10 @@ enum ib_device_cap_flags {
IB_DEVICE_CROSS_CHANNEL = (1 << 27),
IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
IB_DEVICE_ON_DEMAND_PAGING = (1 << 31),
IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33),
IB_DEVICE_RAW_SCATTER_FCS = ((u64)1 << 34),
IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
};
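
The enum fix above is subtle: ib_device_cap_flags carries 64-bit values, but a bare 1 << 31 is computed in signed int (strictly undefined behavior in C; on common compilers it yields INT_MIN), and the negative result sign-extends when widened, smearing ones across bits 31-63 and colliding with the flags defined past bit 31 such as IB_DEVICE_SG_GAPS_REG. A short user-space demonstration:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t bad  = 1 << 31;    /* int shift, sign-extends on widening */
                uint64_t good = 1ULL << 31; /* unsigned 64-bit shift, the intent */

                printf("bad  = %#llx\n", (unsigned long long)bad);  /* 0xffffffff80000000 */
                printf("good = %#llx\n", (unsigned long long)good); /* 0x80000000 */
                return 0;
        }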
enum ib_signature_prot_cap {