Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull InfiniBand/RDMA updates from Roland Dreier:
 - Re-enable on-demand paging changes with stable ABI
 - Fairly large set of ocrdma HW driver fixes
 - Some qib HW driver fixes
 - Other miscellaneous changes

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (43 commits)
  IB/qib: Add blank line after declaration
  IB/qib: Fix checkpatch warnings
  IB/mlx5: Enable the ODP capability query verb
  IB/core: Add on demand paging caps to ib_uverbs_ex_query_device
  IB/core: Add support for extended query device caps
  RDMA/cxgb4: Don't hang threads forever waiting on WR replies
  RDMA/ocrdma: Fix off by one in ocrdma_query_gid()
  RDMA/ocrdma: Use unsigned for bit index
  RDMA/ocrdma: Help gcc generate better code for ocrdma_srq_toggle_bit
  RDMA/ocrdma: Update the ocrdma module version string
  RDMA/ocrdma: set vlan present bit for user AH
  RDMA/ocrdma: remove reference of ocrdma_dev out of ocrdma_qp structure
  RDMA/ocrdma: Add support for interrupt moderation
  RDMA/ocrdma: Honor return value of ocrdma_resolve_dmac
  RDMA/ocrdma: Allow expansion of the SQ CQEs via buddy CQ expansion of the QP
  RDMA/ocrdma: Discontinue support of RDMA-READ-WITH-INVALIDATE
  RDMA/ocrdma: Host crash on destroying device resources
  RDMA/ocrdma: Report correct state in ibv_query_qp
  RDMA/ocrdma: Debugfs enhancments for ocrdma driver
  RDMA/ocrdma: Report correct count of interrupt vectors while registering ocrdma device
  ...
torvalds committed Feb 21, 2015
2 parents 2953245 + 147d1da commit b5ccb07
Showing 63 changed files with 1,212 additions and 561 deletions.
2 changes: 1 addition & 1 deletion MAINTAINERS
@@ -8567,7 +8567,7 @@ S: Maintained
F: drivers/scsi/sr*

SCSI RDMA PROTOCOL (SRP) INITIATOR
- M: Bart Van Assche <bvanassche@acm.org>
+ M: Bart Van Assche <bart.vanassche@sandisk.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.openfabrics.org
3 changes: 3 additions & 0 deletions drivers/infiniband/core/ucma.c
@@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
if (!optlen)
return -EINVAL;

memset(&sa_path, 0, sizeof(sa_path));
sa_path.vlan_id = 0xffff;

ib_sa_unpack_path(path_data->path_rec, &sa_path);
ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
if (ret)
3 changes: 2 additions & 1 deletion drivers/infiniband/core/umem_odp.c
@@ -294,7 +294,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
rbt_ib_umem_insert(&umem->odp_data->interval_tree,
&context->umem_tree);
- if (likely(!atomic_read(&context->notifier_count)))
+ if (likely(!atomic_read(&context->notifier_count)) ||
+     context->odp_mrs_count == 1)
umem->odp_data->mn_counters_active = true;
else
list_add(&umem->odp_data->no_private_counters,
1 change: 1 addition & 0 deletions drivers/infiniband/core/uverbs.h
@@ -258,5 +258,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);

IB_UVERBS_DECLARE_EX_CMD(create_flow);
IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
IB_UVERBS_DECLARE_EX_CMD(query_device);

#endif /* UVERBS_H */
158 changes: 113 additions & 45 deletions drivers/infiniband/core/uverbs_cmd.c
@@ -400,6 +400,52 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
struct ib_uverbs_query_device_resp *resp,
struct ib_device_attr *attr)
{
resp->fw_ver = attr->fw_ver;
resp->node_guid = file->device->ib_dev->node_guid;
resp->sys_image_guid = attr->sys_image_guid;
resp->max_mr_size = attr->max_mr_size;
resp->page_size_cap = attr->page_size_cap;
resp->vendor_id = attr->vendor_id;
resp->vendor_part_id = attr->vendor_part_id;
resp->hw_ver = attr->hw_ver;
resp->max_qp = attr->max_qp;
resp->max_qp_wr = attr->max_qp_wr;
resp->device_cap_flags = attr->device_cap_flags;
resp->max_sge = attr->max_sge;
resp->max_sge_rd = attr->max_sge_rd;
resp->max_cq = attr->max_cq;
resp->max_cqe = attr->max_cqe;
resp->max_mr = attr->max_mr;
resp->max_pd = attr->max_pd;
resp->max_qp_rd_atom = attr->max_qp_rd_atom;
resp->max_ee_rd_atom = attr->max_ee_rd_atom;
resp->max_res_rd_atom = attr->max_res_rd_atom;
resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
resp->atomic_cap = attr->atomic_cap;
resp->max_ee = attr->max_ee;
resp->max_rdd = attr->max_rdd;
resp->max_mw = attr->max_mw;
resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
resp->max_mcast_grp = attr->max_mcast_grp;
resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
resp->max_ah = attr->max_ah;
resp->max_fmr = attr->max_fmr;
resp->max_map_per_fmr = attr->max_map_per_fmr;
resp->max_srq = attr->max_srq;
resp->max_srq_wr = attr->max_srq_wr;
resp->max_srq_sge = attr->max_srq_sge;
resp->max_pkeys = attr->max_pkeys;
resp->local_ca_ack_delay = attr->local_ca_ack_delay;
resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
const char __user *buf,
int in_len, int out_len)
@@ -420,47 +466,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
return ret;

memset(&resp, 0, sizeof resp);

- resp.fw_ver = attr.fw_ver;
- resp.node_guid = file->device->ib_dev->node_guid;
- resp.sys_image_guid = attr.sys_image_guid;
- resp.max_mr_size = attr.max_mr_size;
- resp.page_size_cap = attr.page_size_cap;
- resp.vendor_id = attr.vendor_id;
- resp.vendor_part_id = attr.vendor_part_id;
- resp.hw_ver = attr.hw_ver;
- resp.max_qp = attr.max_qp;
- resp.max_qp_wr = attr.max_qp_wr;
- resp.device_cap_flags = attr.device_cap_flags;
- resp.max_sge = attr.max_sge;
- resp.max_sge_rd = attr.max_sge_rd;
- resp.max_cq = attr.max_cq;
- resp.max_cqe = attr.max_cqe;
- resp.max_mr = attr.max_mr;
- resp.max_pd = attr.max_pd;
- resp.max_qp_rd_atom = attr.max_qp_rd_atom;
- resp.max_ee_rd_atom = attr.max_ee_rd_atom;
- resp.max_res_rd_atom = attr.max_res_rd_atom;
- resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
- resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
- resp.atomic_cap = attr.atomic_cap;
- resp.max_ee = attr.max_ee;
- resp.max_rdd = attr.max_rdd;
- resp.max_mw = attr.max_mw;
- resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
- resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
- resp.max_mcast_grp = attr.max_mcast_grp;
- resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
- resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
- resp.max_ah = attr.max_ah;
- resp.max_fmr = attr.max_fmr;
- resp.max_map_per_fmr = attr.max_map_per_fmr;
- resp.max_srq = attr.max_srq;
- resp.max_srq_wr = attr.max_srq_wr;
- resp.max_srq_sge = attr.max_srq_sge;
- resp.max_pkeys = attr.max_pkeys;
- resp.local_ca_ack_delay = attr.local_ca_ack_delay;
- resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
+ copy_query_dev_fields(file, &resp, &attr);

if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
@@ -2091,20 +2097,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
if (qp->real_qp == qp) {
ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
if (ret)
- goto out;
+ goto release_qp;
ret = qp->device->modify_qp(qp, attr,
modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
} else {
ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
}

- put_qp_read(qp);
-
if (ret)
- goto out;
+ goto release_qp;

ret = in_len;

+ release_qp:
+ put_qp_read(qp);
+
out:
kfree(attr);

@@ -3287,3 +3294,64 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,

return ret ? ret : in_len;
}

int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
struct ib_uverbs_ex_query_device_resp resp;
struct ib_uverbs_ex_query_device cmd;
struct ib_device_attr attr;
struct ib_device *device;
int err;

device = file->device->ib_dev;
if (ucore->inlen < sizeof(cmd))
return -EINVAL;

err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
if (err)
return err;

if (cmd.comp_mask)
return -EINVAL;

if (cmd.reserved)
return -EINVAL;

resp.response_length = offsetof(typeof(resp), odp_caps);

if (ucore->outlen < resp.response_length)
return -ENOSPC;

err = device->query_device(device, &attr);
if (err)
return err;

copy_query_dev_fields(file, &resp.base, &attr);
resp.comp_mask = 0;

if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
resp.odp_caps.general_caps = attr.odp_caps.general_caps;
resp.odp_caps.per_transport_caps.rc_odp_caps =
attr.odp_caps.per_transport_caps.rc_odp_caps;
resp.odp_caps.per_transport_caps.uc_odp_caps =
attr.odp_caps.per_transport_caps.uc_odp_caps;
resp.odp_caps.per_transport_caps.ud_odp_caps =
attr.odp_caps.per_transport_caps.ud_odp_caps;
resp.odp_caps.reserved = 0;
#else
memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
#endif
resp.response_length += sizeof(resp.odp_caps);

end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
if (err)
return err;

return 0;
}
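Note on consuming the new extended verb shown above: the reply is variable-length, and resp.response_length reports how many bytes the kernel actually wrote back. A minimal consumer-side sketch of that convention follows; it assumes the uapi struct layout introduced by this series (struct ib_uverbs_ex_query_device_resp, exported via <rdma/ib_user_verbs.h>) and is illustrative only, not part of the commit.

#include <stdbool.h>
#include <stddef.h>
#include <rdma/ib_user_verbs.h>	/* assumed install path of the uapi header */

/* odp_caps is only meaningful if the kernel reported enough response bytes
 * to cover it; older kernels stop at the base attributes. */
static bool resp_has_odp_caps(const struct ib_uverbs_ex_query_device_resp *resp)
{
	return resp->response_length >=
	       offsetof(struct ib_uverbs_ex_query_device_resp, odp_caps) +
	       sizeof(resp->odp_caps);
}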
1 change: 1 addition & 0 deletions drivers/infiniband/core/uverbs_main.c
@@ -123,6 +123,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
struct ib_udata *uhw) = {
[IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
[IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow,
[IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device,
};

static void ib_uverbs_add_one(struct ib_device *device);
9 changes: 8 additions & 1 deletion drivers/infiniband/hw/cxgb4/ev.c
@@ -225,13 +225,20 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
struct c4iw_cq *chp;
unsigned long flag;

+ spin_lock_irqsave(&dev->lock, flag);
chp = get_chp(dev, qid);
if (chp) {
+ atomic_inc(&chp->refcnt);
+ spin_unlock_irqrestore(&dev->lock, flag);
t4_clear_cq_armed(&chp->cq);
spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
- } else
+ if (atomic_dec_and_test(&chp->refcnt))
+ wake_up(&chp->wait);
+ } else {
PDBG("%s unknown cqid 0x%x\n", __func__, qid);
+ spin_unlock_irqrestore(&dev->lock, flag);
+ }
return 0;
}
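The atomic_inc()/atomic_dec_and_test() pair added above lets c4iw_ev_handler() drop dev->lock before invoking the completion handler while still keeping the CQ alive until the handler returns. A hedged sketch of the destroy-side logic this is assumed to pair with follows; the names are the cxgb4 driver's, but the body is illustrative and not taken from this commit.

#include "iw_cxgb4.h"	/* driver-internal header, for the sketch only */

static void c4iw_cq_teardown_sketch(struct c4iw_dev *dev, struct c4iw_cq *chp)
{
	/* Make the CQ unreachable first, so get_chp() stops handing it
	 * to the event handler. */
	remove_handle(dev, &dev->cqidr, chp->cq.cqid);

	/* Drop the initial reference, then wait out any handler that won
	 * the race and still holds its own reference. */
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	/* Only now is it safe to free the CQ's memory and hardware context. */
}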
29 changes: 14 additions & 15 deletions drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -196,7 +196,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
return (int)(rdev->lldi.vr->stag.size >> 5);
}

- #define C4IW_WR_TO (30*HZ)
+ #define C4IW_WR_TO (60*HZ)

struct c4iw_wr_wait {
struct completion completion;
@@ -220,22 +220,21 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
u32 hwtid, u32 qpid,
const char *func)
{
- unsigned to = C4IW_WR_TO;
int ret;

- do {
- ret = wait_for_completion_timeout(&wr_waitp->completion, to);
- if (!ret) {
- printk(KERN_ERR MOD "%s - Device %s not responding - "
- "tid %u qpid %u\n", func,
- pci_name(rdev->lldi.pdev), hwtid, qpid);
- if (c4iw_fatal_error(rdev)) {
- wr_waitp->ret = -EIO;
- break;
- }
- to = to << 2;
- }
- } while (!ret);
+ if (c4iw_fatal_error(rdev)) {
+ wr_waitp->ret = -EIO;
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
+ if (!ret) {
+ PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+ func, pci_name(rdev->lldi.pdev), hwtid, qpid);
+ rdev->flags |= T4_FATAL_ERROR;
+ wr_waitp->ret = -EIO;
+ }
+ out:
if (wr_waitp->ret)
PDBG("%s: FW reply %d tid %u qpid %u\n",
pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
3 changes: 0 additions & 3 deletions drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -908,9 +908,6 @@ void ipath_chip_cleanup(struct ipath_devdata *);
/* clean up any chip type-specific stuff */
void ipath_chip_done(void);

/* check to see if we have to force ordering for write combining */
int ipath_unordered_wc(void);

void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
unsigned cnt);
void ipath_cancel_sends(struct ipath_devdata *, int);
13 changes: 0 additions & 13 deletions drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
@@ -47,16 +47,3 @@ int ipath_enable_wc(struct ipath_devdata *dd)
{
return 0;
}

/**
* ipath_unordered_wc - indicate whether write combining is unordered
*
* Because our performance depends on our ability to do write
* combining mmio writes in the most efficient way, we need to
* know if we are on a processor that may reorder stores when
* write combining.
*/
int ipath_unordered_wc(void)
{
return 1;
}
15 changes: 0 additions & 15 deletions drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
@@ -167,18 +167,3 @@ void ipath_disable_wc(struct ipath_devdata *dd)
dd->ipath_wc_cookie = 0; /* even on failure */
}
}

/**
* ipath_unordered_wc - indicate whether write combining is ordered
*
* Because our performance depends on our ability to do write combining mmio
* writes in the most efficient way, we need to know if we are on an Intel
* or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in
* the order completed, and so no special flushing is required to get
* correct ordering. Intel processors, however, will flush write buffers
* out in "random" orders, and so explicit ordering is needed at times.
*/
int ipath_unordered_wc(void)
{
return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
}
2 changes: 1 addition & 1 deletion drivers/infiniband/hw/mlx4/cm.c
@@ -372,7 +372,7 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
if (*slave < 0) {
mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
- gid.global.interface_id);
+ be64_to_cpu(gid.global.interface_id));
return -ENOENT;
}
return 0;
7 changes: 3 additions & 4 deletions drivers/infiniband/hw/mlx4/cq.c
@@ -369,8 +369,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
int err;

mutex_lock(&cq->resize_mutex);

- if (entries < 1) {
+ if (entries < 1 || entries > dev->dev->caps.max_cqes) {
err = -EINVAL;
goto out;
}
@@ -381,7 +380,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
goto out;
}

- if (entries > dev->dev->caps.max_cqes) {
+ if (entries > dev->dev->caps.max_cqes + 1) {
err = -EINVAL;
goto out;
}
@@ -394,7 +393,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
/* Can't be smaller than the number of outstanding CQEs */
outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
if (entries < outst_cqe + 1) {
- err = 0;
+ err = -EINVAL;
goto out;
}

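For context on the two different bounds above: between those checks the driver adds one slot for the hardware-reserved CQE and rounds the request up to a power of two, which is why the later comparison allows max_cqes + 1. A simplified sketch of the assumed flow in mlx4_ib_resize_cq(), without the real function's resize_mutex and goto-based error path (illustrative only):

/* Reject the raw user request against the advertised limit. */
if (entries < 1 || entries > dev->dev->caps.max_cqes)
	return -EINVAL;

entries = roundup_pow_of_two(entries + 1);	/* +1 hardware-reserved CQE */

if (entries == ibcq->cqe + 1)			/* already this size */
	return 0;

/* After the adjustment, one extra entry beyond max_cqes is legal. */
if (entries > dev->dev->caps.max_cqes + 1)
	return -EINVAL;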
