Columns: python_code (string, lengths 0 to 1.8M); repo_name (string, 7 classes); file_path (string, lengths 5 to 99)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2022 Hewlett Packard Enterprise, Inc. All rights reserved. * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. */ /* * rxe_mcast.c implements driver support for multicast transport. * It is based on two data structures struct rxe_mcg ('mcg') and * struct rxe_mca ('mca'). An mcg is allocated each time a qp is * attached to a new mgid for the first time. These are indexed by * a red-black tree using the mgid. This data structure is searched * for the mcg when a multicast packet is received and when another * qp is attached to the same mgid. It is cleaned up when the last qp * is detached from the mcg. Each time a qp is attached to an mcg an * mca is created. It holds a pointer to the qp and is added to a list * of qp's that are attached to the mcg. The qp_list is used to replicate * mcast packets in the rxe receive path. */ #include "rxe.h" /** * rxe_mcast_add - add multicast address to rxe device * @rxe: rxe device object * @mgid: multicast address as a gid * * Returns 0 on success else an error */ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid) { unsigned char ll_addr[ETH_ALEN]; ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr); return dev_mc_add(rxe->ndev, ll_addr); } /** * rxe_mcast_del - delete multicast address from rxe device * @rxe: rxe device object * @mgid: multicast address as a gid * * Returns 0 on success else an error */ static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid) { unsigned char ll_addr[ETH_ALEN]; ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr); return dev_mc_del(rxe->ndev, ll_addr); } /** * __rxe_insert_mcg - insert an mcg into red-black tree (rxe->mcg_tree) * @mcg: mcg object with an embedded red-black tree node * * Context: caller must hold a reference to mcg and rxe->mcg_lock and * is responsible to avoid adding the same mcg twice to the tree. 
*/ static void __rxe_insert_mcg(struct rxe_mcg *mcg) { struct rb_root *tree = &mcg->rxe->mcg_tree; struct rb_node **link = &tree->rb_node; struct rb_node *node = NULL; struct rxe_mcg *tmp; int cmp; while (*link) { node = *link; tmp = rb_entry(node, struct rxe_mcg, node); cmp = memcmp(&tmp->mgid, &mcg->mgid, sizeof(mcg->mgid)); if (cmp > 0) link = &(*link)->rb_left; else link = &(*link)->rb_right; } rb_link_node(&mcg->node, node, link); rb_insert_color(&mcg->node, tree); } /** * __rxe_remove_mcg - remove an mcg from red-black tree holding lock * @mcg: mcast group object with an embedded red-black tree node * * Context: caller must hold a reference to mcg and rxe->mcg_lock */ static void __rxe_remove_mcg(struct rxe_mcg *mcg) { rb_erase(&mcg->node, &mcg->rxe->mcg_tree); } /** * __rxe_lookup_mcg - lookup mcg in rxe->mcg_tree while holding lock * @rxe: rxe device object * @mgid: multicast IP address * * Context: caller must hold rxe->mcg_lock * Returns: mcg on success and takes a ref to mcg else NULL */ static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid) { struct rb_root *tree = &rxe->mcg_tree; struct rxe_mcg *mcg; struct rb_node *node; int cmp; node = tree->rb_node; while (node) { mcg = rb_entry(node, struct rxe_mcg, node); cmp = memcmp(&mcg->mgid, mgid, sizeof(*mgid)); if (cmp > 0) node = node->rb_left; else if (cmp < 0) node = node->rb_right; else break; } if (node) { kref_get(&mcg->ref_cnt); return mcg; } return NULL; } /** * rxe_lookup_mcg - lookup up mcg in red-back tree * @rxe: rxe device object * @mgid: multicast IP address * * Returns: mcg if found else NULL */ struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid) { struct rxe_mcg *mcg; spin_lock_bh(&rxe->mcg_lock); mcg = __rxe_lookup_mcg(rxe, mgid); spin_unlock_bh(&rxe->mcg_lock); return mcg; } /** * __rxe_init_mcg - initialize a new mcg * @rxe: rxe device * @mgid: multicast address as a gid * @mcg: new mcg object * * Context: caller should hold rxe->mcg lock */ static void __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid, struct rxe_mcg *mcg) { kref_init(&mcg->ref_cnt); memcpy(&mcg->mgid, mgid, sizeof(mcg->mgid)); INIT_LIST_HEAD(&mcg->qp_list); mcg->rxe = rxe; /* caller holds a ref on mcg but that will be * dropped when mcg goes out of scope. We need to take a ref * on the pointer that will be saved in the red-black tree * by __rxe_insert_mcg and used to lookup mcg from mgid later. * Inserting mcg makes it visible to outside so this should * be done last after the object is ready. 
*/ kref_get(&mcg->ref_cnt); __rxe_insert_mcg(mcg); } /** * rxe_get_mcg - lookup or allocate a mcg * @rxe: rxe device object * @mgid: multicast IP address as a gid * * Returns: mcg on success else ERR_PTR(error) */ static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid) { struct rxe_mcg *mcg, *tmp; int err; if (rxe->attr.max_mcast_grp == 0) return ERR_PTR(-EINVAL); /* check to see if mcg already exists */ mcg = rxe_lookup_mcg(rxe, mgid); if (mcg) return mcg; /* check to see if we have reached limit */ if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) { err = -ENOMEM; goto err_dec; } /* speculative alloc of new mcg */ mcg = kzalloc(sizeof(*mcg), GFP_KERNEL); if (!mcg) { err = -ENOMEM; goto err_dec; } spin_lock_bh(&rxe->mcg_lock); /* re-check to see if someone else just added it */ tmp = __rxe_lookup_mcg(rxe, mgid); if (tmp) { spin_unlock_bh(&rxe->mcg_lock); atomic_dec(&rxe->mcg_num); kfree(mcg); return tmp; } __rxe_init_mcg(rxe, mgid, mcg); spin_unlock_bh(&rxe->mcg_lock); /* add mcast address outside of lock */ err = rxe_mcast_add(rxe, mgid); if (!err) return mcg; kfree(mcg); err_dec: atomic_dec(&rxe->mcg_num); return ERR_PTR(err); } /** * rxe_cleanup_mcg - cleanup mcg for kref_put * @kref: struct kref embnedded in mcg */ void rxe_cleanup_mcg(struct kref *kref) { struct rxe_mcg *mcg = container_of(kref, typeof(*mcg), ref_cnt); kfree(mcg); } /** * __rxe_destroy_mcg - destroy mcg object holding rxe->mcg_lock * @mcg: the mcg object * * Context: caller is holding rxe->mcg_lock * no qp's are attached to mcg */ static void __rxe_destroy_mcg(struct rxe_mcg *mcg) { struct rxe_dev *rxe = mcg->rxe; /* remove mcg from red-black tree then drop ref */ __rxe_remove_mcg(mcg); kref_put(&mcg->ref_cnt, rxe_cleanup_mcg); atomic_dec(&rxe->mcg_num); } /** * rxe_destroy_mcg - destroy mcg object * @mcg: the mcg object * * Context: no qp's are attached to mcg */ static void rxe_destroy_mcg(struct rxe_mcg *mcg) { /* delete mcast address outside of lock */ rxe_mcast_del(mcg->rxe, &mcg->mgid); spin_lock_bh(&mcg->rxe->mcg_lock); __rxe_destroy_mcg(mcg); spin_unlock_bh(&mcg->rxe->mcg_lock); } /** * __rxe_init_mca - initialize a new mca holding lock * @qp: qp object * @mcg: mcg object * @mca: empty space for new mca * * Context: caller must hold references on qp and mcg, rxe->mcg_lock * and pass memory for new mca * * Returns: 0 on success else an error */ static int __rxe_init_mca(struct rxe_qp *qp, struct rxe_mcg *mcg, struct rxe_mca *mca) { struct rxe_dev *rxe = to_rdev(qp->ibqp.device); int n; n = atomic_inc_return(&rxe->mcg_attach); if (n > rxe->attr.max_total_mcast_qp_attach) { atomic_dec(&rxe->mcg_attach); return -ENOMEM; } n = atomic_inc_return(&mcg->qp_num); if (n > rxe->attr.max_mcast_qp_attach) { atomic_dec(&mcg->qp_num); atomic_dec(&rxe->mcg_attach); return -ENOMEM; } atomic_inc(&qp->mcg_num); rxe_get(qp); mca->qp = qp; list_add_tail(&mca->qp_list, &mcg->qp_list); return 0; } /** * rxe_attach_mcg - attach qp to mcg if not already attached * @qp: qp object * @mcg: mcg object * * Context: caller must hold reference on qp and mcg. 
* Returns: 0 on success else an error */ static int rxe_attach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp) { struct rxe_dev *rxe = mcg->rxe; struct rxe_mca *mca, *tmp; int err; /* check to see if the qp is already a member of the group */ spin_lock_bh(&rxe->mcg_lock); list_for_each_entry(mca, &mcg->qp_list, qp_list) { if (mca->qp == qp) { spin_unlock_bh(&rxe->mcg_lock); return 0; } } spin_unlock_bh(&rxe->mcg_lock); /* speculative alloc new mca without using GFP_ATOMIC */ mca = kzalloc(sizeof(*mca), GFP_KERNEL); if (!mca) return -ENOMEM; spin_lock_bh(&rxe->mcg_lock); /* re-check to see if someone else just attached qp */ list_for_each_entry(tmp, &mcg->qp_list, qp_list) { if (tmp->qp == qp) { kfree(mca); err = 0; goto out; } } err = __rxe_init_mca(qp, mcg, mca); if (err) kfree(mca); out: spin_unlock_bh(&rxe->mcg_lock); return err; } /** * __rxe_cleanup_mca - cleanup mca object holding lock * @mca: mca object * @mcg: mcg object * * Context: caller must hold a reference to mcg and rxe->mcg_lock */ static void __rxe_cleanup_mca(struct rxe_mca *mca, struct rxe_mcg *mcg) { list_del(&mca->qp_list); atomic_dec(&mcg->qp_num); atomic_dec(&mcg->rxe->mcg_attach); atomic_dec(&mca->qp->mcg_num); rxe_put(mca->qp); kfree(mca); } /** * rxe_detach_mcg - detach qp from mcg * @mcg: mcg object * @qp: qp object * * Returns: 0 on success else an error if qp is not attached. */ static int rxe_detach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp) { struct rxe_dev *rxe = mcg->rxe; struct rxe_mca *mca, *tmp; spin_lock_bh(&rxe->mcg_lock); list_for_each_entry_safe(mca, tmp, &mcg->qp_list, qp_list) { if (mca->qp == qp) { __rxe_cleanup_mca(mca, mcg); /* if the number of qp's attached to the * mcast group falls to zero go ahead and * tear it down. This will not free the * object since we are still holding a ref * from the caller */ if (atomic_read(&mcg->qp_num) <= 0) __rxe_destroy_mcg(mcg); spin_unlock_bh(&rxe->mcg_lock); return 0; } } /* we didn't find the qp on the list */ spin_unlock_bh(&rxe->mcg_lock); return -EINVAL; } /** * rxe_attach_mcast - attach qp to multicast group (see IBA-11.3.1) * @ibqp: (IB) qp object * @mgid: multicast IP address * @mlid: multicast LID, ignored for RoCEv2 (see IBA-A17.5.6) * * Returns: 0 on success else an errno */ int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid) { int err; struct rxe_dev *rxe = to_rdev(ibqp->device); struct rxe_qp *qp = to_rqp(ibqp); struct rxe_mcg *mcg; /* takes a ref on mcg if successful */ mcg = rxe_get_mcg(rxe, mgid); if (IS_ERR(mcg)) return PTR_ERR(mcg); err = rxe_attach_mcg(mcg, qp); /* if we failed to attach the first qp to mcg tear it down */ if (atomic_read(&mcg->qp_num) == 0) rxe_destroy_mcg(mcg); kref_put(&mcg->ref_cnt, rxe_cleanup_mcg); return err; } /** * rxe_detach_mcast - detach qp from multicast group (see IBA-11.3.2) * @ibqp: address of (IB) qp object * @mgid: multicast IP address * @mlid: multicast LID, ignored for RoCEv2 (see IBA-A17.5.6) * * Returns: 0 on success else an errno */ int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid) { struct rxe_dev *rxe = to_rdev(ibqp->device); struct rxe_qp *qp = to_rqp(ibqp); struct rxe_mcg *mcg; int err; mcg = rxe_lookup_mcg(rxe, mgid); if (!mcg) return -EINVAL; err = rxe_detach_mcg(mcg, qp); kref_put(&mcg->ref_cnt, rxe_cleanup_mcg); return err; }
linux-master
drivers/infiniband/sw/rxe/rxe_mcast.c
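A minimal userspace sketch of how these verbs are reached (the helper name join_mcast_group and the pre-existing UD QP are assumptions, not part of the file above): libibverbs' ibv_attach_mcast()/ibv_detach_mcast() end up in rxe_attach_mcast()/rxe_detach_mcast() for an rdma_rxe device, and the mlid argument is ignored for RoCEv2 as the kernel comments note.

#include <stdint.h>
#include <string.h>
#include <infiniband/verbs.h>

/* hypothetical helper: attach a UD QP to a multicast GID, then detach */
static int join_mcast_group(struct ibv_qp *qp, const uint8_t raw_mgid[16])
{
	union ibv_gid mgid;
	int err;

	memcpy(mgid.raw, raw_mgid, sizeof(mgid.raw));

	/* kernel side: rxe_get_mcg() finds or allocates the mcg, then an mca
	 * is created for this qp and linked on the mcg qp_list
	 */
	err = ibv_attach_mcast(qp, &mgid, 0);
	if (err)
		return err;

	/* ... receive replicated multicast packets on the qp ... */

	/* the last detach for this mgid tears the mcg down */
	return ibv_detach_mcast(qp, &mgid, 0);
}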
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2020 Hewlett Packard Enterprise, Inc. All rights reserved. */ /* * The rdma_rxe driver supports type 1 or type 2B memory windows. * Type 1 MWs are created by ibv_alloc_mw() verbs calls and bound by * ibv_bind_mw() calls. Type 2 MWs are also created by ibv_alloc_mw() * but bound by bind_mw work requests. The ibv_bind_mw() call is converted * by libibverbs to a bind_mw work request. */ #include "rxe.h" int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) { struct rxe_mw *mw = to_rmw(ibmw); struct rxe_pd *pd = to_rpd(ibmw->pd); struct rxe_dev *rxe = to_rdev(ibmw->device); int ret; rxe_get(pd); ret = rxe_add_to_pool(&rxe->mw_pool, mw); if (ret) { rxe_put(pd); return ret; } mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1); mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ? RXE_MW_STATE_FREE : RXE_MW_STATE_VALID; spin_lock_init(&mw->lock); rxe_finalize(mw); return 0; } int rxe_dealloc_mw(struct ib_mw *ibmw) { struct rxe_mw *mw = to_rmw(ibmw); rxe_cleanup(mw); return 0; } static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_mw *mw, struct rxe_mr *mr, int access) { if (mw->ibmw.type == IB_MW_TYPE_1) { if (unlikely(mw->state != RXE_MW_STATE_VALID)) { rxe_dbg_mw(mw, "attempt to bind a type 1 MW not in the valid state\n"); return -EINVAL; } /* o10-36.2.2 */ if (unlikely((access & IB_ZERO_BASED))) { rxe_dbg_mw(mw, "attempt to bind a zero based type 1 MW\n"); return -EINVAL; } } if (mw->ibmw.type == IB_MW_TYPE_2) { /* o10-37.2.30 */ if (unlikely(mw->state != RXE_MW_STATE_FREE)) { rxe_dbg_mw(mw, "attempt to bind a type 2 MW not in the free state\n"); return -EINVAL; } /* C10-72 */ if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) { rxe_dbg_mw(mw, "attempt to bind type 2 MW with qp with different PD\n"); return -EINVAL; } /* o10-37.2.40 */ if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) { rxe_dbg_mw(mw, "attempt to invalidate type 2 MW by binding with NULL or zero length MR\n"); return -EINVAL; } } /* remaining checks only apply to a nonzero MR */ if (!mr) return 0; if (unlikely(mr->access & IB_ZERO_BASED)) { rxe_dbg_mw(mw, "attempt to bind MW to zero based MR\n"); return -EINVAL; } /* C10-73 */ if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) { rxe_dbg_mw(mw, "attempt to bind an MW to an MR without bind access\n"); return -EINVAL; } /* C10-74 */ if (unlikely((access & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) && !(mr->access & IB_ACCESS_LOCAL_WRITE))) { rxe_dbg_mw(mw, "attempt to bind an Writable MW to an MR without local write access\n"); return -EINVAL; } /* C10-75 */ if (access & IB_ZERO_BASED) { if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) { rxe_dbg_mw(mw, "attempt to bind a ZB MW outside of the MR\n"); return -EINVAL; } } else { if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) || ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) > (mr->ibmr.iova + mr->ibmr.length)))) { rxe_dbg_mw(mw, "attempt to bind a VA MW outside of the MR\n"); return -EINVAL; } } return 0; } static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_mw *mw, struct rxe_mr *mr, int access) { u32 key = wqe->wr.wr.mw.rkey & 0xff; mw->rkey = (mw->rkey & ~0xff) | key; mw->access = access; mw->state = RXE_MW_STATE_VALID; mw->addr = wqe->wr.wr.mw.addr; mw->length = wqe->wr.wr.mw.length; if (mw->mr) { rxe_put(mw->mr); atomic_dec(&mw->mr->num_mw); mw->mr = NULL; } if (mw->length) { mw->mr = mr; atomic_inc(&mr->num_mw); rxe_get(mr); } if (mw->ibmw.type == IB_MW_TYPE_2) { 
rxe_get(qp); mw->qp = qp; } } int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) { int ret; struct rxe_mw *mw; struct rxe_mr *mr; struct rxe_dev *rxe = to_rdev(qp->ibqp.device); u32 mw_rkey = wqe->wr.wr.mw.mw_rkey; u32 mr_lkey = wqe->wr.wr.mw.mr_lkey; int access = wqe->wr.wr.mw.access; mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8); if (unlikely(!mw)) { ret = -EINVAL; goto err; } if (unlikely(mw->rkey != mw_rkey)) { ret = -EINVAL; goto err_drop_mw; } if (likely(wqe->wr.wr.mw.length)) { mr = rxe_pool_get_index(&rxe->mr_pool, mr_lkey >> 8); if (unlikely(!mr)) { ret = -EINVAL; goto err_drop_mw; } if (unlikely(mr->lkey != mr_lkey)) { ret = -EINVAL; goto err_drop_mr; } } else { mr = NULL; } if (access & ~RXE_ACCESS_SUPPORTED_MW) { rxe_err_mw(mw, "access %#x not supported", access); ret = -EOPNOTSUPP; goto err_drop_mr; } spin_lock_bh(&mw->lock); ret = rxe_check_bind_mw(qp, wqe, mw, mr, access); if (ret) goto err_unlock; rxe_do_bind_mw(qp, wqe, mw, mr, access); err_unlock: spin_unlock_bh(&mw->lock); err_drop_mr: if (mr) rxe_put(mr); err_drop_mw: rxe_put(mw); err: return ret; } static int rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw) { if (unlikely(mw->state == RXE_MW_STATE_INVALID)) return -EINVAL; /* o10-37.2.26 */ if (unlikely(mw->ibmw.type == IB_MW_TYPE_1)) return -EINVAL; return 0; } static void rxe_do_invalidate_mw(struct rxe_mw *mw) { struct rxe_qp *qp; struct rxe_mr *mr; /* valid type 2 MW will always have a QP pointer */ qp = mw->qp; mw->qp = NULL; rxe_put(qp); /* valid type 2 MW will always have an MR pointer */ mr = mw->mr; mw->mr = NULL; atomic_dec(&mr->num_mw); rxe_put(mr); mw->access = 0; mw->addr = 0; mw->length = 0; mw->state = RXE_MW_STATE_FREE; } int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) { struct rxe_dev *rxe = to_rdev(qp->ibqp.device); struct rxe_mw *mw; int ret; mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8); if (!mw) { ret = -EINVAL; goto err; } if (rkey != mw->rkey) { ret = -EINVAL; goto err_drop_ref; } spin_lock_bh(&mw->lock); ret = rxe_check_invalidate_mw(qp, mw); if (ret) goto err_unlock; rxe_do_invalidate_mw(mw); err_unlock: spin_unlock_bh(&mw->lock); err_drop_ref: rxe_put(mw); err: return ret; } struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey) { struct rxe_dev *rxe = to_rdev(qp->ibqp.device); struct rxe_pd *pd = to_rpd(qp->ibqp.pd); struct rxe_mw *mw; int index = rkey >> 8; mw = rxe_pool_get_index(&rxe->mw_pool, index); if (!mw) return NULL; if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd || (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) || (mw->length == 0) || ((access & mw->access) != access) || mw->state != RXE_MW_STATE_VALID)) { rxe_put(mw); return NULL; } return mw; } void rxe_mw_cleanup(struct rxe_pool_elem *elem) { struct rxe_mw *mw = container_of(elem, typeof(*mw), elem); struct rxe_pd *pd = to_rpd(mw->ibmw.pd); rxe_put(pd); if (mw->mr) { struct rxe_mr *mr = mw->mr; mw->mr = NULL; atomic_dec(&mr->num_mw); rxe_put(mr); } if (mw->qp) { struct rxe_qp *qp = mw->qp; mw->qp = NULL; rxe_put(qp); } mw->access = 0; mw->addr = 0; mw->length = 0; mw->state = RXE_MW_STATE_INVALID; }
linux-master
drivers/infiniband/sw/rxe/rxe_mw.c
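The header comment above notes that type 1 MWs are created with ibv_alloc_mw() and bound with ibv_bind_mw(), which libibverbs converts into a bind_mw work request. A hedged sketch of that flow follows (the helper name and the pre-existing pd/qp/mr are assumptions; the MR is assumed to have been registered with IBV_ACCESS_MW_BIND and IBV_ACCESS_LOCAL_WRITE so the C10-73/C10-74 checks above pass):

#include <stdint.h>
#include <infiniband/verbs.h>

/* hypothetical helper: allocate a type 1 MW and bind it over part of an MR */
static int bind_type1_window(struct ibv_pd *pd, struct ibv_qp *qp,
			     struct ibv_mr *mr, uint64_t addr, uint64_t len)
{
	struct ibv_mw_bind bind = {
		.wr_id = 1,
		.send_flags = IBV_SEND_SIGNALED,
		.bind_info = {
			.mr = mr,
			.addr = addr,
			.length = len,
			.mw_access_flags = IBV_ACCESS_REMOTE_WRITE,
		},
	};
	struct ibv_mw *mw;
	int err;

	mw = ibv_alloc_mw(pd, IBV_MW_TYPE_1);	/* rxe_alloc_mw() in the driver */
	if (!mw)
		return -1;

	/* libibverbs turns this into a bind_mw work request on the QP */
	err = ibv_bind_mw(qp, mw, &bind);
	if (err) {
		ibv_dealloc_mw(mw);
		return err;
	}

	/* mw->rkey now holds the bound rkey to advertise to the remote side */
	return 0;
}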
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. */ #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/errno.h> #include <rdma/uverbs_ioctl.h> #include "rxe.h" #include "rxe_loc.h" #include "rxe_queue.h" void rxe_mmap_release(struct kref *ref) { struct rxe_mmap_info *ip = container_of(ref, struct rxe_mmap_info, ref); struct rxe_dev *rxe = to_rdev(ip->context->device); spin_lock_bh(&rxe->pending_lock); if (!list_empty(&ip->pending_mmaps)) list_del(&ip->pending_mmaps); spin_unlock_bh(&rxe->pending_lock); vfree(ip->obj); /* buf */ kfree(ip); } /* * open and close keep track of how many times the memory region is mapped, * to avoid releasing it. */ static void rxe_vma_open(struct vm_area_struct *vma) { struct rxe_mmap_info *ip = vma->vm_private_data; kref_get(&ip->ref); } static void rxe_vma_close(struct vm_area_struct *vma) { struct rxe_mmap_info *ip = vma->vm_private_data; kref_put(&ip->ref, rxe_mmap_release); } static const struct vm_operations_struct rxe_vm_ops = { .open = rxe_vma_open, .close = rxe_vma_close, }; /** * rxe_mmap - create a new mmap region * @context: the IB user context of the process making the mmap() call * @vma: the VMA to be initialized * Return zero if the mmap is OK. Otherwise, return an errno. */ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { struct rxe_dev *rxe = to_rdev(context->device); unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; unsigned long size = vma->vm_end - vma->vm_start; struct rxe_mmap_info *ip, *pp; int ret; /* * Search the device's list of objects waiting for a mmap call. * Normally, this list is very short since a call to create a * CQ, QP, or SRQ is soon followed by a call to mmap(). */ spin_lock_bh(&rxe->pending_lock); list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) { if (context != ip->context || (__u64)offset != ip->info.offset) continue; /* Don't allow a mmap larger than the object. */ if (size > ip->info.size) { rxe_dbg_dev(rxe, "mmap region is larger than the object!\n"); spin_unlock_bh(&rxe->pending_lock); ret = -EINVAL; goto done; } goto found_it; } rxe_dbg_dev(rxe, "unable to find pending mmap info\n"); spin_unlock_bh(&rxe->pending_lock); ret = -EINVAL; goto done; found_it: list_del_init(&ip->pending_mmaps); spin_unlock_bh(&rxe->pending_lock); ret = remap_vmalloc_range(vma, ip->obj, 0); if (ret) { rxe_dbg_dev(rxe, "err %d from remap_vmalloc_range\n", ret); goto done; } vma->vm_ops = &rxe_vm_ops; vma->vm_private_data = ip; rxe_vma_open(vma); done: return ret; } /* * Allocate information for rxe_mmap */ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size, struct ib_udata *udata, void *obj) { struct rxe_mmap_info *ip; if (!udata) return ERR_PTR(-EINVAL); ip = kmalloc(sizeof(*ip), GFP_KERNEL); if (!ip) return ERR_PTR(-ENOMEM); size = PAGE_ALIGN(size); spin_lock_bh(&rxe->mmap_offset_lock); if (rxe->mmap_offset == 0) rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA); ip->info.offset = rxe->mmap_offset; rxe->mmap_offset += ALIGN(size, SHMLBA); spin_unlock_bh(&rxe->mmap_offset_lock); INIT_LIST_HEAD(&ip->pending_mmaps); ip->info.size = size; ip->context = container_of(udata, struct uverbs_attr_bundle, driver_udata) ->context; ip->obj = obj; kref_init(&ip->ref); return ip; }
linux-master
drivers/infiniband/sw/rxe/rxe_mmap.c
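rxe_create_mmap_info() hands an offset back to userspace through udata, and rxe_mmap() later matches vm_pgoff << PAGE_SHIFT against it. A hypothetical userspace counterpart might look like the sketch below (the helper name is an assumption, and the offset/size are assumed to come from the driver-specific create-CQ/QP/SRQ response, which is not shown here):

#include <stddef.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <infiniband/verbs.h>

/* hypothetical helper: map a queue buffer exported by the driver */
static void *map_queue_buf(struct ibv_context *ctx, off_t offset, size_t size)
{
	/* offset/size come from the create response (assumption);
	 * rxe_mmap() looks this offset up in rxe->pending_mmaps
	 */
	void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 ctx->cmd_fd, offset);

	return buf == MAP_FAILED ? NULL : buf;
}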
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2016 Intel Corporation.
 */
#define CREATE_TRACE_POINTS
#include "trace.h"
linux-master
drivers/infiniband/sw/rdmavt/trace.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2016 Intel Corporation. */ #include <rdma/ib_mad.h> #include "mad.h" #include "vt.h" /** * rvt_process_mad - process an incoming MAD packet * @ibdev: the infiniband device this packet came in on * @mad_flags: MAD flags * @port_num: the port number this packet came in on, 1 based from ib core * @in_wc: the work completion entry for this packet * @in_grh: the global route header for this packet * @in: the incoming MAD * @in_mad_size: size of the incoming MAD reply * @out: any outgoing MAD reply * @out_mad_size: size of the outgoing MAD reply * @out_mad_pkey_index: unused * * Note that the verbs framework has already done the MAD sanity checks, * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE * MADs. * * This is called by the ib_mad module. * * Return: IB_MAD_RESULT_SUCCESS or error */ int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const struct ib_mad_hdr *in, size_t in_mad_size, struct ib_mad_hdr *out, size_t *out_mad_size, u16 *out_mad_pkey_index) { /* * MAD processing is quite different between hfi1 and qib. Therefore * this is expected to be provided by the driver. Other drivers in the * future may choose to implement this but it should not be made into a * requirement. */ return IB_MAD_RESULT_FAILURE; } static void rvt_send_mad_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *mad_send_wc) { ib_free_send_mad(mad_send_wc->send_buf); } /** * rvt_create_mad_agents - create mad agents * @rdi: rvt dev struct * * If driver needs to be notified of mad agent creation then call back * * Return 0 on success */ int rvt_create_mad_agents(struct rvt_dev_info *rdi) { struct ib_mad_agent *agent; struct rvt_ibport *rvp; int p; int ret; for (p = 0; p < rdi->dparms.nports; p++) { rvp = rdi->ports[p]; agent = ib_register_mad_agent(&rdi->ibdev, p + 1, IB_QPT_SMI, NULL, 0, rvt_send_mad_handler, NULL, NULL, 0); if (IS_ERR(agent)) { ret = PTR_ERR(agent); goto err; } rvp->send_agent = agent; if (rdi->driver_f.notify_create_mad_agent) rdi->driver_f.notify_create_mad_agent(rdi, p); } return 0; err: for (p = 0; p < rdi->dparms.nports; p++) { rvp = rdi->ports[p]; if (rvp->send_agent) { agent = rvp->send_agent; rvp->send_agent = NULL; ib_unregister_mad_agent(agent); if (rdi->driver_f.notify_free_mad_agent) rdi->driver_f.notify_free_mad_agent(rdi, p); } } return ret; } /** * rvt_free_mad_agents - free up mad agents * @rdi: rvt dev struct * * If driver needs notification of mad agent removal make the call back */ void rvt_free_mad_agents(struct rvt_dev_info *rdi) { struct ib_mad_agent *agent; struct rvt_ibport *rvp; int p; for (p = 0; p < rdi->dparms.nports; p++) { rvp = rdi->ports[p]; if (rvp->send_agent) { agent = rvp->send_agent; rvp->send_agent = NULL; ib_unregister_mad_agent(agent); } if (rvp->sm_ah) { rdma_destroy_ah(&rvp->sm_ah->ibah, RDMA_DESTROY_AH_SLEEPABLE); rvp->sm_ah = NULL; } if (rdi->driver_f.notify_free_mad_agent) rdi->driver_f.notify_free_mad_agent(rdi, p); } }
linux-master
drivers/infiniband/sw/rdmavt/mad.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2016 Intel Corporation. */ #include <rdma/rdmavt_qp.h> #include <rdma/ib_hdrs.h> /* * Convert the AETH credit code into the number of credits. */ static const u16 credit_table[31] = { 0, /* 0 */ 1, /* 1 */ 2, /* 2 */ 3, /* 3 */ 4, /* 4 */ 6, /* 5 */ 8, /* 6 */ 12, /* 7 */ 16, /* 8 */ 24, /* 9 */ 32, /* A */ 48, /* B */ 64, /* C */ 96, /* D */ 128, /* E */ 192, /* F */ 256, /* 10 */ 384, /* 11 */ 512, /* 12 */ 768, /* 13 */ 1024, /* 14 */ 1536, /* 15 */ 2048, /* 16 */ 3072, /* 17 */ 4096, /* 18 */ 6144, /* 19 */ 8192, /* 1A */ 12288, /* 1B */ 16384, /* 1C */ 24576, /* 1D */ 32768 /* 1E */ }; /** * rvt_compute_aeth - compute the AETH (syndrome + MSN) * @qp: the queue pair to compute the AETH for * * Returns the AETH. */ __be32 rvt_compute_aeth(struct rvt_qp *qp) { u32 aeth = qp->r_msn & IB_MSN_MASK; if (qp->ibqp.srq) { /* * Shared receive queues don't generate credits. * Set the credit field to the invalid value. */ aeth |= IB_AETH_CREDIT_INVAL << IB_AETH_CREDIT_SHIFT; } else { u32 min, max, x; u32 credits; u32 head; u32 tail; credits = READ_ONCE(qp->r_rq.kwq->count); if (credits == 0) { /* sanity check pointers before trusting them */ if (qp->ip) { head = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->head); tail = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->tail); } else { head = READ_ONCE(qp->r_rq.kwq->head); tail = READ_ONCE(qp->r_rq.kwq->tail); } if (head >= qp->r_rq.size) head = 0; if (tail >= qp->r_rq.size) tail = 0; /* * Compute the number of credits available (RWQEs). * There is a small chance that the pair of reads are * not atomic, which is OK, since the fuzziness is * resolved as further ACKs go out. */ credits = rvt_get_rq_count(&qp->r_rq, head, tail); } /* * Binary search the credit table to find the code to * use. */ min = 0; max = 31; for (;;) { x = (min + max) / 2; if (credit_table[x] == credits) break; if (credit_table[x] > credits) { max = x; } else { if (min == x) break; min = x; } } aeth |= x << IB_AETH_CREDIT_SHIFT; } return cpu_to_be32(aeth); } EXPORT_SYMBOL(rvt_compute_aeth); /** * rvt_get_credit - flush the send work queue of a QP * @qp: the qp who's send work queue to flush * @aeth: the Acknowledge Extended Transport Header * * The QP s_lock should be held. */ void rvt_get_credit(struct rvt_qp *qp, u32 aeth) { struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); u32 credit = (aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK; lockdep_assert_held(&qp->s_lock); /* * If the credit is invalid, we can send * as many packets as we like. Otherwise, we have to * honor the credit field. */ if (credit == IB_AETH_CREDIT_INVAL) { if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) { qp->s_flags |= RVT_S_UNLIMITED_CREDIT; if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) { qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT; rdi->driver_f.schedule_send(qp); } } } else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) { /* Compute new LSN (i.e., MSN + credit) */ credit = (aeth + credit_table[credit]) & IB_MSN_MASK; if (rvt_cmp_msn(credit, qp->s_lsn) > 0) { qp->s_lsn = credit; if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) { qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT; rdi->driver_f.schedule_send(qp); } } } } EXPORT_SYMBOL(rvt_get_credit); /** * rvt_restart_sge - rewind the sge state for a wqe * @ss: the sge state pointer * @wqe: the wqe to rewind * @len: the data length from the start of the wqe in bytes * * Returns the remaining data length. 
*/ u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len) { ss->sge = wqe->sg_list[0]; ss->sg_list = wqe->sg_list + 1; ss->num_sge = wqe->wr.num_sge; ss->total_len = wqe->length; rvt_skip_sge(ss, len, false); return wqe->length - len; } EXPORT_SYMBOL(rvt_restart_sge);
linux-master
drivers/infiniband/sw/rdmavt/rc.c
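rvt_compute_aeth() above binary-searches credit_table for a code that does not overstate the available RWQEs, and rvt_get_credit() reverses it with credit_table[code]. The standalone sketch below mirrors that search outside the kernel (the function name aeth_credit_code is an assumption; the table values are copied from the file above):

#include <stdint.h>

/* same 31-entry table as above: credit code -> number of credits (RWQEs) */
static const uint16_t credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768
};

/* hypothetical helper mirroring the binary search in rvt_compute_aeth():
 * pick the credit code to advertise for 'credits' available RWQEs
 */
static uint32_t aeth_credit_code(uint32_t credits)
{
	uint32_t min = 0, max = 31, x;

	for (;;) {
		x = (min + max) / 2;
		if (credit_table[x] == credits)
			break;
		if (credit_table[x] > credits) {
			max = x;
		} else {
			if (min == x)
				break;
			min = x;
		}
	}
	return x;	/* decoding is simply credit_table[x] */
}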
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2016 Intel Corporation. */ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mm.h> #include <rdma/uverbs_ioctl.h> #include "mmap.h" /** * rvt_mmap_init - init link list and lock for mem map * @rdi: rvt dev struct */ void rvt_mmap_init(struct rvt_dev_info *rdi) { INIT_LIST_HEAD(&rdi->pending_mmaps); spin_lock_init(&rdi->pending_lock); rdi->mmap_offset = PAGE_SIZE; spin_lock_init(&rdi->mmap_offset_lock); } /** * rvt_release_mmap_info - free mmap info structure * @ref: a pointer to the kref within struct rvt_mmap_info */ void rvt_release_mmap_info(struct kref *ref) { struct rvt_mmap_info *ip = container_of(ref, struct rvt_mmap_info, ref); struct rvt_dev_info *rdi = ib_to_rvt(ip->context->device); spin_lock_irq(&rdi->pending_lock); list_del(&ip->pending_mmaps); spin_unlock_irq(&rdi->pending_lock); vfree(ip->obj); kfree(ip); } static void rvt_vma_open(struct vm_area_struct *vma) { struct rvt_mmap_info *ip = vma->vm_private_data; kref_get(&ip->ref); } static void rvt_vma_close(struct vm_area_struct *vma) { struct rvt_mmap_info *ip = vma->vm_private_data; kref_put(&ip->ref, rvt_release_mmap_info); } static const struct vm_operations_struct rvt_vm_ops = { .open = rvt_vma_open, .close = rvt_vma_close, }; /** * rvt_mmap - create a new mmap region * @context: the IB user context of the process making the mmap() call * @vma: the VMA to be initialized * * Return: zero if the mmap is OK. Otherwise, return an errno. */ int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { struct rvt_dev_info *rdi = ib_to_rvt(context->device); unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; unsigned long size = vma->vm_end - vma->vm_start; struct rvt_mmap_info *ip, *pp; int ret = -EINVAL; /* * Search the device's list of objects waiting for a mmap call. * Normally, this list is very short since a call to create a * CQ, QP, or SRQ is soon followed by a call to mmap(). */ spin_lock_irq(&rdi->pending_lock); list_for_each_entry_safe(ip, pp, &rdi->pending_mmaps, pending_mmaps) { /* Only the creator is allowed to mmap the object */ if (context != ip->context || (__u64)offset != ip->offset) continue; /* Don't allow a mmap larger than the object. */ if (size > ip->size) break; list_del_init(&ip->pending_mmaps); spin_unlock_irq(&rdi->pending_lock); ret = remap_vmalloc_range(vma, ip->obj, 0); if (ret) goto done; vma->vm_ops = &rvt_vm_ops; vma->vm_private_data = ip; rvt_vma_open(vma); goto done; } spin_unlock_irq(&rdi->pending_lock); done: return ret; } /** * rvt_create_mmap_info - allocate information for hfi1_mmap * @rdi: rvt dev struct * @size: size in bytes to map * @udata: user data (must be valid!) 
* @obj: opaque pointer to a cq, wq etc * * Return: rvt_mmap struct on success, ERR_PTR on failure */ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size, struct ib_udata *udata, void *obj) { struct rvt_mmap_info *ip; if (!udata) return ERR_PTR(-EINVAL); ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node); if (!ip) return ERR_PTR(-ENOMEM); size = PAGE_ALIGN(size); spin_lock_irq(&rdi->mmap_offset_lock); if (rdi->mmap_offset == 0) rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA); ip->offset = rdi->mmap_offset; rdi->mmap_offset += ALIGN(size, SHMLBA); spin_unlock_irq(&rdi->mmap_offset_lock); INIT_LIST_HEAD(&ip->pending_mmaps); ip->size = size; ip->context = container_of(udata, struct uverbs_attr_bundle, driver_udata) ->context; ip->obj = obj; kref_init(&ip->ref); return ip; } /** * rvt_update_mmap_info - update a mem map * @rdi: rvt dev struct * @ip: mmap info pointer * @size: size to grow by * @obj: opaque pointer to cq, wq, etc. */ void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip, u32 size, void *obj) { size = PAGE_ALIGN(size); spin_lock_irq(&rdi->mmap_offset_lock); if (rdi->mmap_offset == 0) rdi->mmap_offset = PAGE_SIZE; ip->offset = rdi->mmap_offset; rdi->mmap_offset += size; spin_unlock_irq(&rdi->mmap_offset_lock); ip->size = size; ip->obj = obj; }
linux-master
drivers/infiniband/sw/rdmavt/mmap.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2016 - 2020 Intel Corporation. */ #include <linux/hash.h> #include <linux/bitops.h> #include <linux/lockdep.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <rdma/ib_verbs.h> #include <rdma/ib_hdrs.h> #include <rdma/opa_addr.h> #include <rdma/uverbs_ioctl.h> #include "qp.h" #include "vt.h" #include "trace.h" #define RVT_RWQ_COUNT_THRESHOLD 16 static void rvt_rc_timeout(struct timer_list *t); static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, enum ib_qp_type type); /* * Convert the AETH RNR timeout code into the number of microseconds. */ static const u32 ib_rvt_rnr_table[32] = { 655360, /* 00: 655.36 */ 10, /* 01: .01 */ 20, /* 02 .02 */ 30, /* 03: .03 */ 40, /* 04: .04 */ 60, /* 05: .06 */ 80, /* 06: .08 */ 120, /* 07: .12 */ 160, /* 08: .16 */ 240, /* 09: .24 */ 320, /* 0A: .32 */ 480, /* 0B: .48 */ 640, /* 0C: .64 */ 960, /* 0D: .96 */ 1280, /* 0E: 1.28 */ 1920, /* 0F: 1.92 */ 2560, /* 10: 2.56 */ 3840, /* 11: 3.84 */ 5120, /* 12: 5.12 */ 7680, /* 13: 7.68 */ 10240, /* 14: 10.24 */ 15360, /* 15: 15.36 */ 20480, /* 16: 20.48 */ 30720, /* 17: 30.72 */ 40960, /* 18: 40.96 */ 61440, /* 19: 61.44 */ 81920, /* 1A: 81.92 */ 122880, /* 1B: 122.88 */ 163840, /* 1C: 163.84 */ 245760, /* 1D: 245.76 */ 327680, /* 1E: 327.68 */ 491520 /* 1F: 491.52 */ }; /* * Note that it is OK to post send work requests in the SQE and ERR * states; rvt_do_send() will process them and generate error * completions as per IB 1.2 C10-96. */ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = { [IB_QPS_RESET] = 0, [IB_QPS_INIT] = RVT_POST_RECV_OK, [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK, [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK | RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK | RVT_PROCESS_NEXT_SEND_OK, [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK | RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK, [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK | RVT_POST_SEND_OK | RVT_FLUSH_SEND, [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV | RVT_POST_SEND_OK | RVT_FLUSH_SEND, }; EXPORT_SYMBOL(ib_rvt_state_ops); /* platform specific: return the last level cache (llc) size, in KiB */ static int rvt_wss_llc_size(void) { /* assume that the boot CPU value is universal for all CPUs */ return boot_cpu_data.x86_cache_size; } /* platform specific: cacheless copy */ static void cacheless_memcpy(void *dst, void *src, size_t n) { /* * Use the only available X64 cacheless copy. Add a __user cast * to quiet sparse. The src agument is already in the kernel so * there are no security issues. The extra fault recovery machinery * is not invoked. 
*/ __copy_user_nocache(dst, (void __user *)src, n); } void rvt_wss_exit(struct rvt_dev_info *rdi) { struct rvt_wss *wss = rdi->wss; if (!wss) return; /* coded to handle partially initialized and repeat callers */ kfree(wss->entries); wss->entries = NULL; kfree(rdi->wss); rdi->wss = NULL; } /* * rvt_wss_init - Init wss data structures * * Return: 0 on success */ int rvt_wss_init(struct rvt_dev_info *rdi) { unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode; unsigned int wss_threshold = rdi->dparms.wss_threshold; unsigned int wss_clean_period = rdi->dparms.wss_clean_period; long llc_size; long llc_bits; long table_size; long table_bits; struct rvt_wss *wss; int node = rdi->dparms.node; if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) { rdi->wss = NULL; return 0; } rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node); if (!rdi->wss) return -ENOMEM; wss = rdi->wss; /* check for a valid percent range - default to 80 if none or invalid */ if (wss_threshold < 1 || wss_threshold > 100) wss_threshold = 80; /* reject a wildly large period */ if (wss_clean_period > 1000000) wss_clean_period = 256; /* reject a zero period */ if (wss_clean_period == 0) wss_clean_period = 1; /* * Calculate the table size - the next power of 2 larger than the * LLC size. LLC size is in KiB. */ llc_size = rvt_wss_llc_size() * 1024; table_size = roundup_pow_of_two(llc_size); /* one bit per page in rounded up table */ llc_bits = llc_size / PAGE_SIZE; table_bits = table_size / PAGE_SIZE; wss->pages_mask = table_bits - 1; wss->num_entries = table_bits / BITS_PER_LONG; wss->threshold = (llc_bits * wss_threshold) / 100; if (wss->threshold == 0) wss->threshold = 1; wss->clean_period = wss_clean_period; atomic_set(&wss->clean_counter, wss_clean_period); wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries), GFP_KERNEL, node); if (!wss->entries) { rvt_wss_exit(rdi); return -ENOMEM; } return 0; } /* * Advance the clean counter. When the clean period has expired, * clean an entry. * * This is implemented in atomics to avoid locking. Because multiple * variables are involved, it can be racy which can lead to slightly * inaccurate information. Since this is only a heuristic, this is * OK. Any innaccuracies will clean themselves out as the counter * advances. That said, it is unlikely the entry clean operation will * race - the next possible racer will not start until the next clean * period. * * The clean counter is implemented as a decrement to zero. When zero * is reached an entry is cleaned. */ static void wss_advance_clean_counter(struct rvt_wss *wss) { int entry; int weight; unsigned long bits; /* become the cleaner if we decrement the counter to zero */ if (atomic_dec_and_test(&wss->clean_counter)) { /* * Set, not add, the clean period. This avoids an issue * where the counter could decrement below the clean period. * Doing a set can result in lost decrements, slowing the * clean advance. Since this a heuristic, this possible * slowdown is OK. * * An alternative is to loop, advancing the counter by a * clean period until the result is > 0. However, this could * lead to several threads keeping another in the clean loop. * This could be mitigated by limiting the number of times * we stay in the loop. */ atomic_set(&wss->clean_counter, wss->clean_period); /* * Uniquely grab the entry to clean and move to next. * The current entry is always the lower bits of * wss.clean_entry. The table size, wss.num_entries, * is always a power-of-2. 
*/ entry = (atomic_inc_return(&wss->clean_entry) - 1) & (wss->num_entries - 1); /* clear the entry and count the bits */ bits = xchg(&wss->entries[entry], 0); weight = hweight64((u64)bits); /* only adjust the contended total count if needed */ if (weight) atomic_sub(weight, &wss->total_count); } } /* * Insert the given address into the working set array. */ static void wss_insert(struct rvt_wss *wss, void *address) { u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask; u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */ u32 nr = page & (BITS_PER_LONG - 1); if (!test_and_set_bit(nr, &wss->entries[entry])) atomic_inc(&wss->total_count); wss_advance_clean_counter(wss); } /* * Is the working set larger than the threshold? */ static inline bool wss_exceeds_threshold(struct rvt_wss *wss) { return atomic_read(&wss->total_count) >= wss->threshold; } static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map) { unsigned long page = get_zeroed_page(GFP_KERNEL); /* * Free the page if someone raced with us installing it. */ spin_lock(&qpt->lock); if (map->page) free_page(page); else map->page = (void *)page; spin_unlock(&qpt->lock); } /** * init_qpn_table - initialize the QP number table for a device * @rdi: rvt dev struct * @qpt: the QPN table */ static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt) { u32 offset, i; struct rvt_qpn_map *map; int ret = 0; if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start)) return -EINVAL; spin_lock_init(&qpt->lock); qpt->last = rdi->dparms.qpn_start; qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift; /* * Drivers may want some QPs beyond what we need for verbs let them use * our qpn table. No need for two. Lets go ahead and mark the bitmaps * for those. The reserved range must be *after* the range which verbs * will pick from. */ /* Figure out number of bit maps needed before reserved range */ qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE; /* This should always be zero */ offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK; /* Starting with the first reserved bit map */ map = &qpt->map[qpt->nmaps]; rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n", rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end); for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) { if (!map->page) { get_map_page(qpt, map); if (!map->page) { ret = -ENOMEM; break; } } set_bit(offset, map->page); offset++; if (offset == RVT_BITS_PER_PAGE) { /* next page */ qpt->nmaps++; map++; offset = 0; } } return ret; } /** * free_qpn_table - free the QP number table for a device * @qpt: the QPN table */ static void free_qpn_table(struct rvt_qpn_table *qpt) { int i; for (i = 0; i < ARRAY_SIZE(qpt->map); i++) free_page((unsigned long)qpt->map[i].page); } /** * rvt_driver_qp_init - Init driver qp resources * @rdi: rvt dev strucutre * * Return: 0 on success */ int rvt_driver_qp_init(struct rvt_dev_info *rdi) { int i; int ret = -ENOMEM; if (!rdi->dparms.qp_table_size) return -EINVAL; /* * If driver is not doing any QP allocation then make sure it is * providing the necessary QP functions. 
*/ if (!rdi->driver_f.free_all_qps || !rdi->driver_f.qp_priv_alloc || !rdi->driver_f.qp_priv_free || !rdi->driver_f.notify_qp_reset || !rdi->driver_f.notify_restart_rc) return -EINVAL; /* allocate parent object */ rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL, rdi->dparms.node); if (!rdi->qp_dev) return -ENOMEM; /* allocate hash table */ rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size; rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size); rdi->qp_dev->qp_table = kmalloc_array_node(rdi->qp_dev->qp_table_size, sizeof(*rdi->qp_dev->qp_table), GFP_KERNEL, rdi->dparms.node); if (!rdi->qp_dev->qp_table) goto no_qp_table; for (i = 0; i < rdi->qp_dev->qp_table_size; i++) RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL); spin_lock_init(&rdi->qp_dev->qpt_lock); /* initialize qpn map */ if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table)) goto fail_table; spin_lock_init(&rdi->n_qps_lock); return 0; fail_table: kfree(rdi->qp_dev->qp_table); free_qpn_table(&rdi->qp_dev->qpn_table); no_qp_table: kfree(rdi->qp_dev); return ret; } /** * rvt_free_qp_cb - callback function to reset a qp * @qp: the qp to reset * @v: a 64-bit value * * This function resets the qp and removes it from the * qp hash table. */ static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v) { unsigned int *qp_inuse = (unsigned int *)v; struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); /* Reset the qp and remove it from the qp hash list */ rvt_reset_qp(rdi, qp, qp->ibqp.qp_type); /* Increment the qp_inuse count */ (*qp_inuse)++; } /** * rvt_free_all_qps - check for QPs still in use * @rdi: rvt device info structure * * There should not be any QPs still in use. * Free memory for table. * Return the number of QPs still in use. */ static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi) { unsigned int qp_inuse = 0; qp_inuse += rvt_mcast_tree_empty(rdi); rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb); return qp_inuse; } /** * rvt_qp_exit - clean up qps on device exit * @rdi: rvt dev structure * * Check for qp leaks and free resources. */ void rvt_qp_exit(struct rvt_dev_info *rdi) { u32 qps_inuse = rvt_free_all_qps(rdi); if (qps_inuse) rvt_pr_err(rdi, "QP memory leak! %u still in use\n", qps_inuse); kfree(rdi->qp_dev->qp_table); free_qpn_table(&rdi->qp_dev->qpn_table); kfree(rdi->qp_dev); } static inline unsigned mk_qpn(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map, unsigned off) { return (map - qpt->map) * RVT_BITS_PER_PAGE + off; } /** * alloc_qpn - Allocate the next available qpn or zero/one for QP type * IB_QPT_SMI/IB_QPT_GSI * @rdi: rvt device info structure * @qpt: queue pair number table pointer * @type: the QP type * @port_num: IB port number, 1 based, comes from core * @exclude_prefix: prefix of special queue pair number being allocated * * Return: The queue pair number */ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, enum ib_qp_type type, u8 port_num, u8 exclude_prefix) { u32 i, offset, max_scan, qpn; struct rvt_qpn_map *map; u32 ret; u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ? 
RVT_AIP_QPN_MAX : RVT_QPN_MAX; if (rdi->driver_f.alloc_qpn) return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num); if (type == IB_QPT_SMI || type == IB_QPT_GSI) { unsigned n; ret = type == IB_QPT_GSI; n = 1 << (ret + 2 * (port_num - 1)); spin_lock(&qpt->lock); if (qpt->flags & n) ret = -EINVAL; else qpt->flags |= n; spin_unlock(&qpt->lock); goto bail; } qpn = qpt->last + qpt->incr; if (qpn >= max_qpn) qpn = qpt->incr | ((qpt->last & 1) ^ 1); /* offset carries bit 0 */ offset = qpn & RVT_BITS_PER_PAGE_MASK; map = &qpt->map[qpn / RVT_BITS_PER_PAGE]; max_scan = qpt->nmaps - !offset; for (i = 0;;) { if (unlikely(!map->page)) { get_map_page(qpt, map); if (unlikely(!map->page)) break; } do { if (!test_and_set_bit(offset, map->page)) { qpt->last = qpn; ret = qpn; goto bail; } offset += qpt->incr; /* * This qpn might be bogus if offset >= BITS_PER_PAGE. * That is OK. It gets re-assigned below */ qpn = mk_qpn(qpt, map, offset); } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX); /* * In order to keep the number of pages allocated to a * minimum, we scan the all existing pages before increasing * the size of the bitmap table. */ if (++i > max_scan) { if (qpt->nmaps == RVT_QPNMAP_ENTRIES) break; map = &qpt->map[qpt->nmaps++]; /* start at incr with current bit 0 */ offset = qpt->incr | (offset & 1); } else if (map < &qpt->map[qpt->nmaps]) { ++map; /* start at incr with current bit 0 */ offset = qpt->incr | (offset & 1); } else { map = &qpt->map[0]; /* wrap to first map page, invert bit 0 */ offset = qpt->incr | ((offset & 1) ^ 1); } /* there can be no set bits in low-order QoS bits */ WARN_ON(rdi->dparms.qos_shift > 1 && offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1)); qpn = mk_qpn(qpt, map, offset); } ret = -ENOMEM; bail: return ret; } /** * rvt_clear_mr_refs - Drop help mr refs * @qp: rvt qp data structure * @clr_sends: If shoudl clear send side or not */ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends) { unsigned n; struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) rvt_put_ss(&qp->s_rdma_read_sge); rvt_put_ss(&qp->r_sge); if (clr_sends) { while (qp->s_last != qp->s_head) { struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last); rvt_put_qp_swqe(qp, wqe); if (++qp->s_last >= qp->s_size) qp->s_last = 0; smp_wmb(); /* see qp_set_savail */ } if (qp->s_rdma_mr) { rvt_put_mr(qp->s_rdma_mr); qp->s_rdma_mr = NULL; } } for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) { struct rvt_ack_entry *e = &qp->s_ack_queue[n]; if (e->rdma_sge.mr) { rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } } } /** * rvt_swqe_has_lkey - return true if lkey is used by swqe * @wqe: the send wqe * @lkey: the lkey * * Test the swqe for using lkey */ static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey) { int i; for (i = 0; i < wqe->wr.num_sge; i++) { struct rvt_sge *sge = &wqe->sg_list[i]; if (rvt_mr_has_lkey(sge->mr, lkey)) return true; } return false; } /** * rvt_qp_sends_has_lkey - return true is qp sends use lkey * @qp: the rvt_qp * @lkey: the lkey */ static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey) { u32 s_last = qp->s_last; while (s_last != qp->s_head) { struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last); if (rvt_swqe_has_lkey(wqe, lkey)) return true; if (++s_last >= qp->s_size) s_last = 0; } if (qp->s_rdma_mr) if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey)) return true; return false; } /** * rvt_qp_acks_has_lkey - return true if acks have lkey * @qp: the qp * @lkey: the lkey */ static bool 
rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey) { int i; struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) { struct rvt_ack_entry *e = &qp->s_ack_queue[i]; if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey)) return true; } return false; } /** * rvt_qp_mr_clean - clean up remote ops for lkey * @qp: the qp * @lkey: the lkey that is being de-registered * * This routine checks if the lkey is being used by * the qp. * * If so, the qp is put into an error state to elminate * any references from the qp. */ void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey) { bool lastwqe = false; if (qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) /* avoid special QPs */ return; spin_lock_irq(&qp->r_lock); spin_lock(&qp->s_hlock); spin_lock(&qp->s_lock); if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET) goto check_lwqe; if (rvt_ss_has_lkey(&qp->r_sge, lkey) || rvt_qp_sends_has_lkey(qp, lkey) || rvt_qp_acks_has_lkey(qp, lkey)) lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR); check_lwqe: spin_unlock(&qp->s_lock); spin_unlock(&qp->s_hlock); spin_unlock_irq(&qp->r_lock); if (lastwqe) { struct ib_event ev; ev.device = qp->ibqp.device; ev.element.qp = &qp->ibqp; ev.event = IB_EVENT_QP_LAST_WQE_REACHED; qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); } } /** * rvt_remove_qp - remove qp form table * @rdi: rvt dev struct * @qp: qp to remove * * Remove the QP from the table so it can't be found asynchronously by * the receive routine. */ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp) { struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1]; u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits); unsigned long flags; int removed = 1; spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags); if (rcu_dereference_protected(rvp->qp[0], lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) { RCU_INIT_POINTER(rvp->qp[0], NULL); } else if (rcu_dereference_protected(rvp->qp[1], lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) { RCU_INIT_POINTER(rvp->qp[1], NULL); } else { struct rvt_qp *q; struct rvt_qp __rcu **qpp; removed = 0; qpp = &rdi->qp_dev->qp_table[n]; for (; (q = rcu_dereference_protected(*qpp, lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL; qpp = &q->next) { if (q == qp) { RCU_INIT_POINTER(*qpp, rcu_dereference_protected(qp->next, lockdep_is_held(&rdi->qp_dev->qpt_lock))); removed = 1; trace_rvt_qpremove(qp, n); break; } } } spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags); if (removed) { synchronize_rcu(); rvt_put_qp(qp); } } /** * rvt_alloc_rq - allocate memory for user or kernel buffer * @rq: receive queue data structure * @size: number of request queue entries * @node: The NUMA node * @udata: True if user data is available or not false * * Return: If memory allocation failed, return -ENONEM * This function is used by both shared receive * queues and non-shared receive queues to allocate * memory. 
*/ int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node, struct ib_udata *udata) { if (udata) { rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size); if (!rq->wq) goto bail; /* need kwq with no buffers */ rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node); if (!rq->kwq) goto bail; rq->kwq->curr_wq = rq->wq->wq; } else { /* need kwq with buffers */ rq->kwq = vzalloc_node(sizeof(struct rvt_krwq) + size, node); if (!rq->kwq) goto bail; rq->kwq->curr_wq = rq->kwq->wq; } spin_lock_init(&rq->kwq->p_lock); spin_lock_init(&rq->kwq->c_lock); return 0; bail: rvt_free_rq(rq); return -ENOMEM; } /** * rvt_init_qp - initialize the QP state to the reset state * @rdi: rvt dev struct * @qp: the QP to init or reinit * @type: the QP type * * This function is called from both rvt_create_qp() and * rvt_reset_qp(). The difference is that the reset * patch the necessary locks to protect against concurent * access. */ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, enum ib_qp_type type) { qp->remote_qpn = 0; qp->qkey = 0; qp->qp_access_flags = 0; qp->s_flags &= RVT_S_SIGNAL_REQ_WR; qp->s_hdrwords = 0; qp->s_wqe = NULL; qp->s_draining = 0; qp->s_next_psn = 0; qp->s_last_psn = 0; qp->s_sending_psn = 0; qp->s_sending_hpsn = 0; qp->s_psn = 0; qp->r_psn = 0; qp->r_msn = 0; if (type == IB_QPT_RC) { qp->s_state = IB_OPCODE_RC_SEND_LAST; qp->r_state = IB_OPCODE_RC_SEND_LAST; } else { qp->s_state = IB_OPCODE_UC_SEND_LAST; qp->r_state = IB_OPCODE_UC_SEND_LAST; } qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; qp->r_nak_state = 0; qp->r_aflags = 0; qp->r_flags = 0; qp->s_head = 0; qp->s_tail = 0; qp->s_cur = 0; qp->s_acked = 0; qp->s_last = 0; qp->s_ssn = 1; qp->s_lsn = 0; qp->s_mig_state = IB_MIG_MIGRATED; qp->r_head_ack_queue = 0; qp->s_tail_ack_queue = 0; qp->s_acked_ack_queue = 0; qp->s_num_rd_atomic = 0; qp->r_sge.num_sge = 0; atomic_set(&qp->s_reserved_used, 0); } /** * _rvt_reset_qp - initialize the QP state to the reset state * @rdi: rvt dev struct * @qp: the QP to reset * @type: the QP type * * r_lock, s_hlock, and s_lock are required to be held by the caller */ static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, enum ib_qp_type type) __must_hold(&qp->s_lock) __must_hold(&qp->s_hlock) __must_hold(&qp->r_lock) { lockdep_assert_held(&qp->r_lock); lockdep_assert_held(&qp->s_hlock); lockdep_assert_held(&qp->s_lock); if (qp->state != IB_QPS_RESET) { qp->state = IB_QPS_RESET; /* Let drivers flush their waitlist */ rdi->driver_f.flush_qp_waiters(qp); rvt_stop_rc_timers(qp); qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT); spin_unlock(&qp->s_lock); spin_unlock(&qp->s_hlock); spin_unlock_irq(&qp->r_lock); /* Stop the send queue and the retry timer */ rdi->driver_f.stop_send_queue(qp); rvt_del_timers_sync(qp); /* Wait for things to stop */ rdi->driver_f.quiesce_qp(qp); /* take qp out the hash and wait for it to be unused */ rvt_remove_qp(rdi, qp); /* grab the lock b/c it was locked at call time */ spin_lock_irq(&qp->r_lock); spin_lock(&qp->s_hlock); spin_lock(&qp->s_lock); rvt_clear_mr_refs(qp, 1); /* * Let the driver do any tear down or re-init it needs to for * a qp that has been reset */ rdi->driver_f.notify_qp_reset(qp); } rvt_init_qp(rdi, qp, type); lockdep_assert_held(&qp->r_lock); lockdep_assert_held(&qp->s_hlock); lockdep_assert_held(&qp->s_lock); } /** * rvt_reset_qp - initialize the QP state to the reset state * @rdi: the device info * @qp: the QP to reset * @type: the QP type * * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock * before calling 
_rvt_reset_qp(). */ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, enum ib_qp_type type) { spin_lock_irq(&qp->r_lock); spin_lock(&qp->s_hlock); spin_lock(&qp->s_lock); _rvt_reset_qp(rdi, qp, type); spin_unlock(&qp->s_lock); spin_unlock(&qp->s_hlock); spin_unlock_irq(&qp->r_lock); } /** * rvt_free_qpn - Free a qpn from the bit map * @qpt: QP table * @qpn: queue pair number to free */ static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn) { struct rvt_qpn_map *map; if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE) qpn &= RVT_AIP_QP_SUFFIX; map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE; if (map->page) clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page); } /** * get_allowed_ops - Given a QP type return the appropriate allowed OP * @type: valid, supported, QP type */ static u8 get_allowed_ops(enum ib_qp_type type) { return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ? IB_OPCODE_UC : IB_OPCODE_UD; } /** * free_ud_wq_attr - Clean up AH attribute cache for UD QPs * @qp: Valid QP with allowed_ops set * * The rvt_swqe data structure being used is a union, so this is * only valid for UD QPs. */ static void free_ud_wq_attr(struct rvt_qp *qp) { struct rvt_swqe *wqe; int i; for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) { wqe = rvt_get_swqe_ptr(qp, i); kfree(wqe->ud_wr.attr); wqe->ud_wr.attr = NULL; } } /** * alloc_ud_wq_attr - AH attribute cache for UD QPs * @qp: Valid QP with allowed_ops set * @node: Numa node for allocation * * The rvt_swqe data structure being used is a union, so this is * only valid for UD QPs. */ static int alloc_ud_wq_attr(struct rvt_qp *qp, int node) { struct rvt_swqe *wqe; int i; for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) { wqe = rvt_get_swqe_ptr(qp, i); wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr), GFP_KERNEL, node); if (!wqe->ud_wr.attr) { free_ud_wq_attr(qp); return -ENOMEM; } } return 0; } /** * rvt_create_qp - create a queue pair for a device * @ibqp: the queue pair * @init_attr: the attributes of the queue pair * @udata: user data for libibverbs.so * * Queue pair creation is mostly an rvt issue. However, drivers have their own * unique idea of what queue pair numbers mean. For instance there is a reserved * range for PSM. * * Return: 0 on success, otherwise returns an errno. * * Called by the ib_create_qp() core verbs function. */ int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); int ret = -ENOMEM; struct rvt_swqe *swq = NULL; size_t sz; size_t sg_list_sz = 0; struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); void *priv = NULL; size_t sqsize; u8 exclude_prefix = 0; if (!rdi) return -EINVAL; if (init_attr->create_flags & ~IB_QP_CREATE_NETDEV_USE) return -EOPNOTSUPP; if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge || init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr) return -EINVAL; /* Check receive queue parameters if no SRQ is specified. 
*/ if (!init_attr->srq) { if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_recv_sge || init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr) return -EINVAL; if (init_attr->cap.max_send_sge + init_attr->cap.max_send_wr + init_attr->cap.max_recv_sge + init_attr->cap.max_recv_wr == 0) return -EINVAL; } sqsize = init_attr->cap.max_send_wr + 1 + rdi->dparms.reserved_operations; switch (init_attr->qp_type) { case IB_QPT_SMI: case IB_QPT_GSI: if (init_attr->port_num == 0 || init_attr->port_num > ibqp->device->phys_port_cnt) return -EINVAL; fallthrough; case IB_QPT_UC: case IB_QPT_RC: case IB_QPT_UD: sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge); swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node); if (!swq) return -ENOMEM; if (init_attr->srq) { struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq); if (srq->rq.max_sge > 1) sg_list_sz = sizeof(*qp->r_sg_list) * (srq->rq.max_sge - 1); } else if (init_attr->cap.max_recv_sge > 1) sg_list_sz = sizeof(*qp->r_sg_list) * (init_attr->cap.max_recv_sge - 1); qp->r_sg_list = kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node); if (!qp->r_sg_list) goto bail_qp; qp->allowed_ops = get_allowed_ops(init_attr->qp_type); RCU_INIT_POINTER(qp->next, NULL); if (init_attr->qp_type == IB_QPT_RC) { qp->s_ack_queue = kcalloc_node(rvt_max_atomic(rdi), sizeof(*qp->s_ack_queue), GFP_KERNEL, rdi->dparms.node); if (!qp->s_ack_queue) goto bail_qp; } /* initialize timers needed for rc qp */ timer_setup(&qp->s_timer, rvt_rc_timeout, 0); hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); qp->s_rnr_timer.function = rvt_rc_rnr_retry; /* * Driver needs to set up it's private QP structure and do any * initialization that is needed. */ priv = rdi->driver_f.qp_priv_alloc(rdi, qp); if (IS_ERR(priv)) { ret = PTR_ERR(priv); goto bail_qp; } qp->priv = priv; qp->timeout_jiffies = usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL); if (init_attr->srq) { sz = 0; } else { qp->r_rq.size = init_attr->cap.max_recv_wr + 1; qp->r_rq.max_sge = init_attr->cap.max_recv_sge; sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + sizeof(struct rvt_rwqe); ret = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz, rdi->dparms.node, udata); if (ret) goto bail_driver_priv; } /* * ib_create_qp() will initialize qp->ibqp * except for qp->ibqp.qp_num. */ spin_lock_init(&qp->r_lock); spin_lock_init(&qp->s_hlock); spin_lock_init(&qp->s_lock); atomic_set(&qp->refcount, 0); atomic_set(&qp->local_ops_pending, 0); init_waitqueue_head(&qp->wait); INIT_LIST_HEAD(&qp->rspwait); qp->state = IB_QPS_RESET; qp->s_wq = swq; qp->s_size = sqsize; qp->s_avail = init_attr->cap.max_send_wr; qp->s_max_sge = init_attr->cap.max_send_sge; if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) qp->s_flags = RVT_S_SIGNAL_REQ_WR; ret = alloc_ud_wq_attr(qp, rdi->dparms.node); if (ret) goto bail_rq_rvt; if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE) exclude_prefix = RVT_AIP_QP_PREFIX; ret = alloc_qpn(rdi, &rdi->qp_dev->qpn_table, init_attr->qp_type, init_attr->port_num, exclude_prefix); if (ret < 0) goto bail_rq_wq; qp->ibqp.qp_num = ret; if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE) qp->ibqp.qp_num |= RVT_AIP_QP_BASE; qp->port_num = init_attr->port_num; rvt_init_qp(rdi, qp, init_attr->qp_type); if (rdi->driver_f.qp_priv_init) { ret = rdi->driver_f.qp_priv_init(rdi, qp, init_attr); if (ret) goto bail_rq_wq; } break; default: /* Don't support raw QPs */ return -EOPNOTSUPP; } init_attr->cap.max_inline_data = 0; /* * Return the address of the RWQ as the offset to mmap. 
* See rvt_mmap() for details. */ if (udata && udata->outlen >= sizeof(__u64)) { if (!qp->r_rq.wq) { __u64 offset = 0; ret = ib_copy_to_udata(udata, &offset, sizeof(offset)); if (ret) goto bail_qpn; } else { u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz; qp->ip = rvt_create_mmap_info(rdi, s, udata, qp->r_rq.wq); if (IS_ERR(qp->ip)) { ret = PTR_ERR(qp->ip); goto bail_qpn; } ret = ib_copy_to_udata(udata, &qp->ip->offset, sizeof(qp->ip->offset)); if (ret) goto bail_ip; } qp->pid = current->pid; } spin_lock(&rdi->n_qps_lock); if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) { spin_unlock(&rdi->n_qps_lock); ret = -ENOMEM; goto bail_ip; } rdi->n_qps_allocated++; /* * Maintain a busy_jiffies variable that will be added to the timeout * period in mod_retry_timer and add_retry_timer. This busy jiffies * is scaled by the number of rc qps created for the device to reduce * the number of timeouts occurring when there is a large number of * qps. busy_jiffies is incremented every rc qp scaling interval. * The scaling interval is selected based on extensive performance * evaluation of targeted workloads. */ if (init_attr->qp_type == IB_QPT_RC) { rdi->n_rc_qps++; rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL; } spin_unlock(&rdi->n_qps_lock); if (qp->ip) { spin_lock_irq(&rdi->pending_lock); list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps); spin_unlock_irq(&rdi->pending_lock); } return 0; bail_ip: if (qp->ip) kref_put(&qp->ip->ref, rvt_release_mmap_info); bail_qpn: rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); bail_rq_wq: free_ud_wq_attr(qp); bail_rq_rvt: rvt_free_rq(&qp->r_rq); bail_driver_priv: rdi->driver_f.qp_priv_free(rdi, qp); bail_qp: kfree(qp->s_ack_queue); kfree(qp->r_sg_list); vfree(swq); return ret; } /** * rvt_error_qp - put a QP into the error state * @qp: the QP to put into the error state * @err: the receive completion error to signal if a RWQE is active * * Flushes both send and receive work queues. * * Return: true if last WQE event should be generated. * The QP r_lock and s_lock should be held and interrupts disabled. * If we are already in error state, just return. */ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err) { struct ib_wc wc; int ret = 0; struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); lockdep_assert_held(&qp->r_lock); lockdep_assert_held(&qp->s_lock); if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET) goto bail; qp->state = IB_QPS_ERR; if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); del_timer(&qp->s_timer); } if (qp->s_flags & RVT_S_ANY_WAIT_SEND) qp->s_flags &= ~RVT_S_ANY_WAIT_SEND; rdi->driver_f.notify_error_qp(qp); /* Schedule the sending tasklet to drain the send work queue. 
*/ if (READ_ONCE(qp->s_last) != qp->s_head) rdi->driver_f.schedule_send(qp); rvt_clear_mr_refs(qp, 0); memset(&wc, 0, sizeof(wc)); wc.qp = &qp->ibqp; wc.opcode = IB_WC_RECV; if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) { wc.wr_id = qp->r_wr_id; wc.status = err; rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1); } wc.status = IB_WC_WR_FLUSH_ERR; if (qp->r_rq.kwq) { u32 head; u32 tail; struct rvt_rwq *wq = NULL; struct rvt_krwq *kwq = NULL; spin_lock(&qp->r_rq.kwq->c_lock); /* qp->ip used to validate if there is a user buffer mmaped */ if (qp->ip) { wq = qp->r_rq.wq; head = RDMA_READ_UAPI_ATOMIC(wq->head); tail = RDMA_READ_UAPI_ATOMIC(wq->tail); } else { kwq = qp->r_rq.kwq; head = kwq->head; tail = kwq->tail; } /* sanity check pointers before trusting them */ if (head >= qp->r_rq.size) head = 0; if (tail >= qp->r_rq.size) tail = 0; while (tail != head) { wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id; if (++tail >= qp->r_rq.size) tail = 0; rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1); } if (qp->ip) RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail); else kwq->tail = tail; spin_unlock(&qp->r_rq.kwq->c_lock); } else if (qp->ibqp.event_handler) { ret = 1; } bail: return ret; } EXPORT_SYMBOL(rvt_error_qp); /* * Put the QP into the hash table. * The hash table holds a reference to the QP. */ static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp) { struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1]; unsigned long flags; rvt_get_qp(qp); spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags); if (qp->ibqp.qp_num <= 1) { rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp); } else { u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits); qp->next = rdi->qp_dev->qp_table[n]; rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp); trace_rvt_qpinsert(qp, n); } spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags); } /** * rvt_modify_qp - modify the attributes of a queue pair * @ibqp: the queue pair who's attributes we're modifying * @attr: the new attributes * @attr_mask: the mask of attributes to modify * @udata: user data for libibverbs.so * * Return: 0 on success, otherwise returns an errno. */ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); enum ib_qp_state cur_state, new_state; struct ib_event ev; int lastwqe = 0; int mig = 0; int pmtu = 0; /* for gcc warning only */ int opa_ah; if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) return -EOPNOTSUPP; spin_lock_irq(&qp->r_lock); spin_lock(&qp->s_hlock); spin_lock(&qp->s_lock); cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; new_state = attr_mask & IB_QP_STATE ? 
attr->qp_state : cur_state; opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num); if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) goto inval; if (rdi->driver_f.check_modify_qp && rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata)) goto inval; if (attr_mask & IB_QP_AV) { if (opa_ah) { if (rdma_ah_get_dlid(&attr->ah_attr) >= opa_get_mcast_base(OPA_MCAST_NR)) goto inval; } else { if (rdma_ah_get_dlid(&attr->ah_attr) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) goto inval; } if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr)) goto inval; } if (attr_mask & IB_QP_ALT_PATH) { if (opa_ah) { if (rdma_ah_get_dlid(&attr->alt_ah_attr) >= opa_get_mcast_base(OPA_MCAST_NR)) goto inval; } else { if (rdma_ah_get_dlid(&attr->alt_ah_attr) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) goto inval; } if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr)) goto inval; if (attr->alt_pkey_index >= rvt_get_npkeys(rdi)) goto inval; } if (attr_mask & IB_QP_PKEY_INDEX) if (attr->pkey_index >= rvt_get_npkeys(rdi)) goto inval; if (attr_mask & IB_QP_MIN_RNR_TIMER) if (attr->min_rnr_timer > 31) goto inval; if (attr_mask & IB_QP_PORT) if (qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI || attr->port_num == 0 || attr->port_num > ibqp->device->phys_port_cnt) goto inval; if (attr_mask & IB_QP_DEST_QPN) if (attr->dest_qp_num > RVT_QPN_MASK) goto inval; if (attr_mask & IB_QP_RETRY_CNT) if (attr->retry_cnt > 7) goto inval; if (attr_mask & IB_QP_RNR_RETRY) if (attr->rnr_retry > 7) goto inval; /* * Don't allow invalid path_mtu values. OK to set greater * than the active mtu (or even the max_cap, if we have tuned * that to a small mtu. We'll set qp->path_mtu * to the lesser of requested attribute mtu and active, * for packetizing messages. * Note that the QP port has to be set in INIT and MTU in RTR. 
*/ if (attr_mask & IB_QP_PATH_MTU) { pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr); if (pmtu < 0) goto inval; } if (attr_mask & IB_QP_PATH_MIG_STATE) { if (attr->path_mig_state == IB_MIG_REARM) { if (qp->s_mig_state == IB_MIG_ARMED) goto inval; if (new_state != IB_QPS_RTS) goto inval; } else if (attr->path_mig_state == IB_MIG_MIGRATED) { if (qp->s_mig_state == IB_MIG_REARM) goto inval; if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD) goto inval; if (qp->s_mig_state == IB_MIG_ARMED) mig = 1; } else { goto inval; } } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic) goto inval; switch (new_state) { case IB_QPS_RESET: if (qp->state != IB_QPS_RESET) _rvt_reset_qp(rdi, qp, ibqp->qp_type); break; case IB_QPS_RTR: /* Allow event to re-trigger if QP set to RTR more than once */ qp->r_flags &= ~RVT_R_COMM_EST; qp->state = new_state; break; case IB_QPS_SQD: qp->s_draining = qp->s_last != qp->s_cur; qp->state = new_state; break; case IB_QPS_SQE: if (qp->ibqp.qp_type == IB_QPT_RC) goto inval; qp->state = new_state; break; case IB_QPS_ERR: lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); break; default: qp->state = new_state; break; } if (attr_mask & IB_QP_PKEY_INDEX) qp->s_pkey_index = attr->pkey_index; if (attr_mask & IB_QP_PORT) qp->port_num = attr->port_num; if (attr_mask & IB_QP_DEST_QPN) qp->remote_qpn = attr->dest_qp_num; if (attr_mask & IB_QP_SQ_PSN) { qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask; qp->s_psn = qp->s_next_psn; qp->s_sending_psn = qp->s_next_psn; qp->s_last_psn = qp->s_next_psn - 1; qp->s_sending_hpsn = qp->s_last_psn; } if (attr_mask & IB_QP_RQ_PSN) qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask; if (attr_mask & IB_QP_ACCESS_FLAGS) qp->qp_access_flags = attr->qp_access_flags; if (attr_mask & IB_QP_AV) { rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr); qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr); qp->srate_mbps = ib_rate_to_mbps(qp->s_srate); } if (attr_mask & IB_QP_ALT_PATH) { rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr); qp->s_alt_pkey_index = attr->alt_pkey_index; } if (attr_mask & IB_QP_PATH_MIG_STATE) { qp->s_mig_state = attr->path_mig_state; if (mig) { qp->remote_ah_attr = qp->alt_ah_attr; qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr); qp->s_pkey_index = qp->s_alt_pkey_index; } } if (attr_mask & IB_QP_PATH_MTU) { qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu); qp->log_pmtu = ilog2(qp->pmtu); } if (attr_mask & IB_QP_RETRY_CNT) { qp->s_retry_cnt = attr->retry_cnt; qp->s_retry = attr->retry_cnt; } if (attr_mask & IB_QP_RNR_RETRY) { qp->s_rnr_retry_cnt = attr->rnr_retry; qp->s_rnr_retry = attr->rnr_retry; } if (attr_mask & IB_QP_MIN_RNR_TIMER) qp->r_min_rnr_timer = attr->min_rnr_timer; if (attr_mask & IB_QP_TIMEOUT) { qp->timeout = attr->timeout; qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout); } if (attr_mask & IB_QP_QKEY) qp->qkey = attr->qkey; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) qp->r_max_rd_atomic = attr->max_dest_rd_atomic; if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) qp->s_max_rd_atomic = attr->max_rd_atomic; if (rdi->driver_f.modify_qp) rdi->driver_f.modify_qp(qp, attr, attr_mask, udata); spin_unlock(&qp->s_lock); spin_unlock(&qp->s_hlock); spin_unlock_irq(&qp->r_lock); if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) rvt_insert_qp(rdi, qp); if (lastwqe) { ev.device = qp->ibqp.device; ev.element.qp = &qp->ibqp; ev.event = IB_EVENT_QP_LAST_WQE_REACHED; qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); } if (mig) { 
ev.device = qp->ibqp.device; ev.element.qp = &qp->ibqp; ev.event = IB_EVENT_PATH_MIG; qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); } return 0; inval: spin_unlock(&qp->s_lock); spin_unlock(&qp->s_hlock); spin_unlock_irq(&qp->r_lock); return -EINVAL; } /** * rvt_destroy_qp - destroy a queue pair * @ibqp: the queue pair to destroy * @udata: unused by the driver * * Note that this can be called while the QP is actively sending or * receiving! * * Return: 0 on success. */ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) { struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); rvt_reset_qp(rdi, qp, ibqp->qp_type); wait_event(qp->wait, !atomic_read(&qp->refcount)); /* qpn is now available for use again */ rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); spin_lock(&rdi->n_qps_lock); rdi->n_qps_allocated--; if (qp->ibqp.qp_type == IB_QPT_RC) { rdi->n_rc_qps--; rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL; } spin_unlock(&rdi->n_qps_lock); if (qp->ip) kref_put(&qp->ip->ref, rvt_release_mmap_info); kvfree(qp->r_rq.kwq); rdi->driver_f.qp_priv_free(rdi, qp); kfree(qp->s_ack_queue); kfree(qp->r_sg_list); rdma_destroy_ah_attr(&qp->remote_ah_attr); rdma_destroy_ah_attr(&qp->alt_ah_attr); free_ud_wq_attr(qp); vfree(qp->s_wq); return 0; } /** * rvt_query_qp - query an ipbq * @ibqp: IB qp to query * @attr: attr struct to fill in * @attr_mask: attr mask ignored * @init_attr: struct to fill in * * Return: always 0 */ int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr) { struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); attr->qp_state = qp->state; attr->cur_qp_state = attr->qp_state; attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu); attr->path_mig_state = qp->s_mig_state; attr->qkey = qp->qkey; attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask; attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask; attr->dest_qp_num = qp->remote_qpn; attr->qp_access_flags = qp->qp_access_flags; attr->cap.max_send_wr = qp->s_size - 1 - rdi->dparms.reserved_operations; attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1; attr->cap.max_send_sge = qp->s_max_sge; attr->cap.max_recv_sge = qp->r_rq.max_sge; attr->cap.max_inline_data = 0; attr->ah_attr = qp->remote_ah_attr; attr->alt_ah_attr = qp->alt_ah_attr; attr->pkey_index = qp->s_pkey_index; attr->alt_pkey_index = qp->s_alt_pkey_index; attr->en_sqd_async_notify = 0; attr->sq_draining = qp->s_draining; attr->max_rd_atomic = qp->s_max_rd_atomic; attr->max_dest_rd_atomic = qp->r_max_rd_atomic; attr->min_rnr_timer = qp->r_min_rnr_timer; attr->port_num = qp->port_num; attr->timeout = qp->timeout; attr->retry_cnt = qp->s_retry_cnt; attr->rnr_retry = qp->s_rnr_retry_cnt; attr->alt_port_num = rdma_ah_get_port_num(&qp->alt_ah_attr); attr->alt_timeout = qp->alt_timeout; init_attr->event_handler = qp->ibqp.event_handler; init_attr->qp_context = qp->ibqp.qp_context; init_attr->send_cq = qp->ibqp.send_cq; init_attr->recv_cq = qp->ibqp.recv_cq; init_attr->srq = qp->ibqp.srq; init_attr->cap = attr->cap; if (qp->s_flags & RVT_S_SIGNAL_REQ_WR) init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; else init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; init_attr->qp_type = qp->ibqp.qp_type; init_attr->port_num = qp->port_num; return 0; } /** * rvt_post_recv - post a receive on a QP * @ibqp: the QP to post the receive on * @wr: the WR to post * @bad_wr: the first bad WR is put here * * This may be called from interrupt context. 
* * Return: 0 on success otherwise errno */ int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); struct rvt_krwq *wq = qp->r_rq.kwq; unsigned long flags; int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) && !qp->ibqp.srq; /* Check that state is OK to post receive. */ if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) { *bad_wr = wr; return -EINVAL; } for (; wr; wr = wr->next) { struct rvt_rwqe *wqe; u32 next; int i; if ((unsigned)wr->num_sge > qp->r_rq.max_sge) { *bad_wr = wr; return -EINVAL; } spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags); next = wq->head + 1; if (next >= qp->r_rq.size) next = 0; if (next == READ_ONCE(wq->tail)) { spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags); *bad_wr = wr; return -ENOMEM; } if (unlikely(qp_err_flush)) { struct ib_wc wc; memset(&wc, 0, sizeof(wc)); wc.qp = &qp->ibqp; wc.opcode = IB_WC_RECV; wc.wr_id = wr->wr_id; wc.status = IB_WC_WR_FLUSH_ERR; rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1); } else { wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head); wqe->wr_id = wr->wr_id; wqe->num_sge = wr->num_sge; for (i = 0; i < wr->num_sge; i++) { wqe->sg_list[i].addr = wr->sg_list[i].addr; wqe->sg_list[i].length = wr->sg_list[i].length; wqe->sg_list[i].lkey = wr->sg_list[i].lkey; } /* * Make sure queue entry is written * before the head index. */ smp_store_release(&wq->head, next); } spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags); } return 0; } /** * rvt_qp_valid_operation - validate post send wr request * @qp: the qp * @post_parms: the post send table for the driver * @wr: the work request * * The routine validates the operation based on the * validation table an returns the length of the operation * which can extend beyond the ib_send_bw. Operation * dependent flags key atomic operation validation. * * There is an exception for UD qps that validates the pd and * overrides the length to include the additional UD specific * length. * * Returns a negative error or the length of the work request * for building the swqe. */ static inline int rvt_qp_valid_operation( struct rvt_qp *qp, const struct rvt_operation_params *post_parms, const struct ib_send_wr *wr) { int len; if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length) return -EINVAL; if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type))) return -EINVAL; if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) && ibpd_to_rvtpd(qp->ibqp.pd)->user) return -EINVAL; if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE && (wr->num_sge == 0 || wr->sg_list[0].length < sizeof(u64) || wr->sg_list[0].addr & (sizeof(u64) - 1))) return -EINVAL; if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC && !qp->s_max_rd_atomic) return -EINVAL; len = post_parms[wr->opcode].length; /* UD specific */ if (qp->ibqp.qp_type != IB_QPT_UC && qp->ibqp.qp_type != IB_QPT_RC) { if (qp->ibqp.pd != ud_wr(wr)->ah->pd) return -EINVAL; len = sizeof(struct ib_ud_wr); } return len; } /** * rvt_qp_is_avail - determine queue capacity * @qp: the qp * @rdi: the rdmavt device * @reserved_op: is reserved operation * * This assumes the s_hlock is held but the s_last * qp variable is uncontrolled. * * For non reserved operations, the qp->s_avail * may be changed. * * The return value is zero or a -ENOMEM. 
*/ static inline int rvt_qp_is_avail( struct rvt_qp *qp, struct rvt_dev_info *rdi, bool reserved_op) { u32 slast; u32 avail; u32 reserved_used; /* see rvt_qp_wqe_unreserve() */ smp_mb__before_atomic(); if (unlikely(reserved_op)) { /* see rvt_qp_wqe_unreserve() */ reserved_used = atomic_read(&qp->s_reserved_used); if (reserved_used >= rdi->dparms.reserved_operations) return -ENOMEM; return 0; } /* non-reserved operations */ if (likely(qp->s_avail)) return 0; /* See rvt_qp_complete_swqe() */ slast = smp_load_acquire(&qp->s_last); if (qp->s_head >= slast) avail = qp->s_size - (qp->s_head - slast); else avail = slast - qp->s_head; reserved_used = atomic_read(&qp->s_reserved_used); avail = avail - 1 - (rdi->dparms.reserved_operations - reserved_used); /* insure we don't assign a negative s_avail */ if ((s32)avail <= 0) return -ENOMEM; qp->s_avail = avail; if (WARN_ON(qp->s_avail > (qp->s_size - 1 - rdi->dparms.reserved_operations))) rvt_pr_err(rdi, "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u", qp->ibqp.qp_num, qp->s_size, qp->s_avail, qp->s_head, qp->s_tail, qp->s_cur, qp->s_acked, qp->s_last); return 0; } /** * rvt_post_one_wr - post one RC, UC, or UD send work request * @qp: the QP to post on * @wr: the work request to send * @call_send: kick the send engine into gear */ static int rvt_post_one_wr(struct rvt_qp *qp, const struct ib_send_wr *wr, bool *call_send) { struct rvt_swqe *wqe; u32 next; int i; int j; int acc; struct rvt_lkey_table *rkt; struct rvt_pd *pd; struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); u8 log_pmtu; int ret; size_t cplen; bool reserved_op; int local_ops_delayed = 0; BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE)); /* IB spec says that num_sge == 0 is OK. */ if (unlikely(wr->num_sge > qp->s_max_sge)) return -EINVAL; ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr); if (ret < 0) return ret; cplen = ret; /* * Local operations include fast register and local invalidate. * Fast register needs to be processed immediately because the * registered lkey may be used by following work requests and the * lkey needs to be valid at the time those requests are posted. * Local invalidate can be processed immediately if fencing is * not required and no previous local invalidate ops are pending. * Signaled local operations that have been processed immediately * need to have requests with "completion only" flags set posted * to the send queue in order to generate completions. 
*/ if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) { switch (wr->opcode) { case IB_WR_REG_MR: ret = rvt_fast_reg_mr(qp, reg_wr(wr)->mr, reg_wr(wr)->key, reg_wr(wr)->access); if (ret || !(wr->send_flags & IB_SEND_SIGNALED)) return ret; break; case IB_WR_LOCAL_INV: if ((wr->send_flags & IB_SEND_FENCE) || atomic_read(&qp->local_ops_pending)) { local_ops_delayed = 1; } else { ret = rvt_invalidate_rkey( qp, wr->ex.invalidate_rkey); if (ret || !(wr->send_flags & IB_SEND_SIGNALED)) return ret; } break; default: return -EINVAL; } } reserved_op = rdi->post_parms[wr->opcode].flags & RVT_OPERATION_USE_RESERVE; /* check for avail */ ret = rvt_qp_is_avail(qp, rdi, reserved_op); if (ret) return ret; next = qp->s_head + 1; if (next >= qp->s_size) next = 0; rkt = &rdi->lkey_table; pd = ibpd_to_rvtpd(qp->ibqp.pd); wqe = rvt_get_swqe_ptr(qp, qp->s_head); /* cplen has length from above */ memcpy(&wqe->ud_wr, wr, cplen); wqe->length = 0; j = 0; if (wr->num_sge) { struct rvt_sge *last_sge = NULL; acc = wr->opcode >= IB_WR_RDMA_READ ? IB_ACCESS_LOCAL_WRITE : 0; for (i = 0; i < wr->num_sge; i++) { u32 length = wr->sg_list[i].length; if (length == 0) continue; ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge, &wr->sg_list[i], acc); if (unlikely(ret < 0)) goto bail_inval_free; wqe->length += length; if (ret) last_sge = &wqe->sg_list[j]; j += ret; } wqe->wr.num_sge = j; } /* * Calculate and set SWQE PSN values prior to handing it off * to the driver's check routine. This give the driver the * opportunity to adjust PSN values based on internal checks. */ log_pmtu = qp->log_pmtu; if (qp->allowed_ops == IB_OPCODE_UD) { struct rvt_ah *ah = rvt_get_swqe_ah(wqe); log_pmtu = ah->log_pmtu; rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr); } if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) { if (local_ops_delayed) atomic_inc(&qp->local_ops_pending); else wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY; wqe->ssn = 0; wqe->psn = 0; wqe->lpsn = 0; } else { wqe->ssn = qp->s_ssn++; wqe->psn = qp->s_next_psn; wqe->lpsn = wqe->psn + (wqe->length ? ((wqe->length - 1) >> log_pmtu) : 0); } /* general part of wqe valid - allow for driver checks */ if (rdi->driver_f.setup_wqe) { ret = rdi->driver_f.setup_wqe(qp, wqe, call_send); if (ret < 0) goto bail_inval_free_ref; } if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) qp->s_next_psn = wqe->lpsn + 1; if (unlikely(reserved_op)) { wqe->wr.send_flags |= RVT_SEND_RESERVE_USED; rvt_qp_wqe_reserve(qp, wqe); } else { wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED; qp->s_avail--; } trace_rvt_post_one_wr(qp, wqe, wr->num_sge); smp_wmb(); /* see request builders */ qp->s_head = next; return 0; bail_inval_free_ref: if (qp->allowed_ops == IB_OPCODE_UD) rdma_destroy_ah_attr(wqe->ud_wr.attr); bail_inval_free: /* release mr holds */ while (j) { struct rvt_sge *sge = &wqe->sg_list[--j]; rvt_put_mr(sge->mr); } return ret; } /** * rvt_post_send - post a send on a QP * @ibqp: the QP to post the send on * @wr: the list of work requests to post * @bad_wr: the first bad WR is put here * * This may be called from interrupt context. * * Return: 0 on success else errno */ int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); unsigned long flags = 0; bool call_send; unsigned nreq = 0; int err = 0; spin_lock_irqsave(&qp->s_hlock, flags); /* * Ensure QP state is such that we can send. 
If not bail out early, * there is no need to do this every time we post a send. */ if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) { spin_unlock_irqrestore(&qp->s_hlock, flags); return -EINVAL; } /* * If the send queue is empty, and we only have a single WR then just go * ahead and kick the send engine into gear. Otherwise we will always * just schedule the send to happen later. */ call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next; for (; wr; wr = wr->next) { err = rvt_post_one_wr(qp, wr, &call_send); if (unlikely(err)) { *bad_wr = wr; goto bail; } nreq++; } bail: spin_unlock_irqrestore(&qp->s_hlock, flags); if (nreq) { /* * Only call do_send if there is exactly one packet, and the * driver said it was ok. */ if (nreq == 1 && call_send) rdi->driver_f.do_send(qp); else rdi->driver_f.schedule_send_no_lock(qp); } return err; } /** * rvt_post_srq_recv - post a receive on a shared receive queue * @ibsrq: the SRQ to post the receive on * @wr: the list of work requests to post * @bad_wr: A pointer to the first WR to cause a problem is put here * * This may be called from interrupt context. * * Return: 0 on success else errno */ int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); struct rvt_krwq *wq; unsigned long flags; for (; wr; wr = wr->next) { struct rvt_rwqe *wqe; u32 next; int i; if ((unsigned)wr->num_sge > srq->rq.max_sge) { *bad_wr = wr; return -EINVAL; } spin_lock_irqsave(&srq->rq.kwq->p_lock, flags); wq = srq->rq.kwq; next = wq->head + 1; if (next >= srq->rq.size) next = 0; if (next == READ_ONCE(wq->tail)) { spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags); *bad_wr = wr; return -ENOMEM; } wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head); wqe->wr_id = wr->wr_id; wqe->num_sge = wr->num_sge; for (i = 0; i < wr->num_sge; i++) { wqe->sg_list[i].addr = wr->sg_list[i].addr; wqe->sg_list[i].length = wr->sg_list[i].length; wqe->sg_list[i].lkey = wr->sg_list[i].lkey; } /* Make sure queue entry is written before the head index. */ smp_store_release(&wq->head, next); spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags); } return 0; } /* * rvt used the internal kernel struct as part of its ABI, for now make sure * the kernel struct does not change layout. FIXME: rvt should never cast the * user struct to a kernel struct. */ static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge) { BUILD_BUG_ON(offsetof(struct ib_sge, addr) != offsetof(struct rvt_wqe_sge, addr)); BUILD_BUG_ON(offsetof(struct ib_sge, length) != offsetof(struct rvt_wqe_sge, length)); BUILD_BUG_ON(offsetof(struct ib_sge, lkey) != offsetof(struct rvt_wqe_sge, lkey)); return (struct ib_sge *)sge; } /* * Validate a RWQE and fill in the SGE state. * Return 1 if OK. */ static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe) { int i, j, ret; struct ib_wc wc; struct rvt_lkey_table *rkt; struct rvt_pd *pd; struct rvt_sge_state *ss; struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); rkt = &rdi->lkey_table; pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); ss = &qp->r_sge; ss->sg_list = qp->r_sg_list; qp->r_len = 0; for (i = j = 0; i < wqe->num_sge; i++) { if (wqe->sg_list[i].length == 0) continue; /* Check LKEY */ ret = rvt_lkey_ok(rkt, pd, j ? 
&ss->sg_list[j - 1] : &ss->sge, NULL, rvt_cast_sge(&wqe->sg_list[i]), IB_ACCESS_LOCAL_WRITE); if (unlikely(ret <= 0)) goto bad_lkey; qp->r_len += wqe->sg_list[i].length; j++; } ss->num_sge = j; ss->total_len = qp->r_len; return 1; bad_lkey: while (j) { struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; rvt_put_mr(sge->mr); } ss->num_sge = 0; memset(&wc, 0, sizeof(wc)); wc.wr_id = wqe->wr_id; wc.status = IB_WC_LOC_PROT_ERR; wc.opcode = IB_WC_RECV; wc.qp = &qp->ibqp; /* Signal solicited completion event. */ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1); return 0; } /** * get_rvt_head - get head indices of the circular buffer * @rq: data structure for request queue entry * @ip: the QP * * Return - head index value */ static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip) { u32 head; if (ip) head = RDMA_READ_UAPI_ATOMIC(rq->wq->head); else head = rq->kwq->head; return head; } /** * rvt_get_rwqe - copy the next RWQE into the QP's RWQE * @qp: the QP * @wr_id_only: update qp->r_wr_id only, not qp->r_sge * * Return -1 if there is a local error, 0 if no RWQE is available, * otherwise return 1. * * Can be called from interrupt level. */ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only) { unsigned long flags; struct rvt_rq *rq; struct rvt_krwq *kwq = NULL; struct rvt_rwq *wq; struct rvt_srq *srq; struct rvt_rwqe *wqe; void (*handler)(struct ib_event *, void *); u32 tail; u32 head; int ret; void *ip = NULL; if (qp->ibqp.srq) { srq = ibsrq_to_rvtsrq(qp->ibqp.srq); handler = srq->ibsrq.event_handler; rq = &srq->rq; ip = srq->ip; } else { srq = NULL; handler = NULL; rq = &qp->r_rq; ip = qp->ip; } spin_lock_irqsave(&rq->kwq->c_lock, flags); if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { ret = 0; goto unlock; } kwq = rq->kwq; if (ip) { wq = rq->wq; tail = RDMA_READ_UAPI_ATOMIC(wq->tail); } else { tail = kwq->tail; } /* Validate tail before using it since it is user writable. */ if (tail >= rq->size) tail = 0; if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) { head = get_rvt_head(rq, ip); kwq->count = rvt_get_rq_count(rq, head, tail); } if (unlikely(kwq->count == 0)) { ret = 0; goto unlock; } /* Make sure entry is read after the count is read. */ smp_rmb(); wqe = rvt_get_rwqe_ptr(rq, tail); /* * Even though we update the tail index in memory, the verbs * consumer is not supposed to post more entries until a * completion is generated. */ if (++tail >= rq->size) tail = 0; if (ip) RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail); else kwq->tail = tail; if (!wr_id_only && !init_sge(qp, wqe)) { ret = -1; goto unlock; } qp->r_wr_id = wqe->wr_id; kwq->count--; ret = 1; set_bit(RVT_R_WRID_VALID, &qp->r_aflags); if (handler) { /* * Validate head pointer value and compute * the number of remaining WQEs. 
*/ if (kwq->count < srq->limit) { kwq->count = rvt_get_rq_count(rq, get_rvt_head(rq, ip), tail); if (kwq->count < srq->limit) { struct ib_event ev; srq->limit = 0; spin_unlock_irqrestore(&rq->kwq->c_lock, flags); ev.device = qp->ibqp.device; ev.element.srq = qp->ibqp.srq; ev.event = IB_EVENT_SRQ_LIMIT_REACHED; handler(&ev, srq->ibsrq.srq_context); goto bail; } } } unlock: spin_unlock_irqrestore(&rq->kwq->c_lock, flags); bail: return ret; } EXPORT_SYMBOL(rvt_get_rwqe); /** * rvt_comm_est - handle trap with QP established * @qp: the QP */ void rvt_comm_est(struct rvt_qp *qp) { qp->r_flags |= RVT_R_COMM_EST; if (qp->ibqp.event_handler) { struct ib_event ev; ev.device = qp->ibqp.device; ev.element.qp = &qp->ibqp; ev.event = IB_EVENT_COMM_EST; qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); } } EXPORT_SYMBOL(rvt_comm_est); void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err) { unsigned long flags; int lastwqe; spin_lock_irqsave(&qp->s_lock, flags); lastwqe = rvt_error_qp(qp, err); spin_unlock_irqrestore(&qp->s_lock, flags); if (lastwqe) { struct ib_event ev; ev.device = qp->ibqp.device; ev.element.qp = &qp->ibqp; ev.event = IB_EVENT_QP_LAST_WQE_REACHED; qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); } } EXPORT_SYMBOL(rvt_rc_error); /* * rvt_rnr_tbl_to_usec - return index into ib_rvt_rnr_table * @index - the index * return usec from an index into ib_rvt_rnr_table */ unsigned long rvt_rnr_tbl_to_usec(u32 index) { return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)]; } EXPORT_SYMBOL(rvt_rnr_tbl_to_usec); static inline unsigned long rvt_aeth_to_usec(u32 aeth) { return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK]; } /* * rvt_add_retry_timer_ext - add/start a retry timer * @qp - the QP * @shift - timeout shift to wait for multiple packets * add a retry timer on the QP */ void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift) { struct ib_qp *ibqp = &qp->ibqp; struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); lockdep_assert_held(&qp->s_lock); qp->s_flags |= RVT_S_TIMER; /* 4.096 usec. * (1 << qp->timeout) */ qp->s_timer.expires = jiffies + rdi->busy_jiffies + (qp->timeout_jiffies << shift); add_timer(&qp->s_timer); } EXPORT_SYMBOL(rvt_add_retry_timer_ext); /** * rvt_add_rnr_timer - add/start an rnr timer on the QP * @qp: the QP * @aeth: aeth of RNR timeout, simulated aeth for loopback */ void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth) { u32 to; lockdep_assert_held(&qp->s_lock); qp->s_flags |= RVT_S_WAIT_RNR; to = rvt_aeth_to_usec(aeth); trace_rvt_rnrnak_add(qp, to); hrtimer_start(&qp->s_rnr_timer, ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED); } EXPORT_SYMBOL(rvt_add_rnr_timer); /** * rvt_stop_rc_timers - stop all timers * @qp: the QP * stop any pending timers */ void rvt_stop_rc_timers(struct rvt_qp *qp) { lockdep_assert_held(&qp->s_lock); /* Remove QP from all timers */ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); del_timer(&qp->s_timer); hrtimer_try_to_cancel(&qp->s_rnr_timer); } } EXPORT_SYMBOL(rvt_stop_rc_timers); /** * rvt_stop_rnr_timer - stop an rnr timer * @qp: the QP * * stop an rnr timer and return if the timer * had been pending. 
*/ static void rvt_stop_rnr_timer(struct rvt_qp *qp) { lockdep_assert_held(&qp->s_lock); /* Remove QP from rnr timer */ if (qp->s_flags & RVT_S_WAIT_RNR) { qp->s_flags &= ~RVT_S_WAIT_RNR; trace_rvt_rnrnak_stop(qp, 0); } } /** * rvt_del_timers_sync - wait for any timeout routines to exit * @qp: the QP */ void rvt_del_timers_sync(struct rvt_qp *qp) { del_timer_sync(&qp->s_timer); hrtimer_cancel(&qp->s_rnr_timer); } EXPORT_SYMBOL(rvt_del_timers_sync); /* * This is called from s_timer for missing responses. */ static void rvt_rc_timeout(struct timer_list *t) { struct rvt_qp *qp = from_timer(qp, t, s_timer); struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); unsigned long flags; spin_lock_irqsave(&qp->r_lock, flags); spin_lock(&qp->s_lock); if (qp->s_flags & RVT_S_TIMER) { struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1]; qp->s_flags &= ~RVT_S_TIMER; rvp->n_rc_timeouts++; del_timer(&qp->s_timer); trace_rvt_rc_timeout(qp, qp->s_last_psn + 1); if (rdi->driver_f.notify_restart_rc) rdi->driver_f.notify_restart_rc(qp, qp->s_last_psn + 1, 1); rdi->driver_f.schedule_send(qp); } spin_unlock(&qp->s_lock); spin_unlock_irqrestore(&qp->r_lock, flags); } /* * This is called from s_timer for RNR timeouts. */ enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t) { struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer); struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); unsigned long flags; spin_lock_irqsave(&qp->s_lock, flags); rvt_stop_rnr_timer(qp); trace_rvt_rnrnak_timeout(qp, 0); rdi->driver_f.schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); return HRTIMER_NORESTART; } EXPORT_SYMBOL(rvt_rc_rnr_retry); /** * rvt_qp_iter_init - initial for QP iteration * @rdi: rvt devinfo * @v: u64 value * @cb: user-defined callback * * This returns an iterator suitable for iterating QPs * in the system. * * The @cb is a user-defined callback and @v is a 64-bit * value passed to and relevant for processing in the * @cb. An example use case would be to alter QP processing * based on criteria not part of the rvt_qp. * * Use cases that require memory allocation to succeed * must preallocate appropriately. * * Return: a pointer to an rvt_qp_iter or NULL */ struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi, u64 v, void (*cb)(struct rvt_qp *qp, u64 v)) { struct rvt_qp_iter *i; i = kzalloc(sizeof(*i), GFP_KERNEL); if (!i) return NULL; i->rdi = rdi; /* number of special QPs (SMI/GSI) for device */ i->specials = rdi->ibdev.phys_port_cnt * 2; i->v = v; i->cb = cb; return i; } EXPORT_SYMBOL(rvt_qp_iter_init); /** * rvt_qp_iter_next - return the next QP in iter * @iter: the iterator * * Fine grained QP iterator suitable for use * with debugfs seq_file mechanisms. * * Updates iter->qp with the current QP when the return * value is 0. * * Return: 0 - iter->qp is valid 1 - no more QPs */ int rvt_qp_iter_next(struct rvt_qp_iter *iter) __must_hold(RCU) { int n = iter->n; int ret = 1; struct rvt_qp *pqp = iter->qp; struct rvt_qp *qp; struct rvt_dev_info *rdi = iter->rdi; /* * The approach is to consider the special qps * as additional table entries before the * real hash table. Since the qp code sets * the qp->next hash link to NULL, this works just fine. 
* * iter->specials is 2 * # ports * * n = 0..iter->specials is the special qp indices * * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are * the potential hash bucket entries * */ for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) { if (pqp) { qp = rcu_dereference(pqp->next); } else { if (n < iter->specials) { struct rvt_ibport *rvp; int pidx; pidx = n % rdi->ibdev.phys_port_cnt; rvp = rdi->ports[pidx]; qp = rcu_dereference(rvp->qp[n & 1]); } else { qp = rcu_dereference( rdi->qp_dev->qp_table[ (n - iter->specials)]); } } pqp = qp; if (qp) { iter->qp = qp; iter->n = n; return 0; } } return ret; } EXPORT_SYMBOL(rvt_qp_iter_next); /** * rvt_qp_iter - iterate all QPs * @rdi: rvt devinfo * @v: a 64-bit value * @cb: a callback * * This provides a way for iterating all QPs. * * The @cb is a user-defined callback and @v is a 64-bit * value passed to and relevant for processing in the * cb. An example use case would be to alter QP processing * based on criteria not part of the rvt_qp. * * The code has an internal iterator to simplify * non seq_file use cases. */ void rvt_qp_iter(struct rvt_dev_info *rdi, u64 v, void (*cb)(struct rvt_qp *qp, u64 v)) { int ret; struct rvt_qp_iter i = { .rdi = rdi, .specials = rdi->ibdev.phys_port_cnt * 2, .v = v, .cb = cb }; rcu_read_lock(); do { ret = rvt_qp_iter_next(&i); if (!ret) { rvt_get_qp(i.qp); rcu_read_unlock(); i.cb(i.qp, i.v); rcu_read_lock(); rvt_put_qp(i.qp); } } while (!ret); rcu_read_unlock(); } EXPORT_SYMBOL(rvt_qp_iter); /* * This should be called with s_lock and r_lock held. */ void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, enum ib_wc_status status) { u32 old_last, last; struct rvt_dev_info *rdi; if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND)) return; rdi = ib_to_rvt(qp->ibqp.device); old_last = qp->s_last; trace_rvt_qp_send_completion(qp, wqe, old_last); last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode], status); if (qp->s_acked == old_last) qp->s_acked = last; if (qp->s_cur == old_last) qp->s_cur = last; if (qp->s_tail == old_last) qp->s_tail = last; if (qp->state == IB_QPS_SQD && last == qp->s_cur) qp->s_draining = 0; } EXPORT_SYMBOL(rvt_send_complete); /** * rvt_copy_sge - copy data to SGE memory * @qp: associated QP * @ss: the SGE state * @data: the data to copy * @length: the length of the data * @release: boolean to release MR * @copy_last: do a separate copy of the last 8 bytes */ void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss, void *data, u32 length, bool release, bool copy_last) { struct rvt_sge *sge = &ss->sge; int i; bool in_last = false; bool cacheless_copy = false; struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); struct rvt_wss *wss = rdi->wss; unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode; if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) { cacheless_copy = length >= PAGE_SIZE; } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) { if (length >= PAGE_SIZE) { /* * NOTE: this *assumes*: * o The first vaddr is the dest. * o If multiple pages, then vaddr is sequential. 
*/ wss_insert(wss, sge->vaddr); if (length >= (2 * PAGE_SIZE)) wss_insert(wss, (sge->vaddr + PAGE_SIZE)); cacheless_copy = wss_exceeds_threshold(wss); } else { wss_advance_clean_counter(wss); } } if (copy_last) { if (length > 8) { length -= 8; } else { copy_last = false; in_last = true; } } again: while (length) { u32 len = rvt_get_sge_length(sge, length); WARN_ON_ONCE(len == 0); if (unlikely(in_last)) { /* enforce byte transfer ordering */ for (i = 0; i < len; i++) ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i]; } else if (cacheless_copy) { cacheless_memcpy(sge->vaddr, data, len); } else { memcpy(sge->vaddr, data, len); } rvt_update_sge(ss, len, release); data += len; length -= len; } if (copy_last) { copy_last = false; in_last = true; length = 8; goto again; } } EXPORT_SYMBOL(rvt_copy_sge); static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp, struct rvt_qp *sqp) { rvp->n_pkt_drops++; /* * For RC, the requester would timeout and retry so * shortcut the timeouts and just signal too many retries. */ return sqp->ibqp.qp_type == IB_QPT_RC ? IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS; } /** * rvt_ruc_loopback - handle UC and RC loopback requests * @sqp: the sending QP * * This is called from rvt_do_send() to forward a WQE addressed to the same HFI * Note that although we are single threaded due to the send engine, we still * have to protect against post_send(). We don't have to worry about * receive interrupts since this is a connected protocol and all packets * will pass through here. */ void rvt_ruc_loopback(struct rvt_qp *sqp) { struct rvt_ibport *rvp = NULL; struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device); struct rvt_qp *qp; struct rvt_swqe *wqe; struct rvt_sge *sge; unsigned long flags; struct ib_wc wc; u64 sdata; atomic64_t *maddr; enum ib_wc_status send_status; bool release; int ret; bool copy_last = false; int local_ops = 0; rcu_read_lock(); rvp = rdi->ports[sqp->port_num - 1]; /* * Note that we check the responder QP state after * checking the requester's state. */ qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp, sqp->remote_qpn); spin_lock_irqsave(&sqp->s_lock, flags); /* Return if we are already busy processing a work request. */ if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) || !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND)) goto unlock; sqp->s_flags |= RVT_S_BUSY; again: if (sqp->s_last == READ_ONCE(sqp->s_head)) goto clr_busy; wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); /* Return if it is not OK to start a new work request. */ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) { if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND)) goto clr_busy; /* We are in the error state, flush the work request. */ send_status = IB_WC_WR_FLUSH_ERR; goto flush_send; } /* * We can rely on the entry not changing without the s_lock * being held until we update s_last. * We increment s_cur to indicate s_last is in progress. 
*/ if (sqp->s_last == sqp->s_cur) { if (++sqp->s_cur >= sqp->s_size) sqp->s_cur = 0; } spin_unlock_irqrestore(&sqp->s_lock, flags); if (!qp) { send_status = loopback_qp_drop(rvp, sqp); goto serr_no_r_lock; } spin_lock_irqsave(&qp->r_lock, flags); if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) || qp->ibqp.qp_type != sqp->ibqp.qp_type) { send_status = loopback_qp_drop(rvp, sqp); goto serr; } memset(&wc, 0, sizeof(wc)); send_status = IB_WC_SUCCESS; release = true; sqp->s_sge.sge = wqe->sg_list[0]; sqp->s_sge.sg_list = wqe->sg_list + 1; sqp->s_sge.num_sge = wqe->wr.num_sge; sqp->s_len = wqe->length; switch (wqe->wr.opcode) { case IB_WR_REG_MR: goto send_comp; case IB_WR_LOCAL_INV: if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) { if (rvt_invalidate_rkey(sqp, wqe->wr.ex.invalidate_rkey)) send_status = IB_WC_LOC_PROT_ERR; local_ops = 1; } goto send_comp; case IB_WR_SEND_WITH_INV: case IB_WR_SEND_WITH_IMM: case IB_WR_SEND: ret = rvt_get_rwqe(qp, false); if (ret < 0) goto op_err; if (!ret) goto rnr_nak; if (wqe->length > qp->r_len) goto inv_err; switch (wqe->wr.opcode) { case IB_WR_SEND_WITH_INV: if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) { wc.wc_flags = IB_WC_WITH_INVALIDATE; wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey; } break; case IB_WR_SEND_WITH_IMM: wc.wc_flags = IB_WC_WITH_IMM; wc.ex.imm_data = wqe->wr.ex.imm_data; break; default: break; } break; case IB_WR_RDMA_WRITE_WITH_IMM: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) goto inv_err; wc.wc_flags = IB_WC_WITH_IMM; wc.ex.imm_data = wqe->wr.ex.imm_data; ret = rvt_get_rwqe(qp, true); if (ret < 0) goto op_err; if (!ret) goto rnr_nak; /* skip copy_last set and qp_access_flags recheck */ goto do_write; case IB_WR_RDMA_WRITE: copy_last = rvt_is_user_qp(qp); if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) goto inv_err; do_write: if (wqe->length == 0) break; if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length, wqe->rdma_wr.remote_addr, wqe->rdma_wr.rkey, IB_ACCESS_REMOTE_WRITE))) goto acc_err; qp->r_sge.sg_list = NULL; qp->r_sge.num_sge = 1; qp->r_sge.total_len = wqe->length; break; case IB_WR_RDMA_READ: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) goto inv_err; if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, wqe->rdma_wr.remote_addr, wqe->rdma_wr.rkey, IB_ACCESS_REMOTE_READ))) goto acc_err; release = false; sqp->s_sge.sg_list = NULL; sqp->s_sge.num_sge = 1; qp->r_sge.sge = wqe->sg_list[0]; qp->r_sge.sg_list = wqe->sg_list + 1; qp->r_sge.num_sge = wqe->wr.num_sge; qp->r_sge.total_len = wqe->length; break; case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) goto inv_err; if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1))) goto inv_err; if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), wqe->atomic_wr.remote_addr, wqe->atomic_wr.rkey, IB_ACCESS_REMOTE_ATOMIC))) goto acc_err; /* Perform atomic OP and save result. */ maddr = (atomic64_t *)qp->r_sge.sge.vaddr; sdata = wqe->atomic_wr.compare_add; *(u64 *)sqp->s_sge.sge.vaddr = (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? 
(u64)atomic64_add_return(sdata, maddr) - sdata : (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr, sdata, wqe->atomic_wr.swap); rvt_put_mr(qp->r_sge.sge.mr); qp->r_sge.num_sge = 0; goto send_comp; default: send_status = IB_WC_LOC_QP_OP_ERR; goto serr; } sge = &sqp->s_sge.sge; while (sqp->s_len) { u32 len = rvt_get_sge_length(sge, sqp->s_len); WARN_ON_ONCE(len == 0); rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, release, copy_last); rvt_update_sge(&sqp->s_sge, len, !release); sqp->s_len -= len; } if (release) rvt_put_ss(&qp->r_sge); if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) goto send_comp; if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM) wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; else wc.opcode = IB_WC_RECV; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; wc.byte_len = wqe->length; wc.qp = &qp->ibqp; wc.src_qp = qp->remote_qpn; wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX; wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr); wc.port_num = 1; /* Signal completion event if the solicited bit is set. */ rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED); send_comp: spin_unlock_irqrestore(&qp->r_lock, flags); spin_lock_irqsave(&sqp->s_lock, flags); rvp->n_loop_pkts++; flush_send: sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; spin_lock(&sqp->r_lock); rvt_send_complete(sqp, wqe, send_status); spin_unlock(&sqp->r_lock); if (local_ops) { atomic_dec(&sqp->local_ops_pending); local_ops = 0; } goto again; rnr_nak: /* Handle RNR NAK */ if (qp->ibqp.qp_type == IB_QPT_UC) goto send_comp; rvp->n_rnr_naks++; /* * Note: we don't need the s_lock held since the BUSY flag * makes this single threaded. */ if (sqp->s_rnr_retry == 0) { send_status = IB_WC_RNR_RETRY_EXC_ERR; goto serr; } if (sqp->s_rnr_retry_cnt < 7) sqp->s_rnr_retry--; spin_unlock_irqrestore(&qp->r_lock, flags); spin_lock_irqsave(&sqp->s_lock, flags); if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK)) goto clr_busy; rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer << IB_AETH_CREDIT_SHIFT); goto clr_busy; op_err: send_status = IB_WC_REM_OP_ERR; wc.status = IB_WC_LOC_QP_OP_ERR; goto err; inv_err: send_status = sqp->ibqp.qp_type == IB_QPT_RC ? IB_WC_REM_INV_REQ_ERR : IB_WC_SUCCESS; wc.status = IB_WC_LOC_QP_OP_ERR; goto err; acc_err: send_status = IB_WC_REM_ACCESS_ERR; wc.status = IB_WC_LOC_PROT_ERR; err: /* responder goes to error state */ rvt_rc_error(qp, wc.status); serr: spin_unlock_irqrestore(&qp->r_lock, flags); serr_no_r_lock: spin_lock_irqsave(&sqp->s_lock, flags); spin_lock(&sqp->r_lock); rvt_send_complete(sqp, wqe, send_status); spin_unlock(&sqp->r_lock); if (sqp->ibqp.qp_type == IB_QPT_RC) { int lastwqe; spin_lock(&sqp->r_lock); lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); spin_unlock(&sqp->r_lock); sqp->s_flags &= ~RVT_S_BUSY; spin_unlock_irqrestore(&sqp->s_lock, flags); if (lastwqe) { struct ib_event ev; ev.device = sqp->ibqp.device; ev.element.qp = &sqp->ibqp; ev.event = IB_EVENT_QP_LAST_WQE_REACHED; sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context); } goto done; } clr_busy: sqp->s_flags &= ~RVT_S_BUSY; unlock: spin_unlock_irqrestore(&sqp->s_lock, flags); done: rcu_read_unlock(); } EXPORT_SYMBOL(rvt_ruc_loopback);
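/*
 * Illustrative sketches -- not part of qp.c above.  They assume only the
 * rdmavt interfaces already shown in this file and are meant as minimal
 * usage examples under those assumptions, not as kernel API additions.
 *
 * Sketch 1: walking every QP on a device with rvt_qp_iter().  The
 * callback runs with a reference held on the QP and outside the RCU
 * read-side critical section.  dump_one_qp(), dump_all_qps() and the
 * pr_info() format are hypothetical names used only for this example.
 */
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

static void dump_one_qp(struct rvt_qp *qp, u64 v)
{
	/* v is the opaque caller-supplied value passed to rvt_qp_iter() */
	pr_info("dump %llu: qpn 0x%x state %d\n",
		(unsigned long long)v, qp->ibqp.qp_num, qp->state);
}

static void dump_all_qps(struct rvt_dev_info *rdi)
{
	/* special QPs (SMI/GSI) are visited first, then the QP hash table */
	rvt_qp_iter(rdi, 0, dump_one_qp);
}

/*
 * Sketch 2: a standalone restatement of the circular send-queue
 * accounting performed by rvt_qp_is_avail().  sq_free_slots() is a
 * hypothetical helper; 'resv' corresponds to reserved_operations minus
 * s_reserved_used in the kernel code.
 */
static inline unsigned int sq_free_slots(unsigned int size, unsigned int head,
					 unsigned int last, unsigned int resv)
{
	unsigned int avail;

	if (head >= last)		/* in-flight entries do not wrap */
		avail = size - (head - last);
	else				/* in-flight entries wrap around */
		avail = last - head;

	/* one slot always stays unused; 'resv' slots are held in reserve */
	if (avail <= resv + 1)
		return 0;
	return avail - 1 - resv;
}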
linux-master
drivers/infiniband/sw/rdmavt/qp.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2016 Intel Corporation. */ #include <linux/slab.h> #include <linux/sched.h> #include <linux/rculist.h> #include <rdma/rdma_vt.h> #include <rdma/rdmavt_qp.h> #include "mcast.h" /** * rvt_driver_mcast_init - init resources for multicast * @rdi: rvt dev struct * * This is per device that registers with rdmavt */ void rvt_driver_mcast_init(struct rvt_dev_info *rdi) { /* * Anything that needs setup for multicast on a per driver or per rdi * basis should be done in here. */ spin_lock_init(&rdi->n_mcast_grps_lock); } /** * rvt_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct * @qp: the QP to link */ static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp) { struct rvt_mcast_qp *mqp; mqp = kmalloc(sizeof(*mqp), GFP_KERNEL); if (!mqp) goto bail; mqp->qp = qp; rvt_get_qp(qp); bail: return mqp; } static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp) { struct rvt_qp *qp = mqp->qp; /* Notify hfi1_destroy_qp() if it is waiting. */ rvt_put_qp(qp); kfree(mqp); } /** * rvt_mcast_alloc - allocate the multicast GID structure * @mgid: the multicast GID * @lid: the muilticast LID (host order) * * A list of QPs will be attached to this structure. */ static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid, u16 lid) { struct rvt_mcast *mcast; mcast = kzalloc(sizeof(*mcast), GFP_KERNEL); if (!mcast) goto bail; mcast->mcast_addr.mgid = *mgid; mcast->mcast_addr.lid = lid; INIT_LIST_HEAD(&mcast->qp_list); init_waitqueue_head(&mcast->wait); atomic_set(&mcast->refcount, 0); bail: return mcast; } static void rvt_mcast_free(struct rvt_mcast *mcast) { struct rvt_mcast_qp *p, *tmp; list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) rvt_mcast_qp_free(p); kfree(mcast); } /** * rvt_mcast_find - search the global table for the given multicast GID/LID * NOTE: It is valid to have 1 MLID with multiple MGIDs. It is not valid * to have 1 MGID with multiple MLIDs. * @ibp: the IB port structure * @mgid: the multicast GID to search for * @lid: the multicast LID portion of the multicast address (host order) * * The caller is responsible for decrementing the reference count if found. * * Return: NULL if not found. */ struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid, u16 lid) { struct rb_node *n; unsigned long flags; struct rvt_mcast *found = NULL; spin_lock_irqsave(&ibp->lock, flags); n = ibp->mcast_tree.rb_node; while (n) { int ret; struct rvt_mcast *mcast; mcast = rb_entry(n, struct rvt_mcast, rb_node); ret = memcmp(mgid->raw, mcast->mcast_addr.mgid.raw, sizeof(*mgid)); if (ret < 0) { n = n->rb_left; } else if (ret > 0) { n = n->rb_right; } else { /* MGID/MLID must match */ if (mcast->mcast_addr.lid == lid) { atomic_inc(&mcast->refcount); found = mcast; } break; } } spin_unlock_irqrestore(&ibp->lock, flags); return found; } EXPORT_SYMBOL(rvt_mcast_find); /* * rvt_mcast_add - insert mcast GID into table and attach QP struct * @mcast: the mcast GID table * @mqp: the QP to attach * * Return: zero if both were added. Return EEXIST if the GID was already in * the table but the QP was added. Return ESRCH if the QP was already * attached and neither structure was added. Return EINVAL if the MGID was * found, but the MLID did NOT match. 
*/ static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp, struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp) { struct rb_node **n = &ibp->mcast_tree.rb_node; struct rb_node *pn = NULL; int ret; spin_lock_irq(&ibp->lock); while (*n) { struct rvt_mcast *tmcast; struct rvt_mcast_qp *p; pn = *n; tmcast = rb_entry(pn, struct rvt_mcast, rb_node); ret = memcmp(mcast->mcast_addr.mgid.raw, tmcast->mcast_addr.mgid.raw, sizeof(mcast->mcast_addr.mgid)); if (ret < 0) { n = &pn->rb_left; continue; } if (ret > 0) { n = &pn->rb_right; continue; } if (tmcast->mcast_addr.lid != mcast->mcast_addr.lid) { ret = EINVAL; goto bail; } /* Search the QP list to see if this is already there. */ list_for_each_entry_rcu(p, &tmcast->qp_list, list) { if (p->qp == mqp->qp) { ret = ESRCH; goto bail; } } if (tmcast->n_attached == rdi->dparms.props.max_mcast_qp_attach) { ret = ENOMEM; goto bail; } tmcast->n_attached++; list_add_tail_rcu(&mqp->list, &tmcast->qp_list); ret = EEXIST; goto bail; } spin_lock(&rdi->n_mcast_grps_lock); if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) { spin_unlock(&rdi->n_mcast_grps_lock); ret = ENOMEM; goto bail; } rdi->n_mcast_grps_allocated++; spin_unlock(&rdi->n_mcast_grps_lock); mcast->n_attached++; list_add_tail_rcu(&mqp->list, &mcast->qp_list); atomic_inc(&mcast->refcount); rb_link_node(&mcast->rb_node, pn, n); rb_insert_color(&mcast->rb_node, &ibp->mcast_tree); ret = 0; bail: spin_unlock_irq(&ibp->lock); return ret; } /** * rvt_attach_mcast - attach a qp to a multicast group * @ibqp: Infiniband qp * @gid: multicast guid * @lid: multicast lid * * Return: 0 on success */ int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1]; struct rvt_mcast *mcast; struct rvt_mcast_qp *mqp; int ret = -ENOMEM; if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) return -EINVAL; /* * Allocate data structures since its better to do this outside of * spin locks and it will most likely be needed. */ mcast = rvt_mcast_alloc(gid, lid); if (!mcast) return -ENOMEM; mqp = rvt_mcast_qp_alloc(qp); if (!mqp) goto bail_mcast; switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) { case ESRCH: /* Neither was used: OK to attach the same QP twice. */ ret = 0; goto bail_mqp; case EEXIST: /* The mcast wasn't used */ ret = 0; goto bail_mcast; case ENOMEM: /* Exceeded the maximum number of mcast groups. */ ret = -ENOMEM; goto bail_mqp; case EINVAL: /* Invalid MGID/MLID pair */ ret = -EINVAL; goto bail_mqp; default: break; } return 0; bail_mqp: rvt_mcast_qp_free(mqp); bail_mcast: rvt_mcast_free(mcast); return ret; } /** * rvt_detach_mcast - remove a qp from a multicast group * @ibqp: Infiniband qp * @gid: multicast guid * @lid: multicast lid * * Return: 0 on success */ int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1]; struct rvt_mcast *mcast = NULL; struct rvt_mcast_qp *p, *tmp, *delp = NULL; struct rb_node *n; int last = 0; int ret = 0; if (ibqp->qp_num <= 1) return -EINVAL; spin_lock_irq(&ibp->lock); /* Find the GID in the mcast table. 
*/ n = ibp->mcast_tree.rb_node; while (1) { if (!n) { spin_unlock_irq(&ibp->lock); return -EINVAL; } mcast = rb_entry(n, struct rvt_mcast, rb_node); ret = memcmp(gid->raw, mcast->mcast_addr.mgid.raw, sizeof(*gid)); if (ret < 0) { n = n->rb_left; } else if (ret > 0) { n = n->rb_right; } else { /* MGID/MLID must match */ if (mcast->mcast_addr.lid != lid) { spin_unlock_irq(&ibp->lock); return -EINVAL; } break; } } /* Search the QP list. */ list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) { if (p->qp != qp) continue; /* * We found it, so remove it, but don't poison the forward * link until we are sure there are no list walkers. */ list_del_rcu(&p->list); mcast->n_attached--; delp = p; /* If this was the last attached QP, remove the GID too. */ if (list_empty(&mcast->qp_list)) { rb_erase(&mcast->rb_node, &ibp->mcast_tree); last = 1; } break; } spin_unlock_irq(&ibp->lock); /* QP not attached */ if (!delp) return -EINVAL; /* * Wait for any list walkers to finish before freeing the * list element. */ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1); rvt_mcast_qp_free(delp); if (last) { atomic_dec(&mcast->refcount); wait_event(mcast->wait, !atomic_read(&mcast->refcount)); rvt_mcast_free(mcast); spin_lock_irq(&rdi->n_mcast_grps_lock); rdi->n_mcast_grps_allocated--; spin_unlock_irq(&rdi->n_mcast_grps_lock); } return 0; } /** * rvt_mcast_tree_empty - determine if any qps are attached to any mcast group * @rdi: rvt dev struct * * Return: in use count */ int rvt_mcast_tree_empty(struct rvt_dev_info *rdi) { int i; int in_use = 0; for (i = 0; i < rdi->dparms.nports; i++) if (rdi->ports[i]->mcast_tree.rb_node) in_use++; return in_use; }
linux-master
drivers/infiniband/sw/rdmavt/mcast.c
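Illustrative usage sketch (not part of the kernel tree above): the qp_list/refcount protocol in mcast.c is easiest to see from the consumer side, so the snippet below shows how a driver receive path might call rvt_mcast_find() and replicate a packet to every attached QP, the way hfi1 and qib do. demo_deliver_to_qp() and demo_mcast_deliver() are hypothetical names and the header choices are indicative only; the reference drop and wake_up() mirror what rvt_detach_mcast() waits for.

#include <linux/rculist.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

/* Hypothetical per-QP hook; a real driver would run its receive path here. */
static void demo_deliver_to_qp(struct rvt_qp *qp, struct ib_wc *wc)
{
}

static void demo_mcast_deliver(struct rvt_ibport *ibp, union ib_gid *mgid,
			       u16 lid, struct ib_wc *wc)
{
	struct rvt_mcast *mcast;
	struct rvt_mcast_qp *p;

	mcast = rvt_mcast_find(ibp, mgid, lid);	/* takes a reference */
	if (!mcast)
		return;		/* no group attached for this GID/LID pair */

	rcu_read_lock();
	list_for_each_entry_rcu(p, &mcast->qp_list, list)
		demo_deliver_to_qp(p->qp, wc);
	rcu_read_unlock();

	/* Notify rvt_detach_mcast() if it is waiting on this reference. */
	if (atomic_dec_return(&mcast->refcount) <= 1)
		wake_up(&mcast->wait);
}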
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2016 - 2018 Intel Corporation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/dma-mapping.h> #include "vt.h" #include "cq.h" #include "trace.h" #define RVT_UVERBS_ABI_VERSION 2 MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("RDMA Verbs Transport Library"); static int __init rvt_init(void) { int ret = rvt_driver_cq_init(); if (ret) pr_err("Error in driver CQ init.\n"); return ret; } module_init(rvt_init); static void __exit rvt_cleanup(void) { rvt_cq_exit(); } module_exit(rvt_cleanup); /** * rvt_alloc_device - allocate rdi * @size: how big of a structure to allocate * @nports: number of ports to allocate array slots for * * Use IB core device alloc to allocate space for the rdi which is assumed to be * inside of the ib_device. Any extra space that drivers require should be * included in size. * * We also allocate a port array based on the number of ports. * * Return: pointer to allocated rdi */ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports) { struct rvt_dev_info *rdi; rdi = container_of(_ib_alloc_device(size), struct rvt_dev_info, ibdev); if (!rdi) return rdi; rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL); if (!rdi->ports) ib_dealloc_device(&rdi->ibdev); return rdi; } EXPORT_SYMBOL(rvt_alloc_device); /** * rvt_dealloc_device - deallocate rdi * @rdi: structure to free * * Free a structure allocated with rvt_alloc_device() */ void rvt_dealloc_device(struct rvt_dev_info *rdi) { kfree(rdi->ports); ib_dealloc_device(&rdi->ibdev); } EXPORT_SYMBOL(rvt_dealloc_device); static int rvt_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *uhw) { struct rvt_dev_info *rdi = ib_to_rvt(ibdev); if (uhw->inlen || uhw->outlen) return -EINVAL; /* * Return rvt_dev_info.dparms.props contents */ *props = rdi->dparms.props; return 0; } static int rvt_get_numa_node(struct ib_device *ibdev) { struct rvt_dev_info *rdi = ib_to_rvt(ibdev); return rdi->dparms.node; } static int rvt_modify_device(struct ib_device *device, int device_modify_mask, struct ib_device_modify *device_modify) { /* * There is currently no need to supply this based on qib and hfi1. * Future drivers may need to implement this though. 
*/ return -EOPNOTSUPP; } /** * rvt_query_port - Passes the query port call to the driver * @ibdev: Verbs IB dev * @port_num: port number, 1 based from ib core * @props: structure to hold returned properties * * Return: 0 on success */ static int rvt_query_port(struct ib_device *ibdev, u32 port_num, struct ib_port_attr *props) { struct rvt_dev_info *rdi = ib_to_rvt(ibdev); struct rvt_ibport *rvp; u32 port_index = ibport_num_to_idx(ibdev, port_num); rvp = rdi->ports[port_index]; /* props being zeroed by the caller, avoid zeroing it here */ props->sm_lid = rvp->sm_lid; props->sm_sl = rvp->sm_sl; props->port_cap_flags = rvp->port_cap_flags; props->max_msg_sz = 0x80000000; props->pkey_tbl_len = rvt_get_npkeys(rdi); props->bad_pkey_cntr = rvp->pkey_violations; props->qkey_viol_cntr = rvp->qkey_violations; props->subnet_timeout = rvp->subnet_timeout; props->init_type_reply = 0; /* Populate the remaining ib_port_attr elements */ return rdi->driver_f.query_port_state(rdi, port_num, props); } /** * rvt_modify_port - modify port * @ibdev: Verbs IB dev * @port_num: Port number, 1 based from ib core * @port_modify_mask: How to change the port * @props: Structure to fill in * * Return: 0 on success */ static int rvt_modify_port(struct ib_device *ibdev, u32 port_num, int port_modify_mask, struct ib_port_modify *props) { struct rvt_dev_info *rdi = ib_to_rvt(ibdev); struct rvt_ibport *rvp; int ret = 0; u32 port_index = ibport_num_to_idx(ibdev, port_num); rvp = rdi->ports[port_index]; if (port_modify_mask & IB_PORT_OPA_MASK_CHG) { rvp->port_cap3_flags |= props->set_port_cap_mask; rvp->port_cap3_flags &= ~props->clr_port_cap_mask; } else { rvp->port_cap_flags |= props->set_port_cap_mask; rvp->port_cap_flags &= ~props->clr_port_cap_mask; } if (props->set_port_cap_mask || props->clr_port_cap_mask) rdi->driver_f.cap_mask_chg(rdi, port_num); if (port_modify_mask & IB_PORT_SHUTDOWN) ret = rdi->driver_f.shut_down_port(rdi, port_num); if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) rvp->qkey_violations = 0; return ret; } /** * rvt_query_pkey - Return a pkey from the table at a given index * @ibdev: Verbs IB dev * @port_num: Port number, 1 based from ib core * @index: Index into pkey table * @pkey: returned pkey from the port pkey table * * Return: 0 on failure pkey otherwise */ static int rvt_query_pkey(struct ib_device *ibdev, u32 port_num, u16 index, u16 *pkey) { /* * Driver will be responsible for keeping rvt_dev_info.pkey_table up to * date. This function will just return that value. There is no need to * lock, if a stale value is read and sent to the user so be it there is * no way to protect against that anyway. */ struct rvt_dev_info *rdi = ib_to_rvt(ibdev); u32 port_index; port_index = ibport_num_to_idx(ibdev, port_num); if (index >= rvt_get_npkeys(rdi)) return -EINVAL; *pkey = rvt_get_pkey(rdi, port_index, index); return 0; } /** * rvt_query_gid - Return a gid from the table * @ibdev: Verbs IB dev * @port_num: Port number, 1 based from ib core * @guid_index: Index in table * @gid: Gid to return * * Return: 0 on success */ static int rvt_query_gid(struct ib_device *ibdev, u32 port_num, int guid_index, union ib_gid *gid) { struct rvt_dev_info *rdi; struct rvt_ibport *rvp; u32 port_index; /* * Driver is responsible for updating the guid table. Which will be used * to craft the return value. This will work similar to how query_pkey() * is being done. 
*/ port_index = ibport_num_to_idx(ibdev, port_num); rdi = ib_to_rvt(ibdev); rvp = rdi->ports[port_index]; gid->global.subnet_prefix = rvp->gid_prefix; return rdi->driver_f.get_guid_be(rdi, rvp, guid_index, &gid->global.interface_id); } /** * rvt_alloc_ucontext - Allocate a user context * @uctx: Verbs context * @udata: User data allocated */ static int rvt_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) { return 0; } /** * rvt_dealloc_ucontext - Free a user context * @context: Unused */ static void rvt_dealloc_ucontext(struct ib_ucontext *context) { return; } static int rvt_get_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { struct rvt_dev_info *rdi = ib_to_rvt(ibdev); struct ib_port_attr attr; int err; immutable->core_cap_flags = rdi->dparms.core_cap_flags; err = ib_query_port(ibdev, port_num, &attr); if (err) return err; immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; immutable->max_mad_size = rdi->dparms.max_mad_size; return 0; } enum { MISC, QUERY_DEVICE, MODIFY_DEVICE, QUERY_PORT, MODIFY_PORT, QUERY_PKEY, QUERY_GID, ALLOC_UCONTEXT, DEALLOC_UCONTEXT, GET_PORT_IMMUTABLE, CREATE_QP, MODIFY_QP, DESTROY_QP, QUERY_QP, POST_SEND, POST_RECV, POST_SRQ_RECV, CREATE_AH, DESTROY_AH, MODIFY_AH, QUERY_AH, CREATE_SRQ, MODIFY_SRQ, DESTROY_SRQ, QUERY_SRQ, ATTACH_MCAST, DETACH_MCAST, GET_DMA_MR, REG_USER_MR, DEREG_MR, ALLOC_MR, MAP_MR_SG, ALLOC_FMR, MAP_PHYS_FMR, UNMAP_FMR, DEALLOC_FMR, MMAP, CREATE_CQ, DESTROY_CQ, POLL_CQ, REQ_NOTFIY_CQ, RESIZE_CQ, ALLOC_PD, DEALLOC_PD, _VERB_IDX_MAX /* Must always be last! */ }; static const struct ib_device_ops rvt_dev_ops = { .uverbs_abi_ver = RVT_UVERBS_ABI_VERSION, .alloc_mr = rvt_alloc_mr, .alloc_pd = rvt_alloc_pd, .alloc_ucontext = rvt_alloc_ucontext, .attach_mcast = rvt_attach_mcast, .create_ah = rvt_create_ah, .create_cq = rvt_create_cq, .create_qp = rvt_create_qp, .create_srq = rvt_create_srq, .create_user_ah = rvt_create_ah, .dealloc_pd = rvt_dealloc_pd, .dealloc_ucontext = rvt_dealloc_ucontext, .dereg_mr = rvt_dereg_mr, .destroy_ah = rvt_destroy_ah, .destroy_cq = rvt_destroy_cq, .destroy_qp = rvt_destroy_qp, .destroy_srq = rvt_destroy_srq, .detach_mcast = rvt_detach_mcast, .get_dma_mr = rvt_get_dma_mr, .get_numa_node = rvt_get_numa_node, .get_port_immutable = rvt_get_port_immutable, .map_mr_sg = rvt_map_mr_sg, .mmap = rvt_mmap, .modify_ah = rvt_modify_ah, .modify_device = rvt_modify_device, .modify_port = rvt_modify_port, .modify_qp = rvt_modify_qp, .modify_srq = rvt_modify_srq, .poll_cq = rvt_poll_cq, .post_recv = rvt_post_recv, .post_send = rvt_post_send, .post_srq_recv = rvt_post_srq_recv, .query_ah = rvt_query_ah, .query_device = rvt_query_device, .query_gid = rvt_query_gid, .query_pkey = rvt_query_pkey, .query_port = rvt_query_port, .query_qp = rvt_query_qp, .query_srq = rvt_query_srq, .reg_user_mr = rvt_reg_user_mr, .req_notify_cq = rvt_req_notify_cq, .resize_cq = rvt_resize_cq, INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_cq, rvt_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_qp, rvt_qp, ibqp), INIT_RDMA_OBJ_SIZE(ib_srq, rvt_srq, ibsrq), INIT_RDMA_OBJ_SIZE(ib_ucontext, rvt_ucontext, ibucontext), }; static noinline int check_support(struct rvt_dev_info *rdi, int verb) { switch (verb) { case MISC: /* * These functions are not part of verbs specifically but are * required for rdmavt to function. 
*/ if ((!rdi->ibdev.ops.port_groups) || (!rdi->driver_f.get_pci_dev)) return -EINVAL; break; case MODIFY_DEVICE: /* * rdmavt does not support modify device currently drivers must * provide. */ if (!rdi->ibdev.ops.modify_device) return -EOPNOTSUPP; break; case QUERY_PORT: if (!rdi->ibdev.ops.query_port) if (!rdi->driver_f.query_port_state) return -EINVAL; break; case MODIFY_PORT: if (!rdi->ibdev.ops.modify_port) if (!rdi->driver_f.cap_mask_chg || !rdi->driver_f.shut_down_port) return -EINVAL; break; case QUERY_GID: if (!rdi->ibdev.ops.query_gid) if (!rdi->driver_f.get_guid_be) return -EINVAL; break; case CREATE_QP: if (!rdi->ibdev.ops.create_qp) if (!rdi->driver_f.qp_priv_alloc || !rdi->driver_f.qp_priv_free || !rdi->driver_f.notify_qp_reset || !rdi->driver_f.flush_qp_waiters || !rdi->driver_f.stop_send_queue || !rdi->driver_f.quiesce_qp) return -EINVAL; break; case MODIFY_QP: if (!rdi->ibdev.ops.modify_qp) if (!rdi->driver_f.notify_qp_reset || !rdi->driver_f.schedule_send || !rdi->driver_f.get_pmtu_from_attr || !rdi->driver_f.flush_qp_waiters || !rdi->driver_f.stop_send_queue || !rdi->driver_f.quiesce_qp || !rdi->driver_f.notify_error_qp || !rdi->driver_f.mtu_from_qp || !rdi->driver_f.mtu_to_path_mtu) return -EINVAL; break; case DESTROY_QP: if (!rdi->ibdev.ops.destroy_qp) if (!rdi->driver_f.qp_priv_free || !rdi->driver_f.notify_qp_reset || !rdi->driver_f.flush_qp_waiters || !rdi->driver_f.stop_send_queue || !rdi->driver_f.quiesce_qp) return -EINVAL; break; case POST_SEND: if (!rdi->ibdev.ops.post_send) if (!rdi->driver_f.schedule_send || !rdi->driver_f.do_send || !rdi->post_parms) return -EINVAL; break; } return 0; } /** * rvt_register_device - register a driver * @rdi: main dev structure for all of rdmavt operations * * It is up to drivers to allocate the rdi and fill in the appropriate * information. * * Return: 0 on success otherwise an errno. */ int rvt_register_device(struct rvt_dev_info *rdi) { int ret = 0, i; if (!rdi) return -EINVAL; /* * Check to ensure drivers have setup the required helpers for the verbs * they want rdmavt to handle */ for (i = 0; i < _VERB_IDX_MAX; i++) if (check_support(rdi, i)) { pr_err("Driver support req not met at %d\n", i); return -EINVAL; } ib_set_device_ops(&rdi->ibdev, &rvt_dev_ops); /* Once we get past here we can use rvt_pr macros and tracepoints */ trace_rvt_dbg(rdi, "Driver attempting registration"); rvt_mmap_init(rdi); /* Queue Pairs */ ret = rvt_driver_qp_init(rdi); if (ret) { pr_err("Error in driver QP init.\n"); return -EINVAL; } /* Address Handle */ spin_lock_init(&rdi->n_ahs_lock); rdi->n_ahs_allocated = 0; /* Shared Receive Queue */ rvt_driver_srq_init(rdi); /* Multicast */ rvt_driver_mcast_init(rdi); /* Mem Region */ ret = rvt_driver_mr_init(rdi); if (ret) { pr_err("Error in driver MR init.\n"); goto bail_no_mr; } /* Memory Working Set Size */ ret = rvt_wss_init(rdi); if (ret) { rvt_pr_err(rdi, "Error in WSS init.\n"); goto bail_mr; } /* Completion queues */ spin_lock_init(&rdi->n_cqs_lock); /* Protection Domain */ spin_lock_init(&rdi->n_pds_lock); rdi->n_pds_allocated = 0; /* * There are some things which could be set by underlying drivers but * really should be up to rdmavt to set. For instance drivers can't know * exactly which functions rdmavt supports, nor do they know the ABI * version, so we do all of this sort of stuff here. 
*/ rdi->ibdev.uverbs_cmd_mask |= (1ull << IB_USER_VERBS_CMD_POLL_CQ) | (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | (1ull << IB_USER_VERBS_CMD_POST_SEND) | (1ull << IB_USER_VERBS_CMD_POST_RECV) | (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); rdi->ibdev.node_type = RDMA_NODE_IB_CA; if (!rdi->ibdev.num_comp_vectors) rdi->ibdev.num_comp_vectors = 1; /* We are now good to announce we exist */ ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev), NULL); if (ret) { rvt_pr_err(rdi, "Failed to register driver with ib core.\n"); goto bail_wss; } rvt_create_mad_agents(rdi); rvt_pr_info(rdi, "Registration with rdmavt done.\n"); return ret; bail_wss: rvt_wss_exit(rdi); bail_mr: rvt_mr_exit(rdi); bail_no_mr: rvt_qp_exit(rdi); return ret; } EXPORT_SYMBOL(rvt_register_device); /** * rvt_unregister_device - remove a driver * @rdi: rvt dev struct */ void rvt_unregister_device(struct rvt_dev_info *rdi) { trace_rvt_dbg(rdi, "Driver is unregistering."); if (!rdi) return; rvt_free_mad_agents(rdi); ib_unregister_device(&rdi->ibdev); rvt_wss_exit(rdi); rvt_mr_exit(rdi); rvt_qp_exit(rdi); } EXPORT_SYMBOL(rvt_unregister_device); /** * rvt_init_port - init internal data for driver port * @rdi: rvt_dev_info struct * @port: rvt port * @port_index: 0 based index of ports, different from IB core port num * @pkey_table: pkey_table for @port * * Keep track of a list of ports. No need to have a detach port. * They persist until the driver goes away. * * Return: always 0 */ int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port, int port_index, u16 *pkey_table) { rdi->ports[port_index] = port; rdi->ports[port_index]->pkey_table = pkey_table; return 0; } EXPORT_SYMBOL(rvt_init_port);
linux-master
drivers/infiniband/sw/rdmavt/vt.c
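Illustrative registration sketch (not part of the kernel tree above): rvt_alloc_device(), rvt_init_port() and rvt_register_device() are meant to be called in that order by a driver such as hfi1 or qib. The names below (struct demo_devdata, demo_register_one_port) are hypothetical, and a real driver must also populate rdi->dparms and the rdi->driver_f callbacks that check_support() demands before rvt_register_device() will succeed.

#include <rdma/rdma_vt.h>

struct demo_devdata {
	struct rvt_dev_info rdi;	/* first member, so the rdi returned by
					 * rvt_alloc_device() is also the start
					 * of the driver structure */
	struct rvt_ibport ibport;
	u16 pkeys[1];
};

static struct demo_devdata *demo_register_one_port(void)
{
	struct rvt_dev_info *rdi;
	struct demo_devdata *dd;

	rdi = rvt_alloc_device(sizeof(*dd), 1);		/* one port */
	if (!rdi)
		return NULL;
	dd = container_of(rdi, struct demo_devdata, rdi);

	dd->pkeys[0] = 0xffff;				/* default pkey */
	rvt_init_port(rdi, &dd->ibport, 0, dd->pkeys);

	/* rdi->dparms, rdi->dparms.props and rdi->driver_f go here. */
	if (rvt_register_device(rdi)) {
		rvt_dealloc_device(rdi);
		return NULL;
	}
	return dd;
}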
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2016 - 2018 Intel Corporation. */ #include <linux/slab.h> #include <linux/vmalloc.h> #include "cq.h" #include "vt.h" #include "trace.h" static struct workqueue_struct *comp_vector_wq; /** * rvt_cq_enter - add a new entry to the completion queue * @cq: completion queue * @entry: work completion entry to add * @solicited: true if @entry is solicited * * This may be called with qp->s_lock held. * * Return: return true on success, else return * false if cq is full. */ bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited) { struct ib_uverbs_wc *uqueue = NULL; struct ib_wc *kqueue = NULL; struct rvt_cq_wc *u_wc = NULL; struct rvt_k_cq_wc *k_wc = NULL; unsigned long flags; u32 head; u32 next; u32 tail; spin_lock_irqsave(&cq->lock, flags); if (cq->ip) { u_wc = cq->queue; uqueue = &u_wc->uqueue[0]; head = RDMA_READ_UAPI_ATOMIC(u_wc->head); tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail); } else { k_wc = cq->kqueue; kqueue = &k_wc->kqueue[0]; head = k_wc->head; tail = k_wc->tail; } /* * Note that the head pointer might be writable by * user processes.Take care to verify it is a sane value. */ if (head >= (unsigned)cq->ibcq.cqe) { head = cq->ibcq.cqe; next = 0; } else { next = head + 1; } if (unlikely(next == tail || cq->cq_full)) { struct rvt_dev_info *rdi = cq->rdi; if (!cq->cq_full) rvt_pr_err_ratelimited(rdi, "CQ is full!\n"); cq->cq_full = true; spin_unlock_irqrestore(&cq->lock, flags); if (cq->ibcq.event_handler) { struct ib_event ev; ev.device = cq->ibcq.device; ev.element.cq = &cq->ibcq; ev.event = IB_EVENT_CQ_ERR; cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); } return false; } trace_rvt_cq_enter(cq, entry, head); if (uqueue) { uqueue[head].wr_id = entry->wr_id; uqueue[head].status = entry->status; uqueue[head].opcode = entry->opcode; uqueue[head].vendor_err = entry->vendor_err; uqueue[head].byte_len = entry->byte_len; uqueue[head].ex.imm_data = entry->ex.imm_data; uqueue[head].qp_num = entry->qp->qp_num; uqueue[head].src_qp = entry->src_qp; uqueue[head].wc_flags = entry->wc_flags; uqueue[head].pkey_index = entry->pkey_index; uqueue[head].slid = ib_lid_cpu16(entry->slid); uqueue[head].sl = entry->sl; uqueue[head].dlid_path_bits = entry->dlid_path_bits; uqueue[head].port_num = entry->port_num; /* Make sure entry is written before the head index. */ RDMA_WRITE_UAPI_ATOMIC(u_wc->head, next); } else { kqueue[head] = *entry; k_wc->head = next; } if (cq->notify == IB_CQ_NEXT_COMP || (cq->notify == IB_CQ_SOLICITED && (solicited || entry->status != IB_WC_SUCCESS))) { /* * This will cause send_complete() to be called in * another thread. */ cq->notify = RVT_CQ_NONE; cq->triggered++; queue_work_on(cq->comp_vector_cpu, comp_vector_wq, &cq->comptask); } spin_unlock_irqrestore(&cq->lock, flags); return true; } EXPORT_SYMBOL(rvt_cq_enter); static void send_complete(struct work_struct *work) { struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask); /* * The completion handler will most likely rearm the notification * and poll for all pending entries. If a new completion entry * is added while we are in this routine, queue_work() * won't call us again until we return so we check triggered to * see if we need to call the handler again. */ for (;;) { u8 triggered = cq->triggered; /* * IPoIB connected mode assumes the callback is from a * soft IRQ. We simulate this by blocking "bottom halves". * See the implementation for ipoib_cm_handle_tx_wc(), * netif_tx_lock_bh() and netif_tx_lock(). 
*/ local_bh_disable(); cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); local_bh_enable(); if (cq->triggered == triggered) return; } } /** * rvt_create_cq - create a completion queue * @ibcq: Allocated CQ * @attr: creation attributes * @udata: user data for libibverbs.so * * Called by ib_create_cq() in the generic verbs code. * * Return: 0 on success */ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) { struct ib_device *ibdev = ibcq->device; struct rvt_dev_info *rdi = ib_to_rvt(ibdev); struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); struct rvt_cq_wc *u_wc = NULL; struct rvt_k_cq_wc *k_wc = NULL; u32 sz; unsigned int entries = attr->cqe; int comp_vector = attr->comp_vector; int err; if (attr->flags) return -EOPNOTSUPP; if (entries < 1 || entries > rdi->dparms.props.max_cqe) return -EINVAL; if (comp_vector < 0) comp_vector = 0; comp_vector = comp_vector % rdi->ibdev.num_comp_vectors; /* * Allocate the completion queue entries and head/tail pointers. * This is allocated separately so that it can be resized and * also mapped into user space. * We need to use vmalloc() in order to support mmap and large * numbers of entries. */ if (udata && udata->outlen >= sizeof(__u64)) { sz = sizeof(struct ib_uverbs_wc) * (entries + 1); sz += sizeof(*u_wc); u_wc = vmalloc_user(sz); if (!u_wc) return -ENOMEM; } else { sz = sizeof(struct ib_wc) * (entries + 1); sz += sizeof(*k_wc); k_wc = vzalloc_node(sz, rdi->dparms.node); if (!k_wc) return -ENOMEM; } /* * Return the address of the WC as the offset to mmap. * See rvt_mmap() for details. */ if (udata && udata->outlen >= sizeof(__u64)) { cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc); if (IS_ERR(cq->ip)) { err = PTR_ERR(cq->ip); goto bail_wc; } err = ib_copy_to_udata(udata, &cq->ip->offset, sizeof(cq->ip->offset)); if (err) goto bail_ip; } spin_lock_irq(&rdi->n_cqs_lock); if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) { spin_unlock_irq(&rdi->n_cqs_lock); err = -ENOMEM; goto bail_ip; } rdi->n_cqs_allocated++; spin_unlock_irq(&rdi->n_cqs_lock); if (cq->ip) { spin_lock_irq(&rdi->pending_lock); list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps); spin_unlock_irq(&rdi->pending_lock); } /* * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. * The number of entries should be >= the number requested or return * an error. */ cq->rdi = rdi; if (rdi->driver_f.comp_vect_cpu_lookup) cq->comp_vector_cpu = rdi->driver_f.comp_vect_cpu_lookup(rdi, comp_vector); else cq->comp_vector_cpu = cpumask_first(cpumask_of_node(rdi->dparms.node)); cq->ibcq.cqe = entries; cq->notify = RVT_CQ_NONE; spin_lock_init(&cq->lock); INIT_WORK(&cq->comptask, send_complete); if (u_wc) cq->queue = u_wc; else cq->kqueue = k_wc; trace_rvt_create_cq(cq, attr); return 0; bail_ip: kfree(cq->ip); bail_wc: vfree(u_wc); vfree(k_wc); return err; } /** * rvt_destroy_cq - destroy a completion queue * @ibcq: the completion queue to destroy. * @udata: user data or NULL for kernel object * * Called by ib_destroy_cq() in the generic verbs code. 
*/ int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) { struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); struct rvt_dev_info *rdi = cq->rdi; flush_work(&cq->comptask); spin_lock_irq(&rdi->n_cqs_lock); rdi->n_cqs_allocated--; spin_unlock_irq(&rdi->n_cqs_lock); if (cq->ip) kref_put(&cq->ip->ref, rvt_release_mmap_info); else vfree(cq->kqueue); return 0; } /** * rvt_req_notify_cq - change the notification type for a completion queue * @ibcq: the completion queue * @notify_flags: the type of notification to request * * This may be called from interrupt context. Also called by * ib_req_notify_cq() in the generic verbs code. * * Return: 0 for success. */ int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) { struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); unsigned long flags; int ret = 0; spin_lock_irqsave(&cq->lock, flags); /* * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2). */ if (cq->notify != IB_CQ_NEXT_COMP) cq->notify = notify_flags & IB_CQ_SOLICITED_MASK; if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) { if (cq->queue) { if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) != RDMA_READ_UAPI_ATOMIC(cq->queue->tail)) ret = 1; } else { if (cq->kqueue->head != cq->kqueue->tail) ret = 1; } } spin_unlock_irqrestore(&cq->lock, flags); return ret; } /* * rvt_resize_cq - change the size of the CQ * @ibcq: the completion queue * * Return: 0 for success. */ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) { struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); u32 head, tail, n; int ret; u32 sz; struct rvt_dev_info *rdi = cq->rdi; struct rvt_cq_wc *u_wc = NULL; struct rvt_cq_wc *old_u_wc = NULL; struct rvt_k_cq_wc *k_wc = NULL; struct rvt_k_cq_wc *old_k_wc = NULL; if (cqe < 1 || cqe > rdi->dparms.props.max_cqe) return -EINVAL; /* * Need to use vmalloc() if we want to support large #s of entries. */ if (udata && udata->outlen >= sizeof(__u64)) { sz = sizeof(struct ib_uverbs_wc) * (cqe + 1); sz += sizeof(*u_wc); u_wc = vmalloc_user(sz); if (!u_wc) return -ENOMEM; } else { sz = sizeof(struct ib_wc) * (cqe + 1); sz += sizeof(*k_wc); k_wc = vzalloc_node(sz, rdi->dparms.node); if (!k_wc) return -ENOMEM; } /* Check that we can write the offset to mmap. */ if (udata && udata->outlen >= sizeof(__u64)) { __u64 offset = 0; ret = ib_copy_to_udata(udata, &offset, sizeof(offset)); if (ret) goto bail_free; } spin_lock_irq(&cq->lock); /* * Make sure head and tail are sane since they * might be user writable. 
*/ if (u_wc) { old_u_wc = cq->queue; head = RDMA_READ_UAPI_ATOMIC(old_u_wc->head); tail = RDMA_READ_UAPI_ATOMIC(old_u_wc->tail); } else { old_k_wc = cq->kqueue; head = old_k_wc->head; tail = old_k_wc->tail; } if (head > (u32)cq->ibcq.cqe) head = (u32)cq->ibcq.cqe; if (tail > (u32)cq->ibcq.cqe) tail = (u32)cq->ibcq.cqe; if (head < tail) n = cq->ibcq.cqe + 1 + head - tail; else n = head - tail; if (unlikely((u32)cqe < n)) { ret = -EINVAL; goto bail_unlock; } for (n = 0; tail != head; n++) { if (u_wc) u_wc->uqueue[n] = old_u_wc->uqueue[tail]; else k_wc->kqueue[n] = old_k_wc->kqueue[tail]; if (tail == (u32)cq->ibcq.cqe) tail = 0; else tail++; } cq->ibcq.cqe = cqe; if (u_wc) { RDMA_WRITE_UAPI_ATOMIC(u_wc->head, n); RDMA_WRITE_UAPI_ATOMIC(u_wc->tail, 0); cq->queue = u_wc; } else { k_wc->head = n; k_wc->tail = 0; cq->kqueue = k_wc; } spin_unlock_irq(&cq->lock); if (u_wc) vfree(old_u_wc); else vfree(old_k_wc); if (cq->ip) { struct rvt_mmap_info *ip = cq->ip; rvt_update_mmap_info(rdi, ip, sz, u_wc); /* * Return the offset to mmap. * See rvt_mmap() for details. */ if (udata && udata->outlen >= sizeof(__u64)) { ret = ib_copy_to_udata(udata, &ip->offset, sizeof(ip->offset)); if (ret) return ret; } spin_lock_irq(&rdi->pending_lock); if (list_empty(&ip->pending_mmaps)) list_add(&ip->pending_mmaps, &rdi->pending_mmaps); spin_unlock_irq(&rdi->pending_lock); } return 0; bail_unlock: spin_unlock_irq(&cq->lock); bail_free: vfree(u_wc); vfree(k_wc); return ret; } /** * rvt_poll_cq - poll for work completion entries * @ibcq: the completion queue to poll * @num_entries: the maximum number of entries to return * @entry: pointer to array where work completions are placed * * This may be called from interrupt context. Also called by ib_poll_cq() * in the generic verbs code. * * Return: the number of completion entries polled. */ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) { struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); struct rvt_k_cq_wc *wc; unsigned long flags; int npolled; u32 tail; /* The kernel can only poll a kernel completion queue */ if (cq->ip) return -EINVAL; spin_lock_irqsave(&cq->lock, flags); wc = cq->kqueue; tail = wc->tail; if (tail > (u32)cq->ibcq.cqe) tail = (u32)cq->ibcq.cqe; for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { if (tail == wc->head) break; /* The kernel doesn't need a RMB since it has the lock. */ trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled); *entry = wc->kqueue[tail]; if (tail >= cq->ibcq.cqe) tail = 0; else tail++; } wc->tail = tail; spin_unlock_irqrestore(&cq->lock, flags); return npolled; } /** * rvt_driver_cq_init - Init cq resources on behalf of driver * * Return: 0 on success */ int rvt_driver_cq_init(void) { comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 0, "rdmavt_cq"); if (!comp_vector_wq) return -ENOMEM; return 0; } /** * rvt_cq_exit - tear down cq reources */ void rvt_cq_exit(void) { destroy_workqueue(comp_vector_wq); comp_vector_wq = NULL; }
linux-master
drivers/infiniband/sw/rdmavt/cq.c
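Illustrative sketch (not part of the kernel tree above): rvt_cq_enter() is the producer side of the queue that rvt_poll_cq() drains, and rdmavt's own qp.c uses it in essentially this way when it retires a receive work request. demo_complete_recv() is a hypothetical name and the overflow handling is only indicative; container_of() is used here in place of the ibcq_to_rvtcq() helper seen above.

#include <linux/printk.h>
#include <rdma/rdmavt_cq.h>
#include <rdma/rdmavt_qp.h>

static void demo_complete_recv(struct rvt_qp *qp, u64 wr_id, u32 byte_len)
{
	struct rvt_cq *cq = container_of(qp->ibqp.recv_cq, struct rvt_cq, ibcq);
	struct ib_wc wc = {
		.wr_id    = wr_id,
		.status   = IB_WC_SUCCESS,
		.opcode   = IB_WC_RECV,
		.qp       = &qp->ibqp,
		.byte_len = byte_len,
	};

	/* solicited == true also wakes an IB_CQ_SOLICITED waiter */
	if (!rvt_cq_enter(cq, &wc, true))
		pr_warn_ratelimited("demo: completion dropped, CQ full\n");
}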
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2016 Intel Corporation. */ #include <linux/slab.h> #include "pd.h" /** * rvt_alloc_pd - allocate a protection domain * @ibpd: PD * @udata: optional user data * * Allocate and keep track of a PD. * * Return: 0 on success */ int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct rvt_dev_info *dev = ib_to_rvt(ibdev); struct rvt_pd *pd = ibpd_to_rvtpd(ibpd); int ret = 0; /* * While we could continue allocating protecetion domains, being * constrained only by system resources. The IBTA spec defines that * there is a max_pd limit that can be set and we need to check for * that. */ spin_lock(&dev->n_pds_lock); if (dev->n_pds_allocated == dev->dparms.props.max_pd) { spin_unlock(&dev->n_pds_lock); ret = -ENOMEM; goto bail; } dev->n_pds_allocated++; spin_unlock(&dev->n_pds_lock); /* ib_alloc_pd() will initialize pd->ibpd. */ pd->user = !!udata; bail: return ret; } /** * rvt_dealloc_pd - Free PD * @ibpd: Free up PD * @udata: Valid user data or NULL for kernel object * * Return: always 0 */ int rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct rvt_dev_info *dev = ib_to_rvt(ibpd->device); spin_lock(&dev->n_pds_lock); dev->n_pds_allocated--; spin_unlock(&dev->n_pds_lock); return 0; }
linux-master
drivers/infiniband/sw/rdmavt/pd.c
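Illustrative sketch (not part of the kernel tree above): rvt_alloc_pd()/rvt_dealloc_pd() are a small instance of a recurring rdmavt idiom, a counter bounded by a dparms.props limit and protected by a spinlock; the same shape is used for AHs, SRQs, CQs and multicast groups elsewhere in this section. The demo_limit helpers below are hypothetical and only restate that idiom in isolation.

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_limit {
	spinlock_t lock;
	u32 allocated;
	u32 max;		/* e.g. dparms.props.max_pd */
};

static int demo_limit_get(struct demo_limit *l)
{
	int ret = 0;

	spin_lock(&l->lock);
	if (l->allocated == l->max)
		ret = -ENOMEM;	/* advertised cap reached */
	else
		l->allocated++;
	spin_unlock(&l->lock);
	return ret;
}

static void demo_limit_put(struct demo_limit *l)
{
	spin_lock(&l->lock);
	l->allocated--;
	spin_unlock(&l->lock);
}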
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2016 Intel Corporation. */ #include <linux/err.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <rdma/uverbs_ioctl.h> #include "srq.h" #include "vt.h" #include "qp.h" /** * rvt_driver_srq_init - init srq resources on a per driver basis * @rdi: rvt dev structure * * Do any initialization needed when a driver registers with rdmavt. */ void rvt_driver_srq_init(struct rvt_dev_info *rdi) { spin_lock_init(&rdi->n_srqs_lock); rdi->n_srqs_allocated = 0; } /** * rvt_create_srq - create a shared receive queue * @ibsrq: the protection domain of the SRQ to create * @srq_init_attr: the attributes of the SRQ * @udata: data from libibverbs when creating a user SRQ * * Return: 0 on success */ int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr, struct ib_udata *udata) { struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device); struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); u32 sz; int ret; if (srq_init_attr->srq_type != IB_SRQT_BASIC) return -EOPNOTSUPP; if (srq_init_attr->attr.max_sge == 0 || srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge || srq_init_attr->attr.max_wr == 0 || srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr) return -EINVAL; /* * Need to use vmalloc() if we want to support large #s of entries. */ srq->rq.size = srq_init_attr->attr.max_wr + 1; srq->rq.max_sge = srq_init_attr->attr.max_sge; sz = sizeof(struct ib_sge) * srq->rq.max_sge + sizeof(struct rvt_rwqe); if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz, dev->dparms.node, udata)) { ret = -ENOMEM; goto bail_srq; } /* * Return the address of the RWQ as the offset to mmap. * See rvt_mmap() for details. */ if (udata && udata->outlen >= sizeof(__u64)) { u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz; srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq); if (IS_ERR(srq->ip)) { ret = PTR_ERR(srq->ip); goto bail_wq; } ret = ib_copy_to_udata(udata, &srq->ip->offset, sizeof(srq->ip->offset)); if (ret) goto bail_ip; } /* * ib_create_srq() will initialize srq->ibsrq. */ spin_lock_init(&srq->rq.lock); srq->limit = srq_init_attr->attr.srq_limit; spin_lock(&dev->n_srqs_lock); if (dev->n_srqs_allocated == dev->dparms.props.max_srq) { spin_unlock(&dev->n_srqs_lock); ret = -ENOMEM; goto bail_ip; } dev->n_srqs_allocated++; spin_unlock(&dev->n_srqs_lock); if (srq->ip) { spin_lock_irq(&dev->pending_lock); list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps); spin_unlock_irq(&dev->pending_lock); } return 0; bail_ip: kfree(srq->ip); bail_wq: rvt_free_rq(&srq->rq); bail_srq: return ret; } /** * rvt_modify_srq - modify a shared receive queue * @ibsrq: the SRQ to modify * @attr: the new attributes of the SRQ * @attr_mask: indicates which attributes to modify * @udata: user data for libibverbs.so * * Return: 0 on success */ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) { struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device); struct rvt_rq tmp_rq = {}; int ret = 0; if (attr_mask & IB_SRQ_MAX_WR) { struct rvt_krwq *okwq = NULL; struct rvt_rwq *owq = NULL; struct rvt_rwqe *p; u32 sz, size, n, head, tail; /* Check that the requested sizes are below the limits. */ if ((attr->max_wr > dev->dparms.props.max_srq_wr) || ((attr_mask & IB_SRQ_LIMIT) ? 
attr->srq_limit : srq->limit) > attr->max_wr) return -EINVAL; sz = sizeof(struct rvt_rwqe) + srq->rq.max_sge * sizeof(struct ib_sge); size = attr->max_wr + 1; if (rvt_alloc_rq(&tmp_rq, size * sz, dev->dparms.node, udata)) return -ENOMEM; /* Check that we can write the offset to mmap. */ if (udata && udata->inlen >= sizeof(__u64)) { __u64 offset_addr; __u64 offset = 0; ret = ib_copy_from_udata(&offset_addr, udata, sizeof(offset_addr)); if (ret) goto bail_free; udata->outbuf = (void __user *) (unsigned long)offset_addr; ret = ib_copy_to_udata(udata, &offset, sizeof(offset)); if (ret) goto bail_free; } spin_lock_irq(&srq->rq.kwq->c_lock); /* * validate head and tail pointer values and compute * the number of remaining WQEs. */ if (udata) { owq = srq->rq.wq; head = RDMA_READ_UAPI_ATOMIC(owq->head); tail = RDMA_READ_UAPI_ATOMIC(owq->tail); } else { okwq = srq->rq.kwq; head = okwq->head; tail = okwq->tail; } if (head >= srq->rq.size || tail >= srq->rq.size) { ret = -EINVAL; goto bail_unlock; } n = head; if (n < tail) n += srq->rq.size - tail; else n -= tail; if (size <= n) { ret = -EINVAL; goto bail_unlock; } n = 0; p = tmp_rq.kwq->curr_wq; while (tail != head) { struct rvt_rwqe *wqe; int i; wqe = rvt_get_rwqe_ptr(&srq->rq, tail); p->wr_id = wqe->wr_id; p->num_sge = wqe->num_sge; for (i = 0; i < wqe->num_sge; i++) p->sg_list[i] = wqe->sg_list[i]; n++; p = (struct rvt_rwqe *)((char *)p + sz); if (++tail >= srq->rq.size) tail = 0; } srq->rq.kwq = tmp_rq.kwq; if (udata) { srq->rq.wq = tmp_rq.wq; RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n); RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->tail, 0); } else { tmp_rq.kwq->head = n; tmp_rq.kwq->tail = 0; } srq->rq.size = size; if (attr_mask & IB_SRQ_LIMIT) srq->limit = attr->srq_limit; spin_unlock_irq(&srq->rq.kwq->c_lock); vfree(owq); kvfree(okwq); if (srq->ip) { struct rvt_mmap_info *ip = srq->ip; struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device); u32 s = sizeof(struct rvt_rwq) + size * sz; rvt_update_mmap_info(dev, ip, s, tmp_rq.wq); /* * Return the offset to mmap. * See rvt_mmap() for details. */ if (udata && udata->inlen >= sizeof(__u64)) { ret = ib_copy_to_udata(udata, &ip->offset, sizeof(ip->offset)); if (ret) return ret; } /* * Put user mapping info onto the pending list * unless it already is on the list. */ spin_lock_irq(&dev->pending_lock); if (list_empty(&ip->pending_mmaps)) list_add(&ip->pending_mmaps, &dev->pending_mmaps); spin_unlock_irq(&dev->pending_lock); } } else if (attr_mask & IB_SRQ_LIMIT) { spin_lock_irq(&srq->rq.kwq->c_lock); if (attr->srq_limit >= srq->rq.size) ret = -EINVAL; else srq->limit = attr->srq_limit; spin_unlock_irq(&srq->rq.kwq->c_lock); } return ret; bail_unlock: spin_unlock_irq(&srq->rq.kwq->c_lock); bail_free: rvt_free_rq(&tmp_rq); return ret; } /** * rvt_query_srq - query srq data * @ibsrq: srq to query * @attr: return info in attr * * Return: always 0 */ int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) { struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); attr->max_wr = srq->rq.size - 1; attr->max_sge = srq->rq.max_sge; attr->srq_limit = srq->limit; return 0; } /** * rvt_destroy_srq - destory an srq * @ibsrq: srq object to destroy * @udata: user data for libibverbs.so */ int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) { struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device); spin_lock(&dev->n_srqs_lock); dev->n_srqs_allocated--; spin_unlock(&dev->n_srqs_lock); if (srq->ip) kref_put(&srq->ip->ref, rvt_release_mmap_info); kvfree(srq->rq.kwq); return 0; }
linux-master
drivers/infiniband/sw/rdmavt/srq.c
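Illustrative sketch (not part of the kernel tree above): the attributes a kernel ULP hands to ib_create_srq() are what eventually reach rvt_create_srq(). demo_make_srq() and its limits (64 WRs, 2 SGEs) are made-up example values; they simply have to respect dparms.props.max_srq_wr/max_srq_sge, and IB_SRQT_BASIC is the only type rdmavt accepts.

#include <rdma/ib_verbs.h>

/* Returns an ERR_PTR() on failure, so callers check with IS_ERR(). */
static struct ib_srq *demo_make_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr attr = {
		.srq_type = IB_SRQT_BASIC,
		.attr = {
			.max_wr    = 64,	/* rvt allocates max_wr + 1 RWQEs */
			.max_sge   = 2,
			.srq_limit = 0,		/* no limit event armed yet */
		},
	};

	return ib_create_srq(pd, &attr);
}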
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2016 Intel Corporation. */ #include <linux/slab.h> #include <linux/vmalloc.h> #include <rdma/ib_umem.h> #include <rdma/rdma_vt.h> #include "vt.h" #include "mr.h" #include "trace.h" /** * rvt_driver_mr_init - Init MR resources per driver * @rdi: rvt dev struct * * Do any intilization needed when a driver registers with rdmavt. * * Return: 0 on success or errno on failure */ int rvt_driver_mr_init(struct rvt_dev_info *rdi) { unsigned int lkey_table_size = rdi->dparms.lkey_table_size; unsigned lk_tab_size; int i; /* * The top hfi1_lkey_table_size bits are used to index the * table. The lower 8 bits can be owned by the user (copied from * the LKEY). The remaining bits act as a generation number or tag. */ if (!lkey_table_size) return -EINVAL; spin_lock_init(&rdi->lkey_table.lock); /* ensure generation is at least 4 bits */ if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) { rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n", lkey_table_size, RVT_MAX_LKEY_TABLE_BITS); rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS; lkey_table_size = rdi->dparms.lkey_table_size; } rdi->lkey_table.max = 1 << lkey_table_size; rdi->lkey_table.shift = 32 - lkey_table_size; lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table); rdi->lkey_table.table = (struct rvt_mregion __rcu **) vmalloc_node(lk_tab_size, rdi->dparms.node); if (!rdi->lkey_table.table) return -ENOMEM; RCU_INIT_POINTER(rdi->dma_mr, NULL); for (i = 0; i < rdi->lkey_table.max; i++) RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL); rdi->dparms.props.max_mr = rdi->lkey_table.max; return 0; } /** * rvt_mr_exit - clean up MR * @rdi: rvt dev structure * * called when drivers have unregistered or perhaps failed to register with us */ void rvt_mr_exit(struct rvt_dev_info *rdi) { if (rdi->dma_mr) rvt_pr_err(rdi, "DMA MR not null!\n"); vfree(rdi->lkey_table.table); } static void rvt_deinit_mregion(struct rvt_mregion *mr) { int i = mr->mapsz; mr->mapsz = 0; while (i) kfree(mr->map[--i]); percpu_ref_exit(&mr->refcount); } static void __rvt_mregion_complete(struct percpu_ref *ref) { struct rvt_mregion *mr = container_of(ref, struct rvt_mregion, refcount); complete(&mr->comp); } static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd, int count, unsigned int percpu_flags) { int m, i = 0; struct rvt_dev_info *dev = ib_to_rvt(pd->device); mr->mapsz = 0; m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; for (; i < m; i++) { mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL, dev->dparms.node); if (!mr->map[i]) goto bail; mr->mapsz++; } init_completion(&mr->comp); /* count returning the ptr to user */ if (percpu_ref_init(&mr->refcount, &__rvt_mregion_complete, percpu_flags, GFP_KERNEL)) goto bail; atomic_set(&mr->lkey_invalid, 0); mr->pd = pd; mr->max_segs = count; return 0; bail: rvt_deinit_mregion(mr); return -ENOMEM; } /** * rvt_alloc_lkey - allocate an lkey * @mr: memory region that this lkey protects * @dma_region: 0->normal key, 1->restricted DMA key * * Returns 0 if successful, otherwise returns -errno. * * Increments mr reference count as required. * * Sets the lkey field mr for non-dma regions. 
* */ static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region) { unsigned long flags; u32 r; u32 n; int ret = 0; struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device); struct rvt_lkey_table *rkt = &dev->lkey_table; rvt_get_mr(mr); spin_lock_irqsave(&rkt->lock, flags); /* special case for dma_mr lkey == 0 */ if (dma_region) { struct rvt_mregion *tmr; tmr = rcu_access_pointer(dev->dma_mr); if (!tmr) { mr->lkey_published = 1; /* Insure published written first */ rcu_assign_pointer(dev->dma_mr, mr); rvt_get_mr(mr); } goto success; } /* Find the next available LKEY */ r = rkt->next; n = r; for (;;) { if (!rcu_access_pointer(rkt->table[r])) break; r = (r + 1) & (rkt->max - 1); if (r == n) goto bail; } rkt->next = (r + 1) & (rkt->max - 1); /* * Make sure lkey is never zero which is reserved to indicate an * unrestricted LKEY. */ rkt->gen++; /* * bits are capped to ensure enough bits for generation number */ mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) | ((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen) << 8); if (mr->lkey == 0) { mr->lkey |= 1 << 8; rkt->gen++; } mr->lkey_published = 1; /* Insure published written first */ rcu_assign_pointer(rkt->table[r], mr); success: spin_unlock_irqrestore(&rkt->lock, flags); out: return ret; bail: rvt_put_mr(mr); spin_unlock_irqrestore(&rkt->lock, flags); ret = -ENOMEM; goto out; } /** * rvt_free_lkey - free an lkey * @mr: mr to free from tables */ static void rvt_free_lkey(struct rvt_mregion *mr) { unsigned long flags; u32 lkey = mr->lkey; u32 r; struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device); struct rvt_lkey_table *rkt = &dev->lkey_table; int freed = 0; spin_lock_irqsave(&rkt->lock, flags); if (!lkey) { if (mr->lkey_published) { mr->lkey_published = 0; /* insure published is written before pointer */ rcu_assign_pointer(dev->dma_mr, NULL); rvt_put_mr(mr); } } else { if (!mr->lkey_published) goto out; r = lkey >> (32 - dev->dparms.lkey_table_size); mr->lkey_published = 0; /* insure published is written before pointer */ rcu_assign_pointer(rkt->table[r], NULL); } freed++; out: spin_unlock_irqrestore(&rkt->lock, flags); if (freed) percpu_ref_kill(&mr->refcount); } static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd) { struct rvt_mr *mr; int rval = -ENOMEM; int m; /* Allocate struct plus pointers to first level page tables. */ m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL); if (!mr) goto bail; rval = rvt_init_mregion(&mr->mr, pd, count, 0); if (rval) goto bail; /* * ib_reg_phys_mr() will initialize mr->ibmr except for * lkey and rkey. */ rval = rvt_alloc_lkey(&mr->mr, 0); if (rval) goto bail_mregion; mr->ibmr.lkey = mr->mr.lkey; mr->ibmr.rkey = mr->mr.lkey; done: return mr; bail_mregion: rvt_deinit_mregion(&mr->mr); bail: kfree(mr); mr = ERR_PTR(rval); goto done; } static void __rvt_free_mr(struct rvt_mr *mr) { rvt_free_lkey(&mr->mr); rvt_deinit_mregion(&mr->mr); kfree(mr); } /** * rvt_get_dma_mr - get a DMA memory region * @pd: protection domain for this memory region * @acc: access flags * * Return: the memory region on success, otherwise returns an errno. 
*/ struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc) { struct rvt_mr *mr; struct ib_mr *ret; int rval; if (ibpd_to_rvtpd(pd)->user) return ERR_PTR(-EPERM); mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) { ret = ERR_PTR(-ENOMEM); goto bail; } rval = rvt_init_mregion(&mr->mr, pd, 0, 0); if (rval) { ret = ERR_PTR(rval); goto bail; } rval = rvt_alloc_lkey(&mr->mr, 1); if (rval) { ret = ERR_PTR(rval); goto bail_mregion; } mr->mr.access_flags = acc; ret = &mr->ibmr; done: return ret; bail_mregion: rvt_deinit_mregion(&mr->mr); bail: kfree(mr); goto done; } /** * rvt_reg_user_mr - register a userspace memory region * @pd: protection domain for this memory region * @start: starting userspace address * @length: length of region to register * @virt_addr: associated virtual address * @mr_access_flags: access flags for this memory region * @udata: unused by the driver * * Return: the memory region on success, otherwise returns an errno. */ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_udata *udata) { struct rvt_mr *mr; struct ib_umem *umem; struct sg_page_iter sg_iter; int n, m; struct ib_mr *ret; if (length == 0) return ERR_PTR(-EINVAL); umem = ib_umem_get(pd->device, start, length, mr_access_flags); if (IS_ERR(umem)) return (void *)umem; n = ib_umem_num_pages(umem); mr = __rvt_alloc_mr(n, pd); if (IS_ERR(mr)) { ret = (struct ib_mr *)mr; goto bail_umem; } mr->mr.user_base = start; mr->mr.iova = virt_addr; mr->mr.length = length; mr->mr.offset = ib_umem_offset(umem); mr->mr.access_flags = mr_access_flags; mr->umem = umem; mr->mr.page_shift = PAGE_SHIFT; m = 0; n = 0; for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) { void *vaddr; vaddr = page_address(sg_page_iter_page(&sg_iter)); if (!vaddr) { ret = ERR_PTR(-EINVAL); goto bail_inval; } mr->mr.map[m]->segs[n].vaddr = vaddr; mr->mr.map[m]->segs[n].length = PAGE_SIZE; trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE); if (++n == RVT_SEGSZ) { m++; n = 0; } } return &mr->ibmr; bail_inval: __rvt_free_mr(mr); bail_umem: ib_umem_release(umem); return ret; } /** * rvt_dereg_clean_qp_cb - callback from iterator * @qp: the qp * @v: the mregion (as u64) * * This routine fields the callback for all QPs and * for QPs in the same PD as the MR will call the * rvt_qp_mr_clean() to potentially cleanup references. */ static void rvt_dereg_clean_qp_cb(struct rvt_qp *qp, u64 v) { struct rvt_mregion *mr = (struct rvt_mregion *)v; /* skip PDs that are not ours */ if (mr->pd != qp->ibqp.pd) return; rvt_qp_mr_clean(qp, mr->lkey); } /** * rvt_dereg_clean_qps - find QPs for reference cleanup * @mr: the MR that is being deregistered * * This routine iterates RC QPs looking for references * to the lkey noted in mr. */ static void rvt_dereg_clean_qps(struct rvt_mregion *mr) { struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); rvt_qp_iter(rdi, (u64)mr, rvt_dereg_clean_qp_cb); } /** * rvt_check_refs - check references * @mr: the megion * @t: the caller identification * * This routine checks MRs holding a reference during * when being de-registered. * * If the count is non-zero, the code calls a clean routine then * waits for the timeout for the count to zero. 
*/ static int rvt_check_refs(struct rvt_mregion *mr, const char *t) { unsigned long timeout; struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); if (mr->lkey) { /* avoid dma mr */ rvt_dereg_clean_qps(mr); /* @mr was indexed on rcu protected @lkey_table */ synchronize_rcu(); } timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ); if (!timeout) { rvt_pr_err(rdi, "%s timeout mr %p pd %p lkey %x refcount %ld\n", t, mr, mr->pd, mr->lkey, atomic_long_read(&mr->refcount.data->count)); rvt_get_mr(mr); return -EBUSY; } return 0; } /** * rvt_mr_has_lkey - is MR * @mr: the mregion * @lkey: the lkey */ bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey) { return mr && lkey == mr->lkey; } /** * rvt_ss_has_lkey - is mr in sge tests * @ss: the sge state * @lkey: the lkey * * This code tests for an MR in the indicated * sge state. */ bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey) { int i; bool rval = false; if (!ss->num_sge) return rval; /* first one */ rval = rvt_mr_has_lkey(ss->sge.mr, lkey); /* any others */ for (i = 0; !rval && i < ss->num_sge - 1; i++) rval = rvt_mr_has_lkey(ss->sg_list[i].mr, lkey); return rval; } /** * rvt_dereg_mr - unregister and free a memory region * @ibmr: the memory region to free * @udata: unused by the driver * * Note that this is called to free MRs created by rvt_get_dma_mr() * or rvt_reg_user_mr(). * * Returns 0 on success. */ int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) { struct rvt_mr *mr = to_imr(ibmr); int ret; rvt_free_lkey(&mr->mr); rvt_put_mr(&mr->mr); /* will set completion if last */ ret = rvt_check_refs(&mr->mr, __func__); if (ret) goto out; rvt_deinit_mregion(&mr->mr); ib_umem_release(mr->umem); kfree(mr); out: return ret; } /** * rvt_alloc_mr - Allocate a memory region usable with the * @pd: protection domain for this memory region * @mr_type: mem region type * @max_num_sg: Max number of segments allowed * * Return: the memory region on success, otherwise return an errno. */ struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) { struct rvt_mr *mr; if (mr_type != IB_MR_TYPE_MEM_REG) return ERR_PTR(-EINVAL); mr = __rvt_alloc_mr(max_num_sg, pd); if (IS_ERR(mr)) return (struct ib_mr *)mr; return &mr->ibmr; } /** * rvt_set_page - page assignment function called by ib_sg_to_pages * @ibmr: memory region * @addr: dma address of mapped page * * Return: 0 on success */ static int rvt_set_page(struct ib_mr *ibmr, u64 addr) { struct rvt_mr *mr = to_imr(ibmr); u32 ps = 1 << mr->mr.page_shift; u32 mapped_segs = mr->mr.length >> mr->mr.page_shift; int m, n; if (unlikely(mapped_segs == mr->mr.max_segs)) return -ENOMEM; m = mapped_segs / RVT_SEGSZ; n = mapped_segs % RVT_SEGSZ; mr->mr.map[m]->segs[n].vaddr = (void *)addr; mr->mr.map[m]->segs[n].length = ps; mr->mr.length += ps; trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps); return 0; } /** * rvt_map_mr_sg - map sg list and set it the memory region * @ibmr: memory region * @sg: dma mapped scatterlist * @sg_nents: number of entries in sg * @sg_offset: offset in bytes into sg * * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages. 
* * Return: number of sg elements mapped to the memory region */ int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset) { struct rvt_mr *mr = to_imr(ibmr); int ret; mr->mr.length = 0; mr->mr.page_shift = PAGE_SHIFT; ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page); mr->mr.user_base = ibmr->iova; mr->mr.iova = ibmr->iova; mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr; mr->mr.length = (size_t)ibmr->length; trace_rvt_map_mr_sg(ibmr, sg_nents, sg_offset); return ret; } /** * rvt_fast_reg_mr - fast register physical MR * @qp: the queue pair where the work request comes from * @ibmr: the memory region to be registered * @key: updated key for this memory region * @access: access flags for this memory region * * Returns 0 on success. */ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key, int access) { struct rvt_mr *mr = to_imr(ibmr); if (qp->ibqp.pd != mr->mr.pd) return -EACCES; /* not applicable to dma MR or user MR */ if (!mr->mr.lkey || mr->umem) return -EINVAL; if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00)) return -EINVAL; ibmr->lkey = key; ibmr->rkey = key; mr->mr.lkey = key; mr->mr.access_flags = access; mr->mr.iova = ibmr->iova; atomic_set(&mr->mr.lkey_invalid, 0); return 0; } EXPORT_SYMBOL(rvt_fast_reg_mr); /** * rvt_invalidate_rkey - invalidate an MR rkey * @qp: queue pair associated with the invalidate op * @rkey: rkey to invalidate * * Returns 0 on success. */ int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey) { struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device); struct rvt_lkey_table *rkt = &dev->lkey_table; struct rvt_mregion *mr; if (rkey == 0) return -EINVAL; rcu_read_lock(); mr = rcu_dereference( rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]); if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) goto bail; atomic_set(&mr->lkey_invalid, 1); rcu_read_unlock(); return 0; bail: rcu_read_unlock(); return -EINVAL; } EXPORT_SYMBOL(rvt_invalidate_rkey); /** * rvt_sge_adjacent - is isge compressible * @last_sge: last outgoing SGE written * @sge: SGE to check * * If adjacent will update last_sge to add length. * * Return: true if isge is adjacent to last sge */ static inline bool rvt_sge_adjacent(struct rvt_sge *last_sge, struct ib_sge *sge) { if (last_sge && sge->lkey == last_sge->mr->lkey && ((uint64_t)(last_sge->vaddr + last_sge->length) == sge->addr)) { if (sge->lkey) { if (unlikely((sge->addr - last_sge->mr->user_base + sge->length > last_sge->mr->length))) return false; /* overrun, caller will catch */ } else { last_sge->length += sge->length; } last_sge->sge_length += sge->length; trace_rvt_sge_adjacent(last_sge, sge); return true; } return false; } /** * rvt_lkey_ok - check IB SGE for validity and initialize * @rkt: table containing lkey to check SGE against * @pd: protection domain * @isge: outgoing internal SGE * @last_sge: last outgoing SGE written * @sge: SGE to check * @acc: access flags * * Check the IB SGE for validity and initialize our internal version * of it. * * Increments the reference count when a new sge is stored. * * Return: 0 if compressed, 1 if added , otherwise returns -errno. */ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, struct rvt_sge *isge, struct rvt_sge *last_sge, struct ib_sge *sge, int acc) { struct rvt_mregion *mr; unsigned n, m; size_t off; /* * We use LKEY == zero for kernel virtual addresses * (see rvt_get_dma_mr()). 
*/ if (sge->lkey == 0) { struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device); if (pd->user) return -EINVAL; if (rvt_sge_adjacent(last_sge, sge)) return 0; rcu_read_lock(); mr = rcu_dereference(dev->dma_mr); if (!mr) goto bail; rvt_get_mr(mr); rcu_read_unlock(); isge->mr = mr; isge->vaddr = (void *)sge->addr; isge->length = sge->length; isge->sge_length = sge->length; isge->m = 0; isge->n = 0; goto ok; } if (rvt_sge_adjacent(last_sge, sge)) return 0; rcu_read_lock(); mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]); if (!mr) goto bail; rvt_get_mr(mr); if (!READ_ONCE(mr->lkey_published)) goto bail_unref; if (unlikely(atomic_read(&mr->lkey_invalid) || mr->lkey != sge->lkey || mr->pd != &pd->ibpd)) goto bail_unref; off = sge->addr - mr->user_base; if (unlikely(sge->addr < mr->user_base || off + sge->length > mr->length || (mr->access_flags & acc) != acc)) goto bail_unref; rcu_read_unlock(); off += mr->offset; if (mr->page_shift) { /* * page sizes are uniform power of 2 so no loop is necessary * entries_spanned_by_off is the number of times the loop below * would have executed. */ size_t entries_spanned_by_off; entries_spanned_by_off = off >> mr->page_shift; off -= (entries_spanned_by_off << mr->page_shift); m = entries_spanned_by_off / RVT_SEGSZ; n = entries_spanned_by_off % RVT_SEGSZ; } else { m = 0; n = 0; while (off >= mr->map[m]->segs[n].length) { off -= mr->map[m]->segs[n].length; n++; if (n >= RVT_SEGSZ) { m++; n = 0; } } } isge->mr = mr; isge->vaddr = mr->map[m]->segs[n].vaddr + off; isge->length = mr->map[m]->segs[n].length - off; isge->sge_length = sge->length; isge->m = m; isge->n = n; ok: trace_rvt_sge_new(isge, sge); return 1; bail_unref: rvt_put_mr(mr); bail: rcu_read_unlock(); return -EINVAL; } EXPORT_SYMBOL(rvt_lkey_ok); /** * rvt_rkey_ok - check the IB virtual address, length, and RKEY * @qp: qp for validation * @sge: SGE state * @len: length of data * @vaddr: virtual address to place data * @rkey: rkey to check * @acc: access flags * * Return: 1 if successful, otherwise 0. * * increments the reference count upon success */ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge, u32 len, u64 vaddr, u32 rkey, int acc) { struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device); struct rvt_lkey_table *rkt = &dev->lkey_table; struct rvt_mregion *mr; unsigned n, m; size_t off; /* * We use RKEY == zero for kernel virtual addresses * (see rvt_get_dma_mr()). */ rcu_read_lock(); if (rkey == 0) { struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd); struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device); if (pd->user) goto bail; mr = rcu_dereference(rdi->dma_mr); if (!mr) goto bail; rvt_get_mr(mr); rcu_read_unlock(); sge->mr = mr; sge->vaddr = (void *)vaddr; sge->length = len; sge->sge_length = len; sge->m = 0; sge->n = 0; goto ok; } mr = rcu_dereference(rkt->table[rkey >> rkt->shift]); if (!mr) goto bail; rvt_get_mr(mr); /* insure mr read is before test */ if (!READ_ONCE(mr->lkey_published)) goto bail_unref; if (unlikely(atomic_read(&mr->lkey_invalid) || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) goto bail_unref; off = vaddr - mr->iova; if (unlikely(vaddr < mr->iova || off + len > mr->length || (mr->access_flags & acc) == 0)) goto bail_unref; rcu_read_unlock(); off += mr->offset; if (mr->page_shift) { /* * page sizes are uniform power of 2 so no loop is necessary * entries_spanned_by_off is the number of times the loop below * would have executed. 
*/ size_t entries_spanned_by_off; entries_spanned_by_off = off >> mr->page_shift; off -= (entries_spanned_by_off << mr->page_shift); m = entries_spanned_by_off / RVT_SEGSZ; n = entries_spanned_by_off % RVT_SEGSZ; } else { m = 0; n = 0; while (off >= mr->map[m]->segs[n].length) { off -= mr->map[m]->segs[n].length; n++; if (n >= RVT_SEGSZ) { m++; n = 0; } } } sge->mr = mr; sge->vaddr = mr->map[m]->segs[n].vaddr + off; sge->length = mr->map[m]->segs[n].length - off; sge->sge_length = len; sge->m = m; sge->n = n; ok: return 1; bail_unref: rvt_put_mr(mr); bail: rcu_read_unlock(); return 0; } EXPORT_SYMBOL(rvt_rkey_ok);
linux-master
drivers/infiniband/sw/rdmavt/mr.c
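Illustrative sketch (not part of the kernel tree above): rvt_rkey_ok() is typically called from a driver's RC receive path when an RDMA WRITE arrives, as in hfi1/qib rc.c. demo_check_rdma_write() and struct demo_reth are hypothetical (a stand-in for an already byte-swapped RETH); the zero-length and error branches follow the usual driver pattern, and the MR reference taken on success is later dropped with rvt_put_ss(&qp->r_sge).

#include <linux/errno.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

struct demo_reth {		/* stand-in for a decoded RETH header */
	u64 vaddr;
	u32 rkey;
	u32 length;
};

static int demo_check_rdma_write(struct rvt_qp *qp,
				 const struct demo_reth *reth)
{
	if (reth->length == 0) {
		/* Zero-length write: nothing to validate, empty SGE state. */
		qp->r_sge.num_sge = 0;
		qp->r_sge.sge.mr = NULL;
		qp->r_sge.sge.vaddr = NULL;
		qp->r_sge.sge.length = 0;
		qp->r_sge.sge.sge_length = 0;
		return 0;
	}

	/* On success the SGE holds a reference on the target MR. */
	if (!rvt_rkey_ok(qp, &qp->r_sge.sge, reth->length, reth->vaddr,
			 reth->rkey, IB_ACCESS_REMOTE_WRITE))
		return -EACCES;	/* driver would NAK: remote access error */

	qp->r_sge.num_sge = 1;
	qp->r_sge.sg_list = NULL;
	qp->r_sge.total_len = reth->length;
	return 0;
}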
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2016 - 2019 Intel Corporation. */ #include <linux/slab.h> #include "ah.h" #include "vt.h" /* for prints */ /** * rvt_check_ah - validate the attributes of AH * @ibdev: the ib device * @ah_attr: the attributes of the AH * * If driver supports a more detailed check_ah function call back to it * otherwise just check the basics. * * Return: 0 on success */ int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr) { int err; int port_num = rdma_ah_get_port_num(ah_attr); struct ib_port_attr port_attr; struct rvt_dev_info *rdi = ib_to_rvt(ibdev); u8 ah_flags = rdma_ah_get_ah_flags(ah_attr); u8 static_rate = rdma_ah_get_static_rate(ah_attr); err = ib_query_port(ibdev, port_num, &port_attr); if (err) return -EINVAL; if (port_num < 1 || port_num > ibdev->phys_port_cnt) return -EINVAL; if (static_rate != IB_RATE_PORT_CURRENT && ib_rate_to_mbps(static_rate) < 0) return -EINVAL; if ((ah_flags & IB_AH_GRH) && rdma_ah_read_grh(ah_attr)->sgid_index >= port_attr.gid_tbl_len) return -EINVAL; if (rdi->driver_f.check_ah) return rdi->driver_f.check_ah(ibdev, ah_attr); return 0; } EXPORT_SYMBOL(rvt_check_ah); /** * rvt_create_ah - create an address handle * @ibah: the IB address handle * @init_attr: the attributes of the AH * @udata: pointer to user's input output buffer information. * * This may be called from interrupt context. * * Return: 0 on success */ int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata) { struct rvt_ah *ah = ibah_to_rvtah(ibah); struct rvt_dev_info *dev = ib_to_rvt(ibah->device); unsigned long flags; if (rvt_check_ah(ibah->device, init_attr->ah_attr)) return -EINVAL; spin_lock_irqsave(&dev->n_ahs_lock, flags); if (dev->n_ahs_allocated == dev->dparms.props.max_ah) { spin_unlock_irqrestore(&dev->n_ahs_lock, flags); return -ENOMEM; } dev->n_ahs_allocated++; spin_unlock_irqrestore(&dev->n_ahs_lock, flags); rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr); if (dev->driver_f.notify_new_ah) dev->driver_f.notify_new_ah(ibah->device, init_attr->ah_attr, ah); return 0; } /** * rvt_destroy_ah - Destroy an address handle * @ibah: address handle * @destroy_flags: destroy address handle flags (see enum rdma_destroy_ah_flags) * Return: 0 on success */ int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags) { struct rvt_dev_info *dev = ib_to_rvt(ibah->device); struct rvt_ah *ah = ibah_to_rvtah(ibah); unsigned long flags; spin_lock_irqsave(&dev->n_ahs_lock, flags); dev->n_ahs_allocated--; spin_unlock_irqrestore(&dev->n_ahs_lock, flags); rdma_destroy_ah_attr(&ah->attr); return 0; } /** * rvt_modify_ah - modify an ah with given attrs * @ibah: address handle to modify * @ah_attr: attrs to apply * * Return: 0 on success */ int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) { struct rvt_ah *ah = ibah_to_rvtah(ibah); if (rvt_check_ah(ibah->device, ah_attr)) return -EINVAL; ah->attr = *ah_attr; return 0; } /** * rvt_query_ah - return attrs for ah * @ibah: address handle to query * @ah_attr: return info in this * * Return: always 0 */ int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) { struct rvt_ah *ah = ibah_to_rvtah(ibah); *ah_attr = ah->attr; return 0; }
linux-master
drivers/infiniband/sw/rdmavt/ah.c
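rvt_create_ah() and rvt_destroy_ah() above keep the device within dparms.props.max_ah by testing and bumping n_ahs_allocated under n_ahs_lock and failing with -ENOMEM at the cap. Below is a user-space analogue of that bounded-counter pattern, with a pthread mutex standing in for the spinlock; the type and field names are invented for the example.

/*
 * Illustrative analogue of the AH accounting in rvt_create_ah() /
 * rvt_destroy_ah(): the count is only ever tested and changed under the
 * lock, so concurrent creators cannot exceed the cap.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct ah_table {
	pthread_mutex_t lock;	/* stands in for dev->n_ahs_lock */
	int n_allocated;	/* stands in for dev->n_ahs_allocated */
	int max_ah;		/* stands in for dev->dparms.props.max_ah */
};

static int ah_reserve(struct ah_table *t)
{
	pthread_mutex_lock(&t->lock);
	if (t->n_allocated == t->max_ah) {
		pthread_mutex_unlock(&t->lock);
		return -ENOMEM;		/* same failure the driver reports */
	}
	t->n_allocated++;
	pthread_mutex_unlock(&t->lock);
	return 0;
}

static void ah_release(struct ah_table *t)
{
	pthread_mutex_lock(&t->lock);
	t->n_allocated--;
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	struct ah_table t = { PTHREAD_MUTEX_INITIALIZER, 0, 2 };
	int i;

	for (i = 0; i < 3; i++)		/* third reservation hits the cap */
		printf("reserve %d -> %d\n", i, ah_reserve(&t));
	ah_release(&t);
	return 0;
}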
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ /* Fredy Neeser */ /* Greg Joyce <greg@opengridcomputing.com> */ /* Copyright (c) 2008-2019, IBM Corporation */ /* Copyright (c) 2017, Open Grid Computing, Inc. */ #include <linux/errno.h> #include <linux/types.h> #include <linux/net.h> #include <linux/inetdevice.h> #include <net/addrconf.h> #include <linux/workqueue.h> #include <net/sock.h> #include <net/tcp.h> #include <linux/inet.h> #include <linux/tcp.h> #include <trace/events/sock.h> #include <rdma/iw_cm.h> #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> #include "siw.h" #include "siw_cm.h" /* * Set to any combination of * MPA_V2_RDMA_NO_RTR, MPA_V2_RDMA_READ_RTR, MPA_V2_RDMA_WRITE_RTR */ static __be16 rtr_type = MPA_V2_RDMA_READ_RTR | MPA_V2_RDMA_WRITE_RTR; static const bool relaxed_ird_negotiation = true; static void siw_cm_llp_state_change(struct sock *s); static void siw_cm_llp_data_ready(struct sock *s); static void siw_cm_llp_write_space(struct sock *s); static void siw_cm_llp_error_report(struct sock *s); static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason, int status); static void siw_sk_assign_cm_upcalls(struct sock *sk) { write_lock_bh(&sk->sk_callback_lock); sk->sk_state_change = siw_cm_llp_state_change; sk->sk_data_ready = siw_cm_llp_data_ready; sk->sk_write_space = siw_cm_llp_write_space; sk->sk_error_report = siw_cm_llp_error_report; write_unlock_bh(&sk->sk_callback_lock); } static void siw_sk_save_upcalls(struct sock *sk) { struct siw_cep *cep = sk_to_cep(sk); write_lock_bh(&sk->sk_callback_lock); cep->sk_state_change = sk->sk_state_change; cep->sk_data_ready = sk->sk_data_ready; cep->sk_write_space = sk->sk_write_space; cep->sk_error_report = sk->sk_error_report; write_unlock_bh(&sk->sk_callback_lock); } static void siw_sk_restore_upcalls(struct sock *sk, struct siw_cep *cep) { sk->sk_state_change = cep->sk_state_change; sk->sk_data_ready = cep->sk_data_ready; sk->sk_write_space = cep->sk_write_space; sk->sk_error_report = cep->sk_error_report; sk->sk_user_data = NULL; } static void siw_qp_socket_assoc(struct siw_cep *cep, struct siw_qp *qp) { struct socket *s = cep->sock; struct sock *sk = s->sk; write_lock_bh(&sk->sk_callback_lock); qp->attrs.sk = s; sk->sk_data_ready = siw_qp_llp_data_ready; sk->sk_write_space = siw_qp_llp_write_space; write_unlock_bh(&sk->sk_callback_lock); } static void siw_socket_disassoc(struct socket *s) { struct sock *sk = s->sk; struct siw_cep *cep; if (sk) { write_lock_bh(&sk->sk_callback_lock); cep = sk_to_cep(sk); if (cep) { siw_sk_restore_upcalls(sk, cep); siw_cep_put(cep); } else { pr_warn("siw: cannot restore sk callbacks: no ep\n"); } write_unlock_bh(&sk->sk_callback_lock); } else { pr_warn("siw: cannot restore sk callbacks: no sk\n"); } } static void siw_rtr_data_ready(struct sock *sk) { struct siw_cep *cep; struct siw_qp *qp = NULL; read_descriptor_t rd_desc; trace_sk_data_ready(sk); read_lock(&sk->sk_callback_lock); cep = sk_to_cep(sk); if (!cep) { WARN(1, "No connection endpoint\n"); goto out; } qp = sk_to_qp(sk); memset(&rd_desc, 0, sizeof(rd_desc)); rd_desc.arg.data = qp; rd_desc.count = 1; tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data); /* * Check if first frame was successfully processed. * Signal connection full establishment if yes. * Failed data processing would have already scheduled * connection drop. 
*/ if (!qp->rx_stream.rx_suspend) siw_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0); out: read_unlock(&sk->sk_callback_lock); if (qp) siw_qp_socket_assoc(cep, qp); } static void siw_sk_assign_rtr_upcalls(struct siw_cep *cep) { struct sock *sk = cep->sock->sk; write_lock_bh(&sk->sk_callback_lock); sk->sk_data_ready = siw_rtr_data_ready; sk->sk_write_space = siw_qp_llp_write_space; write_unlock_bh(&sk->sk_callback_lock); } static void siw_cep_socket_assoc(struct siw_cep *cep, struct socket *s) { cep->sock = s; siw_cep_get(cep); s->sk->sk_user_data = cep; siw_sk_save_upcalls(s->sk); siw_sk_assign_cm_upcalls(s->sk); } static struct siw_cep *siw_cep_alloc(struct siw_device *sdev) { struct siw_cep *cep = kzalloc(sizeof(*cep), GFP_KERNEL); unsigned long flags; if (!cep) return NULL; INIT_LIST_HEAD(&cep->listenq); INIT_LIST_HEAD(&cep->devq); INIT_LIST_HEAD(&cep->work_freelist); kref_init(&cep->ref); cep->state = SIW_EPSTATE_IDLE; init_waitqueue_head(&cep->waitq); spin_lock_init(&cep->lock); cep->sdev = sdev; cep->enhanced_rdma_conn_est = false; spin_lock_irqsave(&sdev->lock, flags); list_add_tail(&cep->devq, &sdev->cep_list); spin_unlock_irqrestore(&sdev->lock, flags); siw_dbg_cep(cep, "new endpoint\n"); return cep; } static void siw_cm_free_work(struct siw_cep *cep) { struct list_head *w, *tmp; struct siw_cm_work *work; list_for_each_safe(w, tmp, &cep->work_freelist) { work = list_entry(w, struct siw_cm_work, list); list_del(&work->list); kfree(work); } } static void siw_cancel_mpatimer(struct siw_cep *cep) { spin_lock_bh(&cep->lock); if (cep->mpa_timer) { if (cancel_delayed_work(&cep->mpa_timer->work)) { siw_cep_put(cep); kfree(cep->mpa_timer); /* not needed again */ } cep->mpa_timer = NULL; } spin_unlock_bh(&cep->lock); } static void siw_put_work(struct siw_cm_work *work) { INIT_LIST_HEAD(&work->list); spin_lock_bh(&work->cep->lock); list_add(&work->list, &work->cep->work_freelist); spin_unlock_bh(&work->cep->lock); } static void siw_cep_set_inuse(struct siw_cep *cep) { unsigned long flags; retry: spin_lock_irqsave(&cep->lock, flags); if (cep->in_use) { spin_unlock_irqrestore(&cep->lock, flags); wait_event_interruptible(cep->waitq, !cep->in_use); if (signal_pending(current)) flush_signals(current); goto retry; } else { cep->in_use = 1; spin_unlock_irqrestore(&cep->lock, flags); } } static void siw_cep_set_free(struct siw_cep *cep) { unsigned long flags; spin_lock_irqsave(&cep->lock, flags); cep->in_use = 0; spin_unlock_irqrestore(&cep->lock, flags); wake_up(&cep->waitq); } static void __siw_cep_dealloc(struct kref *ref) { struct siw_cep *cep = container_of(ref, struct siw_cep, ref); struct siw_device *sdev = cep->sdev; unsigned long flags; WARN_ON(cep->listen_cep); /* kfree(NULL) is safe */ kfree(cep->mpa.pdata); spin_lock_bh(&cep->lock); if (!list_empty(&cep->work_freelist)) siw_cm_free_work(cep); spin_unlock_bh(&cep->lock); spin_lock_irqsave(&sdev->lock, flags); list_del(&cep->devq); spin_unlock_irqrestore(&sdev->lock, flags); siw_dbg_cep(cep, "free endpoint\n"); kfree(cep); } static struct siw_cm_work *siw_get_work(struct siw_cep *cep) { struct siw_cm_work *work = NULL; spin_lock_bh(&cep->lock); if (!list_empty(&cep->work_freelist)) { work = list_entry(cep->work_freelist.next, struct siw_cm_work, list); list_del_init(&work->list); } spin_unlock_bh(&cep->lock); return work; } static int siw_cm_alloc_work(struct siw_cep *cep, int num) { struct siw_cm_work *work; while (num--) { work = kmalloc(sizeof(*work), GFP_KERNEL); if (!work) { if (!(list_empty(&cep->work_freelist))) siw_cm_free_work(cep); 
return -ENOMEM; } work->cep = cep; INIT_LIST_HEAD(&work->list); list_add(&work->list, &cep->work_freelist); } return 0; } /* * siw_cm_upcall() * * Upcall to IWCM to inform about async connection events */ static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason, int status) { struct iw_cm_event event; struct iw_cm_id *id; memset(&event, 0, sizeof(event)); event.status = status; event.event = reason; if (reason == IW_CM_EVENT_CONNECT_REQUEST) { event.provider_data = cep; id = cep->listen_cep->cm_id; } else { id = cep->cm_id; } /* Signal IRD and ORD */ if (reason == IW_CM_EVENT_ESTABLISHED || reason == IW_CM_EVENT_CONNECT_REPLY) { /* Signal negotiated IRD/ORD values we will use */ event.ird = cep->ird; event.ord = cep->ord; } else if (reason == IW_CM_EVENT_CONNECT_REQUEST) { event.ird = cep->ord; event.ord = cep->ird; } /* Signal private data and address information */ if (reason == IW_CM_EVENT_CONNECT_REQUEST || reason == IW_CM_EVENT_CONNECT_REPLY) { u16 pd_len = be16_to_cpu(cep->mpa.hdr.params.pd_len); if (pd_len) { /* * hand over MPA private data */ event.private_data_len = pd_len; event.private_data = cep->mpa.pdata; /* Hide MPA V2 IRD/ORD control */ if (cep->enhanced_rdma_conn_est) { event.private_data_len -= sizeof(struct mpa_v2_data); event.private_data += sizeof(struct mpa_v2_data); } } getname_local(cep->sock, &event.local_addr); getname_peer(cep->sock, &event.remote_addr); } siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n", cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status); return id->event_handler(id, &event); } /* * siw_qp_cm_drop() * * Drops established LLP connection if present and not already * scheduled for dropping. Called from user context, SQ workqueue * or receive IRQ. Caller signals if socket can be immediately * closed (basically, if not in IRQ). 
*/ void siw_qp_cm_drop(struct siw_qp *qp, int schedule) { struct siw_cep *cep = qp->cep; qp->rx_stream.rx_suspend = 1; qp->tx_ctx.tx_suspend = 1; if (!qp->cep) return; if (schedule) { siw_cm_queue_work(cep, SIW_CM_WORK_CLOSE_LLP); } else { siw_cep_set_inuse(cep); if (cep->state == SIW_EPSTATE_CLOSED) { siw_dbg_cep(cep, "already closed\n"); goto out; } siw_dbg_cep(cep, "immediate close, state %d\n", cep->state); if (qp->term_info.valid) siw_send_terminate(qp); if (cep->cm_id) { switch (cep->state) { case SIW_EPSTATE_AWAIT_MPAREP: siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL); break; case SIW_EPSTATE_RDMA_MODE: siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0); break; case SIW_EPSTATE_IDLE: case SIW_EPSTATE_LISTENING: case SIW_EPSTATE_CONNECTING: case SIW_EPSTATE_AWAIT_MPAREQ: case SIW_EPSTATE_RECVD_MPAREQ: case SIW_EPSTATE_CLOSED: default: break; } cep->cm_id->rem_ref(cep->cm_id); cep->cm_id = NULL; siw_cep_put(cep); } cep->state = SIW_EPSTATE_CLOSED; if (cep->sock) { siw_socket_disassoc(cep->sock); /* * Immediately close socket */ sock_release(cep->sock); cep->sock = NULL; } if (cep->qp) { cep->qp = NULL; siw_qp_put(qp); } out: siw_cep_set_free(cep); } } void siw_cep_put(struct siw_cep *cep) { WARN_ON(kref_read(&cep->ref) < 1); kref_put(&cep->ref, __siw_cep_dealloc); } void siw_cep_get(struct siw_cep *cep) { kref_get(&cep->ref); } /* * Expects params->pd_len in host byte order */ static int siw_send_mpareqrep(struct siw_cep *cep, const void *pdata, u8 pd_len) { struct socket *s = cep->sock; struct mpa_rr *rr = &cep->mpa.hdr; struct kvec iov[3]; struct msghdr msg; int rv; int iovec_num = 0; int mpa_len; memset(&msg, 0, sizeof(msg)); iov[iovec_num].iov_base = rr; iov[iovec_num].iov_len = sizeof(*rr); mpa_len = sizeof(*rr); if (cep->enhanced_rdma_conn_est) { iovec_num++; iov[iovec_num].iov_base = &cep->mpa.v2_ctrl; iov[iovec_num].iov_len = sizeof(cep->mpa.v2_ctrl); mpa_len += sizeof(cep->mpa.v2_ctrl); } if (pd_len) { iovec_num++; iov[iovec_num].iov_base = (char *)pdata; iov[iovec_num].iov_len = pd_len; mpa_len += pd_len; } if (cep->enhanced_rdma_conn_est) pd_len += sizeof(cep->mpa.v2_ctrl); rr->params.pd_len = cpu_to_be16(pd_len); rv = kernel_sendmsg(s, &msg, iov, iovec_num + 1, mpa_len); return rv < 0 ? rv : 0; } /* * Receive MPA Request/Reply header. * * Returns 0 if complete MPA Request/Reply header including * eventual private data was received. Returns -EAGAIN if * header was partially received or negative error code otherwise. * * Context: May be called in process context only */ static int siw_recv_mpa_rr(struct siw_cep *cep) { struct mpa_rr *hdr = &cep->mpa.hdr; struct socket *s = cep->sock; u16 pd_len; int rcvd, to_rcv; if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr)) { rcvd = ksock_recv(s, (char *)hdr + cep->mpa.bytes_rcvd, sizeof(struct mpa_rr) - cep->mpa.bytes_rcvd, 0); if (rcvd <= 0) return -ECONNABORTED; cep->mpa.bytes_rcvd += rcvd; if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr)) return -EAGAIN; if (be16_to_cpu(hdr->params.pd_len) > MPA_MAX_PRIVDATA) return -EPROTO; } pd_len = be16_to_cpu(hdr->params.pd_len); /* * At least the MPA Request/Reply header (frame not including * private data) has been received. * Receive (or continue receiving) any private data. */ to_rcv = pd_len - (cep->mpa.bytes_rcvd - sizeof(struct mpa_rr)); if (!to_rcv) { /* * We must have hdr->params.pd_len == 0 and thus received a * complete MPA Request/Reply frame. * Check against peer protocol violation. 
*/ u32 word; rcvd = ksock_recv(s, (char *)&word, sizeof(word), MSG_DONTWAIT); if (rcvd == -EAGAIN) return 0; if (rcvd == 0) { siw_dbg_cep(cep, "peer EOF\n"); return -EPIPE; } if (rcvd < 0) { siw_dbg_cep(cep, "error: %d\n", rcvd); return rcvd; } siw_dbg_cep(cep, "peer sent extra data: %d\n", rcvd); return -EPROTO; } /* * At this point, we must have hdr->params.pd_len != 0. * A private data buffer gets allocated if hdr->params.pd_len != 0. */ if (!cep->mpa.pdata) { cep->mpa.pdata = kmalloc(pd_len + 4, GFP_KERNEL); if (!cep->mpa.pdata) return -ENOMEM; } rcvd = ksock_recv( s, cep->mpa.pdata + cep->mpa.bytes_rcvd - sizeof(struct mpa_rr), to_rcv + 4, MSG_DONTWAIT); if (rcvd < 0) return rcvd; if (rcvd > to_rcv) return -EPROTO; cep->mpa.bytes_rcvd += rcvd; if (to_rcv == rcvd) { siw_dbg_cep(cep, "%d bytes private data received\n", pd_len); return 0; } return -EAGAIN; } /* * siw_proc_mpareq() * * Read MPA Request from socket and signal new connection to IWCM * if success. Caller must hold lock on corresponding listening CEP. */ static int siw_proc_mpareq(struct siw_cep *cep) { struct mpa_rr *req; int version, rv; u16 pd_len; rv = siw_recv_mpa_rr(cep); if (rv) return rv; req = &cep->mpa.hdr; version = __mpa_rr_revision(req->params.bits); pd_len = be16_to_cpu(req->params.pd_len); if (version > MPA_REVISION_2) /* allow for 0, 1, and 2 only */ return -EPROTO; if (memcmp(req->key, MPA_KEY_REQ, 16)) return -EPROTO; /* Prepare for sending MPA reply */ memcpy(req->key, MPA_KEY_REP, 16); if (version == MPA_REVISION_2 && (req->params.bits & MPA_RR_FLAG_ENHANCED)) { /* * MPA version 2 must signal IRD/ORD values and P2P mode * in private data if header flag MPA_RR_FLAG_ENHANCED * is set. */ if (pd_len < sizeof(struct mpa_v2_data)) goto reject_conn; cep->enhanced_rdma_conn_est = true; } /* MPA Markers: currently not supported. Marker TX to be added. */ if (req->params.bits & MPA_RR_FLAG_MARKERS) goto reject_conn; if (req->params.bits & MPA_RR_FLAG_CRC) { /* * RFC 5044, page 27: CRC MUST be used if peer requests it. * siw specific: 'mpa_crc_strict' parameter to reject * connection with CRC if local CRC off enforced by * 'mpa_crc_strict' module parameter. */ if (!mpa_crc_required && mpa_crc_strict) goto reject_conn; /* Enable CRC if requested by module parameter */ if (mpa_crc_required) req->params.bits |= MPA_RR_FLAG_CRC; } if (cep->enhanced_rdma_conn_est) { struct mpa_v2_data *v2 = (struct mpa_v2_data *)cep->mpa.pdata; /* * Peer requested ORD becomes requested local IRD, * peer requested IRD becomes requested local ORD. * IRD and ORD get limited by global maximum values. */ cep->ord = ntohs(v2->ird) & MPA_IRD_ORD_MASK; cep->ord = min(cep->ord, SIW_MAX_ORD_QP); cep->ird = ntohs(v2->ord) & MPA_IRD_ORD_MASK; cep->ird = min(cep->ird, SIW_MAX_IRD_QP); /* May get overwritten by locally negotiated values */ cep->mpa.v2_ctrl.ird = htons(cep->ird); cep->mpa.v2_ctrl.ord = htons(cep->ord); /* * Support for peer sent zero length Write or Read to * let local side enter RTS. Writes are preferred. * Sends would require pre-posting a Receive and are * not supported. * Propose zero length Write if none of Read and Write * is indicated. 
*/ if (v2->ird & MPA_V2_PEER_TO_PEER) { cep->mpa.v2_ctrl.ird |= MPA_V2_PEER_TO_PEER; if (v2->ord & MPA_V2_RDMA_WRITE_RTR) cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_WRITE_RTR; else if (v2->ord & MPA_V2_RDMA_READ_RTR) cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_READ_RTR; else cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_WRITE_RTR; } } cep->state = SIW_EPSTATE_RECVD_MPAREQ; /* Keep reference until IWCM accepts/rejects */ siw_cep_get(cep); rv = siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REQUEST, 0); if (rv) siw_cep_put(cep); return rv; reject_conn: siw_dbg_cep(cep, "reject: crc %d:%d:%d, m %d:%d\n", req->params.bits & MPA_RR_FLAG_CRC ? 1 : 0, mpa_crc_required, mpa_crc_strict, req->params.bits & MPA_RR_FLAG_MARKERS ? 1 : 0, 0); req->params.bits &= ~MPA_RR_FLAG_MARKERS; req->params.bits |= MPA_RR_FLAG_REJECT; if (!mpa_crc_required && mpa_crc_strict) req->params.bits &= ~MPA_RR_FLAG_CRC; if (pd_len) kfree(cep->mpa.pdata); cep->mpa.pdata = NULL; siw_send_mpareqrep(cep, NULL, 0); return -EOPNOTSUPP; } static int siw_proc_mpareply(struct siw_cep *cep) { struct siw_qp_attrs qp_attrs; enum siw_qp_attr_mask qp_attr_mask; struct siw_qp *qp = cep->qp; struct mpa_rr *rep; int rv; u16 rep_ord; u16 rep_ird; bool ird_insufficient = false; enum mpa_v2_ctrl mpa_p2p_mode = MPA_V2_RDMA_NO_RTR; rv = siw_recv_mpa_rr(cep); if (rv) goto out_err; siw_cancel_mpatimer(cep); rep = &cep->mpa.hdr; if (__mpa_rr_revision(rep->params.bits) > MPA_REVISION_2) { /* allow for 0, 1, and 2 only */ rv = -EPROTO; goto out_err; } if (memcmp(rep->key, MPA_KEY_REP, 16)) { siw_init_terminate(qp, TERM_ERROR_LAYER_LLP, LLP_ETYPE_MPA, LLP_ECODE_INVALID_REQ_RESP, 0); siw_send_terminate(qp); rv = -EPROTO; goto out_err; } if (rep->params.bits & MPA_RR_FLAG_REJECT) { siw_dbg_cep(cep, "got mpa reject\n"); siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNRESET); return -ECONNRESET; } if (try_gso && rep->params.bits & MPA_RR_FLAG_GSO_EXP) { siw_dbg_cep(cep, "peer allows GSO on TX\n"); qp->tx_ctx.gso_seg_limit = 0; } if ((rep->params.bits & MPA_RR_FLAG_MARKERS) || (mpa_crc_required && !(rep->params.bits & MPA_RR_FLAG_CRC)) || (mpa_crc_strict && !mpa_crc_required && (rep->params.bits & MPA_RR_FLAG_CRC))) { siw_dbg_cep(cep, "reply unsupp: crc %d:%d:%d, m %d:%d\n", rep->params.bits & MPA_RR_FLAG_CRC ? 1 : 0, mpa_crc_required, mpa_crc_strict, rep->params.bits & MPA_RR_FLAG_MARKERS ? 1 : 0, 0); siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED); return -EINVAL; } if (cep->enhanced_rdma_conn_est) { struct mpa_v2_data *v2; if (__mpa_rr_revision(rep->params.bits) < MPA_REVISION_2 || !(rep->params.bits & MPA_RR_FLAG_ENHANCED)) { /* * Protocol failure: The responder MUST reply with * MPA version 2 and MUST set MPA_RR_FLAG_ENHANCED. */ siw_dbg_cep(cep, "mpa reply error: vers %d, enhcd %d\n", __mpa_rr_revision(rep->params.bits), rep->params.bits & MPA_RR_FLAG_ENHANCED ? 
1 : 0); siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNRESET); return -EINVAL; } v2 = (struct mpa_v2_data *)cep->mpa.pdata; rep_ird = ntohs(v2->ird) & MPA_IRD_ORD_MASK; rep_ord = ntohs(v2->ord) & MPA_IRD_ORD_MASK; if (cep->ird < rep_ord && (relaxed_ird_negotiation == false || rep_ord > cep->sdev->attrs.max_ird)) { siw_dbg_cep(cep, "ird %d, rep_ord %d, max_ord %d\n", cep->ird, rep_ord, cep->sdev->attrs.max_ord); ird_insufficient = true; } if (cep->ord > rep_ird && relaxed_ird_negotiation == false) { siw_dbg_cep(cep, "ord %d, rep_ird %d\n", cep->ord, rep_ird); ird_insufficient = true; } /* * Always report negotiated peer values to user, * even if IRD/ORD negotiation failed */ cep->ird = rep_ord; cep->ord = rep_ird; if (ird_insufficient) { /* * If the initiator IRD is insuffient for the * responder ORD, send a TERM. */ siw_init_terminate(qp, TERM_ERROR_LAYER_LLP, LLP_ETYPE_MPA, LLP_ECODE_INSUFFICIENT_IRD, 0); siw_send_terminate(qp); rv = -ENOMEM; goto out_err; } if (cep->mpa.v2_ctrl_req.ird & MPA_V2_PEER_TO_PEER) mpa_p2p_mode = cep->mpa.v2_ctrl_req.ord & (MPA_V2_RDMA_WRITE_RTR | MPA_V2_RDMA_READ_RTR); /* * Check if we requested P2P mode, and if peer agrees */ if (mpa_p2p_mode != MPA_V2_RDMA_NO_RTR) { if ((mpa_p2p_mode & v2->ord) == 0) { /* * We requested RTR mode(s), but the peer * did not pick any mode we support. */ siw_dbg_cep(cep, "rtr mode: req %2x, got %2x\n", mpa_p2p_mode, v2->ord & (MPA_V2_RDMA_WRITE_RTR | MPA_V2_RDMA_READ_RTR)); siw_init_terminate(qp, TERM_ERROR_LAYER_LLP, LLP_ETYPE_MPA, LLP_ECODE_NO_MATCHING_RTR, 0); siw_send_terminate(qp); rv = -EPROTO; goto out_err; } mpa_p2p_mode = v2->ord & (MPA_V2_RDMA_WRITE_RTR | MPA_V2_RDMA_READ_RTR); } } memset(&qp_attrs, 0, sizeof(qp_attrs)); if (rep->params.bits & MPA_RR_FLAG_CRC) qp_attrs.flags = SIW_MPA_CRC; qp_attrs.irq_size = cep->ird; qp_attrs.orq_size = cep->ord; qp_attrs.sk = cep->sock; qp_attrs.state = SIW_QP_STATE_RTS; qp_attr_mask = SIW_QP_ATTR_STATE | SIW_QP_ATTR_LLP_HANDLE | SIW_QP_ATTR_ORD | SIW_QP_ATTR_IRD | SIW_QP_ATTR_MPA; /* Move socket RX/TX under QP control */ down_write(&qp->state_lock); if (qp->attrs.state > SIW_QP_STATE_RTR) { rv = -EINVAL; up_write(&qp->state_lock); goto out_err; } rv = siw_qp_modify(qp, &qp_attrs, qp_attr_mask); siw_qp_socket_assoc(cep, qp); up_write(&qp->state_lock); /* Send extra RDMA frame to trigger peer RTS if negotiated */ if (mpa_p2p_mode != MPA_V2_RDMA_NO_RTR) { rv = siw_qp_mpa_rts(qp, mpa_p2p_mode); if (rv) goto out_err; } if (!rv) { rv = siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, 0); if (!rv) cep->state = SIW_EPSTATE_RDMA_MODE; return 0; } out_err: if (rv != -EAGAIN) siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL); return rv; } /* * siw_accept_newconn - accept an incoming pending connection * */ static void siw_accept_newconn(struct siw_cep *cep) { struct socket *s = cep->sock; struct socket *new_s = NULL; struct siw_cep *new_cep = NULL; int rv = 0; /* debug only. should disappear */ if (cep->state != SIW_EPSTATE_LISTENING) goto error; new_cep = siw_cep_alloc(cep->sdev); if (!new_cep) goto error; /* * 4: Allocate a sufficient number of work elements * to allow concurrent handling of local + peer close * events, MPA header processing + MPA timeout. 
*/ if (siw_cm_alloc_work(new_cep, 4) != 0) goto error; /* * Copy saved socket callbacks from listening CEP * and assign new socket with new CEP */ new_cep->sk_state_change = cep->sk_state_change; new_cep->sk_data_ready = cep->sk_data_ready; new_cep->sk_write_space = cep->sk_write_space; new_cep->sk_error_report = cep->sk_error_report; rv = kernel_accept(s, &new_s, O_NONBLOCK); if (rv != 0) { /* * Connection already aborted by peer..? */ siw_dbg_cep(cep, "kernel_accept() error: %d\n", rv); goto error; } new_cep->sock = new_s; siw_cep_get(new_cep); new_s->sk->sk_user_data = new_cep; if (siw_tcp_nagle == false) tcp_sock_set_nodelay(new_s->sk); new_cep->state = SIW_EPSTATE_AWAIT_MPAREQ; rv = siw_cm_queue_work(new_cep, SIW_CM_WORK_MPATIMEOUT); if (rv) goto error; /* * See siw_proc_mpareq() etc. for the use of new_cep->listen_cep. */ new_cep->listen_cep = cep; siw_cep_get(cep); if (atomic_read(&new_s->sk->sk_rmem_alloc)) { /* * MPA REQ already queued */ siw_dbg_cep(cep, "immediate mpa request\n"); siw_cep_set_inuse(new_cep); rv = siw_proc_mpareq(new_cep); if (rv != -EAGAIN) { siw_cep_put(cep); new_cep->listen_cep = NULL; if (rv) { siw_cep_set_free(new_cep); goto error; } } siw_cep_set_free(new_cep); } return; error: if (new_cep) siw_cep_put(new_cep); if (new_s) { siw_socket_disassoc(new_s); sock_release(new_s); new_cep->sock = NULL; } siw_dbg_cep(cep, "error %d\n", rv); } static void siw_cm_work_handler(struct work_struct *w) { struct siw_cm_work *work; struct siw_cep *cep; int release_cep = 0, rv = 0; work = container_of(w, struct siw_cm_work, work.work); cep = work->cep; siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n", cep->qp ? qp_id(cep->qp) : UINT_MAX, work->type, cep->state); siw_cep_set_inuse(cep); switch (work->type) { case SIW_CM_WORK_ACCEPT: siw_accept_newconn(cep); break; case SIW_CM_WORK_READ_MPAHDR: if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) { if (cep->listen_cep) { siw_cep_set_inuse(cep->listen_cep); if (cep->listen_cep->state == SIW_EPSTATE_LISTENING) rv = siw_proc_mpareq(cep); else rv = -EFAULT; siw_cep_set_free(cep->listen_cep); if (rv != -EAGAIN) { siw_cep_put(cep->listen_cep); cep->listen_cep = NULL; if (rv) siw_cep_put(cep); } } } else if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) { rv = siw_proc_mpareply(cep); } else { /* * CEP already moved out of MPA handshake. * any connection management already done. * silently ignore the mpa packet. */ if (cep->state == SIW_EPSTATE_RDMA_MODE) { cep->sock->sk->sk_data_ready(cep->sock->sk); siw_dbg_cep(cep, "already in RDMA mode"); } else { siw_dbg_cep(cep, "out of state: %d\n", cep->state); } } if (rv && rv != -EAGAIN) release_cep = 1; break; case SIW_CM_WORK_CLOSE_LLP: /* * QP scheduled LLP close */ if (cep->qp && cep->qp->term_info.valid) siw_send_terminate(cep->qp); if (cep->cm_id) siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0); release_cep = 1; break; case SIW_CM_WORK_PEER_CLOSE: if (cep->cm_id) { if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) { /* * MPA reply not received, but connection drop */ siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNRESET); } else if (cep->state == SIW_EPSTATE_RDMA_MODE) { /* * NOTE: IW_CM_EVENT_DISCONNECT is given just * to transition IWCM into CLOSING. */ siw_cm_upcall(cep, IW_CM_EVENT_DISCONNECT, 0); siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0); } /* * for other states there is no connection * known to the IWCM. 
*/ } else { if (cep->state == SIW_EPSTATE_RECVD_MPAREQ) { /* * Wait for the ulp/CM to call accept/reject */ siw_dbg_cep(cep, "mpa req recvd, wait for ULP\n"); } else if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) { /* * Socket close before MPA request received. */ siw_dbg_cep(cep, "no mpareq: drop listener\n"); siw_cep_put(cep->listen_cep); cep->listen_cep = NULL; } } release_cep = 1; break; case SIW_CM_WORK_MPATIMEOUT: cep->mpa_timer = NULL; if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) { /* * MPA request timed out: * Hide any partially received private data and signal * timeout */ cep->mpa.hdr.params.pd_len = 0; if (cep->cm_id) siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ETIMEDOUT); release_cep = 1; } else if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) { /* * No MPA request received after peer TCP stream setup. */ if (cep->listen_cep) { siw_cep_put(cep->listen_cep); cep->listen_cep = NULL; } release_cep = 1; } break; default: WARN(1, "Undefined CM work type: %d\n", work->type); } if (release_cep) { siw_dbg_cep(cep, "release: timer=%s, QP[%u]\n", cep->mpa_timer ? "y" : "n", cep->qp ? qp_id(cep->qp) : UINT_MAX); siw_cancel_mpatimer(cep); cep->state = SIW_EPSTATE_CLOSED; if (cep->qp) { struct siw_qp *qp = cep->qp; /* * Serialize a potential race with application * closing the QP and calling siw_qp_cm_drop() */ siw_qp_get(qp); siw_cep_set_free(cep); siw_qp_llp_close(qp); siw_qp_put(qp); siw_cep_set_inuse(cep); cep->qp = NULL; siw_qp_put(qp); } if (cep->sock) { siw_socket_disassoc(cep->sock); sock_release(cep->sock); cep->sock = NULL; } if (cep->cm_id) { cep->cm_id->rem_ref(cep->cm_id); cep->cm_id = NULL; siw_cep_put(cep); } } siw_cep_set_free(cep); siw_put_work(work); siw_cep_put(cep); } static struct workqueue_struct *siw_cm_wq; int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type) { struct siw_cm_work *work = siw_get_work(cep); unsigned long delay = 0; if (!work) { siw_dbg_cep(cep, "failed with no work available\n"); return -ENOMEM; } work->type = type; work->cep = cep; siw_cep_get(cep); INIT_DELAYED_WORK(&work->work, siw_cm_work_handler); if (type == SIW_CM_WORK_MPATIMEOUT) { cep->mpa_timer = work; if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) delay = MPAREQ_TIMEOUT; else delay = MPAREP_TIMEOUT; } siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n", cep->qp ? 
qp_id(cep->qp) : -1, type, delay); queue_delayed_work(siw_cm_wq, &work->work, delay); return 0; } static void siw_cm_llp_data_ready(struct sock *sk) { struct siw_cep *cep; trace_sk_data_ready(sk); read_lock(&sk->sk_callback_lock); cep = sk_to_cep(sk); if (!cep) goto out; siw_dbg_cep(cep, "state: %d\n", cep->state); switch (cep->state) { case SIW_EPSTATE_RDMA_MODE: case SIW_EPSTATE_LISTENING: break; case SIW_EPSTATE_AWAIT_MPAREQ: case SIW_EPSTATE_AWAIT_MPAREP: siw_cm_queue_work(cep, SIW_CM_WORK_READ_MPAHDR); break; default: siw_dbg_cep(cep, "unexpected data, state %d\n", cep->state); break; } out: read_unlock(&sk->sk_callback_lock); } static void siw_cm_llp_write_space(struct sock *sk) { struct siw_cep *cep = sk_to_cep(sk); if (cep) siw_dbg_cep(cep, "state: %d\n", cep->state); } static void siw_cm_llp_error_report(struct sock *sk) { struct siw_cep *cep = sk_to_cep(sk); if (cep) { siw_dbg_cep(cep, "error %d, socket state: %d, cep state: %d\n", sk->sk_err, sk->sk_state, cep->state); cep->sk_error_report(sk); } } static void siw_cm_llp_state_change(struct sock *sk) { struct siw_cep *cep; void (*orig_state_change)(struct sock *s); read_lock(&sk->sk_callback_lock); cep = sk_to_cep(sk); if (!cep) { /* endpoint already disassociated */ read_unlock(&sk->sk_callback_lock); return; } orig_state_change = cep->sk_state_change; siw_dbg_cep(cep, "state: %d\n", cep->state); switch (sk->sk_state) { case TCP_ESTABLISHED: /* * handle accepting socket as special case where only * new connection is possible */ siw_cm_queue_work(cep, SIW_CM_WORK_ACCEPT); break; case TCP_CLOSE: case TCP_CLOSE_WAIT: if (cep->qp) cep->qp->tx_ctx.tx_suspend = 1; siw_cm_queue_work(cep, SIW_CM_WORK_PEER_CLOSE); break; default: siw_dbg_cep(cep, "unexpected socket state %d\n", sk->sk_state); } read_unlock(&sk->sk_callback_lock); orig_state_change(sk); } static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr, struct sockaddr *raddr, bool afonly) { int rv, flags = 0; size_t size = laddr->sa_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6); /* * Make address available again asap. */ sock_set_reuseaddr(s->sk); if (afonly) { rv = ip6_sock_set_v6only(s->sk); if (rv) return rv; } rv = s->ops->bind(s, laddr, size); if (rv < 0) return rv; rv = s->ops->connect(s, raddr, size, flags); return rv < 0 ? rv : 0; } int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params) { struct siw_device *sdev = to_siw_dev(id->device); struct siw_qp *qp; struct siw_cep *cep = NULL; struct socket *s = NULL; struct sockaddr *laddr = (struct sockaddr *)&id->local_addr, *raddr = (struct sockaddr *)&id->remote_addr; bool p2p_mode = peer_to_peer, v4 = true; u16 pd_len = params->private_data_len; int version = mpa_version, rv; if (pd_len > MPA_MAX_PRIVDATA) return -EINVAL; if (params->ird > sdev->attrs.max_ird || params->ord > sdev->attrs.max_ord) return -ENOMEM; if (laddr->sa_family == AF_INET6) v4 = false; else if (laddr->sa_family != AF_INET) return -EAFNOSUPPORT; /* * Respect any iwarp port mapping: Use mapped remote address * if valid. Local address must not be mapped, since siw * uses kernel TCP stack. */ if ((v4 && to_sockaddr_in(id->remote_addr).sin_port != 0) || to_sockaddr_in6(id->remote_addr).sin6_port != 0) raddr = (struct sockaddr *)&id->m_remote_addr; qp = siw_qp_id2obj(sdev, params->qpn); if (!qp) { WARN(1, "[QP %u] does not exist\n", params->qpn); rv = -EINVAL; goto error; } siw_dbg_qp(qp, "pd_len %d, laddr %pISp, raddr %pISp\n", pd_len, laddr, raddr); rv = sock_create(v4 ? 
AF_INET : AF_INET6, SOCK_STREAM, IPPROTO_TCP, &s); if (rv < 0) goto error; /* * NOTE: For simplification, connect() is called in blocking * mode. Might be reconsidered for async connection setup at * TCP level. */ rv = kernel_bindconnect(s, laddr, raddr, id->afonly); if (rv != 0) { siw_dbg_qp(qp, "kernel_bindconnect: error %d\n", rv); goto error; } if (siw_tcp_nagle == false) tcp_sock_set_nodelay(s->sk); cep = siw_cep_alloc(sdev); if (!cep) { rv = -ENOMEM; goto error; } siw_cep_set_inuse(cep); /* Associate QP with CEP */ siw_cep_get(cep); qp->cep = cep; /* siw_qp_get(qp) already done by QP lookup */ cep->qp = qp; id->add_ref(id); cep->cm_id = id; /* * 4: Allocate a sufficient number of work elements * to allow concurrent handling of local + peer close * events, MPA header processing + MPA timeout. */ rv = siw_cm_alloc_work(cep, 4); if (rv != 0) { rv = -ENOMEM; goto error; } cep->ird = params->ird; cep->ord = params->ord; if (p2p_mode && cep->ord == 0) cep->ord = 1; cep->state = SIW_EPSTATE_CONNECTING; /* * Associate CEP with socket */ siw_cep_socket_assoc(cep, s); cep->state = SIW_EPSTATE_AWAIT_MPAREP; /* * Set MPA Request bits: CRC if required, no MPA Markers, * MPA Rev. according to module parameter 'mpa_version', Key 'Request'. */ cep->mpa.hdr.params.bits = 0; if (version > MPA_REVISION_2) { pr_warn("Setting MPA version to %u\n", MPA_REVISION_2); version = MPA_REVISION_2; /* Adjust also module parameter */ mpa_version = MPA_REVISION_2; } __mpa_rr_set_revision(&cep->mpa.hdr.params.bits, version); if (try_gso) cep->mpa.hdr.params.bits |= MPA_RR_FLAG_GSO_EXP; if (mpa_crc_required) cep->mpa.hdr.params.bits |= MPA_RR_FLAG_CRC; /* * If MPA version == 2: * o Include ORD and IRD. * o Indicate peer-to-peer mode, if required by module * parameter 'peer_to_peer'. */ if (version == MPA_REVISION_2) { cep->enhanced_rdma_conn_est = true; cep->mpa.hdr.params.bits |= MPA_RR_FLAG_ENHANCED; cep->mpa.v2_ctrl.ird = htons(cep->ird); cep->mpa.v2_ctrl.ord = htons(cep->ord); if (p2p_mode) { cep->mpa.v2_ctrl.ird |= MPA_V2_PEER_TO_PEER; cep->mpa.v2_ctrl.ord |= rtr_type; } /* Remember own P2P mode requested */ cep->mpa.v2_ctrl_req.ird = cep->mpa.v2_ctrl.ird; cep->mpa.v2_ctrl_req.ord = cep->mpa.v2_ctrl.ord; } memcpy(cep->mpa.hdr.key, MPA_KEY_REQ, 16); rv = siw_send_mpareqrep(cep, params->private_data, pd_len); /* * Reset private data. */ cep->mpa.hdr.params.pd_len = 0; if (rv >= 0) { rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT); if (!rv) { siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp)); siw_cep_set_free(cep); return 0; } } error: siw_dbg(id->device, "failed: %d\n", rv); if (cep) { siw_socket_disassoc(s); sock_release(s); cep->sock = NULL; cep->qp = NULL; cep->cm_id = NULL; id->rem_ref(id); qp->cep = NULL; siw_cep_put(cep); cep->state = SIW_EPSTATE_CLOSED; siw_cep_set_free(cep); siw_cep_put(cep); } else if (s) { sock_release(s); } if (qp) siw_qp_put(qp); return rv; } /* * siw_accept - Let SoftiWARP accept an RDMA connection request * * @id: New connection management id to be used for accepted * connection request * @params: Connection parameters provided by ULP for accepting connection * * Transition QP to RTS state, associate new CM id @id with accepted CEP * and get prepared for TCP input by installing socket callbacks. * Then send MPA Reply and generate the "connection established" event. * Socket callbacks must be installed before sending MPA Reply, because * the latter may cause a first RDMA message to arrive from the RDMA Initiator * side very quickly, at which time the socket callbacks must be ready. 
*/ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params) { struct siw_device *sdev = to_siw_dev(id->device); struct siw_cep *cep = (struct siw_cep *)id->provider_data; struct siw_qp *qp; struct siw_qp_attrs qp_attrs; int rv, max_priv_data = MPA_MAX_PRIVDATA; bool wait_for_peer_rts = false; siw_cep_set_inuse(cep); siw_cep_put(cep); /* Free lingering inbound private data */ if (cep->mpa.hdr.params.pd_len) { cep->mpa.hdr.params.pd_len = 0; kfree(cep->mpa.pdata); cep->mpa.pdata = NULL; } siw_cancel_mpatimer(cep); if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { siw_dbg_cep(cep, "out of state\n"); siw_cep_set_free(cep); siw_cep_put(cep); return -ECONNRESET; } qp = siw_qp_id2obj(sdev, params->qpn); if (!qp) { WARN(1, "[QP %d] does not exist\n", params->qpn); siw_cep_set_free(cep); siw_cep_put(cep); return -EINVAL; } down_write(&qp->state_lock); if (qp->attrs.state > SIW_QP_STATE_RTR) { rv = -EINVAL; up_write(&qp->state_lock); goto error; } siw_dbg_cep(cep, "[QP %d]\n", params->qpn); if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) { siw_dbg_cep(cep, "peer allows GSO on TX\n"); qp->tx_ctx.gso_seg_limit = 0; } if (params->ord > sdev->attrs.max_ord || params->ird > sdev->attrs.max_ird) { siw_dbg_cep( cep, "[QP %u]: ord %d (max %d), ird %d (max %d)\n", qp_id(qp), params->ord, sdev->attrs.max_ord, params->ird, sdev->attrs.max_ird); rv = -EINVAL; up_write(&qp->state_lock); goto error; } if (cep->enhanced_rdma_conn_est) max_priv_data -= sizeof(struct mpa_v2_data); if (params->private_data_len > max_priv_data) { siw_dbg_cep( cep, "[QP %u]: private data length: %d (max %d)\n", qp_id(qp), params->private_data_len, max_priv_data); rv = -EINVAL; up_write(&qp->state_lock); goto error; } if (cep->enhanced_rdma_conn_est) { if (params->ord > cep->ord) { if (relaxed_ird_negotiation) { params->ord = cep->ord; } else { cep->ird = params->ird; cep->ord = params->ord; rv = -EINVAL; up_write(&qp->state_lock); goto error; } } if (params->ird < cep->ird) { if (relaxed_ird_negotiation && cep->ird <= sdev->attrs.max_ird) params->ird = cep->ird; else { rv = -ENOMEM; up_write(&qp->state_lock); goto error; } } if (cep->mpa.v2_ctrl.ord & (MPA_V2_RDMA_WRITE_RTR | MPA_V2_RDMA_READ_RTR)) wait_for_peer_rts = true; /* * Signal back negotiated IRD and ORD values */ cep->mpa.v2_ctrl.ord = htons(params->ord & MPA_IRD_ORD_MASK) | (cep->mpa.v2_ctrl.ord & ~MPA_V2_MASK_IRD_ORD); cep->mpa.v2_ctrl.ird = htons(params->ird & MPA_IRD_ORD_MASK) | (cep->mpa.v2_ctrl.ird & ~MPA_V2_MASK_IRD_ORD); } cep->ird = params->ird; cep->ord = params->ord; cep->cm_id = id; id->add_ref(id); memset(&qp_attrs, 0, sizeof(qp_attrs)); qp_attrs.orq_size = cep->ord; qp_attrs.irq_size = cep->ird; qp_attrs.sk = cep->sock; if (cep->mpa.hdr.params.bits & MPA_RR_FLAG_CRC) qp_attrs.flags = SIW_MPA_CRC; qp_attrs.state = SIW_QP_STATE_RTS; siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp)); /* Associate QP with CEP */ siw_cep_get(cep); qp->cep = cep; /* siw_qp_get(qp) already done by QP lookup */ cep->qp = qp; cep->state = SIW_EPSTATE_RDMA_MODE; /* Move socket RX/TX under QP control */ rv = siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE | SIW_QP_ATTR_LLP_HANDLE | SIW_QP_ATTR_ORD | SIW_QP_ATTR_IRD | SIW_QP_ATTR_MPA); up_write(&qp->state_lock); if (rv) goto error; siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n", qp_id(qp), params->private_data_len); rv = siw_send_mpareqrep(cep, params->private_data, params->private_data_len); if (rv != 0) goto error; if (wait_for_peer_rts) { siw_sk_assign_rtr_upcalls(cep); } else { 
siw_qp_socket_assoc(cep, qp); rv = siw_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0); if (rv) goto error; } siw_cep_set_free(cep); return 0; error: siw_socket_disassoc(cep->sock); sock_release(cep->sock); cep->sock = NULL; cep->state = SIW_EPSTATE_CLOSED; if (cep->cm_id) { cep->cm_id->rem_ref(id); cep->cm_id = NULL; } if (qp->cep) { siw_cep_put(cep); qp->cep = NULL; } cep->qp = NULL; siw_qp_put(qp); siw_cep_set_free(cep); siw_cep_put(cep); return rv; } /* * siw_reject() * * Local connection reject case. Send private data back to peer, * close connection and dereference connection id. */ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len) { struct siw_cep *cep = (struct siw_cep *)id->provider_data; siw_cep_set_inuse(cep); siw_cep_put(cep); siw_cancel_mpatimer(cep); if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { siw_dbg_cep(cep, "out of state\n"); siw_cep_set_free(cep); siw_cep_put(cep); /* put last reference */ return -ECONNRESET; } siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state, pd_len); if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) { cep->mpa.hdr.params.bits |= MPA_RR_FLAG_REJECT; /* reject */ siw_send_mpareqrep(cep, pdata, pd_len); } siw_socket_disassoc(cep->sock); sock_release(cep->sock); cep->sock = NULL; cep->state = SIW_EPSTATE_CLOSED; siw_cep_set_free(cep); siw_cep_put(cep); return 0; } /* * siw_create_listen - Create resources for a listener's IWCM ID @id * * Starts listen on the socket address id->local_addr. * */ int siw_create_listen(struct iw_cm_id *id, int backlog) { struct socket *s; struct siw_cep *cep = NULL; struct siw_device *sdev = to_siw_dev(id->device); int addr_family = id->local_addr.ss_family; int rv = 0; if (addr_family != AF_INET && addr_family != AF_INET6) return -EAFNOSUPPORT; rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s); if (rv < 0) return rv; /* * Allow binding local port when still in TIME_WAIT from last close. */ sock_set_reuseaddr(s->sk); if (addr_family == AF_INET) { struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr); /* For wildcard addr, limit binding to current device only */ if (ipv4_is_zeronet(laddr->sin_addr.s_addr)) s->sk->sk_bound_dev_if = sdev->netdev->ifindex; rv = s->ops->bind(s, (struct sockaddr *)laddr, sizeof(struct sockaddr_in)); } else { struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr); if (id->afonly) { rv = ip6_sock_set_v6only(s->sk); if (rv) { siw_dbg(id->device, "ip6_sock_set_v6only erro: %d\n", rv); goto error; } } /* For wildcard addr, limit binding to current device only */ if (ipv6_addr_any(&laddr->sin6_addr)) s->sk->sk_bound_dev_if = sdev->netdev->ifindex; rv = s->ops->bind(s, (struct sockaddr *)laddr, sizeof(struct sockaddr_in6)); } if (rv) { siw_dbg(id->device, "socket bind error: %d\n", rv); goto error; } cep = siw_cep_alloc(sdev); if (!cep) { rv = -ENOMEM; goto error; } siw_cep_socket_assoc(cep, s); rv = siw_cm_alloc_work(cep, backlog); if (rv) { siw_dbg(id->device, "alloc_work error %d, backlog %d\n", rv, backlog); goto error; } rv = s->ops->listen(s, backlog); if (rv) { siw_dbg(id->device, "listen error %d\n", rv); goto error; } cep->cm_id = id; id->add_ref(id); /* * In case of a wildcard rdma_listen on a multi-homed device, * a listener's IWCM id is associated with more than one listening CEP. * * We currently use id->provider_data in three different ways: * * o For a listener's IWCM id, id->provider_data points to * the list_head of the list of listening CEPs. 
* Uses: siw_create_listen(), siw_destroy_listen() * * o For each accepted passive-side IWCM id, id->provider_data * points to the CEP itself. This is a consequence of * - siw_cm_upcall() setting event.provider_data = cep and * - the IWCM's cm_conn_req_handler() setting provider_data of the * new passive-side IWCM id equal to event.provider_data * Uses: siw_accept(), siw_reject() * * o For an active-side IWCM id, id->provider_data is not used at all. * */ if (!id->provider_data) { id->provider_data = kmalloc(sizeof(struct list_head), GFP_KERNEL); if (!id->provider_data) { rv = -ENOMEM; goto error; } INIT_LIST_HEAD((struct list_head *)id->provider_data); } list_add_tail(&cep->listenq, (struct list_head *)id->provider_data); cep->state = SIW_EPSTATE_LISTENING; siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr); return 0; error: siw_dbg(id->device, "failed: %d\n", rv); if (cep) { siw_cep_set_inuse(cep); if (cep->cm_id) { cep->cm_id->rem_ref(cep->cm_id); cep->cm_id = NULL; } cep->sock = NULL; siw_socket_disassoc(s); cep->state = SIW_EPSTATE_CLOSED; siw_cep_set_free(cep); siw_cep_put(cep); } sock_release(s); return rv; } static void siw_drop_listeners(struct iw_cm_id *id) { struct list_head *p, *tmp; /* * In case of a wildcard rdma_listen on a multi-homed device, * a listener's IWCM id is associated with more than one listening CEP. */ list_for_each_safe(p, tmp, (struct list_head *)id->provider_data) { struct siw_cep *cep = list_entry(p, struct siw_cep, listenq); list_del(p); siw_dbg_cep(cep, "drop cep, state %d\n", cep->state); siw_cep_set_inuse(cep); if (cep->cm_id) { cep->cm_id->rem_ref(cep->cm_id); cep->cm_id = NULL; } if (cep->sock) { siw_socket_disassoc(cep->sock); sock_release(cep->sock); cep->sock = NULL; } cep->state = SIW_EPSTATE_CLOSED; siw_cep_set_free(cep); siw_cep_put(cep); } } int siw_destroy_listen(struct iw_cm_id *id) { if (!id->provider_data) { siw_dbg(id->device, "no cep(s)\n"); return 0; } siw_drop_listeners(id); kfree(id->provider_data); id->provider_data = NULL; return 0; } int siw_cm_init(void) { /* * create_single_workqueue for strict ordering */ siw_cm_wq = create_singlethread_workqueue("siw_cm_wq"); if (!siw_cm_wq) return -ENOMEM; return 0; } void siw_cm_exit(void) { if (siw_cm_wq) destroy_workqueue(siw_cm_wq); }
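siw_recv_mpa_rr() above has to assemble a fixed-size MPA request/reply header from a TCP stream that may deliver it in pieces: partial reads advance mpa.bytes_rcvd and the function returns -EAGAIN so the caller can retry once more data arrives. The sketch below shows that resumable-receive shape in user space, assuming a simple read callback; the 20-byte header length and the names are illustrative only, not the real MPA layout.

/*
 * Illustrative only: a resumable header receive in the style of
 * siw_recv_mpa_rr(). Progress survives across calls in bytes_rcvd and the
 * caller retries on -EAGAIN. HDR_LEN is an assumed size.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define HDR_LEN 20

struct rr_state {
	unsigned char hdr[HDR_LEN];
	size_t bytes_rcvd;		/* bytes of header collected so far */
};

/* mimics a nonblocking socket read: >0 bytes copied, 0 EOF, <0 would block */
typedef ssize_t (*recv_fn)(void *buf, size_t len, void *ctx);

static int recv_hdr(struct rr_state *st, recv_fn rx, void *ctx)
{
	while (st->bytes_rcvd < HDR_LEN) {
		ssize_t n = rx(st->hdr + st->bytes_rcvd,
			       HDR_LEN - st->bytes_rcvd, ctx);

		if (n == 0)
			return -ECONNABORTED;	/* peer closed mid-header */
		if (n < 0)
			return -EAGAIN;		/* nothing yet, retry later */
		st->bytes_rcvd += (size_t)n;
	}
	return 0;				/* complete header available */
}

/* fake data source that hands out the header in 7-byte chunks */
struct fake_src {
	const unsigned char *data;
	size_t len, pos, chunk;
};

static ssize_t fake_rx(void *buf, size_t len, void *ctx)
{
	struct fake_src *s = ctx;
	size_t n = s->len - s->pos;

	if (!n)
		return -1;			/* pretend "would block" */
	if (n > s->chunk)
		n = s->chunk;
	if (n > len)
		n = len;
	memcpy(buf, s->data + s->pos, n);
	s->pos += n;
	return (ssize_t)n;
}

int main(void)
{
	unsigned char wire[HDR_LEN] = { 0 };
	struct fake_src src = { wire, sizeof(wire), 0, 7 };
	struct rr_state st = { .bytes_rcvd = 0 };
	int rv;

	while ((rv = recv_hdr(&st, fake_rx, &src)) == -EAGAIN)
		;				/* a work item would requeue here */
	printf("recv_hdr -> %d after %zu bytes\n", rv, st.bytes_rcvd);
	return 0;
}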
linux-master
drivers/infiniband/sw/siw/siw_cm.c
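Much of siw_cm.c above deals with MPA version 2 enhanced connection setup, where IRD/ORD values and peer-to-peer RTR hints travel in the first bytes of the private data, and the peer's requested ORD becomes the local IRD and vice versa (see siw_proc_mpareq()). Below is a small sketch of that decode step; the flag and mask constants are assumptions chosen for the example rather than values copied from the siw headers.

/*
 * Illustrative decode of the MPA v2 IRD/ORD control words, in the spirit of
 * siw_proc_mpareq(). Bit positions and the 14-bit value mask are assumed
 * here for demonstration; the driver defines its own constants.
 */
#include <arpa/inet.h>		/* htons()/ntohs() */
#include <stdint.h>
#include <stdio.h>

#define MPA_IRD_ORD_MASK	0x3fff	/* low bits carry the value (assumed) */
#define MPA_V2_PEER_TO_PEER	0x8000	/* in the ird word (assumed) */
#define MPA_V2_RDMA_WRITE_RTR	0x8000	/* in the ord word (assumed) */
#define MPA_V2_RDMA_READ_RTR	0x4000	/* in the ord word (assumed) */

struct mpa_v2_data {		/* leading bytes of enhanced private data */
	uint16_t ird;		/* network byte order on the wire */
	uint16_t ord;
};

int main(void)
{
	/* peer asks for IRD 8 with P2P mode, ORD 4 with a zero-length Write RTR */
	struct mpa_v2_data wire = {
		.ird = htons(8 | MPA_V2_PEER_TO_PEER),
		.ord = htons(4 | MPA_V2_RDMA_WRITE_RTR),
	};
	uint16_t peer_ird = ntohs(wire.ird);
	uint16_t peer_ord = ntohs(wire.ord);

	/* peer ORD becomes local IRD and peer IRD becomes local ORD */
	printf("local ird=%d ord=%d p2p=%d write_rtr=%d read_rtr=%d\n",
	       peer_ord & MPA_IRD_ORD_MASK, peer_ird & MPA_IRD_ORD_MASK,
	       !!(peer_ird & MPA_V2_PEER_TO_PEER),
	       !!(peer_ord & MPA_V2_RDMA_WRITE_RTR),
	       !!(peer_ord & MPA_V2_RDMA_READ_RTR));
	return 0;
}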
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ /* Copyright (c) 2008-2019, IBM Corporation */ #include <linux/errno.h> #include <linux/types.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/xarray.h> #include <net/addrconf.h> #include <rdma/iw_cm.h> #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> #include <rdma/uverbs_ioctl.h> #include "siw.h" #include "siw_verbs.h" #include "siw_mem.h" static int ib_qp_state_to_siw_qp_state[IB_QPS_ERR + 1] = { [IB_QPS_RESET] = SIW_QP_STATE_IDLE, [IB_QPS_INIT] = SIW_QP_STATE_IDLE, [IB_QPS_RTR] = SIW_QP_STATE_RTR, [IB_QPS_RTS] = SIW_QP_STATE_RTS, [IB_QPS_SQD] = SIW_QP_STATE_CLOSING, [IB_QPS_SQE] = SIW_QP_STATE_TERMINATE, [IB_QPS_ERR] = SIW_QP_STATE_ERROR }; static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = { [IB_QPS_RESET] = "RESET", [IB_QPS_INIT] = "INIT", [IB_QPS_RTR] = "RTR", [IB_QPS_RTS] = "RTS", [IB_QPS_SQD] = "SQD", [IB_QPS_SQE] = "SQE", [IB_QPS_ERR] = "ERR" }; void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry) { struct siw_user_mmap_entry *entry = to_siw_mmap_entry(rdma_entry); kfree(entry); } int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma) { struct siw_ucontext *uctx = to_siw_ctx(ctx); size_t size = vma->vm_end - vma->vm_start; struct rdma_user_mmap_entry *rdma_entry; struct siw_user_mmap_entry *entry; int rv = -EINVAL; /* * Must be page aligned */ if (vma->vm_start & (PAGE_SIZE - 1)) { pr_warn("siw: mmap not page aligned\n"); return -EINVAL; } rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma); if (!rdma_entry) { siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n", vma->vm_pgoff, size); return -EINVAL; } entry = to_siw_mmap_entry(rdma_entry); rv = remap_vmalloc_range(vma, entry->address, 0); if (rv) { pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff, size); goto out; } out: rdma_user_mmap_entry_put(rdma_entry); return rv; } int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata) { struct siw_device *sdev = to_siw_dev(base_ctx->device); struct siw_ucontext *ctx = to_siw_ctx(base_ctx); struct siw_uresp_alloc_ctx uresp = {}; int rv; if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) { rv = -ENOMEM; goto err_out; } ctx->sdev = sdev; uresp.dev_id = sdev->vendor_part_id; if (udata->outlen < sizeof(uresp)) { rv = -EINVAL; goto err_out; } rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rv) goto err_out; siw_dbg(base_ctx->device, "success. now %d context(s)\n", atomic_read(&sdev->num_ctx)); return 0; err_out: atomic_dec(&sdev->num_ctx); siw_dbg(base_ctx->device, "failure %d. 
now %d context(s)\n", rv, atomic_read(&sdev->num_ctx)); return rv; } void siw_dealloc_ucontext(struct ib_ucontext *base_ctx) { struct siw_ucontext *uctx = to_siw_ctx(base_ctx); atomic_dec(&uctx->sdev->num_ctx); } int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr, struct ib_udata *udata) { struct siw_device *sdev = to_siw_dev(base_dev); if (udata->inlen || udata->outlen) return -EINVAL; memset(attr, 0, sizeof(*attr)); /* Revisit atomic caps if RFC 7306 gets supported */ attr->atomic_cap = 0; attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS; attr->kernel_cap_flags = IBK_ALLOW_USER_UNREG; attr->max_cq = sdev->attrs.max_cq; attr->max_cqe = sdev->attrs.max_cqe; attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL; attr->max_mr = sdev->attrs.max_mr; attr->max_mw = sdev->attrs.max_mw; attr->max_mr_size = ~0ull; attr->max_pd = sdev->attrs.max_pd; attr->max_qp = sdev->attrs.max_qp; attr->max_qp_init_rd_atom = sdev->attrs.max_ird; attr->max_qp_rd_atom = sdev->attrs.max_ord; attr->max_qp_wr = sdev->attrs.max_qp_wr; attr->max_recv_sge = sdev->attrs.max_sge; attr->max_res_rd_atom = sdev->attrs.max_qp * sdev->attrs.max_ird; attr->max_send_sge = sdev->attrs.max_sge; attr->max_sge_rd = sdev->attrs.max_sge_rd; attr->max_srq = sdev->attrs.max_srq; attr->max_srq_sge = sdev->attrs.max_srq_sge; attr->max_srq_wr = sdev->attrs.max_srq_wr; attr->page_size_cap = PAGE_SIZE; attr->vendor_id = SIW_VENDOR_ID; attr->vendor_part_id = sdev->vendor_part_id; addrconf_addr_eui48((u8 *)&attr->sys_image_guid, sdev->raw_gid); return 0; } int siw_query_port(struct ib_device *base_dev, u32 port, struct ib_port_attr *attr) { struct siw_device *sdev = to_siw_dev(base_dev); int rv; memset(attr, 0, sizeof(*attr)); rv = ib_get_eth_speed(base_dev, port, &attr->active_speed, &attr->active_width); attr->gid_tbl_len = 1; attr->max_msg_sz = -1; attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu); attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu); attr->phys_state = sdev->state == IB_PORT_ACTIVE ? 
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED; attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP; attr->state = sdev->state; /* * All zero * * attr->lid = 0; * attr->bad_pkey_cntr = 0; * attr->qkey_viol_cntr = 0; * attr->sm_lid = 0; * attr->lmc = 0; * attr->max_vl_num = 0; * attr->sm_sl = 0; * attr->subnet_timeout = 0; * attr->init_type_repy = 0; */ return rv; } int siw_get_port_immutable(struct ib_device *base_dev, u32 port, struct ib_port_immutable *port_immutable) { struct ib_port_attr attr; int rv = siw_query_port(base_dev, port, &attr); if (rv) return rv; port_immutable->gid_tbl_len = attr.gid_tbl_len; port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; return 0; } int siw_query_gid(struct ib_device *base_dev, u32 port, int idx, union ib_gid *gid) { struct siw_device *sdev = to_siw_dev(base_dev); /* subnet_prefix == interface_id == 0; */ memset(gid, 0, sizeof(*gid)); memcpy(gid->raw, sdev->raw_gid, ETH_ALEN); return 0; } int siw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) { struct siw_device *sdev = to_siw_dev(pd->device); if (atomic_inc_return(&sdev->num_pd) > SIW_MAX_PD) { atomic_dec(&sdev->num_pd); return -ENOMEM; } siw_dbg_pd(pd, "now %d PD's(s)\n", atomic_read(&sdev->num_pd)); return 0; } int siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) { struct siw_device *sdev = to_siw_dev(pd->device); siw_dbg_pd(pd, "free PD\n"); atomic_dec(&sdev->num_pd); return 0; } void siw_qp_get_ref(struct ib_qp *base_qp) { siw_qp_get(to_siw_qp(base_qp)); } void siw_qp_put_ref(struct ib_qp *base_qp) { siw_qp_put(to_siw_qp(base_qp)); } static struct rdma_user_mmap_entry * siw_mmap_entry_insert(struct siw_ucontext *uctx, void *address, size_t length, u64 *offset) { struct siw_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL); int rv; *offset = SIW_INVAL_UOBJ_KEY; if (!entry) return NULL; entry->address = address; rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext, &entry->rdma_entry, length); if (rv) { kfree(entry); return NULL; } *offset = rdma_user_mmap_get_offset(&entry->rdma_entry); return &entry->rdma_entry; } /* * siw_create_qp() * * Create QP of requested size on given device. * * @qp: Queue pait * @attrs: Initial QP attributes. * @udata: used to provide QP ID, SQ and RQ size back to user. 
*/ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, struct ib_udata *udata) { struct ib_pd *pd = ibqp->pd; struct siw_qp *qp = to_siw_qp(ibqp); struct ib_device *base_dev = pd->device; struct siw_device *sdev = to_siw_dev(base_dev); struct siw_ucontext *uctx = rdma_udata_to_drv_context(udata, struct siw_ucontext, base_ucontext); unsigned long flags; int num_sqe, num_rqe, rv = 0; size_t length; siw_dbg(base_dev, "create new QP\n"); if (attrs->create_flags) return -EOPNOTSUPP; if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) { siw_dbg(base_dev, "too many QP's\n"); rv = -ENOMEM; goto err_atomic; } if (attrs->qp_type != IB_QPT_RC) { siw_dbg(base_dev, "only RC QP's supported\n"); rv = -EOPNOTSUPP; goto err_atomic; } if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) || (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) || (attrs->cap.max_send_sge > SIW_MAX_SGE) || (attrs->cap.max_recv_sge > SIW_MAX_SGE)) { siw_dbg(base_dev, "QP size error\n"); rv = -EINVAL; goto err_atomic; } if (attrs->cap.max_inline_data > SIW_MAX_INLINE) { siw_dbg(base_dev, "max inline send: %d > %d\n", attrs->cap.max_inline_data, (int)SIW_MAX_INLINE); rv = -EINVAL; goto err_atomic; } /* * NOTE: we allow for zero element SQ and RQ WQE's SGL's * but not for a QP unable to hold any WQE (SQ + RQ) */ if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) { siw_dbg(base_dev, "QP must have send or receive queue\n"); rv = -EINVAL; goto err_atomic; } if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) { siw_dbg(base_dev, "send CQ or receive CQ invalid\n"); rv = -EINVAL; goto err_atomic; } init_rwsem(&qp->state_lock); spin_lock_init(&qp->sq_lock); spin_lock_init(&qp->rq_lock); spin_lock_init(&qp->orq_lock); rv = siw_qp_add(sdev, qp); if (rv) goto err_atomic; num_sqe = attrs->cap.max_send_wr; num_rqe = attrs->cap.max_recv_wr; /* All queue indices are derived from modulo operations * on a free running 'get' (consumer) and 'put' (producer) * unsigned counter. Having queue sizes at power of two * avoids handling counter wrap around. */ if (num_sqe) num_sqe = roundup_pow_of_two(num_sqe); else { /* Zero sized SQ is not supported */ rv = -EINVAL; goto err_out_xa; } if (num_rqe) num_rqe = roundup_pow_of_two(num_rqe); if (udata) qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe)); else qp->sendq = vcalloc(num_sqe, sizeof(struct siw_sqe)); if (qp->sendq == NULL) { rv = -ENOMEM; goto err_out_xa; } if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) { if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) qp->attrs.flags |= SIW_SIGNAL_ALL_WR; else { rv = -EINVAL; goto err_out_xa; } } qp->pd = pd; qp->scq = to_siw_cq(attrs->send_cq); qp->rcq = to_siw_cq(attrs->recv_cq); if (attrs->srq) { /* * SRQ support. * Verbs 6.3.7: ignore RQ size, if SRQ present * Verbs 6.3.5: do not check PD of SRQ against PD of QP */ qp->srq = to_siw_srq(attrs->srq); qp->attrs.rq_size = 0; siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->base_qp.qp_num); } else if (num_rqe) { if (udata) qp->recvq = vmalloc_user(num_rqe * sizeof(struct siw_rqe)); else qp->recvq = vcalloc(num_rqe, sizeof(struct siw_rqe)); if (qp->recvq == NULL) { rv = -ENOMEM; goto err_out_xa; } qp->attrs.rq_size = num_rqe; } qp->attrs.sq_size = num_sqe; qp->attrs.sq_max_sges = attrs->cap.max_send_sge; qp->attrs.rq_max_sges = attrs->cap.max_recv_sge; /* Make those two tunables fixed for now. 
*/ qp->tx_ctx.gso_seg_limit = 1; qp->tx_ctx.zcopy_tx = zcopy_tx; qp->attrs.state = SIW_QP_STATE_IDLE; if (udata) { struct siw_uresp_create_qp uresp = {}; uresp.num_sqe = num_sqe; uresp.num_rqe = num_rqe; uresp.qp_id = qp_id(qp); if (qp->sendq) { length = num_sqe * sizeof(struct siw_sqe); qp->sq_entry = siw_mmap_entry_insert(uctx, qp->sendq, length, &uresp.sq_key); if (!qp->sq_entry) { rv = -ENOMEM; goto err_out_xa; } } if (qp->recvq) { length = num_rqe * sizeof(struct siw_rqe); qp->rq_entry = siw_mmap_entry_insert(uctx, qp->recvq, length, &uresp.rq_key); if (!qp->rq_entry) { uresp.sq_key = SIW_INVAL_UOBJ_KEY; rv = -ENOMEM; goto err_out_xa; } } if (udata->outlen < sizeof(uresp)) { rv = -EINVAL; goto err_out_xa; } rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rv) goto err_out_xa; } qp->tx_cpu = siw_get_tx_cpu(sdev); if (qp->tx_cpu < 0) { rv = -EINVAL; goto err_out_xa; } INIT_LIST_HEAD(&qp->devq); spin_lock_irqsave(&sdev->lock, flags); list_add_tail(&qp->devq, &sdev->qp_list); spin_unlock_irqrestore(&sdev->lock, flags); init_completion(&qp->qp_free); return 0; err_out_xa: xa_erase(&sdev->qp_xa, qp_id(qp)); if (uctx) { rdma_user_mmap_entry_remove(qp->sq_entry); rdma_user_mmap_entry_remove(qp->rq_entry); } vfree(qp->sendq); vfree(qp->recvq); err_atomic: atomic_dec(&sdev->num_qp); return rv; } /* * Minimum siw_query_qp() verb interface. * * @qp_attr_mask is not used but all available information is provided */ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { struct siw_qp *qp; struct siw_device *sdev; if (base_qp && qp_attr && qp_init_attr) { qp = to_siw_qp(base_qp); sdev = to_siw_dev(base_qp->device); } else { return -EINVAL; } qp_attr->cap.max_inline_data = SIW_MAX_INLINE; qp_attr->cap.max_send_wr = qp->attrs.sq_size; qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges; qp_attr->cap.max_recv_wr = qp->attrs.rq_size; qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges; qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu); qp_attr->max_rd_atomic = qp->attrs.irq_size; qp_attr->max_dest_rd_atomic = qp->attrs.orq_size; qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ; qp_init_attr->qp_type = base_qp->qp_type; qp_init_attr->send_cq = base_qp->send_cq; qp_init_attr->recv_cq = base_qp->recv_cq; qp_init_attr->srq = base_qp->srq; qp_init_attr->cap = qp_attr->cap; return 0; } int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct siw_qp_attrs new_attrs; enum siw_qp_attr_mask siw_attr_mask = 0; struct siw_qp *qp = to_siw_qp(base_qp); int rv = 0; if (!attr_mask) return 0; if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) return -EOPNOTSUPP; memset(&new_attrs, 0, sizeof(new_attrs)); if (attr_mask & IB_QP_ACCESS_FLAGS) { siw_attr_mask = SIW_QP_ATTR_ACCESS_FLAGS; if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) new_attrs.flags |= SIW_RDMA_READ_ENABLED; if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) new_attrs.flags |= SIW_RDMA_WRITE_ENABLED; if (attr->qp_access_flags & IB_ACCESS_MW_BIND) new_attrs.flags |= SIW_RDMA_BIND_ENABLED; } if (attr_mask & IB_QP_STATE) { siw_dbg_qp(qp, "desired IB QP state: %s\n", ib_qp_state_to_string[attr->qp_state]); new_attrs.state = ib_qp_state_to_siw_qp_state[attr->qp_state]; if (new_attrs.state > SIW_QP_STATE_RTS) qp->tx_ctx.tx_suspend = 1; siw_attr_mask |= SIW_QP_ATTR_STATE; } if (!siw_attr_mask) goto out; down_write(&qp->state_lock); rv = siw_qp_modify(qp, &new_attrs, 
siw_attr_mask); up_write(&qp->state_lock); out: return rv; } int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata) { struct siw_qp *qp = to_siw_qp(base_qp); struct siw_ucontext *uctx = rdma_udata_to_drv_context(udata, struct siw_ucontext, base_ucontext); struct siw_qp_attrs qp_attrs; siw_dbg_qp(qp, "state %d\n", qp->attrs.state); /* * Mark QP as in process of destruction to prevent from * any async callbacks to RDMA core */ qp->attrs.flags |= SIW_QP_IN_DESTROY; qp->rx_stream.rx_suspend = 1; if (uctx) { rdma_user_mmap_entry_remove(qp->sq_entry); rdma_user_mmap_entry_remove(qp->rq_entry); } down_write(&qp->state_lock); qp_attrs.state = SIW_QP_STATE_ERROR; siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE); if (qp->cep) { siw_cep_put(qp->cep); qp->cep = NULL; } up_write(&qp->state_lock); kfree(qp->tx_ctx.mpa_crc_hd); kfree(qp->rx_stream.mpa_crc_hd); qp->scq = qp->rcq = NULL; siw_qp_put(qp); wait_for_completion(&qp->qp_free); return 0; } /* * siw_copy_inline_sgl() * * Prepare sgl of inlined data for sending. For userland callers * function checks if given buffer addresses and len's are within * process context bounds. * Data from all provided sge's are copied together into the wqe, * referenced by a single sge. */ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr, struct siw_sqe *sqe) { struct ib_sge *core_sge = core_wr->sg_list; void *kbuf = &sqe->sge[1]; int num_sge = core_wr->num_sge, bytes = 0; sqe->sge[0].laddr = (uintptr_t)kbuf; sqe->sge[0].lkey = 0; while (num_sge--) { if (!core_sge->length) { core_sge++; continue; } bytes += core_sge->length; if (bytes > SIW_MAX_INLINE) { bytes = -EINVAL; break; } memcpy(kbuf, ib_virt_dma_to_ptr(core_sge->addr), core_sge->length); kbuf += core_sge->length; core_sge++; } sqe->sge[0].length = max(bytes, 0); sqe->num_sge = bytes > 0 ? 1 : 0; return bytes; } /* Complete SQ WR's without processing */ static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { int rv = 0; while (wr) { struct siw_sqe sqe = {}; switch (wr->opcode) { case IB_WR_RDMA_WRITE: sqe.opcode = SIW_OP_WRITE; break; case IB_WR_RDMA_READ: sqe.opcode = SIW_OP_READ; break; case IB_WR_RDMA_READ_WITH_INV: sqe.opcode = SIW_OP_READ_LOCAL_INV; break; case IB_WR_SEND: sqe.opcode = SIW_OP_SEND; break; case IB_WR_SEND_WITH_IMM: sqe.opcode = SIW_OP_SEND_WITH_IMM; break; case IB_WR_SEND_WITH_INV: sqe.opcode = SIW_OP_SEND_REMOTE_INV; break; case IB_WR_LOCAL_INV: sqe.opcode = SIW_OP_INVAL_STAG; break; case IB_WR_REG_MR: sqe.opcode = SIW_OP_REG_MR; break; default: rv = -EINVAL; break; } if (!rv) { sqe.id = wr->wr_id; rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR); } if (rv) { if (bad_wr) *bad_wr = wr; break; } wr = wr->next; } return rv; } /* Complete RQ WR's without processing */ static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct siw_rqe rqe = {}; int rv = 0; while (wr) { rqe.id = wr->wr_id; rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR); if (rv) { if (bad_wr) *bad_wr = wr; break; } wr = wr->next; } return rv; } /* * siw_post_send() * * Post a list of S-WR's to a SQ. * * @base_qp: Base QP contained in siw QP * @wr: Null terminated list of user WR's * @bad_wr: Points to failing WR in case of synchronous failure. 
*/ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { struct siw_qp *qp = to_siw_qp(base_qp); struct siw_wqe *wqe = tx_wqe(qp); unsigned long flags; int rv = 0; if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) { siw_dbg_qp(qp, "wr must be empty for user mapped sq\n"); *bad_wr = wr; return -EINVAL; } /* * Try to acquire QP state lock. Must be non-blocking * to accommodate kernel clients needs. */ if (!down_read_trylock(&qp->state_lock)) { if (qp->attrs.state == SIW_QP_STATE_ERROR) { /* * ERROR state is final, so we can be sure * this state will not change as long as the QP * exists. * * This handles an ib_drain_sq() call with * a concurrent request to set the QP state * to ERROR. */ rv = siw_sq_flush_wr(qp, wr, bad_wr); } else { siw_dbg_qp(qp, "QP locked, state %d\n", qp->attrs.state); *bad_wr = wr; rv = -ENOTCONN; } return rv; } if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) { if (qp->attrs.state == SIW_QP_STATE_ERROR) { /* * Immediately flush this WR to CQ, if QP * is in ERROR state. SQ is guaranteed to * be empty, so WR complets in-order. * * Typically triggered by ib_drain_sq(). */ rv = siw_sq_flush_wr(qp, wr, bad_wr); } else { siw_dbg_qp(qp, "QP out of state %d\n", qp->attrs.state); *bad_wr = wr; rv = -ENOTCONN; } up_read(&qp->state_lock); return rv; } spin_lock_irqsave(&qp->sq_lock, flags); while (wr) { u32 idx = qp->sq_put % qp->attrs.sq_size; struct siw_sqe *sqe = &qp->sendq[idx]; if (sqe->flags) { siw_dbg_qp(qp, "sq full\n"); rv = -ENOMEM; break; } if (wr->num_sge > qp->attrs.sq_max_sges) { siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge); rv = -EINVAL; break; } sqe->id = wr->wr_id; if ((wr->send_flags & IB_SEND_SIGNALED) || (qp->attrs.flags & SIW_SIGNAL_ALL_WR)) sqe->flags |= SIW_WQE_SIGNALLED; if (wr->send_flags & IB_SEND_FENCE) sqe->flags |= SIW_WQE_READ_FENCE; switch (wr->opcode) { case IB_WR_SEND: case IB_WR_SEND_WITH_INV: if (wr->send_flags & IB_SEND_SOLICITED) sqe->flags |= SIW_WQE_SOLICITED; if (!(wr->send_flags & IB_SEND_INLINE)) { siw_copy_sgl(wr->sg_list, sqe->sge, wr->num_sge); sqe->num_sge = wr->num_sge; } else { rv = siw_copy_inline_sgl(wr, sqe); if (rv <= 0) { rv = -EINVAL; break; } sqe->flags |= SIW_WQE_INLINE; sqe->num_sge = 1; } if (wr->opcode == IB_WR_SEND) sqe->opcode = SIW_OP_SEND; else { sqe->opcode = SIW_OP_SEND_REMOTE_INV; sqe->rkey = wr->ex.invalidate_rkey; } break; case IB_WR_RDMA_READ_WITH_INV: case IB_WR_RDMA_READ: /* * iWarp restricts RREAD sink to SGL containing * 1 SGE only. we could relax to SGL with multiple * elements referring the SAME ltag or even sending * a private per-rreq tag referring to a checked * local sgl with MULTIPLE ltag's. */ if (unlikely(wr->num_sge != 1)) { rv = -EINVAL; break; } siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1); /* * NOTE: zero length RREAD is allowed! 
*/ sqe->raddr = rdma_wr(wr)->remote_addr; sqe->rkey = rdma_wr(wr)->rkey; sqe->num_sge = 1; if (wr->opcode == IB_WR_RDMA_READ) sqe->opcode = SIW_OP_READ; else sqe->opcode = SIW_OP_READ_LOCAL_INV; break; case IB_WR_RDMA_WRITE: if (!(wr->send_flags & IB_SEND_INLINE)) { siw_copy_sgl(wr->sg_list, &sqe->sge[0], wr->num_sge); sqe->num_sge = wr->num_sge; } else { rv = siw_copy_inline_sgl(wr, sqe); if (unlikely(rv < 0)) { rv = -EINVAL; break; } sqe->flags |= SIW_WQE_INLINE; sqe->num_sge = 1; } sqe->raddr = rdma_wr(wr)->remote_addr; sqe->rkey = rdma_wr(wr)->rkey; sqe->opcode = SIW_OP_WRITE; break; case IB_WR_REG_MR: sqe->base_mr = (uintptr_t)reg_wr(wr)->mr; sqe->rkey = reg_wr(wr)->key; sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK; sqe->opcode = SIW_OP_REG_MR; break; case IB_WR_LOCAL_INV: sqe->rkey = wr->ex.invalidate_rkey; sqe->opcode = SIW_OP_INVAL_STAG; break; default: siw_dbg_qp(qp, "ib wr type %d unsupported\n", wr->opcode); rv = -EINVAL; break; } siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n", sqe->opcode, sqe->flags, (void *)(uintptr_t)sqe->id); if (unlikely(rv < 0)) break; /* make SQE only valid after completely written */ smp_wmb(); sqe->flags |= SIW_WQE_VALID; qp->sq_put++; wr = wr->next; } /* * Send directly if SQ processing is not in progress. * Eventual immediate errors (rv < 0) do not affect the involved * RI resources (Verbs, 8.3.1) and thus do not prevent from SQ * processing, if new work is already pending. But rv must be passed * to caller. */ if (wqe->wr_status != SIW_WR_IDLE) { spin_unlock_irqrestore(&qp->sq_lock, flags); goto skip_direct_sending; } rv = siw_activate_tx(qp); spin_unlock_irqrestore(&qp->sq_lock, flags); if (rv <= 0) goto skip_direct_sending; if (rdma_is_kernel_res(&qp->base_qp.res)) { rv = siw_sq_start(qp); } else { qp->tx_ctx.in_syscall = 1; if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend)) siw_qp_cm_drop(qp, 0); qp->tx_ctx.in_syscall = 0; } skip_direct_sending: up_read(&qp->state_lock); if (rv >= 0) return 0; /* * Immediate error */ siw_dbg_qp(qp, "error %d\n", rv); *bad_wr = wr; return rv; } /* * siw_post_receive() * * Post a list of R-WR's to a RQ. * * @base_qp: Base QP contained in siw QP * @wr: Null terminated list of user WR's * @bad_wr: Points to failing WR in case of synchronous failure. */ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct siw_qp *qp = to_siw_qp(base_qp); unsigned long flags; int rv = 0; if (qp->srq || qp->attrs.rq_size == 0) { *bad_wr = wr; return -EINVAL; } if (!rdma_is_kernel_res(&qp->base_qp.res)) { siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n"); *bad_wr = wr; return -EINVAL; } /* * Try to acquire QP state lock. Must be non-blocking * to accommodate kernel clients needs. */ if (!down_read_trylock(&qp->state_lock)) { if (qp->attrs.state == SIW_QP_STATE_ERROR) { /* * ERROR state is final, so we can be sure * this state will not change as long as the QP * exists. * * This handles an ib_drain_rq() call with * a concurrent request to set the QP state * to ERROR. */ rv = siw_rq_flush_wr(qp, wr, bad_wr); } else { siw_dbg_qp(qp, "QP locked, state %d\n", qp->attrs.state); *bad_wr = wr; rv = -ENOTCONN; } return rv; } if (qp->attrs.state > SIW_QP_STATE_RTS) { if (qp->attrs.state == SIW_QP_STATE_ERROR) { /* * Immediately flush this WR to CQ, if QP * is in ERROR state. RQ is guaranteed to * be empty, so WR complets in-order. * * Typically triggered by ib_drain_rq(). 
*/ rv = siw_rq_flush_wr(qp, wr, bad_wr); } else { siw_dbg_qp(qp, "QP out of state %d\n", qp->attrs.state); *bad_wr = wr; rv = -ENOTCONN; } up_read(&qp->state_lock); return rv; } /* * Serialize potentially multiple producers. * Not needed for single threaded consumer side. */ spin_lock_irqsave(&qp->rq_lock, flags); while (wr) { u32 idx = qp->rq_put % qp->attrs.rq_size; struct siw_rqe *rqe = &qp->recvq[idx]; if (rqe->flags) { siw_dbg_qp(qp, "RQ full\n"); rv = -ENOMEM; break; } if (wr->num_sge > qp->attrs.rq_max_sges) { siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge); rv = -EINVAL; break; } rqe->id = wr->wr_id; rqe->num_sge = wr->num_sge; siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge); /* make sure RQE is completely written before valid */ smp_wmb(); rqe->flags = SIW_WQE_VALID; qp->rq_put++; wr = wr->next; } spin_unlock_irqrestore(&qp->rq_lock, flags); up_read(&qp->state_lock); if (rv < 0) { siw_dbg_qp(qp, "error %d\n", rv); *bad_wr = wr; } return rv > 0 ? 0 : rv; } int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata) { struct siw_cq *cq = to_siw_cq(base_cq); struct siw_device *sdev = to_siw_dev(base_cq->device); struct siw_ucontext *ctx = rdma_udata_to_drv_context(udata, struct siw_ucontext, base_ucontext); siw_dbg_cq(cq, "free CQ resources\n"); siw_cq_flush(cq); if (ctx) rdma_user_mmap_entry_remove(cq->cq_entry); atomic_dec(&sdev->num_cq); vfree(cq->queue); return 0; } /* * siw_create_cq() * * Populate CQ of requested size * * @base_cq: CQ as allocated by RDMA midlayer * @attr: Initial CQ attributes * @udata: relates to user context */ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) { struct siw_device *sdev = to_siw_dev(base_cq->device); struct siw_cq *cq = to_siw_cq(base_cq); int rv, size = attr->cqe; if (attr->flags) return -EOPNOTSUPP; if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) { siw_dbg(base_cq->device, "too many CQ's\n"); rv = -ENOMEM; goto err_out; } if (size < 1 || size > sdev->attrs.max_cqe) { siw_dbg(base_cq->device, "CQ size error: %d\n", size); rv = -EINVAL; goto err_out; } size = roundup_pow_of_two(size); cq->base_cq.cqe = size; cq->num_cqe = size; if (udata) cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) + sizeof(struct siw_cq_ctrl)); else cq->queue = vzalloc(size * sizeof(struct siw_cqe) + sizeof(struct siw_cq_ctrl)); if (cq->queue == NULL) { rv = -ENOMEM; goto err_out; } get_random_bytes(&cq->id, 4); siw_dbg(base_cq->device, "new CQ [%u]\n", cq->id); spin_lock_init(&cq->lock); cq->notify = (struct siw_cq_ctrl *)&cq->queue[size]; if (udata) { struct siw_uresp_create_cq uresp = {}; struct siw_ucontext *ctx = rdma_udata_to_drv_context(udata, struct siw_ucontext, base_ucontext); size_t length = size * sizeof(struct siw_cqe) + sizeof(struct siw_cq_ctrl); cq->cq_entry = siw_mmap_entry_insert(ctx, cq->queue, length, &uresp.cq_key); if (!cq->cq_entry) { rv = -ENOMEM; goto err_out; } uresp.cq_id = cq->id; uresp.num_cqe = size; if (udata->outlen < sizeof(uresp)) { rv = -EINVAL; goto err_out; } rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rv) goto err_out; } return 0; err_out: siw_dbg(base_cq->device, "CQ creation failed: %d", rv); if (cq->queue) { struct siw_ucontext *ctx = rdma_udata_to_drv_context(udata, struct siw_ucontext, base_ucontext); if (ctx) rdma_user_mmap_entry_remove(cq->cq_entry); vfree(cq->queue); } atomic_dec(&sdev->num_cq); return rv; } /* * siw_poll_cq() * * Reap CQ entries if available and copy work completion status into * array of WC's provided by caller. 
Returns number of reaped CQE's. * * @base_cq: Base CQ contained in siw CQ. * @num_cqe: Maximum number of CQE's to reap. * @wc: Array of work completions to be filled by siw. */ int siw_poll_cq(struct ib_cq *base_cq, int num_cqe, struct ib_wc *wc) { struct siw_cq *cq = to_siw_cq(base_cq); int i; for (i = 0; i < num_cqe; i++) { if (!siw_reap_cqe(cq, wc)) break; wc++; } return i; } /* * siw_req_notify_cq() * * Request notification for new CQE's added to that CQ. * Defined flags: * o SIW_CQ_NOTIFY_SOLICITED lets siw trigger a notification * event if a WQE with notification flag set enters the CQ * o SIW_CQ_NOTIFY_NEXT_COMP lets siw trigger a notification * event if a WQE enters the CQ. * o IB_CQ_REPORT_MISSED_EVENTS: return value will provide the * number of not reaped CQE's regardless of its notification * type and current or new CQ notification settings. * * @base_cq: Base CQ contained in siw CQ. * @flags: Requested notification flags. */ int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags) { struct siw_cq *cq = to_siw_cq(base_cq); siw_dbg_cq(cq, "flags: 0x%02x\n", flags); if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) /* * Enable CQ event for next solicited completion. * and make it visible to all associated producers. */ smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED); else /* * Enable CQ event for any signalled completion. * and make it visible to all associated producers. */ smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL); if (flags & IB_CQ_REPORT_MISSED_EVENTS) return cq->cq_put - cq->cq_get; return 0; } /* * siw_dereg_mr() * * Release Memory Region. * * @base_mr: Base MR contained in siw MR. * @udata: points to user context, unused. */ int siw_dereg_mr(struct ib_mr *base_mr, struct ib_udata *udata) { struct siw_mr *mr = to_siw_mr(base_mr); struct siw_device *sdev = to_siw_dev(base_mr->device); siw_dbg_mem(mr->mem, "deregister MR\n"); atomic_dec(&sdev->num_mr); siw_mr_drop_mem(mr); kfree_rcu(mr, rcu); return 0; } /* * siw_reg_user_mr() * * Register Memory Region. * * @pd: Protection Domain * @start: starting address of MR (virtual address) * @len: len of MR * @rnic_va: not used by siw * @rights: MR access rights * @udata: user buffer to communicate STag and Key. 
*/ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, u64 rnic_va, int rights, struct ib_udata *udata) { struct siw_mr *mr = NULL; struct siw_umem *umem = NULL; struct siw_ureq_reg_mr ureq; struct siw_device *sdev = to_siw_dev(pd->device); unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK); int rv; siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n", (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va, (unsigned long long)len); if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) { siw_dbg_pd(pd, "too many mr's\n"); rv = -ENOMEM; goto err_out; } if (!len) { rv = -EINVAL; goto err_out; } if (mem_limit != RLIM_INFINITY) { unsigned long num_pages = (PAGE_ALIGN(len + (start & ~PAGE_MASK))) >> PAGE_SHIFT; mem_limit >>= PAGE_SHIFT; if (num_pages > mem_limit - current->mm->locked_vm) { siw_dbg_pd(pd, "pages req %lu, max %lu, lock %lu\n", num_pages, mem_limit, current->mm->locked_vm); rv = -ENOMEM; goto err_out; } } umem = siw_umem_get(start, len, ib_access_writable(rights)); if (IS_ERR(umem)) { rv = PTR_ERR(umem); siw_dbg_pd(pd, "getting user memory failed: %d\n", rv); umem = NULL; goto err_out; } mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) { rv = -ENOMEM; goto err_out; } rv = siw_mr_add_mem(mr, pd, umem, start, len, rights); if (rv) goto err_out; if (udata) { struct siw_uresp_reg_mr uresp = {}; struct siw_mem *mem = mr->mem; if (udata->inlen < sizeof(ureq)) { rv = -EINVAL; goto err_out; } rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq)); if (rv) goto err_out; mr->base_mr.lkey |= ureq.stag_key; mr->base_mr.rkey |= ureq.stag_key; mem->stag |= ureq.stag_key; uresp.stag = mem->stag; if (udata->outlen < sizeof(uresp)) { rv = -EINVAL; goto err_out; } rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rv) goto err_out; } mr->mem->stag_valid = 1; return &mr->base_mr; err_out: atomic_dec(&sdev->num_mr); if (mr) { if (mr->mem) siw_mr_drop_mem(mr); kfree_rcu(mr, rcu); } else { if (umem) siw_umem_release(umem, false); } return ERR_PTR(rv); } struct ib_mr *siw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_sge) { struct siw_device *sdev = to_siw_dev(pd->device); struct siw_mr *mr = NULL; struct siw_pbl *pbl = NULL; int rv; if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) { siw_dbg_pd(pd, "too many mr's\n"); rv = -ENOMEM; goto err_out; } if (mr_type != IB_MR_TYPE_MEM_REG) { siw_dbg_pd(pd, "mr type %d unsupported\n", mr_type); rv = -EOPNOTSUPP; goto err_out; } if (max_sge > SIW_MAX_SGE_PBL) { siw_dbg_pd(pd, "too many sge's: %d\n", max_sge); rv = -ENOMEM; goto err_out; } pbl = siw_pbl_alloc(max_sge); if (IS_ERR(pbl)) { rv = PTR_ERR(pbl); siw_dbg_pd(pd, "pbl allocation failed: %d\n", rv); pbl = NULL; goto err_out; } mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) { rv = -ENOMEM; goto err_out; } rv = siw_mr_add_mem(mr, pd, pbl, 0, max_sge * PAGE_SIZE, 0); if (rv) goto err_out; mr->mem->is_pbl = 1; siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag); return &mr->base_mr; err_out: atomic_dec(&sdev->num_mr); if (!mr) { kfree(pbl); } else { if (mr->mem) siw_mr_drop_mem(mr); kfree_rcu(mr, rcu); } siw_dbg_pd(pd, "failed: %d\n", rv); return ERR_PTR(rv); } /* Just used to count number of pages being mapped */ static int siw_set_pbl_page(struct ib_mr *base_mr, u64 buf_addr) { return 0; } int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle, unsigned int *sg_off) { struct scatterlist *slp; struct siw_mr *mr = to_siw_mr(base_mr); struct siw_mem *mem = mr->mem; struct siw_pbl *pbl = mem->pbl; struct siw_pble *pble; unsigned long pbl_size; int i, rv; if 
(!pbl) { siw_dbg_mem(mem, "no PBL allocated\n"); return -EINVAL; } pble = pbl->pbe; if (pbl->max_buf < num_sle) { siw_dbg_mem(mem, "too many SGE's: %d > %d\n", num_sle, pbl->max_buf); return -ENOMEM; } for_each_sg(sl, slp, num_sle, i) { if (sg_dma_len(slp) == 0) { siw_dbg_mem(mem, "empty SGE\n"); return -EINVAL; } if (i == 0) { pble->addr = sg_dma_address(slp); pble->size = sg_dma_len(slp); pble->pbl_off = 0; pbl_size = pble->size; pbl->num_buf = 1; } else { /* Merge PBL entries if adjacent */ if (pble->addr + pble->size == sg_dma_address(slp)) { pble->size += sg_dma_len(slp); } else { pble++; pbl->num_buf++; pble->addr = sg_dma_address(slp); pble->size = sg_dma_len(slp); pble->pbl_off = pbl_size; } pbl_size += sg_dma_len(slp); } siw_dbg_mem(mem, "sge[%d], size %u, addr 0x%p, total %lu\n", i, pble->size, ib_virt_dma_to_ptr(pble->addr), pbl_size); } rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page); if (rv > 0) { mem->len = base_mr->length; mem->va = base_mr->iova; siw_dbg_mem(mem, "%llu bytes, start 0x%pK, %u SLE to %u entries\n", mem->len, (void *)(uintptr_t)mem->va, num_sle, pbl->num_buf); } return rv; } /* * siw_get_dma_mr() * * Create a (empty) DMA memory region, where no umem is attached. */ struct ib_mr *siw_get_dma_mr(struct ib_pd *pd, int rights) { struct siw_device *sdev = to_siw_dev(pd->device); struct siw_mr *mr = NULL; int rv; if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) { siw_dbg_pd(pd, "too many mr's\n"); rv = -ENOMEM; goto err_out; } mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) { rv = -ENOMEM; goto err_out; } rv = siw_mr_add_mem(mr, pd, NULL, 0, ULONG_MAX, rights); if (rv) goto err_out; mr->mem->stag_valid = 1; siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag); return &mr->base_mr; err_out: if (rv) kfree(mr); atomic_dec(&sdev->num_mr); return ERR_PTR(rv); } /* * siw_create_srq() * * Create Shared Receive Queue of attributes @init_attrs * within protection domain given by @pd. * * @base_srq: Base SRQ contained in siw SRQ. * @init_attrs: SRQ init attributes. 
* @udata: points to user context */ int siw_create_srq(struct ib_srq *base_srq, struct ib_srq_init_attr *init_attrs, struct ib_udata *udata) { struct siw_srq *srq = to_siw_srq(base_srq); struct ib_srq_attr *attrs = &init_attrs->attr; struct siw_device *sdev = to_siw_dev(base_srq->device); struct siw_ucontext *ctx = rdma_udata_to_drv_context(udata, struct siw_ucontext, base_ucontext); int rv; if (init_attrs->srq_type != IB_SRQT_BASIC) return -EOPNOTSUPP; if (atomic_inc_return(&sdev->num_srq) > SIW_MAX_SRQ) { siw_dbg_pd(base_srq->pd, "too many SRQ's\n"); rv = -ENOMEM; goto err_out; } if (attrs->max_wr == 0 || attrs->max_wr > SIW_MAX_SRQ_WR || attrs->max_sge > SIW_MAX_SGE || attrs->srq_limit > attrs->max_wr) { rv = -EINVAL; goto err_out; } srq->max_sge = attrs->max_sge; srq->num_rqe = roundup_pow_of_two(attrs->max_wr); srq->limit = attrs->srq_limit; if (srq->limit) srq->armed = true; srq->is_kernel_res = !udata; if (udata) srq->recvq = vmalloc_user(srq->num_rqe * sizeof(struct siw_rqe)); else srq->recvq = vcalloc(srq->num_rqe, sizeof(struct siw_rqe)); if (srq->recvq == NULL) { rv = -ENOMEM; goto err_out; } if (udata) { struct siw_uresp_create_srq uresp = {}; size_t length = srq->num_rqe * sizeof(struct siw_rqe); srq->srq_entry = siw_mmap_entry_insert(ctx, srq->recvq, length, &uresp.srq_key); if (!srq->srq_entry) { rv = -ENOMEM; goto err_out; } uresp.num_rqe = srq->num_rqe; if (udata->outlen < sizeof(uresp)) { rv = -EINVAL; goto err_out; } rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rv) goto err_out; } spin_lock_init(&srq->lock); siw_dbg_pd(base_srq->pd, "[SRQ]: success\n"); return 0; err_out: if (srq->recvq) { if (ctx) rdma_user_mmap_entry_remove(srq->srq_entry); vfree(srq->recvq); } atomic_dec(&sdev->num_srq); return rv; } /* * siw_modify_srq() * * Modify SRQ. The caller may resize SRQ and/or set/reset notification * limit and (re)arm IB_EVENT_SRQ_LIMIT_REACHED notification. * * NOTE: it is unclear if RDMA core allows for changing the MAX_SGE * parameter. siw_modify_srq() does not check the attrs->max_sge param. */ int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) { struct siw_srq *srq = to_siw_srq(base_srq); unsigned long flags; int rv = 0; spin_lock_irqsave(&srq->lock, flags); if (attr_mask & IB_SRQ_MAX_WR) { /* resize request not yet supported */ rv = -EOPNOTSUPP; goto out; } if (attr_mask & IB_SRQ_LIMIT) { if (attrs->srq_limit) { if (unlikely(attrs->srq_limit > srq->num_rqe)) { rv = -EINVAL; goto out; } srq->armed = true; } else { srq->armed = false; } srq->limit = attrs->srq_limit; } out: spin_unlock_irqrestore(&srq->lock, flags); return rv; } /* * siw_query_srq() * * Query SRQ attributes. */ int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs) { struct siw_srq *srq = to_siw_srq(base_srq); unsigned long flags; spin_lock_irqsave(&srq->lock, flags); attrs->max_wr = srq->num_rqe; attrs->max_sge = srq->max_sge; attrs->srq_limit = srq->limit; spin_unlock_irqrestore(&srq->lock, flags); return 0; } /* * siw_destroy_srq() * * Destroy SRQ. * It is assumed that the SRQ is not referenced by any * QP anymore - the code trusts the RDMA core environment to keep track * of QP references. 
*/ int siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata) { struct siw_srq *srq = to_siw_srq(base_srq); struct siw_device *sdev = to_siw_dev(base_srq->device); struct siw_ucontext *ctx = rdma_udata_to_drv_context(udata, struct siw_ucontext, base_ucontext); if (ctx) rdma_user_mmap_entry_remove(srq->srq_entry); vfree(srq->recvq); atomic_dec(&sdev->num_srq); return 0; } /* * siw_post_srq_recv() * * Post a list of receive queue elements to SRQ. * NOTE: The function does not check or lock a certain SRQ state * during the post operation. The code simply trusts the * RDMA core environment. * * @base_srq: Base SRQ contained in siw SRQ * @wr: List of R-WR's * @bad_wr: Updated to failing WR if posting fails. */ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct siw_srq *srq = to_siw_srq(base_srq); unsigned long flags; int rv = 0; if (unlikely(!srq->is_kernel_res)) { siw_dbg_pd(base_srq->pd, "[SRQ]: no kernel post_recv for mapped srq\n"); rv = -EINVAL; goto out; } /* * Serialize potentially multiple producers. * Also needed to serialize potentially multiple * consumers. */ spin_lock_irqsave(&srq->lock, flags); while (wr) { u32 idx = srq->rq_put % srq->num_rqe; struct siw_rqe *rqe = &srq->recvq[idx]; if (rqe->flags) { siw_dbg_pd(base_srq->pd, "SRQ full\n"); rv = -ENOMEM; break; } if (unlikely(wr->num_sge > srq->max_sge)) { siw_dbg_pd(base_srq->pd, "[SRQ]: too many sge's: %d\n", wr->num_sge); rv = -EINVAL; break; } rqe->id = wr->wr_id; rqe->num_sge = wr->num_sge; siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge); /* Make sure S-RQE is completely written before valid */ smp_wmb(); rqe->flags = SIW_WQE_VALID; srq->rq_put++; wr = wr->next; } spin_unlock_irqrestore(&srq->lock, flags); out: if (unlikely(rv < 0)) { siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv); *bad_wr = wr; } return rv; } void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype) { struct ib_event event; struct ib_qp *base_qp = &qp->base_qp; /* * Do not report asynchronous errors on QP which gets * destroyed via verbs interface (siw_destroy_qp()) */ if (qp->attrs.flags & SIW_QP_IN_DESTROY) return; event.event = etype; event.device = base_qp->device; event.element.qp = base_qp; if (base_qp->event_handler) { siw_dbg_qp(qp, "reporting event %d\n", etype); base_qp->event_handler(&event, base_qp->qp_context); } } void siw_cq_event(struct siw_cq *cq, enum ib_event_type etype) { struct ib_event event; struct ib_cq *base_cq = &cq->base_cq; event.event = etype; event.device = base_cq->device; event.element.cq = base_cq; if (base_cq->event_handler) { siw_dbg_cq(cq, "reporting CQ event %d\n", etype); base_cq->event_handler(&event, base_cq->cq_context); } } void siw_srq_event(struct siw_srq *srq, enum ib_event_type etype) { struct ib_event event; struct ib_srq *base_srq = &srq->base_srq; event.event = etype; event.device = base_srq->device; event.element.srq = base_srq; if (base_srq->event_handler) { siw_dbg_pd(srq->base_srq.pd, "reporting SRQ event %d\n", etype); base_srq->event_handler(&event, base_srq->srq_context); } } void siw_port_event(struct siw_device *sdev, u32 port, enum ib_event_type etype) { struct ib_event event; event.event = etype; event.device = &sdev->base_dev; event.element.port_num = port; siw_dbg(&sdev->base_dev, "reporting port event %d\n", etype); ib_dispatch_event(&event); }
linux-master
drivers/infiniband/sw/siw/siw_verbs.c
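/*
 * Illustrative sketch, not part of the siw sources: why siw_create_qp()
 * and siw_create_cq() above round queue sizes up to a power of two.
 * Queue slots are addressed as 'put % size' / 'get % size' on free
 * running 32-bit unsigned counters; that index sequence stays continuous
 * across the counter wrap only if 'size' divides 2^32, i.e. is a power
 * of two. The helper name 'walk' is local to this demo.
 */
#include <stdio.h>
#include <limits.h>

static void walk(unsigned int size)
{
	unsigned int put = UINT_MAX - 2;	/* producer counter close to wrapping */
	int i;

	printf("size %u:", size);
	for (i = 0; i < 6; i++, put++)
		printf(" %u", put % size);	/* ring index of the WQE slot */
	printf("\n");
}

int main(void)
{
	walk(8);	/* power of two: 5 6 7 0 1 2 - continuous over the wrap */
	walk(6);	/* not a power of two: 1 2 3 0 1 2 - slots 4 and 5 are skipped */
	return 0;
}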
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ /* Copyright (c) 2008-2019, IBM Corporation */ #include <linux/errno.h> #include <linux/types.h> #include <rdma/ib_verbs.h> #include "siw.h" static int map_wc_opcode[SIW_NUM_OPCODES] = { [SIW_OP_WRITE] = IB_WC_RDMA_WRITE, [SIW_OP_SEND] = IB_WC_SEND, [SIW_OP_SEND_WITH_IMM] = IB_WC_SEND, [SIW_OP_READ] = IB_WC_RDMA_READ, [SIW_OP_READ_LOCAL_INV] = IB_WC_RDMA_READ, [SIW_OP_COMP_AND_SWAP] = IB_WC_COMP_SWAP, [SIW_OP_FETCH_AND_ADD] = IB_WC_FETCH_ADD, [SIW_OP_INVAL_STAG] = IB_WC_LOCAL_INV, [SIW_OP_REG_MR] = IB_WC_REG_MR, [SIW_OP_RECEIVE] = IB_WC_RECV, [SIW_OP_READ_RESPONSE] = -1 /* not used */ }; static struct { enum siw_wc_status siw; enum ib_wc_status ib; } map_cqe_status[SIW_NUM_WC_STATUS] = { { SIW_WC_SUCCESS, IB_WC_SUCCESS }, { SIW_WC_LOC_LEN_ERR, IB_WC_LOC_LEN_ERR }, { SIW_WC_LOC_PROT_ERR, IB_WC_LOC_PROT_ERR }, { SIW_WC_LOC_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR }, { SIW_WC_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR }, { SIW_WC_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR }, { SIW_WC_LOC_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR }, { SIW_WC_REM_ACCESS_ERR, IB_WC_REM_ACCESS_ERR }, { SIW_WC_REM_INV_REQ_ERR, IB_WC_REM_INV_REQ_ERR }, { SIW_WC_GENERAL_ERR, IB_WC_GENERAL_ERR } }; /* * Reap one CQE from the CQ. Only used by kernel clients * during CQ normal operation. Might be called during CQ * flush for user mapped CQE array as well. */ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) { struct siw_cqe *cqe; unsigned long flags; spin_lock_irqsave(&cq->lock, flags); cqe = &cq->queue[cq->cq_get % cq->num_cqe]; if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) { memset(wc, 0, sizeof(*wc)); wc->wr_id = cqe->id; wc->byte_len = cqe->bytes; /* * During CQ flush, also user land CQE's may get * reaped here, which do not hold a QP reference * and do not qualify for memory extension verbs. */ if (likely(rdma_is_kernel_res(&cq->base_cq.res))) { if (cqe->flags & SIW_WQE_REM_INVAL) { wc->ex.invalidate_rkey = cqe->inval_stag; wc->wc_flags = IB_WC_WITH_INVALIDATE; } wc->qp = cqe->base_qp; wc->opcode = map_wc_opcode[cqe->opcode]; wc->status = map_cqe_status[cqe->status].ib; siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%pK\n", cq->cq_get % cq->num_cqe, cqe->opcode, cqe->flags, (void *)(uintptr_t)cqe->id); } else { /* * A malicious user may set invalid opcode or * status in the user mmapped CQE array. * Sanity check and correct values in that case * to avoid out-of-bounds access to global arrays * for opcode and status mapping. */ u8 opcode = cqe->opcode; u16 status = cqe->status; if (opcode >= SIW_NUM_OPCODES) { opcode = 0; status = SIW_WC_GENERAL_ERR; } else if (status >= SIW_NUM_WC_STATUS) { status = SIW_WC_GENERAL_ERR; } wc->opcode = map_wc_opcode[opcode]; wc->status = map_cqe_status[status].ib; } WRITE_ONCE(cqe->flags, 0); cq->cq_get++; spin_unlock_irqrestore(&cq->lock, flags); return 1; } spin_unlock_irqrestore(&cq->lock, flags); return 0; } /* * siw_cq_flush() * * Flush all CQ elements. */ void siw_cq_flush(struct siw_cq *cq) { struct ib_wc wc; while (siw_reap_cqe(cq, &wc)) ; }
linux-master
drivers/infiniband/sw/siw/siw_cq.c
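/*
 * Illustrative sketch, not part of the siw sources: the kernel consumer
 * pattern that siw_poll_cq() and siw_req_notify_cq() serve.  The CQ is
 * drained, notification is re-armed, and a final poll with
 * IB_CQ_REPORT_MISSED_EVENTS catches completions that raced with the
 * re-arm (siw reports the number of unreaped CQEs in that case).
 * 'my_cq_handler' and 'handle_one_wc' are hypothetical names; a handler
 * like this would be registered as the comp_handler of ib_create_cq().
 */
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

static void handle_one_wc(struct ib_wc *wc)
{
	/* consume wc->wr_id, wc->status, wc->opcode, ... */
}

static void my_cq_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_wc wc[4];
	int n, i;

again:
	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
		for (i = 0; i < n; i++)
			handle_one_wc(&wc[i]);

	/* a positive return means CQEs arrived while re-arming */
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
			     IB_CQ_REPORT_MISSED_EVENTS) > 0)
		goto again;
}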
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ /* Copyright (c) 2008-2019, IBM Corporation */ #include <linux/errno.h> #include <linux/types.h> #include <linux/net.h> #include <linux/scatterlist.h> #include <linux/highmem.h> #include <net/tcp.h> #include <rdma/iw_cm.h> #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> #include "siw.h" #include "siw_verbs.h" #include "siw_mem.h" #define MAX_HDR_INLINE \ (((uint32_t)(sizeof(struct siw_rreq_pkt) - \ sizeof(struct iwarp_send))) & 0xF8) static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx) { struct siw_pbl *pbl = mem->pbl; u64 offset = addr - mem->va; dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); if (paddr) return ib_virt_dma_to_page(paddr); return NULL; } /* * Copy short payload at provided destination payload address */ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr) { struct siw_wqe *wqe = &c_tx->wqe_active; struct siw_sge *sge = &wqe->sqe.sge[0]; u32 bytes = sge->length; if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1) return MAX_HDR_INLINE + 1; if (!bytes) return 0; if (tx_flags(wqe) & SIW_WQE_INLINE) { memcpy(paddr, &wqe->sqe.sge[1], bytes); } else { struct siw_mem *mem = wqe->mem[0]; if (!mem->mem_obj) { /* Kernel client using kva */ memcpy(paddr, ib_virt_dma_to_ptr(sge->laddr), bytes); } else if (c_tx->in_syscall) { if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr), bytes)) return -EFAULT; } else { unsigned int off = sge->laddr & ~PAGE_MASK; struct page *p; char *buffer; int pbl_idx = 0; if (!mem->is_pbl) p = siw_get_upage(mem->umem, sge->laddr); else p = siw_get_pblpage(mem, sge->laddr, &pbl_idx); if (unlikely(!p)) return -EFAULT; buffer = kmap_local_page(p); if (likely(PAGE_SIZE - off >= bytes)) { memcpy(paddr, buffer + off, bytes); } else { unsigned long part = bytes - (PAGE_SIZE - off); memcpy(paddr, buffer + off, part); kunmap_local(buffer); if (!mem->is_pbl) p = siw_get_upage(mem->umem, sge->laddr + part); else p = siw_get_pblpage(mem, sge->laddr + part, &pbl_idx); if (unlikely(!p)) return -EFAULT; buffer = kmap_local_page(p); memcpy(paddr + part, buffer, bytes - part); } kunmap_local(buffer); } } return (int)bytes; } #define PKT_FRAGMENTED 1 #define PKT_COMPLETE 0 /* * siw_qp_prepare_tx() * * Prepare tx state for sending out one fpdu. Builds complete pkt * if no user data or only immediate data are present. * * returns PKT_COMPLETE if complete pkt built, PKT_FRAGMENTED otherwise. 
*/ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx) { struct siw_wqe *wqe = &c_tx->wqe_active; char *crc = NULL; int data = 0; switch (tx_type(wqe)) { case SIW_OP_READ: case SIW_OP_READ_LOCAL_INV: memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_RDMA_READ_REQ].ctrl, sizeof(struct iwarp_ctrl)); c_tx->pkt.rreq.rsvd = 0; c_tx->pkt.rreq.ddp_qn = htonl(RDMAP_UNTAGGED_QN_RDMA_READ); c_tx->pkt.rreq.ddp_msn = htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ]); c_tx->pkt.rreq.ddp_mo = 0; c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey); c_tx->pkt.rreq.sink_to = cpu_to_be64(wqe->sqe.sge[0].laddr); c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey); c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr); c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length); c_tx->ctrl_len = sizeof(struct iwarp_rdma_rreq); crc = (char *)&c_tx->pkt.rreq_pkt.crc; break; case SIW_OP_SEND: if (tx_flags(wqe) & SIW_WQE_SOLICITED) memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_SEND_SE].ctrl, sizeof(struct iwarp_ctrl)); else memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_SEND].ctrl, sizeof(struct iwarp_ctrl)); c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND; c_tx->pkt.send.ddp_msn = htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]); c_tx->pkt.send.ddp_mo = 0; c_tx->pkt.send_inv.inval_stag = 0; c_tx->ctrl_len = sizeof(struct iwarp_send); crc = (char *)&c_tx->pkt.send_pkt.crc; data = siw_try_1seg(c_tx, crc); break; case SIW_OP_SEND_REMOTE_INV: if (tx_flags(wqe) & SIW_WQE_SOLICITED) memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_SEND_SE_INVAL].ctrl, sizeof(struct iwarp_ctrl)); else memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_SEND_INVAL].ctrl, sizeof(struct iwarp_ctrl)); c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND; c_tx->pkt.send.ddp_msn = htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]); c_tx->pkt.send.ddp_mo = 0; c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey); c_tx->ctrl_len = sizeof(struct iwarp_send_inv); crc = (char *)&c_tx->pkt.send_pkt.crc; data = siw_try_1seg(c_tx, crc); break; case SIW_OP_WRITE: memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_RDMA_WRITE].ctrl, sizeof(struct iwarp_ctrl)); c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey); c_tx->pkt.rwrite.sink_to = cpu_to_be64(wqe->sqe.raddr); c_tx->ctrl_len = sizeof(struct iwarp_rdma_write); crc = (char *)&c_tx->pkt.write_pkt.crc; data = siw_try_1seg(c_tx, crc); break; case SIW_OP_READ_RESPONSE: memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_RDMA_READ_RESP].ctrl, sizeof(struct iwarp_ctrl)); /* NBO */ c_tx->pkt.rresp.sink_stag = cpu_to_be32(wqe->sqe.rkey); c_tx->pkt.rresp.sink_to = cpu_to_be64(wqe->sqe.raddr); c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp); crc = (char *)&c_tx->pkt.write_pkt.crc; data = siw_try_1seg(c_tx, crc); break; default: siw_dbg_qp(tx_qp(c_tx), "stale wqe type %d\n", tx_type(wqe)); return -EOPNOTSUPP; } if (unlikely(data < 0)) return data; c_tx->ctrl_sent = 0; if (data <= MAX_HDR_INLINE) { if (data) { wqe->processed = data; c_tx->pkt.ctrl.mpa_len = htons(c_tx->ctrl_len + data - MPA_HDR_SIZE); /* Add pad, if needed */ data += -(int)data & 0x3; /* advance CRC location after payload */ crc += data; c_tx->ctrl_len += data; if (!(c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED)) c_tx->pkt.c_untagged.ddp_mo = 0; else c_tx->pkt.c_tagged.ddp_to = cpu_to_be64(wqe->sqe.raddr); } *(u32 *)crc = 0; /* * Do complete CRC if enabled and short packet */ if (c_tx->mpa_crc_hd) { crypto_shash_init(c_tx->mpa_crc_hd); if (crypto_shash_update(c_tx->mpa_crc_hd, (u8 *)&c_tx->pkt, c_tx->ctrl_len)) return -EINVAL; crypto_shash_final(c_tx->mpa_crc_hd, (u8 *)crc); 
} c_tx->ctrl_len += MPA_CRC_SIZE; return PKT_COMPLETE; } c_tx->ctrl_len += MPA_CRC_SIZE; c_tx->sge_idx = 0; c_tx->sge_off = 0; c_tx->pbl_idx = 0; /* * Allow direct sending out of user buffer if WR is non signalled * and payload is over threshold. * Per RDMA verbs, the application should not change the send buffer * until the work completed. In iWarp, work completion is only * local delivery to TCP. TCP may reuse the buffer for * retransmission. Changing unsent data also breaks the CRC, * if applied. */ if (c_tx->zcopy_tx && wqe->bytes >= SENDPAGE_THRESH && !(tx_flags(wqe) & SIW_WQE_SIGNALLED)) c_tx->use_sendpage = 1; else c_tx->use_sendpage = 0; return PKT_FRAGMENTED; } /* * Send out one complete control type FPDU, or header of FPDU carrying * data. Used for fixed sized packets like Read.Requests or zero length * SENDs, WRITEs, READ.Responses, or header only. */ static int siw_tx_ctrl(struct siw_iwarp_tx *c_tx, struct socket *s, int flags) { struct msghdr msg = { .msg_flags = flags }; struct kvec iov = { .iov_base = (char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent, .iov_len = c_tx->ctrl_len - c_tx->ctrl_sent }; int rv = kernel_sendmsg(s, &msg, &iov, 1, c_tx->ctrl_len - c_tx->ctrl_sent); if (rv >= 0) { c_tx->ctrl_sent += rv; if (c_tx->ctrl_sent == c_tx->ctrl_len) rv = 0; else rv = -EAGAIN; } return rv; } /* * 0copy TCP transmit interface: Use MSG_SPLICE_PAGES. * * Using sendpage to push page by page appears to be less efficient * than using sendmsg, even if data are copied. * * A general performance limitation might be the extra four bytes * trailer checksum segment to be pushed after user data. */ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset, size_t size) { struct bio_vec bvec; struct msghdr msg = { .msg_flags = (MSG_MORE | MSG_DONTWAIT | MSG_SPLICE_PAGES), }; struct sock *sk = s->sk; int i = 0, rv = 0, sent = 0; while (size) { size_t bytes = min_t(size_t, PAGE_SIZE - offset, size); if (size + offset <= PAGE_SIZE) msg.msg_flags &= ~MSG_MORE; tcp_rate_check_app_limited(sk); bvec_set_page(&bvec, page[i], bytes, offset); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); try_page_again: lock_sock(sk); rv = tcp_sendmsg_locked(sk, &msg, size); release_sock(sk); if (rv > 0) { size -= rv; sent += rv; if (rv != bytes) { offset += rv; bytes -= rv; goto try_page_again; } offset = 0; } else { if (rv == -EAGAIN || rv == 0) break; return rv; } i++; } return sent; } /* * siw_0copy_tx() * * Pushes list of pages to TCP socket. If pages from multiple * SGE's, all referenced pages of each SGE are pushed in one * shot. */ static int siw_0copy_tx(struct socket *s, struct page **page, struct siw_sge *sge, unsigned int offset, unsigned int size) { int i = 0, sent = 0, rv; int sge_bytes = min(sge->length - offset, size); offset = (sge->laddr + offset) & ~PAGE_MASK; while (sent != size) { rv = siw_tcp_sendpages(s, &page[i], offset, sge_bytes); if (rv >= 0) { sent += rv; if (size == sent || sge_bytes > rv) break; i += PAGE_ALIGN(sge_bytes + offset) >> PAGE_SHIFT; sge++; sge_bytes = min(sge->length, size - sent); offset = sge->laddr & ~PAGE_MASK; } else { sent = rv; break; } } return sent; } #define MAX_TRAILER (MPA_CRC_SIZE + 4) static void siw_unmap_pages(struct kvec *iov, unsigned long kmap_mask, int len) { int i; /* * Work backwards through the array to honor the kmap_local_page() * ordering requirements. 
*/ for (i = (len-1); i >= 0; i--) { if (kmap_mask & BIT(i)) { unsigned long addr = (unsigned long)iov[i].iov_base; kunmap_local((void *)(addr & PAGE_MASK)); } } } /* * siw_tx_hdt() tries to push a complete packet to TCP where all * packet fragments are referenced by the elements of one iovec. * For the data portion, each involved page must be referenced by * one extra element. All sge's data can be non-aligned to page * boundaries. Two more elements are referencing iWARP header * and trailer: * MAX_ARRAY = 64KB/PAGE_SIZE + 1 + (2 * (SIW_MAX_SGE - 1) + HDR + TRL */ #define MAX_ARRAY ((0xffff / PAGE_SIZE) + 1 + (2 * (SIW_MAX_SGE - 1) + 2)) /* * Write out iov referencing hdr, data and trailer of current FPDU. * Update transmit state dependent on write return status */ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) { struct siw_wqe *wqe = &c_tx->wqe_active; struct siw_sge *sge = &wqe->sqe.sge[c_tx->sge_idx]; struct kvec iov[MAX_ARRAY]; struct page *page_array[MAX_ARRAY]; struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR }; int seg = 0, do_crc = c_tx->do_crc, is_kva = 0, rv; unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0, sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx, pbl_idx = c_tx->pbl_idx; unsigned long kmap_mask = 0L; if (c_tx->state == SIW_SEND_HDR) { if (c_tx->use_sendpage) { rv = siw_tx_ctrl(c_tx, s, MSG_DONTWAIT | MSG_MORE); if (rv) goto done; c_tx->state = SIW_SEND_DATA; } else { iov[0].iov_base = (char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent; iov[0].iov_len = hdr_len = c_tx->ctrl_len - c_tx->ctrl_sent; seg = 1; } } wqe->processed += data_len; while (data_len) { /* walk the list of SGE's */ unsigned int sge_len = min(sge->length - sge_off, data_len); unsigned int fp_off = (sge->laddr + sge_off) & ~PAGE_MASK; struct siw_mem *mem; if (!(tx_flags(wqe) & SIW_WQE_INLINE)) { mem = wqe->mem[sge_idx]; is_kva = mem->mem_obj == NULL ? 1 : 0; } else { is_kva = 1; } if (is_kva && !c_tx->use_sendpage) { /* * tx from kernel virtual address: either inline data * or memory region with assigned kernel buffer */ iov[seg].iov_base = ib_virt_dma_to_ptr(sge->laddr + sge_off); iov[seg].iov_len = sge_len; if (do_crc) crypto_shash_update(c_tx->mpa_crc_hd, iov[seg].iov_base, sge_len); sge_off += sge_len; data_len -= sge_len; seg++; goto sge_done; } while (sge_len) { size_t plen = min((int)PAGE_SIZE - fp_off, sge_len); void *kaddr; if (!is_kva) { struct page *p; if (mem->is_pbl) p = siw_get_pblpage( mem, sge->laddr + sge_off, &pbl_idx); else p = siw_get_upage(mem->umem, sge->laddr + sge_off); if (unlikely(!p)) { siw_unmap_pages(iov, kmap_mask, seg); wqe->processed -= c_tx->bytes_unsent; rv = -EFAULT; goto done_crc; } page_array[seg] = p; if (!c_tx->use_sendpage) { void *kaddr = kmap_local_page(p); /* Remember for later kunmap() */ kmap_mask |= BIT(seg); iov[seg].iov_base = kaddr + fp_off; iov[seg].iov_len = plen; if (do_crc) crypto_shash_update( c_tx->mpa_crc_hd, iov[seg].iov_base, plen); } else if (do_crc) { kaddr = kmap_local_page(p); crypto_shash_update(c_tx->mpa_crc_hd, kaddr + fp_off, plen); kunmap_local(kaddr); } } else { /* * Cast to an uintptr_t to preserve all 64 bits * in sge->laddr. 
*/ u64 va = sge->laddr + sge_off; page_array[seg] = ib_virt_dma_to_page(va); if (do_crc) crypto_shash_update( c_tx->mpa_crc_hd, ib_virt_dma_to_ptr(va), plen); } sge_len -= plen; sge_off += plen; data_len -= plen; fp_off = 0; if (++seg >= (int)MAX_ARRAY) { siw_dbg_qp(tx_qp(c_tx), "to many fragments\n"); siw_unmap_pages(iov, kmap_mask, seg-1); wqe->processed -= c_tx->bytes_unsent; rv = -EMSGSIZE; goto done_crc; } } sge_done: /* Update SGE variables at end of SGE */ if (sge_off == sge->length && (data_len != 0 || wqe->processed < wqe->bytes)) { sge_idx++; sge++; sge_off = 0; } } /* trailer */ if (likely(c_tx->state != SIW_SEND_TRAILER)) { iov[seg].iov_base = &c_tx->trailer.pad[4 - c_tx->pad]; iov[seg].iov_len = trl_len = MAX_TRAILER - (4 - c_tx->pad); } else { iov[seg].iov_base = &c_tx->trailer.pad[c_tx->ctrl_sent]; iov[seg].iov_len = trl_len = MAX_TRAILER - c_tx->ctrl_sent; } if (c_tx->pad) { *(u32 *)c_tx->trailer.pad = 0; if (do_crc) crypto_shash_update(c_tx->mpa_crc_hd, (u8 *)&c_tx->trailer.crc - c_tx->pad, c_tx->pad); } if (!c_tx->mpa_crc_hd) c_tx->trailer.crc = 0; else if (do_crc) crypto_shash_final(c_tx->mpa_crc_hd, (u8 *)&c_tx->trailer.crc); data_len = c_tx->bytes_unsent; if (c_tx->use_sendpage) { rv = siw_0copy_tx(s, page_array, &wqe->sqe.sge[c_tx->sge_idx], c_tx->sge_off, data_len); if (rv == data_len) { rv = kernel_sendmsg(s, &msg, &iov[seg], 1, trl_len); if (rv > 0) rv += data_len; else rv = data_len; } } else { rv = kernel_sendmsg(s, &msg, iov, seg + 1, hdr_len + data_len + trl_len); siw_unmap_pages(iov, kmap_mask, seg); } if (rv < (int)hdr_len) { /* Not even complete hdr pushed or negative rv */ wqe->processed -= data_len; if (rv >= 0) { c_tx->ctrl_sent += rv; rv = -EAGAIN; } goto done_crc; } rv -= hdr_len; if (rv >= (int)data_len) { /* all user data pushed to TCP or no data to push */ if (data_len > 0 && wqe->processed < wqe->bytes) { /* Save the current state for next tx */ c_tx->sge_idx = sge_idx; c_tx->sge_off = sge_off; c_tx->pbl_idx = pbl_idx; } rv -= data_len; if (rv == trl_len) /* all pushed */ rv = 0; else { c_tx->state = SIW_SEND_TRAILER; c_tx->ctrl_len = MAX_TRAILER; c_tx->ctrl_sent = rv + 4 - c_tx->pad; c_tx->bytes_unsent = 0; rv = -EAGAIN; } } else if (data_len > 0) { /* Maybe some user data pushed to TCP */ c_tx->state = SIW_SEND_DATA; wqe->processed -= data_len - rv; if (rv) { /* * Some bytes out. Recompute tx state based * on old state and bytes pushed */ unsigned int sge_unsent; c_tx->bytes_unsent -= rv; sge = &wqe->sqe.sge[c_tx->sge_idx]; sge_unsent = sge->length - c_tx->sge_off; while (sge_unsent <= rv) { rv -= sge_unsent; c_tx->sge_idx++; c_tx->sge_off = 0; sge++; sge_unsent = sge->length; } c_tx->sge_off += rv; } rv = -EAGAIN; } done_crc: c_tx->do_crc = 0; done: return rv; } static void siw_update_tcpseg(struct siw_iwarp_tx *c_tx, struct socket *s) { struct tcp_sock *tp = tcp_sk(s->sk); if (tp->gso_segs) { if (c_tx->gso_seg_limit == 0) c_tx->tcp_seglen = tp->mss_cache * tp->gso_segs; else c_tx->tcp_seglen = tp->mss_cache * min_t(u16, c_tx->gso_seg_limit, tp->gso_segs); } else { c_tx->tcp_seglen = tp->mss_cache; } /* Loopback may give odd numbers */ c_tx->tcp_seglen &= 0xfffffff8; } /* * siw_prepare_fpdu() * * Prepares transmit context to send out one FPDU if FPDU will contain * user data and user data are not immediate data. * Computes maximum FPDU length to fill up TCP MSS if possible. 
* * @qp: QP from which to transmit * @wqe: Current WQE causing transmission * * TODO: Take into account real available sendspace on socket * to avoid header misalignment due to send pausing within * fpdu transmission */ static void siw_prepare_fpdu(struct siw_qp *qp, struct siw_wqe *wqe) { struct siw_iwarp_tx *c_tx = &qp->tx_ctx; int data_len; c_tx->ctrl_len = iwarp_pktinfo[__rdmap_get_opcode(&c_tx->pkt.ctrl)].hdr_len; c_tx->ctrl_sent = 0; /* * Update target buffer offset if any */ if (!(c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED)) /* Untagged message */ c_tx->pkt.c_untagged.ddp_mo = cpu_to_be32(wqe->processed); else /* Tagged message */ c_tx->pkt.c_tagged.ddp_to = cpu_to_be64(wqe->sqe.raddr + wqe->processed); data_len = wqe->bytes - wqe->processed; if (data_len + c_tx->ctrl_len + MPA_CRC_SIZE > c_tx->tcp_seglen) { /* Trim DDP payload to fit into current TCP segment */ data_len = c_tx->tcp_seglen - (c_tx->ctrl_len + MPA_CRC_SIZE); c_tx->pkt.ctrl.ddp_rdmap_ctrl &= ~DDP_FLAG_LAST; c_tx->pad = 0; } else { c_tx->pkt.ctrl.ddp_rdmap_ctrl |= DDP_FLAG_LAST; c_tx->pad = -data_len & 0x3; } c_tx->bytes_unsent = data_len; c_tx->pkt.ctrl.mpa_len = htons(c_tx->ctrl_len + data_len - MPA_HDR_SIZE); /* * Init MPA CRC computation */ if (c_tx->mpa_crc_hd) { crypto_shash_init(c_tx->mpa_crc_hd); crypto_shash_update(c_tx->mpa_crc_hd, (u8 *)&c_tx->pkt, c_tx->ctrl_len); c_tx->do_crc = 1; } } /* * siw_check_sgl_tx() * * Check permissions for a list of SGE's (SGL). * A successful check will have all memory referenced * for transmission resolved and assigned to the WQE. * * @pd: Protection Domain SGL should belong to * @wqe: WQE to be checked * @perms: requested access permissions * */ static int siw_check_sgl_tx(struct ib_pd *pd, struct siw_wqe *wqe, enum ib_access_flags perms) { struct siw_sge *sge = &wqe->sqe.sge[0]; int i, len, num_sge = wqe->sqe.num_sge; if (unlikely(num_sge > SIW_MAX_SGE)) return -EINVAL; for (i = 0, len = 0; num_sge; num_sge--, i++, sge++) { /* * rdma verbs: do not check stag for a zero length sge */ if (sge->length) { int rv = siw_check_sge(pd, sge, &wqe->mem[i], perms, 0, sge->length); if (unlikely(rv != E_ACCESS_OK)) return rv; } len += sge->length; } return len; } /* * siw_qp_sq_proc_tx() * * Process one WQE which needs transmission on the wire. */ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe) { struct siw_iwarp_tx *c_tx = &qp->tx_ctx; struct socket *s = qp->attrs.sk; int rv = 0, burst_len = qp->tx_ctx.burst; enum rdmap_ecode ecode = RDMAP_ECODE_CATASTROPHIC_STREAM; if (unlikely(wqe->wr_status == SIW_WR_IDLE)) return 0; if (!burst_len) burst_len = SQ_USER_MAXBURST; if (wqe->wr_status == SIW_WR_QUEUED) { if (!(wqe->sqe.flags & SIW_WQE_INLINE)) { if (tx_type(wqe) == SIW_OP_READ_RESPONSE) wqe->sqe.num_sge = 1; if (tx_type(wqe) != SIW_OP_READ && tx_type(wqe) != SIW_OP_READ_LOCAL_INV) { /* * Reference memory to be tx'd w/o checking * access for LOCAL_READ permission, since * not defined in RDMA core. 
*/ rv = siw_check_sgl_tx(qp->pd, wqe, 0); if (rv < 0) { if (tx_type(wqe) == SIW_OP_READ_RESPONSE) ecode = siw_rdmap_error(-rv); rv = -EINVAL; goto tx_error; } wqe->bytes = rv; } else { wqe->bytes = 0; } } else { wqe->bytes = wqe->sqe.sge[0].length; if (!rdma_is_kernel_res(&qp->base_qp.res)) { if (wqe->bytes > SIW_MAX_INLINE) { rv = -EINVAL; goto tx_error; } wqe->sqe.sge[0].laddr = (u64)(uintptr_t)&wqe->sqe.sge[1]; } } wqe->wr_status = SIW_WR_INPROGRESS; wqe->processed = 0; siw_update_tcpseg(c_tx, s); rv = siw_qp_prepare_tx(c_tx); if (rv == PKT_FRAGMENTED) { c_tx->state = SIW_SEND_HDR; siw_prepare_fpdu(qp, wqe); } else if (rv == PKT_COMPLETE) { c_tx->state = SIW_SEND_SHORT_FPDU; } else { goto tx_error; } } next_segment: siw_dbg_qp(qp, "wr type %d, state %d, data %u, sent %u, id %llx\n", tx_type(wqe), wqe->wr_status, wqe->bytes, wqe->processed, wqe->sqe.id); if (--burst_len == 0) { rv = -EINPROGRESS; goto tx_done; } if (c_tx->state == SIW_SEND_SHORT_FPDU) { enum siw_opcode tx_type = tx_type(wqe); unsigned int msg_flags; if (siw_sq_empty(qp) || !siw_tcp_nagle || burst_len == 1) /* * End current TCP segment, if SQ runs empty, * or siw_tcp_nagle is not set, or we bail out * soon due to no burst credit left. */ msg_flags = MSG_DONTWAIT; else msg_flags = MSG_DONTWAIT | MSG_MORE; rv = siw_tx_ctrl(c_tx, s, msg_flags); if (!rv && tx_type != SIW_OP_READ && tx_type != SIW_OP_READ_LOCAL_INV) wqe->processed = wqe->bytes; goto tx_done; } else { rv = siw_tx_hdt(c_tx, s); } if (!rv) { /* * One segment sent. Processing completed if last * segment, Do next segment otherwise. */ if (unlikely(c_tx->tx_suspend)) { /* * Verbs, 6.4.: Try stopping sending after a full * DDP segment if the connection goes down * (== peer halfclose) */ rv = -ECONNABORTED; goto tx_done; } if (c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_LAST) { siw_dbg_qp(qp, "WQE completed\n"); goto tx_done; } c_tx->state = SIW_SEND_HDR; siw_update_tcpseg(c_tx, s); siw_prepare_fpdu(qp, wqe); goto next_segment; } tx_done: qp->tx_ctx.burst = burst_len; return rv; tx_error: if (ecode != RDMAP_ECODE_CATASTROPHIC_STREAM) siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP, RDMAP_ETYPE_REMOTE_PROTECTION, ecode, 1); else siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP, RDMAP_ETYPE_CATASTROPHIC, RDMAP_ECODE_UNSPECIFIED, 1); return rv; } static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe) { struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr; struct siw_device *sdev = to_siw_dev(pd->device); struct siw_mem *mem; int rv = 0; siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey); if (unlikely(!base_mr)) { pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey); return -EINVAL; } if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) { pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey); return -EINVAL; } mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); if (unlikely(!mem)) { pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey); return -EINVAL; } if (unlikely(mem->pd != pd)) { pr_warn("siw: fastreg: PD mismatch\n"); rv = -EINVAL; goto out; } if (unlikely(mem->stag_valid)) { pr_warn("siw: fastreg: STag 0x%08x already valid\n", sqe->rkey); rv = -EINVAL; goto out; } /* Refresh STag since user may have changed key part */ mem->stag = sqe->rkey; mem->perms = sqe->access; siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey); mem->va = base_mr->iova; mem->stag_valid = 1; out: siw_mem_put(mem); return rv; } static int siw_qp_sq_proc_local(struct siw_qp *qp, struct siw_wqe *wqe) { int rv; switch (tx_type(wqe)) { case SIW_OP_REG_MR: rv = siw_fastreg_mr(qp->pd, 
&wqe->sqe); break; case SIW_OP_INVAL_STAG: rv = siw_invalidate_stag(qp->pd, wqe->sqe.rkey); break; default: rv = -EINVAL; } return rv; } /* * siw_qp_sq_process() * * Core TX path routine for RDMAP/DDP/MPA using a TCP kernel socket. * Sends RDMAP payload for the current SQ WR @wqe of @qp in one or more * MPA FPDUs, each containing a DDP segment. * * SQ processing may occur in user context as a result of posting * new WQE's or from siw_sq_work_handler() context. Processing in * user context is limited to non-kernel verbs users. * * SQ processing may get paused anytime, possibly in the middle of a WR * or FPDU, if insufficient send space is available. SQ processing * gets resumed from siw_sq_work_handler(), if send space becomes * available again. * * Must be called with the QP state read-locked. * * Note: * An outbound RREQ can be satisfied by the corresponding RRESP * _before_ it gets assigned to the ORQ. This happens regularly * in RDMA READ via loopback case. Since both outbound RREQ and * inbound RRESP can be handled by the same CPU, locking the ORQ * is dead-lock prone and thus not an option. With that, the * RREQ gets assigned to the ORQ _before_ being sent - see * siw_activate_tx() - and pulled back in case of send failure. */ int siw_qp_sq_process(struct siw_qp *qp) { struct siw_wqe *wqe = tx_wqe(qp); enum siw_opcode tx_type; unsigned long flags; int rv = 0; siw_dbg_qp(qp, "enter for type %d\n", tx_type(wqe)); next_wqe: /* * Stop QP processing if SQ state changed */ if (unlikely(qp->tx_ctx.tx_suspend)) { siw_dbg_qp(qp, "tx suspended\n"); goto done; } tx_type = tx_type(wqe); if (tx_type <= SIW_OP_READ_RESPONSE) rv = siw_qp_sq_proc_tx(qp, wqe); else rv = siw_qp_sq_proc_local(qp, wqe); if (!rv) { /* * WQE processing done */ switch (tx_type) { case SIW_OP_SEND: case SIW_OP_SEND_REMOTE_INV: case SIW_OP_WRITE: siw_wqe_put_mem(wqe, tx_type); fallthrough; case SIW_OP_INVAL_STAG: case SIW_OP_REG_MR: if (tx_flags(wqe) & SIW_WQE_SIGNALLED) siw_sqe_complete(qp, &wqe->sqe, wqe->bytes, SIW_WC_SUCCESS); break; case SIW_OP_READ: case SIW_OP_READ_LOCAL_INV: /* * already enqueued to ORQ queue */ break; case SIW_OP_READ_RESPONSE: siw_wqe_put_mem(wqe, tx_type); break; default: WARN(1, "undefined WQE type %d\n", tx_type); rv = -EINVAL; goto done; } spin_lock_irqsave(&qp->sq_lock, flags); wqe->wr_status = SIW_WR_IDLE; rv = siw_activate_tx(qp); spin_unlock_irqrestore(&qp->sq_lock, flags); if (rv <= 0) goto done; goto next_wqe; } else if (rv == -EAGAIN) { siw_dbg_qp(qp, "sq paused: hd/tr %d of %d, data %d\n", qp->tx_ctx.ctrl_sent, qp->tx_ctx.ctrl_len, qp->tx_ctx.bytes_unsent); rv = 0; goto done; } else if (rv == -EINPROGRESS) { rv = siw_sq_start(qp); goto done; } else { /* * WQE processing failed. * Verbs 8.3.2: * o It turns any WQE into a signalled WQE. * o Local catastrophic error must be surfaced * o QP must be moved into Terminate state: done by code * doing socket state change processing * * o TODO: Termination message must be sent. * o TODO: Implement more precise work completion errors, * see enum ib_wc_status in ib_verbs.h */ siw_dbg_qp(qp, "wqe type %d processing failed: %d\n", tx_type(wqe), rv); spin_lock_irqsave(&qp->sq_lock, flags); /* * RREQ may have already been completed by inbound RRESP! 
*/ if ((tx_type == SIW_OP_READ || tx_type == SIW_OP_READ_LOCAL_INV) && qp->attrs.orq_size) { /* Cleanup pending entry in ORQ */ qp->orq_put--; qp->orq[qp->orq_put % qp->attrs.orq_size].flags = 0; } spin_unlock_irqrestore(&qp->sq_lock, flags); /* * immediately suspends further TX processing */ if (!qp->tx_ctx.tx_suspend) siw_qp_cm_drop(qp, 0); switch (tx_type) { case SIW_OP_SEND: case SIW_OP_SEND_REMOTE_INV: case SIW_OP_SEND_WITH_IMM: case SIW_OP_WRITE: case SIW_OP_READ: case SIW_OP_READ_LOCAL_INV: siw_wqe_put_mem(wqe, tx_type); fallthrough; case SIW_OP_INVAL_STAG: case SIW_OP_REG_MR: siw_sqe_complete(qp, &wqe->sqe, wqe->bytes, SIW_WC_LOC_QP_OP_ERR); siw_qp_event(qp, IB_EVENT_QP_FATAL); break; case SIW_OP_READ_RESPONSE: siw_dbg_qp(qp, "proc. read.response failed: %d\n", rv); siw_qp_event(qp, IB_EVENT_QP_REQ_ERR); siw_wqe_put_mem(wqe, SIW_OP_READ_RESPONSE); break; default: WARN(1, "undefined WQE type %d\n", tx_type); rv = -EINVAL; } wqe->wr_status = SIW_WR_IDLE; } done: return rv; } static void siw_sq_resume(struct siw_qp *qp) { if (down_read_trylock(&qp->state_lock)) { if (likely(qp->attrs.state == SIW_QP_STATE_RTS && !qp->tx_ctx.tx_suspend)) { int rv = siw_qp_sq_process(qp); up_read(&qp->state_lock); if (unlikely(rv < 0)) { siw_dbg_qp(qp, "SQ task failed: err %d\n", rv); if (!qp->tx_ctx.tx_suspend) siw_qp_cm_drop(qp, 0); } } else { up_read(&qp->state_lock); } } else { siw_dbg_qp(qp, "Resume SQ while QP locked\n"); } siw_qp_put(qp); } struct tx_task_t { struct llist_head active; wait_queue_head_t waiting; }; static DEFINE_PER_CPU(struct tx_task_t, siw_tx_task_g); int siw_create_tx_threads(void) { int cpu, assigned = 0; for_each_online_cpu(cpu) { struct tx_task_t *tx_task; /* Skip HT cores */ if (cpu % cpumask_weight(topology_sibling_cpumask(cpu))) continue; tx_task = &per_cpu(siw_tx_task_g, cpu); init_llist_head(&tx_task->active); init_waitqueue_head(&tx_task->waiting); siw_tx_thread[cpu] = kthread_run_on_cpu(siw_run_sq, (unsigned long *)(long)cpu, cpu, "siw_tx/%u"); if (IS_ERR(siw_tx_thread[cpu])) { siw_tx_thread[cpu] = NULL; continue; } assigned++; } return assigned; } void siw_stop_tx_threads(void) { int cpu; for_each_possible_cpu(cpu) { if (siw_tx_thread[cpu]) { kthread_stop(siw_tx_thread[cpu]); wake_up(&per_cpu(siw_tx_task_g, cpu).waiting); siw_tx_thread[cpu] = NULL; } } } int siw_run_sq(void *data) { const int nr_cpu = (unsigned int)(long)data; struct llist_node *active; struct siw_qp *qp; struct tx_task_t *tx_task = &per_cpu(siw_tx_task_g, nr_cpu); while (1) { struct llist_node *fifo_list = NULL; wait_event_interruptible(tx_task->waiting, !llist_empty(&tx_task->active) || kthread_should_stop()); if (kthread_should_stop()) break; active = llist_del_all(&tx_task->active); /* * llist_del_all returns a list with newest entry first. * Re-order list for fairness among QP's. 
*/ fifo_list = llist_reverse_order(active); while (fifo_list) { qp = container_of(fifo_list, struct siw_qp, tx_list); fifo_list = llist_next(fifo_list); qp->tx_list.next = NULL; siw_sq_resume(qp); } } active = llist_del_all(&tx_task->active); if (active) { llist_for_each_entry(qp, active, tx_list) { qp->tx_list.next = NULL; siw_sq_resume(qp); } } return 0; } int siw_sq_start(struct siw_qp *qp) { if (tx_wqe(qp)->wr_status == SIW_WR_IDLE) return 0; if (unlikely(!cpu_online(qp->tx_cpu))) { siw_put_tx_cpu(qp->tx_cpu); qp->tx_cpu = siw_get_tx_cpu(qp->sdev); if (qp->tx_cpu < 0) { pr_warn("siw: no tx cpu available\n"); return -EIO; } } siw_qp_get(qp); llist_add(&qp->tx_list, &per_cpu(siw_tx_task_g, qp->tx_cpu).active); wake_up(&per_cpu(siw_tx_task_g, qp->tx_cpu).waiting); return 0; }
linux-master
drivers/infiniband/sw/siw/siw_qp_tx.c
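The TX file above fans work out to per-CPU kthreads: siw_sq_start() pushes a QP onto a lock-free per-CPU llist and wakes the thread, and siw_run_sq() detaches the whole list in one shot and reverses it so QPs are resumed oldest-first ("Re-order list for fairness among QP's"). The userspace sketch below shows only that detach-and-reverse ordering idea with an ordinary singly linked list; struct work, push_work() and detach_fifo() are made-up names, not the kernel llist API, and there is no concurrency here.

/*
 * Minimal userspace sketch (not kernel code) of the "grab all, then
 * reverse" pattern used by siw_run_sq(): producers push newest-first,
 * the consumer detaches everything at once and reverses for FIFO order.
 */
#include <stdio.h>
#include <stdlib.h>

struct work {
	int qp_id;
	struct work *next;
};

/* push at head: cheap for producers, but yields newest-first order */
static void push_work(struct work **head, struct work *w)
{
	w->next = *head;
	*head = w;
}

/* detach the entire list and reverse it so the oldest entry comes first */
static struct work *detach_fifo(struct work **head)
{
	struct work *lifo = *head, *fifo = NULL;

	*head = NULL;
	while (lifo) {
		struct work *next = lifo->next;

		lifo->next = fifo;
		fifo = lifo;
		lifo = next;
	}
	return fifo;
}

int main(void)
{
	struct work *head = NULL, *w;
	int i;

	for (i = 1; i <= 3; i++) {
		w = malloc(sizeof(*w));
		w->qp_id = i;
		push_work(&head, w);
	}
	for (w = detach_fifo(&head); w; ) {
		struct work *next = w->next;

		printf("serving QP %d\n", w->qp_id); /* prints 1, 2, 3 */
		free(w);
		w = next;
	}
	return 0;
}

The design keeps producers cheap (a single head insert) and pays for fairness once, at consume time, instead of keeping the list sorted on every insert.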
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ /* Copyright (c) 2008-2019, IBM Corporation */ #include <linux/errno.h> #include <linux/types.h> #include <linux/net.h> #include <linux/scatterlist.h> #include <linux/highmem.h> #include <rdma/iw_cm.h> #include <rdma/ib_verbs.h> #include "siw.h" #include "siw_verbs.h" #include "siw_mem.h" /* * siw_rx_umem() * * Receive data of @len into target referenced by @dest_addr. * * @srx: Receive Context * @umem: siw representation of target memory * @dest_addr: user virtual address * @len: number of bytes to place */ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem, u64 dest_addr, int len) { int copied = 0; while (len) { struct page *p; int pg_off, bytes, rv; void *dest; p = siw_get_upage(umem, dest_addr); if (unlikely(!p)) { pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n", __func__, qp_id(rx_qp(srx)), (void *)(uintptr_t)dest_addr, (void *)(uintptr_t)umem->fp_addr); /* siw internal error */ srx->skb_copied += copied; srx->skb_new -= copied; return -EFAULT; } pg_off = dest_addr & ~PAGE_MASK; bytes = min(len, (int)PAGE_SIZE - pg_off); siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes); dest = kmap_atomic(p); rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off, bytes); if (unlikely(rv)) { kunmap_atomic(dest); srx->skb_copied += copied; srx->skb_new -= copied; pr_warn("siw: [QP %u]: %s, len %d, page %p, rv %d\n", qp_id(rx_qp(srx)), __func__, len, p, rv); return -EFAULT; } if (srx->mpa_crc_hd) { if (rdma_is_kernel_res(&rx_qp(srx)->base_qp.res)) { crypto_shash_update(srx->mpa_crc_hd, (u8 *)(dest + pg_off), bytes); kunmap_atomic(dest); } else { kunmap_atomic(dest); /* * Do CRC on original, not target buffer. * Some user land applications may * concurrently write the target buffer, * which would yield a broken CRC. * Walking the skb twice is very ineffcient. * Folding the CRC into skb_copy_bits() * would be much better, but is currently * not supported. */ siw_crc_skb(srx, bytes); } } else { kunmap_atomic(dest); } srx->skb_offset += bytes; copied += bytes; len -= bytes; dest_addr += bytes; pg_off = 0; } srx->skb_copied += copied; srx->skb_new -= copied; return copied; } static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len) { int rv; siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len); rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len); if (unlikely(rv)) { pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n", qp_id(rx_qp(srx)), __func__, len, kva, rv); return rv; } if (srx->mpa_crc_hd) crypto_shash_update(srx->mpa_crc_hd, (u8 *)kva, len); srx->skb_offset += len; srx->skb_copied += len; srx->skb_new -= len; return len; } static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx, struct siw_mem *mem, u64 addr, int len) { struct siw_pbl *pbl = mem->pbl; u64 offset = addr - mem->va; int copied = 0; while (len) { int bytes; dma_addr_t buf_addr = siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx); if (!buf_addr) break; bytes = min(bytes, len); if (siw_rx_kva(srx, ib_virt_dma_to_ptr(buf_addr), bytes) == bytes) { copied += bytes; offset += bytes; len -= bytes; } else { break; } } return copied; } /* * siw_rresp_check_ntoh() * * Check incoming RRESP fragment header against expected * header values and update expected values for potential next * fragment. * * NOTE: This function must be called only if a RRESP DDP segment * starts but not for fragmented consecutive pieces of an * already started DDP segment. 
*/ static int siw_rresp_check_ntoh(struct siw_rx_stream *srx, struct siw_rx_fpdu *frx) { struct iwarp_rdma_rresp *rresp = &srx->hdr.rresp; struct siw_wqe *wqe = &frx->wqe_active; enum ddp_ecode ecode; u32 sink_stag = be32_to_cpu(rresp->sink_stag); u64 sink_to = be64_to_cpu(rresp->sink_to); if (frx->first_ddp_seg) { srx->ddp_stag = wqe->sqe.sge[0].lkey; srx->ddp_to = wqe->sqe.sge[0].laddr; frx->pbl_idx = 0; } /* Below checks extend beyond the semantics of DDP, and * into RDMAP: * We check if the read response matches exactly the * read request which was send to the remote peer to * trigger this read response. RFC5040/5041 do not * always have a proper error code for the detected * error cases. We choose 'base or bounds error' for * cases where the inbound STag is valid, but offset * or length do not match our response receive state. */ if (unlikely(srx->ddp_stag != sink_stag)) { pr_warn("siw: [QP %u]: rresp stag: %08x != %08x\n", qp_id(rx_qp(srx)), sink_stag, srx->ddp_stag); ecode = DDP_ECODE_T_INVALID_STAG; goto error; } if (unlikely(srx->ddp_to != sink_to)) { pr_warn("siw: [QP %u]: rresp off: %016llx != %016llx\n", qp_id(rx_qp(srx)), (unsigned long long)sink_to, (unsigned long long)srx->ddp_to); ecode = DDP_ECODE_T_BASE_BOUNDS; goto error; } if (unlikely(!frx->more_ddp_segs && (wqe->processed + srx->fpdu_part_rem != wqe->bytes))) { pr_warn("siw: [QP %u]: rresp len: %d != %d\n", qp_id(rx_qp(srx)), wqe->processed + srx->fpdu_part_rem, wqe->bytes); ecode = DDP_ECODE_T_BASE_BOUNDS; goto error; } return 0; error: siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_DDP, DDP_ETYPE_TAGGED_BUF, ecode, 0); return -EINVAL; } /* * siw_write_check_ntoh() * * Check incoming WRITE fragment header against expected * header values and update expected values for potential next * fragment * * NOTE: This function must be called only if a WRITE DDP segment * starts but not for fragmented consecutive pieces of an * already started DDP segment. */ static int siw_write_check_ntoh(struct siw_rx_stream *srx, struct siw_rx_fpdu *frx) { struct iwarp_rdma_write *write = &srx->hdr.rwrite; enum ddp_ecode ecode; u32 sink_stag = be32_to_cpu(write->sink_stag); u64 sink_to = be64_to_cpu(write->sink_to); if (frx->first_ddp_seg) { srx->ddp_stag = sink_stag; srx->ddp_to = sink_to; frx->pbl_idx = 0; } else { if (unlikely(srx->ddp_stag != sink_stag)) { pr_warn("siw: [QP %u]: write stag: %08x != %08x\n", qp_id(rx_qp(srx)), sink_stag, srx->ddp_stag); ecode = DDP_ECODE_T_INVALID_STAG; goto error; } if (unlikely(srx->ddp_to != sink_to)) { pr_warn("siw: [QP %u]: write off: %016llx != %016llx\n", qp_id(rx_qp(srx)), (unsigned long long)sink_to, (unsigned long long)srx->ddp_to); ecode = DDP_ECODE_T_BASE_BOUNDS; goto error; } } return 0; error: siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_DDP, DDP_ETYPE_TAGGED_BUF, ecode, 0); return -EINVAL; } /* * siw_send_check_ntoh() * * Check incoming SEND fragment header against expected * header values and update expected MSN if no next * fragment expected * * NOTE: This function must be called only if a SEND DDP segment * starts but not for fragmented consecutive pieces of an * already started DDP segment. 
*/ static int siw_send_check_ntoh(struct siw_rx_stream *srx, struct siw_rx_fpdu *frx) { struct iwarp_send_inv *send = &srx->hdr.send_inv; struct siw_wqe *wqe = &frx->wqe_active; enum ddp_ecode ecode; u32 ddp_msn = be32_to_cpu(send->ddp_msn); u32 ddp_mo = be32_to_cpu(send->ddp_mo); u32 ddp_qn = be32_to_cpu(send->ddp_qn); if (unlikely(ddp_qn != RDMAP_UNTAGGED_QN_SEND)) { pr_warn("siw: [QP %u]: invalid ddp qn %d for send\n", qp_id(rx_qp(srx)), ddp_qn); ecode = DDP_ECODE_UT_INVALID_QN; goto error; } if (unlikely(ddp_msn != srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND])) { pr_warn("siw: [QP %u]: send msn: %u != %u\n", qp_id(rx_qp(srx)), ddp_msn, srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]); ecode = DDP_ECODE_UT_INVALID_MSN_RANGE; goto error; } if (unlikely(ddp_mo != wqe->processed)) { pr_warn("siw: [QP %u], send mo: %u != %u\n", qp_id(rx_qp(srx)), ddp_mo, wqe->processed); ecode = DDP_ECODE_UT_INVALID_MO; goto error; } if (frx->first_ddp_seg) { /* initialize user memory write position */ frx->sge_idx = 0; frx->sge_off = 0; frx->pbl_idx = 0; /* only valid for SEND_INV and SEND_SE_INV operations */ srx->inval_stag = be32_to_cpu(send->inval_stag); } if (unlikely(wqe->bytes < wqe->processed + srx->fpdu_part_rem)) { siw_dbg_qp(rx_qp(srx), "receive space short: %d - %d < %d\n", wqe->bytes, wqe->processed, srx->fpdu_part_rem); wqe->wc_status = SIW_WC_LOC_LEN_ERR; ecode = DDP_ECODE_UT_INVALID_MSN_NOBUF; goto error; } return 0; error: siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_DDP, DDP_ETYPE_UNTAGGED_BUF, ecode, 0); return -EINVAL; } static struct siw_wqe *siw_rqe_get(struct siw_qp *qp) { struct siw_rqe *rqe; struct siw_srq *srq; struct siw_wqe *wqe = NULL; bool srq_event = false; unsigned long flags; srq = qp->srq; if (srq) { spin_lock_irqsave(&srq->lock, flags); if (unlikely(!srq->num_rqe)) goto out; rqe = &srq->recvq[srq->rq_get % srq->num_rqe]; } else { if (unlikely(!qp->recvq)) goto out; rqe = &qp->recvq[qp->rq_get % qp->attrs.rq_size]; } if (likely(rqe->flags == SIW_WQE_VALID)) { int num_sge = rqe->num_sge; if (likely(num_sge <= SIW_MAX_SGE)) { int i = 0; wqe = rx_wqe(&qp->rx_untagged); rx_type(wqe) = SIW_OP_RECEIVE; wqe->wr_status = SIW_WR_INPROGRESS; wqe->bytes = 0; wqe->processed = 0; wqe->rqe.id = rqe->id; wqe->rqe.num_sge = num_sge; while (i < num_sge) { wqe->rqe.sge[i].laddr = rqe->sge[i].laddr; wqe->rqe.sge[i].lkey = rqe->sge[i].lkey; wqe->rqe.sge[i].length = rqe->sge[i].length; wqe->bytes += wqe->rqe.sge[i].length; wqe->mem[i] = NULL; i++; } /* can be re-used by appl */ smp_store_mb(rqe->flags, 0); } else { siw_dbg_qp(qp, "too many sge's: %d\n", rqe->num_sge); if (srq) spin_unlock_irqrestore(&srq->lock, flags); return NULL; } if (!srq) { qp->rq_get++; } else { if (srq->armed) { /* Test SRQ limit */ u32 off = (srq->rq_get + srq->limit) % srq->num_rqe; struct siw_rqe *rqe2 = &srq->recvq[off]; if (!(rqe2->flags & SIW_WQE_VALID)) { srq->armed = false; srq_event = true; } } srq->rq_get++; } } out: if (srq) { spin_unlock_irqrestore(&srq->lock, flags); if (srq_event) siw_srq_event(srq, IB_EVENT_SRQ_LIMIT_REACHED); } return wqe; } /* * siw_proc_send: * * Process one incoming SEND and place data into memory referenced by * receive wqe. 
* * Function supports partially received sends (suspending/resuming * current receive wqe processing) * * return value: * 0: reached the end of a DDP segment * -EAGAIN: to be called again to finish the DDP segment */ int siw_proc_send(struct siw_qp *qp) { struct siw_rx_stream *srx = &qp->rx_stream; struct siw_rx_fpdu *frx = &qp->rx_untagged; struct siw_wqe *wqe; u32 data_bytes; /* all data bytes available */ u32 rcvd_bytes; /* sum of data bytes rcvd */ int rv = 0; if (frx->first_ddp_seg) { wqe = siw_rqe_get(qp); if (unlikely(!wqe)) { siw_init_terminate(qp, TERM_ERROR_LAYER_DDP, DDP_ETYPE_UNTAGGED_BUF, DDP_ECODE_UT_INVALID_MSN_NOBUF, 0); return -ENOENT; } } else { wqe = rx_wqe(frx); } if (srx->state == SIW_GET_DATA_START) { rv = siw_send_check_ntoh(srx, frx); if (unlikely(rv)) { siw_qp_event(qp, IB_EVENT_QP_FATAL); return rv; } if (!srx->fpdu_part_rem) /* zero length SEND */ return 0; } data_bytes = min(srx->fpdu_part_rem, srx->skb_new); rcvd_bytes = 0; /* A zero length SEND will skip below loop */ while (data_bytes) { struct ib_pd *pd; struct siw_mem **mem, *mem_p; struct siw_sge *sge; u32 sge_bytes; /* data bytes avail for SGE */ sge = &wqe->rqe.sge[frx->sge_idx]; if (!sge->length) { /* just skip empty sge's */ frx->sge_idx++; frx->sge_off = 0; frx->pbl_idx = 0; continue; } sge_bytes = min(data_bytes, sge->length - frx->sge_off); mem = &wqe->mem[frx->sge_idx]; /* * check with QP's PD if no SRQ present, SRQ's PD otherwise */ pd = qp->srq == NULL ? qp->pd : qp->srq->base_srq.pd; rv = siw_check_sge(pd, sge, mem, IB_ACCESS_LOCAL_WRITE, frx->sge_off, sge_bytes); if (unlikely(rv)) { siw_init_terminate(qp, TERM_ERROR_LAYER_DDP, DDP_ETYPE_CATASTROPHIC, DDP_ECODE_CATASTROPHIC, 0); siw_qp_event(qp, IB_EVENT_QP_ACCESS_ERR); break; } mem_p = *mem; if (mem_p->mem_obj == NULL) rv = siw_rx_kva(srx, ib_virt_dma_to_ptr(sge->laddr + frx->sge_off), sge_bytes); else if (!mem_p->is_pbl) rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + frx->sge_off, sge_bytes); else rv = siw_rx_pbl(srx, &frx->pbl_idx, mem_p, sge->laddr + frx->sge_off, sge_bytes); if (unlikely(rv != sge_bytes)) { wqe->processed += rcvd_bytes; siw_init_terminate(qp, TERM_ERROR_LAYER_DDP, DDP_ETYPE_CATASTROPHIC, DDP_ECODE_CATASTROPHIC, 0); return -EINVAL; } frx->sge_off += rv; if (frx->sge_off == sge->length) { frx->sge_idx++; frx->sge_off = 0; frx->pbl_idx = 0; } data_bytes -= rv; rcvd_bytes += rv; srx->fpdu_part_rem -= rv; srx->fpdu_part_rcvd += rv; } wqe->processed += rcvd_bytes; if (!srx->fpdu_part_rem) return 0; return (rv < 0) ? 
rv : -EAGAIN; } /* * siw_proc_write: * * Place incoming WRITE after referencing and checking target buffer * Function supports partially received WRITEs (suspending/resuming * current receive processing) * * return value: * 0: reached the end of a DDP segment * -EAGAIN: to be called again to finish the DDP segment */ int siw_proc_write(struct siw_qp *qp) { struct siw_rx_stream *srx = &qp->rx_stream; struct siw_rx_fpdu *frx = &qp->rx_tagged; struct siw_mem *mem; int bytes, rv; if (srx->state == SIW_GET_DATA_START) { if (!srx->fpdu_part_rem) /* zero length WRITE */ return 0; rv = siw_write_check_ntoh(srx, frx); if (unlikely(rv)) { siw_qp_event(qp, IB_EVENT_QP_FATAL); return rv; } } bytes = min(srx->fpdu_part_rem, srx->skb_new); if (frx->first_ddp_seg) { struct siw_wqe *wqe = rx_wqe(frx); rx_mem(frx) = siw_mem_id2obj(qp->sdev, srx->ddp_stag >> 8); if (unlikely(!rx_mem(frx))) { siw_dbg_qp(qp, "sink stag not found/invalid, stag 0x%08x\n", srx->ddp_stag); siw_init_terminate(qp, TERM_ERROR_LAYER_DDP, DDP_ETYPE_TAGGED_BUF, DDP_ECODE_T_INVALID_STAG, 0); return -EINVAL; } wqe->rqe.num_sge = 1; rx_type(wqe) = SIW_OP_WRITE; wqe->wr_status = SIW_WR_INPROGRESS; } mem = rx_mem(frx); /* * Check if application re-registered memory with different * key field of STag. */ if (unlikely(mem->stag != srx->ddp_stag)) { siw_init_terminate(qp, TERM_ERROR_LAYER_DDP, DDP_ETYPE_TAGGED_BUF, DDP_ECODE_T_INVALID_STAG, 0); return -EINVAL; } rv = siw_check_mem(qp->pd, mem, srx->ddp_to + srx->fpdu_part_rcvd, IB_ACCESS_REMOTE_WRITE, bytes); if (unlikely(rv)) { siw_init_terminate(qp, TERM_ERROR_LAYER_DDP, DDP_ETYPE_TAGGED_BUF, siw_tagged_error(-rv), 0); siw_qp_event(qp, IB_EVENT_QP_ACCESS_ERR); return -EINVAL; } if (mem->mem_obj == NULL) rv = siw_rx_kva(srx, (void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd), bytes); else if (!mem->is_pbl) rv = siw_rx_umem(srx, mem->umem, srx->ddp_to + srx->fpdu_part_rcvd, bytes); else rv = siw_rx_pbl(srx, &frx->pbl_idx, mem, srx->ddp_to + srx->fpdu_part_rcvd, bytes); if (unlikely(rv != bytes)) { siw_init_terminate(qp, TERM_ERROR_LAYER_DDP, DDP_ETYPE_CATASTROPHIC, DDP_ECODE_CATASTROPHIC, 0); return -EINVAL; } srx->fpdu_part_rem -= rv; srx->fpdu_part_rcvd += rv; if (!srx->fpdu_part_rem) { srx->ddp_to += srx->fpdu_part_rcvd; return 0; } return -EAGAIN; } /* * Inbound RREQ's cannot carry user data. */ int siw_proc_rreq(struct siw_qp *qp) { struct siw_rx_stream *srx = &qp->rx_stream; if (!srx->fpdu_part_rem) return 0; pr_warn("siw: [QP %u]: rreq with mpa len %d\n", qp_id(qp), be16_to_cpu(srx->hdr.ctrl.mpa_len)); return -EPROTO; } /* * siw_init_rresp: * * Process inbound RDMA READ REQ. Produce a pseudo READ RESPONSE WQE. * Put it at the tail of the IRQ, if there is another WQE currently in * transmit processing. If not, make it the current WQE to be processed * and schedule transmit processing. * * Can be called from softirq context and from process * context (RREAD socket loopback case!) 
* * return value: * 0: success, * failure code otherwise */ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx) { struct siw_wqe *tx_work = tx_wqe(qp); struct siw_sqe *resp; uint64_t raddr = be64_to_cpu(srx->hdr.rreq.sink_to), laddr = be64_to_cpu(srx->hdr.rreq.source_to); uint32_t length = be32_to_cpu(srx->hdr.rreq.read_size), lkey = be32_to_cpu(srx->hdr.rreq.source_stag), rkey = be32_to_cpu(srx->hdr.rreq.sink_stag), msn = be32_to_cpu(srx->hdr.rreq.ddp_msn); int run_sq = 1, rv = 0; unsigned long flags; if (unlikely(msn != srx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ])) { siw_init_terminate(qp, TERM_ERROR_LAYER_DDP, DDP_ETYPE_UNTAGGED_BUF, DDP_ECODE_UT_INVALID_MSN_RANGE, 0); return -EPROTO; } spin_lock_irqsave(&qp->sq_lock, flags); if (unlikely(!qp->attrs.irq_size)) { run_sq = 0; goto error_irq; } if (tx_work->wr_status == SIW_WR_IDLE) { /* * immediately schedule READ response w/o * consuming IRQ entry: IRQ must be empty. */ tx_work->processed = 0; tx_work->mem[0] = NULL; tx_work->wr_status = SIW_WR_QUEUED; resp = &tx_work->sqe; } else { resp = irq_alloc_free(qp); run_sq = 0; } if (likely(resp)) { resp->opcode = SIW_OP_READ_RESPONSE; resp->sge[0].length = length; resp->sge[0].laddr = laddr; resp->sge[0].lkey = lkey; /* Keep aside message sequence number for potential * error reporting during Read Response generation. */ resp->sge[1].length = msn; resp->raddr = raddr; resp->rkey = rkey; resp->num_sge = length ? 1 : 0; /* RRESP now valid as current TX wqe or placed into IRQ */ smp_store_mb(resp->flags, SIW_WQE_VALID); } else { error_irq: pr_warn("siw: [QP %u]: IRQ exceeded or null, size %d\n", qp_id(qp), qp->attrs.irq_size); siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP, RDMAP_ETYPE_REMOTE_OPERATION, RDMAP_ECODE_CATASTROPHIC_STREAM, 0); rv = -EPROTO; } spin_unlock_irqrestore(&qp->sq_lock, flags); if (run_sq) rv = siw_sq_start(qp); return rv; } /* * Only called at start of Read.Resonse processing. * Transfer pending Read from tip of ORQ into currrent rx wqe, * but keep ORQ entry valid until Read.Response processing done. * No Queue locking needed. 
*/ static int siw_orqe_start_rx(struct siw_qp *qp) { struct siw_sqe *orqe; struct siw_wqe *wqe = NULL; if (unlikely(!qp->attrs.orq_size)) return -EPROTO; /* make sure ORQ indices are current */ smp_mb(); orqe = orq_get_current(qp); if (READ_ONCE(orqe->flags) & SIW_WQE_VALID) { /* RRESP is a TAGGED RDMAP operation */ wqe = rx_wqe(&qp->rx_tagged); wqe->sqe.id = orqe->id; wqe->sqe.opcode = orqe->opcode; wqe->sqe.sge[0].laddr = orqe->sge[0].laddr; wqe->sqe.sge[0].lkey = orqe->sge[0].lkey; wqe->sqe.sge[0].length = orqe->sge[0].length; wqe->sqe.flags = orqe->flags; wqe->sqe.num_sge = 1; wqe->bytes = orqe->sge[0].length; wqe->processed = 0; wqe->mem[0] = NULL; /* make sure WQE is completely written before valid */ smp_wmb(); wqe->wr_status = SIW_WR_INPROGRESS; return 0; } return -EPROTO; } /* * siw_proc_rresp: * * Place incoming RRESP data into memory referenced by RREQ WQE * which is at the tip of the ORQ * * Function supports partially received RRESP's (suspending/resuming * current receive processing) */ int siw_proc_rresp(struct siw_qp *qp) { struct siw_rx_stream *srx = &qp->rx_stream; struct siw_rx_fpdu *frx = &qp->rx_tagged; struct siw_wqe *wqe = rx_wqe(frx); struct siw_mem **mem, *mem_p; struct siw_sge *sge; int bytes, rv; if (frx->first_ddp_seg) { if (unlikely(wqe->wr_status != SIW_WR_IDLE)) { pr_warn("siw: [QP %u]: proc RRESP: status %d, op %d\n", qp_id(qp), wqe->wr_status, wqe->sqe.opcode); rv = -EPROTO; goto error_term; } /* * fetch pending RREQ from orq */ rv = siw_orqe_start_rx(qp); if (rv) { pr_warn("siw: [QP %u]: ORQ empty, size %d\n", qp_id(qp), qp->attrs.orq_size); goto error_term; } rv = siw_rresp_check_ntoh(srx, frx); if (unlikely(rv)) { siw_qp_event(qp, IB_EVENT_QP_FATAL); return rv; } } else { if (unlikely(wqe->wr_status != SIW_WR_INPROGRESS)) { pr_warn("siw: [QP %u]: resume RRESP: status %d\n", qp_id(qp), wqe->wr_status); rv = -EPROTO; goto error_term; } } if (!srx->fpdu_part_rem) /* zero length RRESPONSE */ return 0; sge = wqe->sqe.sge; /* there is only one */ mem = &wqe->mem[0]; if (!(*mem)) { /* * check target memory which resolves memory on first fragment */ rv = siw_check_sge(qp->pd, sge, mem, IB_ACCESS_LOCAL_WRITE, 0, wqe->bytes); if (unlikely(rv)) { siw_dbg_qp(qp, "target mem check: %d\n", rv); wqe->wc_status = SIW_WC_LOC_PROT_ERR; siw_init_terminate(qp, TERM_ERROR_LAYER_DDP, DDP_ETYPE_TAGGED_BUF, siw_tagged_error(-rv), 0); siw_qp_event(qp, IB_EVENT_QP_ACCESS_ERR); return -EINVAL; } } mem_p = *mem; bytes = min(srx->fpdu_part_rem, srx->skb_new); if (mem_p->mem_obj == NULL) rv = siw_rx_kva(srx, ib_virt_dma_to_ptr(sge->laddr + wqe->processed), bytes); else if (!mem_p->is_pbl) rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed, bytes); else rv = siw_rx_pbl(srx, &frx->pbl_idx, mem_p, sge->laddr + wqe->processed, bytes); if (rv != bytes) { wqe->wc_status = SIW_WC_GENERAL_ERR; rv = -EINVAL; goto error_term; } srx->fpdu_part_rem -= rv; srx->fpdu_part_rcvd += rv; wqe->processed += rv; if (!srx->fpdu_part_rem) { srx->ddp_to += srx->fpdu_part_rcvd; return 0; } return -EAGAIN; error_term: siw_init_terminate(qp, TERM_ERROR_LAYER_DDP, DDP_ETYPE_CATASTROPHIC, DDP_ECODE_CATASTROPHIC, 0); return rv; } int siw_proc_terminate(struct siw_qp *qp) { struct siw_rx_stream *srx = &qp->rx_stream; struct sk_buff *skb = srx->skb; struct iwarp_terminate *term = &srx->hdr.terminate; union iwarp_hdr term_info; u8 *infop = (u8 *)&term_info; enum rdma_opcode op; u16 to_copy = sizeof(struct iwarp_ctrl); pr_warn("siw: got TERMINATE. 
layer %d, type %d, code %d\n", __rdmap_term_layer(term), __rdmap_term_etype(term), __rdmap_term_ecode(term)); if (be32_to_cpu(term->ddp_qn) != RDMAP_UNTAGGED_QN_TERMINATE || be32_to_cpu(term->ddp_msn) != qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] || be32_to_cpu(term->ddp_mo) != 0) { pr_warn("siw: rx bogus TERM [QN x%08x, MSN x%08x, MO x%08x]\n", be32_to_cpu(term->ddp_qn), be32_to_cpu(term->ddp_msn), be32_to_cpu(term->ddp_mo)); return -ECONNRESET; } /* * Receive remaining pieces of TERM if indicated */ if (!term->flag_m) return -ECONNRESET; /* Do not take the effort to reassemble a network fragmented * TERM message */ if (srx->skb_new < sizeof(struct iwarp_ctrl_tagged)) return -ECONNRESET; memset(infop, 0, sizeof(term_info)); skb_copy_bits(skb, srx->skb_offset, infop, to_copy); op = __rdmap_get_opcode(&term_info.ctrl); if (op >= RDMAP_TERMINATE) goto out; infop += to_copy; srx->skb_offset += to_copy; srx->skb_new -= to_copy; srx->skb_copied += to_copy; srx->fpdu_part_rcvd += to_copy; srx->fpdu_part_rem -= to_copy; to_copy = iwarp_pktinfo[op].hdr_len - to_copy; /* Again, no network fragmented TERM's */ if (to_copy + MPA_CRC_SIZE > srx->skb_new) return -ECONNRESET; skb_copy_bits(skb, srx->skb_offset, infop, to_copy); if (term->flag_r) { siw_dbg_qp(qp, "TERM reports RDMAP hdr type %u, len %u (%s)\n", op, be16_to_cpu(term_info.ctrl.mpa_len), term->flag_m ? "valid" : "invalid"); } else if (term->flag_d) { siw_dbg_qp(qp, "TERM reports DDP hdr type %u, len %u (%s)\n", op, be16_to_cpu(term_info.ctrl.mpa_len), term->flag_m ? "valid" : "invalid"); } out: srx->skb_new -= to_copy; srx->skb_offset += to_copy; srx->skb_copied += to_copy; srx->fpdu_part_rcvd += to_copy; srx->fpdu_part_rem -= to_copy; return -ECONNRESET; } static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx) { struct sk_buff *skb = srx->skb; int avail = min(srx->skb_new, srx->fpdu_part_rem); u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad; __wsum crc_in, crc_own = 0; siw_dbg_qp(qp, "expected %d, available %d, pad %u\n", srx->fpdu_part_rem, srx->skb_new, srx->pad); skb_copy_bits(skb, srx->skb_offset, tbuf, avail); srx->skb_new -= avail; srx->skb_offset += avail; srx->skb_copied += avail; srx->fpdu_part_rem -= avail; if (srx->fpdu_part_rem) return -EAGAIN; if (!srx->mpa_crc_hd) return 0; if (srx->pad) crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad); /* * CRC32 is computed, transmitted and received directly in NBO, * so there's never a reason to convert byte order. */ crypto_shash_final(srx->mpa_crc_hd, (u8 *)&crc_own); crc_in = (__force __wsum)srx->trailer.crc; if (unlikely(crc_in != crc_own)) { pr_warn("siw: crc error. 
in: %08x, own %08x, op %u\n", crc_in, crc_own, qp->rx_stream.rdmap_op); siw_init_terminate(qp, TERM_ERROR_LAYER_LLP, LLP_ETYPE_MPA, LLP_ECODE_RECEIVED_CRC, 0); return -EINVAL; } return 0; } #define MIN_DDP_HDR sizeof(struct iwarp_ctrl_tagged) static int siw_get_hdr(struct siw_rx_stream *srx) { struct sk_buff *skb = srx->skb; struct siw_qp *qp = rx_qp(srx); struct iwarp_ctrl *c_hdr = &srx->hdr.ctrl; struct siw_rx_fpdu *frx; u8 opcode; int bytes; if (srx->fpdu_part_rcvd < MIN_DDP_HDR) { /* * copy a mimimum sized (tagged) DDP frame control part */ bytes = min_t(int, srx->skb_new, MIN_DDP_HDR - srx->fpdu_part_rcvd); skb_copy_bits(skb, srx->skb_offset, (char *)c_hdr + srx->fpdu_part_rcvd, bytes); srx->fpdu_part_rcvd += bytes; srx->skb_new -= bytes; srx->skb_offset += bytes; srx->skb_copied += bytes; if (srx->fpdu_part_rcvd < MIN_DDP_HDR) return -EAGAIN; if (unlikely(__ddp_get_version(c_hdr) != DDP_VERSION)) { enum ddp_etype etype; enum ddp_ecode ecode; pr_warn("siw: received ddp version unsupported %d\n", __ddp_get_version(c_hdr)); if (c_hdr->ddp_rdmap_ctrl & DDP_FLAG_TAGGED) { etype = DDP_ETYPE_TAGGED_BUF; ecode = DDP_ECODE_T_VERSION; } else { etype = DDP_ETYPE_UNTAGGED_BUF; ecode = DDP_ECODE_UT_VERSION; } siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_DDP, etype, ecode, 0); return -EINVAL; } if (unlikely(__rdmap_get_version(c_hdr) != RDMAP_VERSION)) { pr_warn("siw: received rdmap version unsupported %d\n", __rdmap_get_version(c_hdr)); siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_RDMAP, RDMAP_ETYPE_REMOTE_OPERATION, RDMAP_ECODE_VERSION, 0); return -EINVAL; } opcode = __rdmap_get_opcode(c_hdr); if (opcode > RDMAP_TERMINATE) { pr_warn("siw: received unknown packet type %u\n", opcode); siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_RDMAP, RDMAP_ETYPE_REMOTE_OPERATION, RDMAP_ECODE_OPCODE, 0); return -EINVAL; } siw_dbg_qp(rx_qp(srx), "new header, opcode %u\n", opcode); } else { opcode = __rdmap_get_opcode(c_hdr); } set_rx_fpdu_context(qp, opcode); frx = qp->rx_fpdu; /* * Figure out len of current hdr: variable length of * iwarp hdr may force us to copy hdr information in * two steps. Only tagged DDP messages are already * completely received. */ if (iwarp_pktinfo[opcode].hdr_len > sizeof(struct iwarp_ctrl_tagged)) { int hdrlen = iwarp_pktinfo[opcode].hdr_len; bytes = min_t(int, hdrlen - MIN_DDP_HDR, srx->skb_new); skb_copy_bits(skb, srx->skb_offset, (char *)c_hdr + srx->fpdu_part_rcvd, bytes); srx->fpdu_part_rcvd += bytes; srx->skb_new -= bytes; srx->skb_offset += bytes; srx->skb_copied += bytes; if (srx->fpdu_part_rcvd < hdrlen) return -EAGAIN; } /* * DDP/RDMAP header receive completed. Check if the current * DDP segment starts a new RDMAP message or continues a previously * started RDMAP message. * * Alternating reception of DDP segments (or FPDUs) from incomplete * tagged and untagged RDMAP messages is supported, as long as * the current tagged or untagged message gets eventually completed * w/o intersection from another message of the same type * (tagged/untagged). E.g., a WRITE can get intersected by a SEND, * but not by a READ RESPONSE etc. */ if (srx->mpa_crc_hd) { /* * Restart CRC computation */ crypto_shash_init(srx->mpa_crc_hd); crypto_shash_update(srx->mpa_crc_hd, (u8 *)c_hdr, srx->fpdu_part_rcvd); } if (frx->more_ddp_segs) { frx->first_ddp_seg = 0; if (frx->prev_rdmap_op != opcode) { pr_warn("siw: packet intersection: %u : %u\n", frx->prev_rdmap_op, opcode); /* * The last inbound RDMA operation of same type * (tagged or untagged) is left unfinished. 
* To complete it in error, make it the current * operation again, even with the header already * overwritten. For error handling, only the opcode * and current rx context are relevant. */ set_rx_fpdu_context(qp, frx->prev_rdmap_op); __rdmap_set_opcode(c_hdr, frx->prev_rdmap_op); return -EPROTO; } } else { frx->prev_rdmap_op = opcode; frx->first_ddp_seg = 1; } frx->more_ddp_segs = c_hdr->ddp_rdmap_ctrl & DDP_FLAG_LAST ? 0 : 1; return 0; } static int siw_check_tx_fence(struct siw_qp *qp) { struct siw_wqe *tx_waiting = tx_wqe(qp); struct siw_sqe *rreq; int resume_tx = 0, rv = 0; unsigned long flags; spin_lock_irqsave(&qp->orq_lock, flags); /* free current orq entry */ rreq = orq_get_current(qp); WRITE_ONCE(rreq->flags, 0); qp->orq_get++; if (qp->tx_ctx.orq_fence) { if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) { pr_warn("siw: [QP %u]: fence resume: bad status %d\n", qp_id(qp), tx_waiting->wr_status); rv = -EPROTO; goto out; } /* resume SQ processing, if possible */ if (tx_waiting->sqe.opcode == SIW_OP_READ || tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) { /* SQ processing was stopped because of a full ORQ */ rreq = orq_get_free(qp); if (unlikely(!rreq)) { pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp)); rv = -EPROTO; goto out; } siw_read_to_orq(rreq, &tx_waiting->sqe); qp->orq_put++; qp->tx_ctx.orq_fence = 0; resume_tx = 1; } else if (siw_orq_empty(qp)) { /* * SQ processing was stopped by fenced work request. * Resume since all previous Read's are now completed. */ qp->tx_ctx.orq_fence = 0; resume_tx = 1; } } out: spin_unlock_irqrestore(&qp->orq_lock, flags); if (resume_tx) rv = siw_sq_start(qp); return rv; } /* * siw_rdmap_complete() * * Complete processing of an RDMA message after receiving all * DDP segmens or ABort processing after encountering error case. * * o SENDs + RRESPs will need for completion, * o RREQs need for READ RESPONSE initialization * o WRITEs need memory dereferencing * * TODO: Failed WRITEs need local error to be surfaced. */ static int siw_rdmap_complete(struct siw_qp *qp, int error) { struct siw_rx_stream *srx = &qp->rx_stream; struct siw_wqe *wqe = rx_wqe(qp->rx_fpdu); enum siw_wc_status wc_status = wqe->wc_status; u8 opcode = __rdmap_get_opcode(&srx->hdr.ctrl); int rv = 0; switch (opcode) { case RDMAP_SEND_SE: case RDMAP_SEND_SE_INVAL: wqe->rqe.flags |= SIW_WQE_SOLICITED; fallthrough; case RDMAP_SEND: case RDMAP_SEND_INVAL: if (wqe->wr_status == SIW_WR_IDLE) break; srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]++; if (error != 0 && wc_status == SIW_WC_SUCCESS) wc_status = SIW_WC_GENERAL_ERR; /* * Handle STag invalidation request */ if (wc_status == SIW_WC_SUCCESS && (opcode == RDMAP_SEND_INVAL || opcode == RDMAP_SEND_SE_INVAL)) { rv = siw_invalidate_stag(qp->pd, srx->inval_stag); if (rv) { siw_init_terminate( qp, TERM_ERROR_LAYER_RDMAP, rv == -EACCES ? RDMAP_ETYPE_REMOTE_PROTECTION : RDMAP_ETYPE_REMOTE_OPERATION, RDMAP_ECODE_CANNOT_INVALIDATE, 0); wc_status = SIW_WC_REM_INV_REQ_ERR; } rv = siw_rqe_complete(qp, &wqe->rqe, wqe->processed, rv ? 
0 : srx->inval_stag, wc_status); } else { rv = siw_rqe_complete(qp, &wqe->rqe, wqe->processed, 0, wc_status); } siw_wqe_put_mem(wqe, SIW_OP_RECEIVE); break; case RDMAP_RDMA_READ_RESP: if (wqe->wr_status == SIW_WR_IDLE) break; if (error != 0) { if ((srx->state == SIW_GET_HDR && qp->rx_fpdu->first_ddp_seg) || error == -ENODATA) /* possible RREQ in ORQ left untouched */ break; if (wc_status == SIW_WC_SUCCESS) wc_status = SIW_WC_GENERAL_ERR; } else if (rdma_is_kernel_res(&qp->base_qp.res) && rx_type(wqe) == SIW_OP_READ_LOCAL_INV) { /* * Handle any STag invalidation request */ rv = siw_invalidate_stag(qp->pd, wqe->sqe.sge[0].lkey); if (rv) { siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP, RDMAP_ETYPE_CATASTROPHIC, RDMAP_ECODE_UNSPECIFIED, 0); if (wc_status == SIW_WC_SUCCESS) { wc_status = SIW_WC_GENERAL_ERR; error = rv; } } } /* * All errors turn the wqe into signalled. */ if ((wqe->sqe.flags & SIW_WQE_SIGNALLED) || error != 0) rv = siw_sqe_complete(qp, &wqe->sqe, wqe->processed, wc_status); siw_wqe_put_mem(wqe, SIW_OP_READ); if (!error) { rv = siw_check_tx_fence(qp); } else { /* Disable current ORQ element */ if (qp->attrs.orq_size) WRITE_ONCE(orq_get_current(qp)->flags, 0); } break; case RDMAP_RDMA_READ_REQ: if (!error) { rv = siw_init_rresp(qp, srx); srx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ]++; } break; case RDMAP_RDMA_WRITE: if (wqe->wr_status == SIW_WR_IDLE) break; /* * Free References from memory object if * attached to receive context (inbound WRITE). * While a zero-length WRITE is allowed, * no memory reference got created. */ if (rx_mem(&qp->rx_tagged)) { siw_mem_put(rx_mem(&qp->rx_tagged)); rx_mem(&qp->rx_tagged) = NULL; } break; default: break; } wqe->wr_status = SIW_WR_IDLE; return rv; } /* * siw_tcp_rx_data() * * Main routine to consume inbound TCP payload * * @rd_desc: read descriptor * @skb: socket buffer * @off: offset in skb * @len: skb->len - offset : payload in skb */ int siw_tcp_rx_data(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int off, size_t len) { struct siw_qp *qp = rd_desc->arg.data; struct siw_rx_stream *srx = &qp->rx_stream; int rv; srx->skb = skb; srx->skb_new = skb->len - off; srx->skb_offset = off; srx->skb_copied = 0; siw_dbg_qp(qp, "new data, len %d\n", srx->skb_new); while (srx->skb_new) { int run_completion = 1; if (unlikely(srx->rx_suspend)) { /* Do not process any more data */ srx->skb_copied += srx->skb_new; break; } switch (srx->state) { case SIW_GET_HDR: rv = siw_get_hdr(srx); if (!rv) { srx->fpdu_part_rem = be16_to_cpu(srx->hdr.ctrl.mpa_len) - srx->fpdu_part_rcvd + MPA_HDR_SIZE; if (srx->fpdu_part_rem) srx->pad = -srx->fpdu_part_rem & 0x3; else srx->pad = 0; srx->state = SIW_GET_DATA_START; srx->fpdu_part_rcvd = 0; } break; case SIW_GET_DATA_MORE: /* * Another data fragment of the same DDP segment. * Setting first_ddp_seg = 0 avoids repeating * initializations that shall occur only once per * DDP segment. */ qp->rx_fpdu->first_ddp_seg = 0; fallthrough; case SIW_GET_DATA_START: /* * Headers will be checked by the opcode-specific * data receive function below. */ rv = iwarp_pktinfo[qp->rx_stream.rdmap_op].rx_data(qp); if (!rv) { int mpa_len = be16_to_cpu(srx->hdr.ctrl.mpa_len) + MPA_HDR_SIZE; srx->fpdu_part_rem = (-mpa_len & 0x3) + MPA_CRC_SIZE; srx->fpdu_part_rcvd = 0; srx->state = SIW_GET_TRAILER; } else { if (unlikely(rv == -ECONNRESET)) run_completion = 0; else srx->state = SIW_GET_DATA_MORE; } break; case SIW_GET_TRAILER: /* * read CRC + any padding */ rv = siw_get_trailer(qp, srx); if (likely(!rv)) { /* * FPDU completed. 
* complete RDMAP message if last fragment */ srx->state = SIW_GET_HDR; srx->fpdu_part_rcvd = 0; if (!(srx->hdr.ctrl.ddp_rdmap_ctrl & DDP_FLAG_LAST)) /* more frags */ break; rv = siw_rdmap_complete(qp, 0); run_completion = 0; } break; default: pr_warn("QP[%u]: RX out of state\n", qp_id(qp)); rv = -EPROTO; run_completion = 0; } if (unlikely(rv != 0 && rv != -EAGAIN)) { if ((srx->state > SIW_GET_HDR || qp->rx_fpdu->more_ddp_segs) && run_completion) siw_rdmap_complete(qp, rv); siw_dbg_qp(qp, "rx error %d, rx state %d\n", rv, srx->state); siw_qp_cm_drop(qp, 1); break; } if (rv) { siw_dbg_qp(qp, "fpdu fragment, state %d, missing %d\n", srx->state, srx->fpdu_part_rem); break; } } return srx->skb_copied; }
linux-master
drivers/infiniband/sw/siw/siw_qp_rx.c
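The receive path above runs a state machine over the TCP byte stream (SIW_GET_HDR, the per-opcode data handlers, SIW_GET_TRAILER), and siw_tcp_rx_data() sizes the trailer consumed by siw_get_trailer() as the pad needed to reach a 4-byte boundary plus the CRC, using the expression (-mpa_len & 0x3) + MPA_CRC_SIZE. The sketch below reproduces just that framing arithmetic in plain userspace C so it can be checked in isolation; fpdu_trailer_len() is an illustrative helper, not a driver function, and the constants simply mirror MPA_HDR_SIZE and MPA_CRC_SIZE.

/*
 * Userspace sketch of the MPA trailer sizing used in siw_tcp_rx_data():
 * given the 16-bit MPA length field (which counts the DDP header and
 * payload, but not the length field itself, the pad or the CRC), how
 * many trailer bytes still follow the FPDU body on the wire.
 */
#include <stdio.h>

#define MPA_HDR_SIZE 2	/* 16-bit MPA length field */
#define MPA_CRC_SIZE 4	/* CRC32c trailer */

static unsigned int fpdu_trailer_len(unsigned int mpa_len_field)
{
	/* bytes framed so far: 2-byte length field plus what it counts */
	unsigned int framed = mpa_len_field + MPA_HDR_SIZE;
	/* same "-len & 0x3" trick the driver uses to reach a 4B boundary */
	unsigned int pad = -framed & 0x3;

	return pad + MPA_CRC_SIZE;
}

int main(void)
{
	unsigned int lens[] = { 14, 17, 1024 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("mpa_len %u -> trailer %u bytes\n",
		       lens[i], fpdu_trailer_len(lens[i]));
	return 0;
}

Running it shows, for instance, that a frame whose MPA length field reads 17 needs 1 pad byte before the 4-byte CRC, while a length of 14 needs none.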
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ /* Copyright (c) 2008-2019, IBM Corporation */ #include <linux/init.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <net/net_namespace.h> #include <linux/rtnetlink.h> #include <linux/if_arp.h> #include <linux/list.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <net/addrconf.h> #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> #include <rdma/rdma_netlink.h> #include <linux/kthread.h> #include "siw.h" #include "siw_verbs.h" MODULE_AUTHOR("Bernard Metzler"); MODULE_DESCRIPTION("Software iWARP Driver"); MODULE_LICENSE("Dual BSD/GPL"); /* transmit from user buffer, if possible */ const bool zcopy_tx = true; /* Restrict usage of GSO, if hardware peer iwarp is unable to process * large packets. try_gso = true lets siw try to use local GSO, * if peer agrees. Not using GSO severly limits siw maximum tx bandwidth. */ const bool try_gso; /* Attach siw also with loopback devices */ const bool loopback_enabled = true; /* We try to negotiate CRC on, if true */ const bool mpa_crc_required; /* MPA CRC on/off enforced */ const bool mpa_crc_strict; /* Control TCP_NODELAY socket option */ const bool siw_tcp_nagle; /* Select MPA version to be used during connection setup */ u_char mpa_version = MPA_REVISION_2; /* Selects MPA P2P mode (additional handshake during connection * setup, if true. */ const bool peer_to_peer; struct task_struct *siw_tx_thread[NR_CPUS]; struct crypto_shash *siw_crypto_shash; static int siw_device_register(struct siw_device *sdev, const char *name) { struct ib_device *base_dev = &sdev->base_dev; static int dev_id = 1; int rv; sdev->vendor_part_id = dev_id++; rv = ib_register_device(base_dev, name, NULL); if (rv) { pr_warn("siw: device registration error %d\n", rv); return rv; } siw_dbg(base_dev, "HWaddr=%pM\n", sdev->raw_gid); return 0; } static void siw_device_cleanup(struct ib_device *base_dev) { struct siw_device *sdev = to_siw_dev(base_dev); xa_destroy(&sdev->qp_xa); xa_destroy(&sdev->mem_xa); } static int siw_dev_qualified(struct net_device *netdev) { /* * Additional hardware support can be added here * (e.g. ARPHRD_FDDI, ARPHRD_ATM, ...) - see * <linux/if_arp.h> for type identifiers. 
*/ if (netdev->type == ARPHRD_ETHER || netdev->type == ARPHRD_IEEE802 || netdev->type == ARPHRD_NONE || (netdev->type == ARPHRD_LOOPBACK && loopback_enabled)) return 1; return 0; } static DEFINE_PER_CPU(atomic_t, siw_use_cnt); static struct { struct cpumask **tx_valid_cpus; int num_nodes; } siw_cpu_info; static int siw_init_cpulist(void) { int i, num_nodes = nr_node_ids; memset(siw_tx_thread, 0, sizeof(siw_tx_thread)); siw_cpu_info.num_nodes = num_nodes; siw_cpu_info.tx_valid_cpus = kcalloc(num_nodes, sizeof(struct cpumask *), GFP_KERNEL); if (!siw_cpu_info.tx_valid_cpus) { siw_cpu_info.num_nodes = 0; return -ENOMEM; } for (i = 0; i < siw_cpu_info.num_nodes; i++) { siw_cpu_info.tx_valid_cpus[i] = kzalloc(sizeof(struct cpumask), GFP_KERNEL); if (!siw_cpu_info.tx_valid_cpus[i]) goto out_err; cpumask_clear(siw_cpu_info.tx_valid_cpus[i]); } for_each_possible_cpu(i) cpumask_set_cpu(i, siw_cpu_info.tx_valid_cpus[cpu_to_node(i)]); return 0; out_err: siw_cpu_info.num_nodes = 0; while (--i >= 0) kfree(siw_cpu_info.tx_valid_cpus[i]); kfree(siw_cpu_info.tx_valid_cpus); siw_cpu_info.tx_valid_cpus = NULL; return -ENOMEM; } static void siw_destroy_cpulist(void) { int i = 0; while (i < siw_cpu_info.num_nodes) kfree(siw_cpu_info.tx_valid_cpus[i++]); kfree(siw_cpu_info.tx_valid_cpus); } /* * Choose CPU with least number of active QP's from NUMA node of * TX interface. */ int siw_get_tx_cpu(struct siw_device *sdev) { const struct cpumask *tx_cpumask; int i, num_cpus, cpu, min_use, node = sdev->numa_node, tx_cpu = -1; if (node < 0) tx_cpumask = cpu_online_mask; else tx_cpumask = siw_cpu_info.tx_valid_cpus[node]; num_cpus = cpumask_weight(tx_cpumask); if (!num_cpus) { /* no CPU on this NUMA node */ tx_cpumask = cpu_online_mask; num_cpus = cpumask_weight(tx_cpumask); } if (!num_cpus) goto out; cpu = cpumask_first(tx_cpumask); for (i = 0, min_use = SIW_MAX_QP; i < num_cpus; i++, cpu = cpumask_next(cpu, tx_cpumask)) { int usage; /* Skip any cores which have no TX thread */ if (!siw_tx_thread[cpu]) continue; usage = atomic_read(&per_cpu(siw_use_cnt, cpu)); if (usage <= min_use) { tx_cpu = cpu; min_use = usage; } } siw_dbg(&sdev->base_dev, "tx cpu %d, node %d, %d qp's\n", tx_cpu, node, min_use); out: if (tx_cpu >= 0) atomic_inc(&per_cpu(siw_use_cnt, tx_cpu)); else pr_warn("siw: no tx cpu found\n"); return tx_cpu; } void siw_put_tx_cpu(int cpu) { atomic_dec(&per_cpu(siw_use_cnt, cpu)); } static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id) { struct siw_qp *qp = siw_qp_id2obj(to_siw_dev(base_dev), id); if (qp) { /* * siw_qp_id2obj() increments object reference count */ siw_qp_put(qp); return &qp->base_qp; } return NULL; } static const struct ib_device_ops siw_device_ops = { .owner = THIS_MODULE, .uverbs_abi_ver = SIW_ABI_VERSION, .driver_id = RDMA_DRIVER_SIW, .alloc_mr = siw_alloc_mr, .alloc_pd = siw_alloc_pd, .alloc_ucontext = siw_alloc_ucontext, .create_cq = siw_create_cq, .create_qp = siw_create_qp, .create_srq = siw_create_srq, .dealloc_driver = siw_device_cleanup, .dealloc_pd = siw_dealloc_pd, .dealloc_ucontext = siw_dealloc_ucontext, .dereg_mr = siw_dereg_mr, .destroy_cq = siw_destroy_cq, .destroy_qp = siw_destroy_qp, .destroy_srq = siw_destroy_srq, .get_dma_mr = siw_get_dma_mr, .get_port_immutable = siw_get_port_immutable, .iw_accept = siw_accept, .iw_add_ref = siw_qp_get_ref, .iw_connect = siw_connect, .iw_create_listen = siw_create_listen, .iw_destroy_listen = siw_destroy_listen, .iw_get_qp = siw_get_base_qp, .iw_reject = siw_reject, .iw_rem_ref = siw_qp_put_ref, .map_mr_sg = 
siw_map_mr_sg, .mmap = siw_mmap, .mmap_free = siw_mmap_free, .modify_qp = siw_verbs_modify_qp, .modify_srq = siw_modify_srq, .poll_cq = siw_poll_cq, .post_recv = siw_post_receive, .post_send = siw_post_send, .post_srq_recv = siw_post_srq_recv, .query_device = siw_query_device, .query_gid = siw_query_gid, .query_port = siw_query_port, .query_qp = siw_query_qp, .query_srq = siw_query_srq, .req_notify_cq = siw_req_notify_cq, .reg_user_mr = siw_reg_user_mr, INIT_RDMA_OBJ_SIZE(ib_cq, siw_cq, base_cq), INIT_RDMA_OBJ_SIZE(ib_pd, siw_pd, base_pd), INIT_RDMA_OBJ_SIZE(ib_qp, siw_qp, base_qp), INIT_RDMA_OBJ_SIZE(ib_srq, siw_srq, base_srq), INIT_RDMA_OBJ_SIZE(ib_ucontext, siw_ucontext, base_ucontext), }; static struct siw_device *siw_device_create(struct net_device *netdev) { struct siw_device *sdev = NULL; struct ib_device *base_dev; int rv; sdev = ib_alloc_device(siw_device, base_dev); if (!sdev) return NULL; base_dev = &sdev->base_dev; sdev->netdev = netdev; if (netdev->addr_len) { memcpy(sdev->raw_gid, netdev->dev_addr, min_t(unsigned int, netdev->addr_len, ETH_ALEN)); } else { /* * This device does not have a HW address, but * connection mangagement requires a unique gid. */ eth_random_addr(sdev->raw_gid); } addrconf_addr_eui48((u8 *)&base_dev->node_guid, sdev->raw_gid); base_dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND); base_dev->node_type = RDMA_NODE_RNIC; memcpy(base_dev->node_desc, SIW_NODE_DESC_COMMON, sizeof(SIW_NODE_DESC_COMMON)); /* * Current model (one-to-one device association): * One Softiwarp device per net_device or, equivalently, * per physical port. */ base_dev->phys_port_cnt = 1; base_dev->num_comp_vectors = num_possible_cpus(); xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1); xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1); ib_set_device_ops(base_dev, &siw_device_ops); rv = ib_device_set_netdev(base_dev, netdev, 1); if (rv) goto error; memcpy(base_dev->iw_ifname, netdev->name, sizeof(base_dev->iw_ifname)); /* Disable TCP port mapping */ base_dev->iw_driver_flags = IW_F_NO_PORT_MAP; sdev->attrs.max_qp = SIW_MAX_QP; sdev->attrs.max_qp_wr = SIW_MAX_QP_WR; sdev->attrs.max_ord = SIW_MAX_ORD_QP; sdev->attrs.max_ird = SIW_MAX_IRD_QP; sdev->attrs.max_sge = SIW_MAX_SGE; sdev->attrs.max_sge_rd = SIW_MAX_SGE_RD; sdev->attrs.max_cq = SIW_MAX_CQ; sdev->attrs.max_cqe = SIW_MAX_CQE; sdev->attrs.max_mr = SIW_MAX_MR; sdev->attrs.max_pd = SIW_MAX_PD; sdev->attrs.max_mw = SIW_MAX_MW; sdev->attrs.max_srq = SIW_MAX_SRQ; sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR; sdev->attrs.max_srq_sge = SIW_MAX_SGE; INIT_LIST_HEAD(&sdev->cep_list); INIT_LIST_HEAD(&sdev->qp_list); atomic_set(&sdev->num_ctx, 0); atomic_set(&sdev->num_srq, 0); atomic_set(&sdev->num_qp, 0); atomic_set(&sdev->num_cq, 0); atomic_set(&sdev->num_mr, 0); atomic_set(&sdev->num_pd, 0); sdev->numa_node = dev_to_node(&netdev->dev); spin_lock_init(&sdev->lock); return sdev; error: ib_dealloc_device(base_dev); return NULL; } /* * Network link becomes unavailable. Mark all * affected QP's accordingly. 
*/ static void siw_netdev_down(struct work_struct *work) { struct siw_device *sdev = container_of(work, struct siw_device, netdev_down); struct siw_qp_attrs qp_attrs; struct list_head *pos, *tmp; memset(&qp_attrs, 0, sizeof(qp_attrs)); qp_attrs.state = SIW_QP_STATE_ERROR; list_for_each_safe(pos, tmp, &sdev->qp_list) { struct siw_qp *qp = list_entry(pos, struct siw_qp, devq); down_write(&qp->state_lock); WARN_ON(siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE)); up_write(&qp->state_lock); } ib_device_put(&sdev->base_dev); } static void siw_device_goes_down(struct siw_device *sdev) { if (ib_device_try_get(&sdev->base_dev)) { INIT_WORK(&sdev->netdev_down, siw_netdev_down); schedule_work(&sdev->netdev_down); } } static int siw_netdev_event(struct notifier_block *nb, unsigned long event, void *arg) { struct net_device *netdev = netdev_notifier_info_to_dev(arg); struct ib_device *base_dev; struct siw_device *sdev; dev_dbg(&netdev->dev, "siw: event %lu\n", event); base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW); if (!base_dev) return NOTIFY_OK; sdev = to_siw_dev(base_dev); switch (event) { case NETDEV_UP: sdev->state = IB_PORT_ACTIVE; siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE); break; case NETDEV_GOING_DOWN: siw_device_goes_down(sdev); break; case NETDEV_DOWN: sdev->state = IB_PORT_DOWN; siw_port_event(sdev, 1, IB_EVENT_PORT_ERR); break; case NETDEV_REGISTER: /* * Device registration now handled only by * rdma netlink commands. So it shall be impossible * to end up here with a valid siw device. */ siw_dbg(base_dev, "unexpected NETDEV_REGISTER event\n"); break; case NETDEV_UNREGISTER: ib_unregister_device_queued(&sdev->base_dev); break; case NETDEV_CHANGEADDR: siw_port_event(sdev, 1, IB_EVENT_LID_CHANGE); break; /* * Todo: Below netdev events are currently not handled. */ case NETDEV_CHANGEMTU: case NETDEV_CHANGE: break; default: break; } ib_device_put(&sdev->base_dev); return NOTIFY_OK; } static struct notifier_block siw_netdev_nb = { .notifier_call = siw_netdev_event, }; static int siw_newlink(const char *basedev_name, struct net_device *netdev) { struct ib_device *base_dev; struct siw_device *sdev = NULL; int rv = -ENOMEM; if (!siw_dev_qualified(netdev)) return -EINVAL; base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW); if (base_dev) { ib_device_put(base_dev); return -EEXIST; } sdev = siw_device_create(netdev); if (sdev) { dev_dbg(&netdev->dev, "siw: new device\n"); if (netif_running(netdev) && netif_carrier_ok(netdev)) sdev->state = IB_PORT_ACTIVE; else sdev->state = IB_PORT_DOWN; rv = siw_device_register(sdev, basedev_name); if (rv) ib_dealloc_device(&sdev->base_dev); } return rv; } static struct rdma_link_ops siw_link_ops = { .type = "siw", .newlink = siw_newlink, }; /* * siw_init_module - Initialize Softiwarp module and register with netdev * subsystem. */ static __init int siw_init_module(void) { int rv; if (SENDPAGE_THRESH < SIW_MAX_INLINE) { pr_info("siw: sendpage threshold too small: %u\n", (int)SENDPAGE_THRESH); rv = -EINVAL; goto out_error; } rv = siw_init_cpulist(); if (rv) goto out_error; rv = siw_cm_init(); if (rv) goto out_error; if (!siw_create_tx_threads()) { pr_info("siw: Could not start any TX thread\n"); rv = -ENOMEM; goto out_error; } /* * Locate CRC32 algorithm. If unsuccessful, fail * loading siw only, if CRC is required. 
*/ siw_crypto_shash = crypto_alloc_shash("crc32c", 0, 0); if (IS_ERR(siw_crypto_shash)) { pr_info("siw: Loading CRC32c failed: %ld\n", PTR_ERR(siw_crypto_shash)); siw_crypto_shash = NULL; if (mpa_crc_required) { rv = -EOPNOTSUPP; goto out_error; } } rv = register_netdevice_notifier(&siw_netdev_nb); if (rv) goto out_error; rdma_link_register(&siw_link_ops); pr_info("SoftiWARP attached\n"); return 0; out_error: siw_stop_tx_threads(); if (siw_crypto_shash) crypto_free_shash(siw_crypto_shash); pr_info("SoftIWARP attach failed. Error: %d\n", rv); siw_cm_exit(); siw_destroy_cpulist(); return rv; } static void __exit siw_exit_module(void) { siw_stop_tx_threads(); unregister_netdevice_notifier(&siw_netdev_nb); rdma_link_unregister(&siw_link_ops); ib_unregister_driver(RDMA_DRIVER_SIW); siw_cm_exit(); siw_destroy_cpulist(); if (siw_crypto_shash) crypto_free_shash(siw_crypto_shash); pr_info("SoftiWARP detached\n"); } module_init(siw_init_module); module_exit(siw_exit_module); MODULE_ALIAS_RDMA_LINK("siw");
linux-master
drivers/infiniband/sw/siw/siw_main.c
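siw_main.c above assigns each QP a TX thread by scanning the NUMA-local CPU mask for the CPU with the fewest active QPs: siw_get_tx_cpu() reads a per-CPU atomic use count, picks the minimum and increments it, and siw_put_tx_cpu() decrements it again when the QP goes away. The sketch below shows the same least-loaded get/put bookkeeping in plain C; NCPUS, use_cnt[] and the function names are illustrative, and plain ints stand in for the per-CPU atomics, so it is single-threaded only.

/*
 * Userspace sketch (not driver code) of least-loaded CPU selection:
 * hand out the index with the smallest use count, bump it on get,
 * drop it on put.
 */
#include <stdio.h>

#define NCPUS 4

static int use_cnt[NCPUS];

static int get_tx_cpu(void)
{
	int cpu, best = 0;

	for (cpu = 1; cpu < NCPUS; cpu++)
		if (use_cnt[cpu] < use_cnt[best])
			best = cpu;
	use_cnt[best]++;
	return best;
}

static void put_tx_cpu(int cpu)
{
	use_cnt[cpu]--;
}

int main(void)
{
	int i, cpu;

	for (i = 0; i < 6; i++) {
		cpu = get_tx_cpu();
		printf("QP %d -> tx cpu %d\n", i, cpu); /* 0,1,2,3,0,1 */
	}
	put_tx_cpu(2);
	printf("next QP -> tx cpu %d\n", get_tx_cpu()); /* reuses cpu 2 */
	return 0;
}

The effect is a rough balance of QPs across the candidate CPUs that self-corrects as QPs are destroyed, which is the behaviour the per-CPU siw_use_cnt counters provide in the driver.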
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ /* Copyright (c) 2008-2019, IBM Corporation */ #include <linux/errno.h> #include <linux/types.h> #include <linux/net.h> #include <linux/scatterlist.h> #include <linux/llist.h> #include <asm/barrier.h> #include <net/tcp.h> #include <trace/events/sock.h> #include "siw.h" #include "siw_verbs.h" #include "siw_mem.h" static char siw_qp_state_to_string[SIW_QP_STATE_COUNT][sizeof "TERMINATE"] = { [SIW_QP_STATE_IDLE] = "IDLE", [SIW_QP_STATE_RTR] = "RTR", [SIW_QP_STATE_RTS] = "RTS", [SIW_QP_STATE_CLOSING] = "CLOSING", [SIW_QP_STATE_TERMINATE] = "TERMINATE", [SIW_QP_STATE_ERROR] = "ERROR" }; /* * iWARP (RDMAP, DDP and MPA) parameters as well as Softiwarp settings on a * per-RDMAP message basis. Please keep order of initializer. All MPA len * is initialized to minimum packet size. */ struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1] = { { /* RDMAP_RDMA_WRITE */ .hdr_len = sizeof(struct iwarp_rdma_write), .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_write) - 2), .ctrl.ddp_rdmap_ctrl = DDP_FLAG_TAGGED | DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) | cpu_to_be16(RDMAP_VERSION << 6) | cpu_to_be16(RDMAP_RDMA_WRITE), .rx_data = siw_proc_write }, { /* RDMAP_RDMA_READ_REQ */ .hdr_len = sizeof(struct iwarp_rdma_rreq), .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_rreq) - 2), .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) | cpu_to_be16(RDMAP_VERSION << 6) | cpu_to_be16(RDMAP_RDMA_READ_REQ), .rx_data = siw_proc_rreq }, { /* RDMAP_RDMA_READ_RESP */ .hdr_len = sizeof(struct iwarp_rdma_rresp), .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_rresp) - 2), .ctrl.ddp_rdmap_ctrl = DDP_FLAG_TAGGED | DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) | cpu_to_be16(RDMAP_VERSION << 6) | cpu_to_be16(RDMAP_RDMA_READ_RESP), .rx_data = siw_proc_rresp }, { /* RDMAP_SEND */ .hdr_len = sizeof(struct iwarp_send), .ctrl.mpa_len = htons(sizeof(struct iwarp_send) - 2), .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) | cpu_to_be16(RDMAP_VERSION << 6) | cpu_to_be16(RDMAP_SEND), .rx_data = siw_proc_send }, { /* RDMAP_SEND_INVAL */ .hdr_len = sizeof(struct iwarp_send_inv), .ctrl.mpa_len = htons(sizeof(struct iwarp_send_inv) - 2), .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) | cpu_to_be16(RDMAP_VERSION << 6) | cpu_to_be16(RDMAP_SEND_INVAL), .rx_data = siw_proc_send }, { /* RDMAP_SEND_SE */ .hdr_len = sizeof(struct iwarp_send), .ctrl.mpa_len = htons(sizeof(struct iwarp_send) - 2), .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) | cpu_to_be16(RDMAP_VERSION << 6) | cpu_to_be16(RDMAP_SEND_SE), .rx_data = siw_proc_send }, { /* RDMAP_SEND_SE_INVAL */ .hdr_len = sizeof(struct iwarp_send_inv), .ctrl.mpa_len = htons(sizeof(struct iwarp_send_inv) - 2), .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) | cpu_to_be16(RDMAP_VERSION << 6) | cpu_to_be16(RDMAP_SEND_SE_INVAL), .rx_data = siw_proc_send }, { /* RDMAP_TERMINATE */ .hdr_len = sizeof(struct iwarp_terminate), .ctrl.mpa_len = htons(sizeof(struct iwarp_terminate) - 2), .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) | cpu_to_be16(RDMAP_VERSION << 6) | cpu_to_be16(RDMAP_TERMINATE), .rx_data = siw_proc_terminate } }; void siw_qp_llp_data_ready(struct sock *sk) { struct siw_qp *qp; trace_sk_data_ready(sk); read_lock(&sk->sk_callback_lock); if (unlikely(!sk->sk_user_data || !sk_to_qp(sk))) goto done; qp = sk_to_qp(sk); if (likely(!qp->rx_stream.rx_suspend && 
down_read_trylock(&qp->state_lock))) { read_descriptor_t rd_desc = { .arg.data = qp, .count = 1 }; if (likely(qp->attrs.state == SIW_QP_STATE_RTS)) /* * Implements data receive operation during * socket callback. TCP gracefully catches * the case where there is nothing to receive * (not calling siw_tcp_rx_data() then). */ tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data); up_read(&qp->state_lock); } else { siw_dbg_qp(qp, "unable to process RX, suspend: %d\n", qp->rx_stream.rx_suspend); } done: read_unlock(&sk->sk_callback_lock); } void siw_qp_llp_close(struct siw_qp *qp) { siw_dbg_qp(qp, "enter llp close, state = %s\n", siw_qp_state_to_string[qp->attrs.state]); down_write(&qp->state_lock); qp->rx_stream.rx_suspend = 1; qp->tx_ctx.tx_suspend = 1; qp->attrs.sk = NULL; switch (qp->attrs.state) { case SIW_QP_STATE_RTS: case SIW_QP_STATE_RTR: case SIW_QP_STATE_IDLE: case SIW_QP_STATE_TERMINATE: qp->attrs.state = SIW_QP_STATE_ERROR; break; /* * SIW_QP_STATE_CLOSING: * * This is a forced close. shall the QP be moved to * ERROR or IDLE ? */ case SIW_QP_STATE_CLOSING: if (tx_wqe(qp)->wr_status == SIW_WR_IDLE) qp->attrs.state = SIW_QP_STATE_ERROR; else qp->attrs.state = SIW_QP_STATE_IDLE; break; default: siw_dbg_qp(qp, "llp close: no state transition needed: %s\n", siw_qp_state_to_string[qp->attrs.state]); break; } siw_sq_flush(qp); siw_rq_flush(qp); /* * Dereference closing CEP */ if (qp->cep) { siw_cep_put(qp->cep); qp->cep = NULL; } up_write(&qp->state_lock); siw_dbg_qp(qp, "llp close exit: state %s\n", siw_qp_state_to_string[qp->attrs.state]); } /* * socket callback routine informing about newly available send space. * Function schedules SQ work for processing SQ items. */ void siw_qp_llp_write_space(struct sock *sk) { struct siw_cep *cep; read_lock(&sk->sk_callback_lock); cep = sk_to_cep(sk); if (cep) { cep->sk_write_space(sk); if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) (void)siw_sq_start(cep->qp); } read_unlock(&sk->sk_callback_lock); } static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size) { if (irq_size) { irq_size = roundup_pow_of_two(irq_size); qp->irq = vcalloc(irq_size, sizeof(struct siw_sqe)); if (!qp->irq) { qp->attrs.irq_size = 0; return -ENOMEM; } } if (orq_size) { orq_size = roundup_pow_of_two(orq_size); qp->orq = vcalloc(orq_size, sizeof(struct siw_sqe)); if (!qp->orq) { qp->attrs.orq_size = 0; qp->attrs.irq_size = 0; vfree(qp->irq); return -ENOMEM; } } qp->attrs.irq_size = irq_size; qp->attrs.orq_size = orq_size; siw_dbg_qp(qp, "ORD %d, IRD %d\n", orq_size, irq_size); return 0; } static int siw_qp_enable_crc(struct siw_qp *qp) { struct siw_rx_stream *c_rx = &qp->rx_stream; struct siw_iwarp_tx *c_tx = &qp->tx_ctx; int size; if (siw_crypto_shash == NULL) return -ENOENT; size = crypto_shash_descsize(siw_crypto_shash) + sizeof(struct shash_desc); c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL); c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL); if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) { kfree(c_tx->mpa_crc_hd); kfree(c_rx->mpa_crc_hd); c_tx->mpa_crc_hd = NULL; c_rx->mpa_crc_hd = NULL; return -ENOMEM; } c_tx->mpa_crc_hd->tfm = siw_crypto_shash; c_rx->mpa_crc_hd->tfm = siw_crypto_shash; return 0; } /* * Send a non signalled READ or WRITE to peer side as negotiated * with MPAv2 P2P setup protocol. The work request is only created * as a current active WR and does not consume Send Queue space. * * Caller must hold QP state lock. 
*/ int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl) { struct siw_wqe *wqe = tx_wqe(qp); unsigned long flags; int rv = 0; spin_lock_irqsave(&qp->sq_lock, flags); if (unlikely(wqe->wr_status != SIW_WR_IDLE)) { spin_unlock_irqrestore(&qp->sq_lock, flags); return -EIO; } memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE); wqe->wr_status = SIW_WR_QUEUED; wqe->sqe.flags = 0; wqe->sqe.num_sge = 1; wqe->sqe.sge[0].length = 0; wqe->sqe.sge[0].laddr = 0; wqe->sqe.sge[0].lkey = 0; /* * While it must not be checked for inbound zero length * READ/WRITE, some HW may treat STag 0 special. */ wqe->sqe.rkey = 1; wqe->sqe.raddr = 0; wqe->processed = 0; if (ctrl & MPA_V2_RDMA_WRITE_RTR) wqe->sqe.opcode = SIW_OP_WRITE; else if (ctrl & MPA_V2_RDMA_READ_RTR) { struct siw_sqe *rreq = NULL; wqe->sqe.opcode = SIW_OP_READ; spin_lock(&qp->orq_lock); if (qp->attrs.orq_size) rreq = orq_get_free(qp); if (rreq) { siw_read_to_orq(rreq, &wqe->sqe); qp->orq_put++; } else rv = -EIO; spin_unlock(&qp->orq_lock); } else rv = -EINVAL; if (rv) wqe->wr_status = SIW_WR_IDLE; spin_unlock_irqrestore(&qp->sq_lock, flags); if (!rv) rv = siw_sq_start(qp); return rv; } /* * Map memory access error to DDP tagged error */ enum ddp_ecode siw_tagged_error(enum siw_access_state state) { switch (state) { case E_STAG_INVALID: return DDP_ECODE_T_INVALID_STAG; case E_BASE_BOUNDS: return DDP_ECODE_T_BASE_BOUNDS; case E_PD_MISMATCH: return DDP_ECODE_T_STAG_NOT_ASSOC; case E_ACCESS_PERM: /* * RFC 5041 (DDP) lacks an ecode for insufficient access * permissions. 'Invalid STag' seem to be the closest * match though. */ return DDP_ECODE_T_INVALID_STAG; default: WARN_ON(1); return DDP_ECODE_T_INVALID_STAG; } } /* * Map memory access error to RDMAP protection error */ enum rdmap_ecode siw_rdmap_error(enum siw_access_state state) { switch (state) { case E_STAG_INVALID: return RDMAP_ECODE_INVALID_STAG; case E_BASE_BOUNDS: return RDMAP_ECODE_BASE_BOUNDS; case E_PD_MISMATCH: return RDMAP_ECODE_STAG_NOT_ASSOC; case E_ACCESS_PERM: return RDMAP_ECODE_ACCESS_RIGHTS; default: return RDMAP_ECODE_UNSPECIFIED; } } void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer, u8 etype, u8 ecode, int in_tx) { if (!qp->term_info.valid) { memset(&qp->term_info, 0, sizeof(qp->term_info)); qp->term_info.layer = layer; qp->term_info.etype = etype; qp->term_info.ecode = ecode; qp->term_info.in_tx = in_tx; qp->term_info.valid = 1; } siw_dbg_qp(qp, "init TERM: layer %d, type %d, code %d, in tx %s\n", layer, etype, ecode, in_tx ? "yes" : "no"); } /* * Send a TERMINATE message, as defined in RFC's 5040/5041/5044/6581. * Sending TERMINATE messages is best effort - such messages * can only be send if the QP is still connected and it does * not have another outbound message in-progress, i.e. the * TERMINATE message must not interfer with an incomplete current * transmit operation. */ void siw_send_terminate(struct siw_qp *qp) { struct kvec iov[3]; struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR }; struct iwarp_terminate *term = NULL; union iwarp_hdr *err_hdr = NULL; struct socket *s = qp->attrs.sk; struct siw_rx_stream *srx = &qp->rx_stream; union iwarp_hdr *rx_hdr = &srx->hdr; u32 crc = 0; int num_frags, len_terminate, rv; if (!qp->term_info.valid) return; qp->term_info.valid = 0; if (tx_wqe(qp)->wr_status == SIW_WR_INPROGRESS) { siw_dbg_qp(qp, "cannot send TERMINATE: op %d in progress\n", tx_type(tx_wqe(qp))); return; } if (!s && qp->cep) /* QP not yet in RTS. 
Take socket from connection end point */ s = qp->cep->sock; if (!s) { siw_dbg_qp(qp, "cannot send TERMINATE: not connected\n"); return; } term = kzalloc(sizeof(*term), GFP_KERNEL); if (!term) return; term->ddp_qn = cpu_to_be32(RDMAP_UNTAGGED_QN_TERMINATE); term->ddp_mo = 0; term->ddp_msn = cpu_to_be32(1); iov[0].iov_base = term; iov[0].iov_len = sizeof(*term); if ((qp->term_info.layer == TERM_ERROR_LAYER_DDP) || ((qp->term_info.layer == TERM_ERROR_LAYER_RDMAP) && (qp->term_info.etype != RDMAP_ETYPE_CATASTROPHIC))) { err_hdr = kzalloc(sizeof(*err_hdr), GFP_KERNEL); if (!err_hdr) { kfree(term); return; } } memcpy(&term->ctrl, &iwarp_pktinfo[RDMAP_TERMINATE].ctrl, sizeof(struct iwarp_ctrl)); __rdmap_term_set_layer(term, qp->term_info.layer); __rdmap_term_set_etype(term, qp->term_info.etype); __rdmap_term_set_ecode(term, qp->term_info.ecode); switch (qp->term_info.layer) { case TERM_ERROR_LAYER_RDMAP: if (qp->term_info.etype == RDMAP_ETYPE_CATASTROPHIC) /* No additional DDP/RDMAP header to be included */ break; if (qp->term_info.etype == RDMAP_ETYPE_REMOTE_PROTECTION) { /* * Complete RDMAP frame will get attached, and * DDP segment length is valid */ term->flag_m = 1; term->flag_d = 1; term->flag_r = 1; if (qp->term_info.in_tx) { struct iwarp_rdma_rreq *rreq; struct siw_wqe *wqe = tx_wqe(qp); /* Inbound RREQ error, detected during * RRESP creation. Take state from * current TX work queue element to * reconstruct peers RREQ. */ rreq = (struct iwarp_rdma_rreq *)err_hdr; memcpy(&rreq->ctrl, &iwarp_pktinfo[RDMAP_RDMA_READ_REQ].ctrl, sizeof(struct iwarp_ctrl)); rreq->rsvd = 0; rreq->ddp_qn = htonl(RDMAP_UNTAGGED_QN_RDMA_READ); /* Provide RREQ's MSN as kept aside */ rreq->ddp_msn = htonl(wqe->sqe.sge[0].length); rreq->ddp_mo = htonl(wqe->processed); rreq->sink_stag = htonl(wqe->sqe.rkey); rreq->sink_to = cpu_to_be64(wqe->sqe.raddr); rreq->read_size = htonl(wqe->sqe.sge[0].length); rreq->source_stag = htonl(wqe->sqe.sge[0].lkey); rreq->source_to = cpu_to_be64(wqe->sqe.sge[0].laddr); iov[1].iov_base = rreq; iov[1].iov_len = sizeof(*rreq); rx_hdr = (union iwarp_hdr *)rreq; } else { /* Take RDMAP/DDP information from * current (failed) inbound frame. */ iov[1].iov_base = rx_hdr; if (__rdmap_get_opcode(&rx_hdr->ctrl) == RDMAP_RDMA_READ_REQ) iov[1].iov_len = sizeof(struct iwarp_rdma_rreq); else /* SEND type */ iov[1].iov_len = sizeof(struct iwarp_send); } } else { /* Do not report DDP hdr information if packet * layout is unknown */ if ((qp->term_info.ecode == RDMAP_ECODE_VERSION) || (qp->term_info.ecode == RDMAP_ECODE_OPCODE)) break; iov[1].iov_base = rx_hdr; /* Only DDP frame will get attached */ if (rx_hdr->ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED) iov[1].iov_len = sizeof(struct iwarp_rdma_write); else iov[1].iov_len = sizeof(struct iwarp_send); term->flag_m = 1; term->flag_d = 1; } term->ctrl.mpa_len = cpu_to_be16(iov[1].iov_len); break; case TERM_ERROR_LAYER_DDP: /* Report error encountered while DDP processing. 
* This can only happen as a result of inbound * DDP processing */ /* Do not report DDP hdr information if packet * layout is unknown */ if (((qp->term_info.etype == DDP_ETYPE_TAGGED_BUF) && (qp->term_info.ecode == DDP_ECODE_T_VERSION)) || ((qp->term_info.etype == DDP_ETYPE_UNTAGGED_BUF) && (qp->term_info.ecode == DDP_ECODE_UT_VERSION))) break; iov[1].iov_base = rx_hdr; if (rx_hdr->ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED) iov[1].iov_len = sizeof(struct iwarp_ctrl_tagged); else iov[1].iov_len = sizeof(struct iwarp_ctrl_untagged); term->flag_m = 1; term->flag_d = 1; break; default: break; } if (term->flag_m || term->flag_d || term->flag_r) { iov[2].iov_base = &crc; iov[2].iov_len = sizeof(crc); len_terminate = sizeof(*term) + iov[1].iov_len + MPA_CRC_SIZE; num_frags = 3; } else { iov[1].iov_base = &crc; iov[1].iov_len = sizeof(crc); len_terminate = sizeof(*term) + MPA_CRC_SIZE; num_frags = 2; } /* Adjust DDP Segment Length parameter, if valid */ if (term->flag_m) { u32 real_ddp_len = be16_to_cpu(rx_hdr->ctrl.mpa_len); enum rdma_opcode op = __rdmap_get_opcode(&rx_hdr->ctrl); real_ddp_len -= iwarp_pktinfo[op].hdr_len - MPA_HDR_SIZE; rx_hdr->ctrl.mpa_len = cpu_to_be16(real_ddp_len); } term->ctrl.mpa_len = cpu_to_be16(len_terminate - (MPA_HDR_SIZE + MPA_CRC_SIZE)); if (qp->tx_ctx.mpa_crc_hd) { crypto_shash_init(qp->tx_ctx.mpa_crc_hd); if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd, (u8 *)iov[0].iov_base, iov[0].iov_len)) goto out; if (num_frags == 3) { if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd, (u8 *)iov[1].iov_base, iov[1].iov_len)) goto out; } crypto_shash_final(qp->tx_ctx.mpa_crc_hd, (u8 *)&crc); } rv = kernel_sendmsg(s, &msg, iov, num_frags, len_terminate); siw_dbg_qp(qp, "sent TERM: %s, layer %d, type %d, code %d (%d bytes)\n", rv == len_terminate ? "success" : "failure", __rdmap_term_layer(term), __rdmap_term_etype(term), __rdmap_term_ecode(term), rv); out: kfree(term); kfree(err_hdr); } /* * Handle all attrs other than state */ static void siw_qp_modify_nonstate(struct siw_qp *qp, struct siw_qp_attrs *attrs, enum siw_qp_attr_mask mask) { if (mask & SIW_QP_ATTR_ACCESS_FLAGS) { if (attrs->flags & SIW_RDMA_BIND_ENABLED) qp->attrs.flags |= SIW_RDMA_BIND_ENABLED; else qp->attrs.flags &= ~SIW_RDMA_BIND_ENABLED; if (attrs->flags & SIW_RDMA_WRITE_ENABLED) qp->attrs.flags |= SIW_RDMA_WRITE_ENABLED; else qp->attrs.flags &= ~SIW_RDMA_WRITE_ENABLED; if (attrs->flags & SIW_RDMA_READ_ENABLED) qp->attrs.flags |= SIW_RDMA_READ_ENABLED; else qp->attrs.flags &= ~SIW_RDMA_READ_ENABLED; } } static int siw_qp_nextstate_from_idle(struct siw_qp *qp, struct siw_qp_attrs *attrs, enum siw_qp_attr_mask mask) { int rv = 0; switch (attrs->state) { case SIW_QP_STATE_RTS: if (attrs->flags & SIW_MPA_CRC) { rv = siw_qp_enable_crc(qp); if (rv) break; } if (!(mask & SIW_QP_ATTR_LLP_HANDLE)) { siw_dbg_qp(qp, "no socket\n"); rv = -EINVAL; break; } if (!(mask & SIW_QP_ATTR_MPA)) { siw_dbg_qp(qp, "no MPA\n"); rv = -EINVAL; break; } /* * Initialize iWARP TX state */ qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 0; qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 0; qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 0; /* * Initialize iWARP RX state */ qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 1; qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 1; qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 1; /* * init IRD free queue, caller has already checked * limits. 
*/ rv = siw_qp_readq_init(qp, attrs->irq_size, attrs->orq_size); if (rv) break; qp->attrs.sk = attrs->sk; qp->attrs.state = SIW_QP_STATE_RTS; siw_dbg_qp(qp, "enter RTS: crc=%s, ord=%u, ird=%u\n", attrs->flags & SIW_MPA_CRC ? "y" : "n", qp->attrs.orq_size, qp->attrs.irq_size); break; case SIW_QP_STATE_ERROR: siw_rq_flush(qp); qp->attrs.state = SIW_QP_STATE_ERROR; if (qp->cep) { siw_cep_put(qp->cep); qp->cep = NULL; } break; default: break; } return rv; } static int siw_qp_nextstate_from_rts(struct siw_qp *qp, struct siw_qp_attrs *attrs) { int drop_conn = 0; switch (attrs->state) { case SIW_QP_STATE_CLOSING: /* * Verbs: move to IDLE if SQ and ORQ are empty. * Move to ERROR otherwise. But first of all we must * close the connection. So we keep CLOSING or ERROR * as a transient state, schedule connection drop work * and wait for the socket state change upcall to * come back closed. */ if (tx_wqe(qp)->wr_status == SIW_WR_IDLE) { qp->attrs.state = SIW_QP_STATE_CLOSING; } else { qp->attrs.state = SIW_QP_STATE_ERROR; siw_sq_flush(qp); } siw_rq_flush(qp); drop_conn = 1; break; case SIW_QP_STATE_TERMINATE: qp->attrs.state = SIW_QP_STATE_TERMINATE; siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP, RDMAP_ETYPE_CATASTROPHIC, RDMAP_ECODE_UNSPECIFIED, 1); drop_conn = 1; break; case SIW_QP_STATE_ERROR: /* * This is an emergency close. * * Any in progress transmit operation will get * cancelled. * This will likely result in a protocol failure, * if a TX operation is in transit. The caller * could unconditional wait to give the current * operation a chance to complete. * Esp., how to handle the non-empty IRQ case? * The peer was asking for data transfer at a valid * point in time. */ siw_sq_flush(qp); siw_rq_flush(qp); qp->attrs.state = SIW_QP_STATE_ERROR; drop_conn = 1; break; default: break; } return drop_conn; } static void siw_qp_nextstate_from_term(struct siw_qp *qp, struct siw_qp_attrs *attrs) { switch (attrs->state) { case SIW_QP_STATE_ERROR: siw_rq_flush(qp); qp->attrs.state = SIW_QP_STATE_ERROR; if (tx_wqe(qp)->wr_status != SIW_WR_IDLE) siw_sq_flush(qp); break; default: break; } } static int siw_qp_nextstate_from_close(struct siw_qp *qp, struct siw_qp_attrs *attrs) { int rv = 0; switch (attrs->state) { case SIW_QP_STATE_IDLE: WARN_ON(tx_wqe(qp)->wr_status != SIW_WR_IDLE); qp->attrs.state = SIW_QP_STATE_IDLE; break; case SIW_QP_STATE_CLOSING: /* * The LLP may already moved the QP to closing * due to graceful peer close init */ break; case SIW_QP_STATE_ERROR: /* * QP was moved to CLOSING by LLP event * not yet seen by user. 
*/ qp->attrs.state = SIW_QP_STATE_ERROR; if (tx_wqe(qp)->wr_status != SIW_WR_IDLE) siw_sq_flush(qp); siw_rq_flush(qp); break; default: siw_dbg_qp(qp, "state transition undefined: %s => %s\n", siw_qp_state_to_string[qp->attrs.state], siw_qp_state_to_string[attrs->state]); rv = -ECONNABORTED; } return rv; } /* * Caller must hold qp->state_lock */ int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attrs, enum siw_qp_attr_mask mask) { int drop_conn = 0, rv = 0; if (!mask) return 0; siw_dbg_qp(qp, "state: %s => %s\n", siw_qp_state_to_string[qp->attrs.state], siw_qp_state_to_string[attrs->state]); if (mask != SIW_QP_ATTR_STATE) siw_qp_modify_nonstate(qp, attrs, mask); if (!(mask & SIW_QP_ATTR_STATE)) return 0; switch (qp->attrs.state) { case SIW_QP_STATE_IDLE: case SIW_QP_STATE_RTR: rv = siw_qp_nextstate_from_idle(qp, attrs, mask); break; case SIW_QP_STATE_RTS: drop_conn = siw_qp_nextstate_from_rts(qp, attrs); break; case SIW_QP_STATE_TERMINATE: siw_qp_nextstate_from_term(qp, attrs); break; case SIW_QP_STATE_CLOSING: siw_qp_nextstate_from_close(qp, attrs); break; default: break; } if (drop_conn) siw_qp_cm_drop(qp, 0); return rv; } void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe) { rreq->id = sqe->id; rreq->opcode = sqe->opcode; rreq->sge[0].laddr = sqe->sge[0].laddr; rreq->sge[0].length = sqe->sge[0].length; rreq->sge[0].lkey = sqe->sge[0].lkey; rreq->sge[1].lkey = sqe->sge[1].lkey; rreq->flags = sqe->flags | SIW_WQE_VALID; rreq->num_sge = 1; } static int siw_activate_tx_from_sq(struct siw_qp *qp) { struct siw_sqe *sqe; struct siw_wqe *wqe = tx_wqe(qp); int rv = 1; sqe = sq_get_next(qp); if (!sqe) return 0; memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE); wqe->wr_status = SIW_WR_QUEUED; /* First copy SQE to kernel private memory */ memcpy(&wqe->sqe, sqe, sizeof(*sqe)); if (wqe->sqe.opcode >= SIW_NUM_OPCODES) { rv = -EINVAL; goto out; } if (wqe->sqe.flags & SIW_WQE_INLINE) { if (wqe->sqe.opcode != SIW_OP_SEND && wqe->sqe.opcode != SIW_OP_WRITE) { rv = -EINVAL; goto out; } if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) { rv = -EINVAL; goto out; } wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1]; wqe->sqe.sge[0].lkey = 0; wqe->sqe.num_sge = 1; } if (wqe->sqe.flags & SIW_WQE_READ_FENCE) { /* A READ cannot be fenced */ if (unlikely(wqe->sqe.opcode == SIW_OP_READ || wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV)) { siw_dbg_qp(qp, "cannot fence read\n"); rv = -EINVAL; goto out; } spin_lock(&qp->orq_lock); if (qp->attrs.orq_size && !siw_orq_empty(qp)) { qp->tx_ctx.orq_fence = 1; rv = 0; } spin_unlock(&qp->orq_lock); } else if (wqe->sqe.opcode == SIW_OP_READ || wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) { struct siw_sqe *rreq; if (unlikely(!qp->attrs.orq_size)) { /* We negotiated not to send READ req's */ rv = -EINVAL; goto out; } wqe->sqe.num_sge = 1; spin_lock(&qp->orq_lock); rreq = orq_get_free(qp); if (rreq) { /* * Make an immediate copy in ORQ to be ready * to process loopback READ reply */ siw_read_to_orq(rreq, &wqe->sqe); qp->orq_put++; } else { qp->tx_ctx.orq_fence = 1; rv = 0; } spin_unlock(&qp->orq_lock); } /* Clear SQE, can be re-used by application */ smp_store_mb(sqe->flags, 0); qp->sq_get++; out: if (unlikely(rv < 0)) { siw_dbg_qp(qp, "error %d\n", rv); wqe->wr_status = SIW_WR_IDLE; } return rv; } /* * Must be called with SQ locked. * To avoid complete SQ starvation by constant inbound READ requests, * the active IRQ will not be served after qp->irq_burst, if the * SQ has pending work. 
*/ int siw_activate_tx(struct siw_qp *qp) { struct siw_sqe *irqe; struct siw_wqe *wqe = tx_wqe(qp); if (!qp->attrs.irq_size) return siw_activate_tx_from_sq(qp); irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size]; if (!(irqe->flags & SIW_WQE_VALID)) return siw_activate_tx_from_sq(qp); /* * Avoid local WQE processing starvation in case * of constant inbound READ request stream */ if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) { qp->irq_burst = 0; return siw_activate_tx_from_sq(qp); } memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE); wqe->wr_status = SIW_WR_QUEUED; /* start READ RESPONSE */ wqe->sqe.opcode = SIW_OP_READ_RESPONSE; wqe->sqe.flags = 0; if (irqe->num_sge) { wqe->sqe.num_sge = 1; wqe->sqe.sge[0].length = irqe->sge[0].length; wqe->sqe.sge[0].laddr = irqe->sge[0].laddr; wqe->sqe.sge[0].lkey = irqe->sge[0].lkey; } else { wqe->sqe.num_sge = 0; } /* Retain original RREQ's message sequence number for * potential error reporting cases. */ wqe->sqe.sge[1].length = irqe->sge[1].length; wqe->sqe.rkey = irqe->rkey; wqe->sqe.raddr = irqe->raddr; wqe->processed = 0; qp->irq_get++; /* mark current IRQ entry free */ smp_store_mb(irqe->flags, 0); return 1; } /* * Check if current CQ state qualifies for calling CQ completion * handler. Must be called with CQ lock held. */ static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags) { u32 cq_notify; if (!cq->base_cq.comp_handler) return false; /* Read application shared notification state */ cq_notify = READ_ONCE(cq->notify->flags); if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) || ((cq_notify & SIW_NOTIFY_SOLICITED) && (flags & SIW_WQE_SOLICITED))) { /* * CQ notification is one-shot: Since the * current CQE causes user notification, * the CQ gets dis-aremd and must be re-aremd * by the user for a new notification. 
*/ WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT); return true; } return false; } int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes, enum siw_wc_status status) { struct siw_cq *cq = qp->scq; int rv = 0; if (cq) { u32 sqe_flags = sqe->flags; struct siw_cqe *cqe; u32 idx; unsigned long flags; spin_lock_irqsave(&cq->lock, flags); idx = cq->cq_put % cq->num_cqe; cqe = &cq->queue[idx]; if (!READ_ONCE(cqe->flags)) { bool notify; cqe->id = sqe->id; cqe->opcode = sqe->opcode; cqe->status = status; cqe->imm_data = 0; cqe->bytes = bytes; if (rdma_is_kernel_res(&cq->base_cq.res)) cqe->base_qp = &qp->base_qp; else cqe->qp_id = qp_id(qp); /* mark CQE valid for application */ WRITE_ONCE(cqe->flags, SIW_WQE_VALID); /* recycle SQE */ smp_store_mb(sqe->flags, 0); cq->cq_put++; notify = siw_cq_notify_now(cq, sqe_flags); spin_unlock_irqrestore(&cq->lock, flags); if (notify) { siw_dbg_cq(cq, "Call completion handler\n"); cq->base_cq.comp_handler(&cq->base_cq, cq->base_cq.cq_context); } } else { spin_unlock_irqrestore(&cq->lock, flags); rv = -ENOMEM; siw_cq_event(cq, IB_EVENT_CQ_ERR); } } else { /* recycle SQE */ smp_store_mb(sqe->flags, 0); } return rv; } int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes, u32 inval_stag, enum siw_wc_status status) { struct siw_cq *cq = qp->rcq; int rv = 0; if (cq) { struct siw_cqe *cqe; u32 idx; unsigned long flags; spin_lock_irqsave(&cq->lock, flags); idx = cq->cq_put % cq->num_cqe; cqe = &cq->queue[idx]; if (!READ_ONCE(cqe->flags)) { bool notify; u8 cqe_flags = SIW_WQE_VALID; cqe->id = rqe->id; cqe->opcode = SIW_OP_RECEIVE; cqe->status = status; cqe->imm_data = 0; cqe->bytes = bytes; if (rdma_is_kernel_res(&cq->base_cq.res)) { cqe->base_qp = &qp->base_qp; if (inval_stag) { cqe_flags |= SIW_WQE_REM_INVAL; cqe->inval_stag = inval_stag; } } else { cqe->qp_id = qp_id(qp); } /* mark CQE valid for application */ WRITE_ONCE(cqe->flags, cqe_flags); /* recycle RQE */ smp_store_mb(rqe->flags, 0); cq->cq_put++; notify = siw_cq_notify_now(cq, SIW_WQE_SIGNALLED); spin_unlock_irqrestore(&cq->lock, flags); if (notify) { siw_dbg_cq(cq, "Call completion handler\n"); cq->base_cq.comp_handler(&cq->base_cq, cq->base_cq.cq_context); } } else { spin_unlock_irqrestore(&cq->lock, flags); rv = -ENOMEM; siw_cq_event(cq, IB_EVENT_CQ_ERR); } } else { /* recycle RQE */ smp_store_mb(rqe->flags, 0); } return rv; } /* * siw_sq_flush() * * Flush SQ and ORRQ entries to CQ. * * Must be called with QP state write lock held. * Therefore, SQ and ORQ lock must not be taken. 
*/ void siw_sq_flush(struct siw_qp *qp) { struct siw_sqe *sqe; struct siw_wqe *wqe = tx_wqe(qp); int async_event = 0; /* * Start with completing any work currently on the ORQ */ while (qp->attrs.orq_size) { sqe = &qp->orq[qp->orq_get % qp->attrs.orq_size]; if (!READ_ONCE(sqe->flags)) break; if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0) break; WRITE_ONCE(sqe->flags, 0); qp->orq_get++; } /* * Flush an in-progress WQE if present */ if (wqe->wr_status != SIW_WR_IDLE) { siw_dbg_qp(qp, "flush current SQE, type %d, status %d\n", tx_type(wqe), wqe->wr_status); siw_wqe_put_mem(wqe, tx_type(wqe)); if (tx_type(wqe) != SIW_OP_READ_RESPONSE && ((tx_type(wqe) != SIW_OP_READ && tx_type(wqe) != SIW_OP_READ_LOCAL_INV) || wqe->wr_status == SIW_WR_QUEUED)) /* * An in-progress Read Request is already in * the ORQ */ siw_sqe_complete(qp, &wqe->sqe, wqe->bytes, SIW_WC_WR_FLUSH_ERR); wqe->wr_status = SIW_WR_IDLE; } /* * Flush the Send Queue */ while (qp->attrs.sq_size) { sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size]; if (!READ_ONCE(sqe->flags)) break; async_event = 1; if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0) /* * Shall IB_EVENT_SQ_DRAINED be supressed if work * completion fails? */ break; WRITE_ONCE(sqe->flags, 0); qp->sq_get++; } if (async_event) siw_qp_event(qp, IB_EVENT_SQ_DRAINED); } /* * siw_rq_flush() * * Flush recv queue entries to CQ. Also * takes care of pending active tagged and untagged * inbound transfers, which have target memory * referenced. * * Must be called with QP state write lock held. * Therefore, RQ lock must not be taken. */ void siw_rq_flush(struct siw_qp *qp) { struct siw_wqe *wqe = &qp->rx_untagged.wqe_active; /* * Flush an in-progress untagged operation if present */ if (wqe->wr_status != SIW_WR_IDLE) { siw_dbg_qp(qp, "flush current rqe, type %d, status %d\n", rx_type(wqe), wqe->wr_status); siw_wqe_put_mem(wqe, rx_type(wqe)); if (rx_type(wqe) == SIW_OP_RECEIVE) { siw_rqe_complete(qp, &wqe->rqe, wqe->bytes, 0, SIW_WC_WR_FLUSH_ERR); } else if (rx_type(wqe) != SIW_OP_READ && rx_type(wqe) != SIW_OP_READ_RESPONSE && rx_type(wqe) != SIW_OP_WRITE) { siw_sqe_complete(qp, &wqe->sqe, 0, SIW_WC_WR_FLUSH_ERR); } wqe->wr_status = SIW_WR_IDLE; } wqe = &qp->rx_tagged.wqe_active; if (wqe->wr_status != SIW_WR_IDLE) { siw_wqe_put_mem(wqe, rx_type(wqe)); wqe->wr_status = SIW_WR_IDLE; } /* * Flush the Receive Queue */ while (qp->attrs.rq_size) { struct siw_rqe *rqe = &qp->recvq[qp->rq_get % qp->attrs.rq_size]; if (!READ_ONCE(rqe->flags)) break; if (siw_rqe_complete(qp, rqe, 0, 0, SIW_WC_WR_FLUSH_ERR) != 0) break; WRITE_ONCE(rqe->flags, 0); qp->rq_get++; } } int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp) { int rv = xa_alloc(&sdev->qp_xa, &qp->base_qp.qp_num, qp, xa_limit_32b, GFP_KERNEL); if (!rv) { kref_init(&qp->ref); qp->sdev = sdev; siw_dbg_qp(qp, "new QP\n"); } return rv; } void siw_free_qp(struct kref *ref) { struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref); struct siw_device *sdev = qp->sdev; unsigned long flags; if (qp->cep) siw_cep_put(qp->cep); found = xa_erase(&sdev->qp_xa, qp_id(qp)); WARN_ON(found != qp); spin_lock_irqsave(&sdev->lock, flags); list_del(&qp->devq); spin_unlock_irqrestore(&sdev->lock, flags); vfree(qp->sendq); vfree(qp->recvq); vfree(qp->irq); vfree(qp->orq); siw_put_tx_cpu(qp->tx_cpu); complete(&qp->qp_free); atomic_dec(&sdev->num_qp); }
linux-master
drivers/infiniband/sw/siw/siw_qp.c
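A minimal user-space sketch of the one-shot CQ arming behaviour implemented by siw_cq_notify_now() in siw_qp.c above, assuming simplified flag names and no locking; it models only the disarm-on-notify rule and is not kernel code.

/* Model of the one-shot CQ notification decision: once an armed CQ
 * produces a notification it is disarmed and stays silent until the
 * consumer re-arms it. The kernel code additionally holds cq->lock
 * and uses READ_ONCE()/WRITE_ONCE() on the shared flags word.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NOTIFY_NOT        0u
#define NOTIFY_SOLICITED  (1u << 0)
#define NOTIFY_NEXT_COMPL (1u << 1)
#define WQE_SOLICITED     (1u << 0)

struct model_cq {
	uint32_t notify_flags;	/* arming state shared with the consumer */
};

/* Returns true at most once per arming, mirroring the kernel's
 * WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT) before the upcall.
 */
static bool cq_notify_now(struct model_cq *cq, uint32_t wqe_flags)
{
	uint32_t armed = cq->notify_flags;

	if ((armed & NOTIFY_NEXT_COMPL) ||
	    ((armed & NOTIFY_SOLICITED) && (wqe_flags & WQE_SOLICITED))) {
		cq->notify_flags = NOTIFY_NOT;	/* one-shot: disarm */
		return true;
	}
	return false;
}

int main(void)
{
	struct model_cq cq = { .notify_flags = NOTIFY_NEXT_COMPL };

	/* First completion fires the handler and disarms the CQ ... */
	printf("first:       notify=%d\n", cq_notify_now(&cq, 0));
	/* ... further completions are silent until the CQ is re-armed. */
	printf("second:      notify=%d\n", cq_notify_now(&cq, 0));

	cq.notify_flags = NOTIFY_SOLICITED;	/* re-arm for solicited only */
	printf("unsolicited: notify=%d\n", cq_notify_now(&cq, 0));
	printf("solicited:   notify=%d\n", cq_notify_now(&cq, WQE_SOLICITED));
	return 0;
}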
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ /* Copyright (c) 2008-2019, IBM Corporation */ #include <linux/gfp.h> #include <rdma/ib_verbs.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/sched/mm.h> #include <linux/resource.h> #include "siw.h" #include "siw_mem.h" /* * Stag lookup is based on its index part only (24 bits). * The code avoids special Stag of zero and tries to randomize * STag values between 1 and SIW_STAG_MAX_INDEX. */ int siw_mem_add(struct siw_device *sdev, struct siw_mem *m) { struct xa_limit limit = XA_LIMIT(1, 0x00ffffff); u32 id, next; get_random_bytes(&next, 4); next &= 0x00ffffff; if (xa_alloc_cyclic(&sdev->mem_xa, &id, m, limit, &next, GFP_KERNEL) < 0) return -ENOMEM; /* Set the STag index part */ m->stag = id << 8; siw_dbg_mem(m, "new MEM object\n"); return 0; } /* * siw_mem_id2obj() * * resolves memory from stag given by id. might be called from: * o process context before sending out of sgl, or * o in softirq when resolving target memory */ struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index) { struct siw_mem *mem; rcu_read_lock(); mem = xa_load(&sdev->mem_xa, stag_index); if (likely(mem && kref_get_unless_zero(&mem->ref))) { rcu_read_unlock(); return mem; } rcu_read_unlock(); return NULL; } static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages, bool dirty) { unpin_user_pages_dirty_lock(chunk->plist, num_pages, dirty); } void siw_umem_release(struct siw_umem *umem, bool dirty) { struct mm_struct *mm_s = umem->owning_mm; int i, num_pages = umem->num_pages; for (i = 0; num_pages; i++) { int to_free = min_t(int, PAGES_PER_CHUNK, num_pages); siw_free_plist(&umem->page_chunk[i], to_free, umem->writable && dirty); kfree(umem->page_chunk[i].plist); num_pages -= to_free; } atomic64_sub(umem->num_pages, &mm_s->pinned_vm); mmdrop(mm_s); kfree(umem->page_chunk); kfree(umem); } int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj, u64 start, u64 len, int rights) { struct siw_device *sdev = to_siw_dev(pd->device); struct siw_mem *mem = kzalloc(sizeof(*mem), GFP_KERNEL); struct xa_limit limit = XA_LIMIT(1, 0x00ffffff); u32 id, next; if (!mem) return -ENOMEM; mem->mem_obj = mem_obj; mem->stag_valid = 0; mem->sdev = sdev; mem->va = start; mem->len = len; mem->pd = pd; mem->perms = rights & IWARP_ACCESS_MASK; kref_init(&mem->ref); get_random_bytes(&next, 4); next &= 0x00ffffff; if (xa_alloc_cyclic(&sdev->mem_xa, &id, mem, limit, &next, GFP_KERNEL) < 0) { kfree(mem); return -ENOMEM; } mr->mem = mem; /* Set the STag index part */ mem->stag = id << 8; mr->base_mr.lkey = mr->base_mr.rkey = mem->stag; return 0; } void siw_mr_drop_mem(struct siw_mr *mr) { struct siw_mem *mem = mr->mem, *found; mem->stag_valid = 0; /* make STag invalid visible asap */ smp_mb(); found = xa_erase(&mem->sdev->mem_xa, mem->stag >> 8); WARN_ON(found != mem); siw_mem_put(mem); } void siw_free_mem(struct kref *ref) { struct siw_mem *mem = container_of(ref, struct siw_mem, ref); siw_dbg_mem(mem, "free mem, pbl: %s\n", mem->is_pbl ? "y" : "n"); if (!mem->is_mw && mem->mem_obj) { if (mem->is_pbl == 0) siw_umem_release(mem->umem, true); else kfree(mem->pbl); } kfree(mem); } /* * siw_check_mem() * * Check protection domain, STAG state, access permissions and * address range for memory object. 
* * @pd: Protection Domain memory should belong to * @mem: memory to be checked * @addr: starting addr of mem * @perms: requested access permissions * @len: len of memory interval to be checked * */ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr, enum ib_access_flags perms, int len) { if (!mem->stag_valid) { siw_dbg_pd(pd, "STag 0x%08x invalid\n", mem->stag); return -E_STAG_INVALID; } if (mem->pd != pd) { siw_dbg_pd(pd, "STag 0x%08x: PD mismatch\n", mem->stag); return -E_PD_MISMATCH; } /* * check access permissions */ if ((mem->perms & perms) < perms) { siw_dbg_pd(pd, "permissions 0x%08x < 0x%08x\n", mem->perms, perms); return -E_ACCESS_PERM; } /* * Check if access falls into valid memory interval. */ if (addr < mem->va || addr + len > mem->va + mem->len) { siw_dbg_pd(pd, "MEM interval len %d\n", len); siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n", (void *)(uintptr_t)addr, (void *)(uintptr_t)(addr + len)); siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n", (void *)(uintptr_t)mem->va, (void *)(uintptr_t)(mem->va + mem->len), mem->stag); return -E_BASE_BOUNDS; } return E_ACCESS_OK; } /* * siw_check_sge() * * Check SGE for access rights in given interval * * @pd: Protection Domain memory should belong to * @sge: SGE to be checked * @mem: location of memory reference within array * @perms: requested access permissions * @off: starting offset in SGE * @len: len of memory interval to be checked * * NOTE: Function references SGE's memory object (mem->obj) * if not yet done. New reference is kept if check went ok and * released if check failed. If mem->obj is already valid, no new * lookup is being done and mem is not released it check fails. */ int siw_check_sge(struct ib_pd *pd, struct siw_sge *sge, struct siw_mem *mem[], enum ib_access_flags perms, u32 off, int len) { struct siw_device *sdev = to_siw_dev(pd->device); struct siw_mem *new = NULL; int rv = E_ACCESS_OK; if (len + off > sge->length) { rv = -E_BASE_BOUNDS; goto fail; } if (*mem == NULL) { new = siw_mem_id2obj(sdev, sge->lkey >> 8); if (unlikely(!new)) { siw_dbg_pd(pd, "STag unknown: 0x%08x\n", sge->lkey); rv = -E_STAG_INVALID; goto fail; } *mem = new; } /* Check if user re-registered with different STag key */ if (unlikely((*mem)->stag != sge->lkey)) { siw_dbg_mem((*mem), "STag mismatch: 0x%08x\n", sge->lkey); rv = -E_STAG_INVALID; goto fail; } rv = siw_check_mem(pd, *mem, sge->laddr + off, perms, len); if (unlikely(rv)) goto fail; return 0; fail: if (new) { *mem = NULL; siw_mem_put(new); } return rv; } void siw_wqe_put_mem(struct siw_wqe *wqe, enum siw_opcode op) { switch (op) { case SIW_OP_SEND: case SIW_OP_WRITE: case SIW_OP_SEND_WITH_IMM: case SIW_OP_SEND_REMOTE_INV: case SIW_OP_READ: case SIW_OP_READ_LOCAL_INV: if (!(wqe->sqe.flags & SIW_WQE_INLINE)) siw_unref_mem_sgl(wqe->mem, wqe->sqe.num_sge); break; case SIW_OP_RECEIVE: siw_unref_mem_sgl(wqe->mem, wqe->rqe.num_sge); break; case SIW_OP_READ_RESPONSE: siw_unref_mem_sgl(wqe->mem, 1); break; default: /* * SIW_OP_INVAL_STAG and SIW_OP_REG_MR * do not hold memory references */ break; } } int siw_invalidate_stag(struct ib_pd *pd, u32 stag) { struct siw_device *sdev = to_siw_dev(pd->device); struct siw_mem *mem = siw_mem_id2obj(sdev, stag >> 8); int rv = 0; if (unlikely(!mem)) { siw_dbg_pd(pd, "STag 0x%08x unknown\n", stag); return -EINVAL; } if (unlikely(mem->pd != pd)) { siw_dbg_pd(pd, "PD mismatch for STag 0x%08x\n", stag); rv = -EACCES; goto out; } /* * Per RDMA verbs definition, an STag may already be in invalid * state if invalidation is requested. 
So no state check here. */ mem->stag_valid = 0; siw_dbg_pd(pd, "STag 0x%08x now invalid\n", stag); out: siw_mem_put(mem); return rv; } /* * Gets physical address backed by PBL element. Address is referenced * by linear byte offset into list of variably sized PB elements. * Optionally, provides remaining len within current element, and * current PBL index for later resume at same element. */ dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx) { int i = idx ? *idx : 0; while (i < pbl->num_buf) { struct siw_pble *pble = &pbl->pbe[i]; if (pble->pbl_off + pble->size > off) { u64 pble_off = off - pble->pbl_off; if (len) *len = pble->size - pble_off; if (idx) *idx = i; return pble->addr + pble_off; } i++; } if (len) *len = 0; return 0; } struct siw_pbl *siw_pbl_alloc(u32 num_buf) { struct siw_pbl *pbl; if (num_buf == 0) return ERR_PTR(-EINVAL); pbl = kzalloc(struct_size(pbl, pbe, num_buf), GFP_KERNEL); if (!pbl) return ERR_PTR(-ENOMEM); pbl->max_buf = num_buf; return pbl; } struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable) { struct siw_umem *umem; struct mm_struct *mm_s; u64 first_page_va; unsigned long mlock_limit; unsigned int foll_flags = FOLL_LONGTERM; int num_pages, num_chunks, i, rv = 0; if (!can_do_mlock()) return ERR_PTR(-EPERM); if (!len) return ERR_PTR(-EINVAL); first_page_va = start & PAGE_MASK; num_pages = PAGE_ALIGN(start + len - first_page_va) >> PAGE_SHIFT; num_chunks = (num_pages >> CHUNK_SHIFT) + 1; umem = kzalloc(sizeof(*umem), GFP_KERNEL); if (!umem) return ERR_PTR(-ENOMEM); mm_s = current->mm; umem->owning_mm = mm_s; umem->writable = writable; mmgrab(mm_s); if (writable) foll_flags |= FOLL_WRITE; mmap_read_lock(mm_s); mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (atomic64_add_return(num_pages, &mm_s->pinned_vm) > mlock_limit) { rv = -ENOMEM; goto out_sem_up; } umem->fp_addr = first_page_va; umem->page_chunk = kcalloc(num_chunks, sizeof(struct siw_page_chunk), GFP_KERNEL); if (!umem->page_chunk) { rv = -ENOMEM; goto out_sem_up; } for (i = 0; num_pages; i++) { int nents = min_t(int, num_pages, PAGES_PER_CHUNK); struct page **plist = kcalloc(nents, sizeof(struct page *), GFP_KERNEL); if (!plist) { rv = -ENOMEM; goto out_sem_up; } umem->page_chunk[i].plist = plist; while (nents) { rv = pin_user_pages(first_page_va, nents, foll_flags, plist); if (rv < 0) goto out_sem_up; umem->num_pages += rv; first_page_va += rv * PAGE_SIZE; plist += rv; nents -= rv; num_pages -= rv; } } out_sem_up: mmap_read_unlock(mm_s); if (rv > 0) return umem; /* Adjust accounting for pages not pinned */ if (num_pages) atomic64_sub(num_pages, &mm_s->pinned_vm); siw_umem_release(umem, false); return ERR_PTR(rv); }
linux-master
drivers/infiniband/sw/siw/siw_mem.c
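A stand-alone sketch, under simplified types, of the order of checks performed by siw_check_mem() in siw_mem.c above: STag validity, PD ownership, access-rights subset, then containment in [va, va + len). The struct fields and error codes are illustrative stand-ins for the kernel definitions.

#include <stdint.h>
#include <stdio.h>

enum { E_OK = 0, E_STAG_INVALID, E_PD_MISMATCH, E_ACCESS_PERM, E_BASE_BOUNDS };

struct model_mem {
	int stag_valid;
	int pd_id;		/* owning protection domain */
	uint32_t perms;		/* granted access bits */
	uint64_t va;		/* start of registered range */
	uint64_t len;		/* length of registered range */
};

static int check_mem(const struct model_mem *mem, int pd_id,
		     uint64_t addr, uint32_t perms, uint64_t len)
{
	if (!mem->stag_valid)
		return -E_STAG_INVALID;
	if (mem->pd_id != pd_id)
		return -E_PD_MISMATCH;
	/* requested rights must be a subset of the granted rights */
	if ((mem->perms & perms) != perms)
		return -E_ACCESS_PERM;
	/* access must fall completely inside the registered interval */
	if (addr < mem->va || addr + len > mem->va + mem->len)
		return -E_BASE_BOUNDS;
	return E_OK;
}

int main(void)
{
	struct model_mem mem = {
		.stag_valid = 1, .pd_id = 1, .perms = 0x3,
		.va = 0x1000, .len = 0x2000,
	};

	printf("in bounds: %d\n", check_mem(&mem, 1, 0x1800, 0x1, 0x100));
	printf("past end:  %d\n", check_mem(&mem, 1, 0x2f00, 0x1, 0x200));
	printf("wrong PD:  %d\n", check_mem(&mem, 2, 0x1800, 0x1, 0x100));
	printf("no rights: %d\n", check_mem(&mem, 1, 0x1800, 0x4, 0x100));
	return 0;
}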
/* * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved. * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved. * Copyright (c) 2004-2007 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2014 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <rdma/ib_smi.h> #include "smi.h" #include "opa_smi.h" static enum smi_action __smi_handle_dr_smp_send(bool is_switch, u32 port_num, u8 *hop_ptr, u8 hop_cnt, const u8 *initial_path, const u8 *return_path, u8 direction, bool dr_dlid_is_permissive, bool dr_slid_is_permissive) { /* See section 14.2.2.2, Vol 1 IB spec */ /* C14-6 -- valid hop_cnt values are from 0 to 63 */ if (hop_cnt >= IB_SMP_MAX_PATH_HOPS) return IB_SMI_DISCARD; if (!direction) { /* C14-9:1 */ if (hop_cnt && *hop_ptr == 0) { (*hop_ptr)++; return (initial_path[*hop_ptr] == port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:2 */ if (*hop_ptr && *hop_ptr < hop_cnt) { if (!is_switch) return IB_SMI_DISCARD; /* return_path set when received */ (*hop_ptr)++; return (initial_path[*hop_ptr] == port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:3 -- We're at the end of the DR segment of path */ if (*hop_ptr == hop_cnt) { /* return_path set when received */ (*hop_ptr)++; return (is_switch || dr_dlid_is_permissive ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ /* C14-9:5 -- Fail unreasonable hop pointer */ return (*hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD); } else { /* C14-13:1 */ if (hop_cnt && *hop_ptr == hop_cnt + 1) { (*hop_ptr)--; return (return_path[*hop_ptr] == port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:2 */ if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) { if (!is_switch) return IB_SMI_DISCARD; (*hop_ptr)--; return (return_path[*hop_ptr] == port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:3 -- at the end of the DR segment of path */ if (*hop_ptr == 1) { (*hop_ptr)--; /* C14-13:3 -- SMPs destined for SM shouldn't be here */ return (is_switch || dr_slid_is_permissive ? 
IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:4 -- hop_ptr = 0 -> should have gone to SM */ if (*hop_ptr == 0) return IB_SMI_HANDLE; /* C14-13:5 -- Check for unreasonable hop pointer */ return IB_SMI_DISCARD; } } /* * Fixup a directed route SMP for sending * Return IB_SMI_DISCARD if the SMP should be discarded */ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, bool is_switch, u32 port_num) { return __smi_handle_dr_smp_send(is_switch, port_num, &smp->hop_ptr, smp->hop_cnt, smp->initial_path, smp->return_path, ib_get_smp_direction(smp), smp->dr_dlid == IB_LID_PERMISSIVE, smp->dr_slid == IB_LID_PERMISSIVE); } enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, bool is_switch, u32 port_num) { return __smi_handle_dr_smp_send(is_switch, port_num, &smp->hop_ptr, smp->hop_cnt, smp->route.dr.initial_path, smp->route.dr.return_path, opa_get_smp_direction(smp), smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE, smp->route.dr.dr_slid == OPA_LID_PERMISSIVE); } static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, u32 port_num, int phys_port_cnt, u8 *hop_ptr, u8 hop_cnt, const u8 *initial_path, u8 *return_path, u8 direction, bool dr_dlid_is_permissive, bool dr_slid_is_permissive) { /* See section 14.2.2.2, Vol 1 IB spec */ /* C14-6 -- valid hop_cnt values are from 0 to 63 */ if (hop_cnt >= IB_SMP_MAX_PATH_HOPS) return IB_SMI_DISCARD; if (!direction) { /* C14-9:1 -- sender should have incremented hop_ptr */ if (hop_cnt && *hop_ptr == 0) return IB_SMI_DISCARD; /* C14-9:2 -- intermediate hop */ if (*hop_ptr && *hop_ptr < hop_cnt) { if (!is_switch) return IB_SMI_DISCARD; return_path[*hop_ptr] = port_num; /* hop_ptr updated when sending */ return (initial_path[*hop_ptr+1] <= phys_port_cnt ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:3 -- We're at the end of the DR segment of path */ if (*hop_ptr == hop_cnt) { if (hop_cnt) return_path[*hop_ptr] = port_num; /* hop_ptr updated when sending */ return (is_switch || dr_dlid_is_permissive ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ /* C14-9:5 -- fail unreasonable hop pointer */ return (*hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD); } else { /* C14-13:1 */ if (hop_cnt && *hop_ptr == hop_cnt + 1) { (*hop_ptr)--; return (return_path[*hop_ptr] == port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:2 */ if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) { if (!is_switch) return IB_SMI_DISCARD; /* hop_ptr updated when sending */ return (return_path[*hop_ptr-1] <= phys_port_cnt ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:3 -- We're at the end of the DR segment of path */ if (*hop_ptr == 1) { if (dr_slid_is_permissive) { /* giving SMP to SM - update hop_ptr */ (*hop_ptr)--; return IB_SMI_HANDLE; } /* hop_ptr updated when sending */ return (is_switch ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:4 -- hop_ptr = 0 -> give to SM */ /* C14-13:5 -- Check for unreasonable hop pointer */ return (*hop_ptr == 0 ? 
IB_SMI_HANDLE : IB_SMI_DISCARD); } } /* * Adjust information for a received SMP * Return IB_SMI_DISCARD if the SMP should be dropped */ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch, u32 port_num, int phys_port_cnt) { return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt, &smp->hop_ptr, smp->hop_cnt, smp->initial_path, smp->return_path, ib_get_smp_direction(smp), smp->dr_dlid == IB_LID_PERMISSIVE, smp->dr_slid == IB_LID_PERMISSIVE); } /* * Adjust information for a received SMP * Return IB_SMI_DISCARD if the SMP should be dropped */ enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch, u32 port_num, int phys_port_cnt) { return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt, &smp->hop_ptr, smp->hop_cnt, smp->route.dr.initial_path, smp->route.dr.return_path, opa_get_smp_direction(smp), smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE, smp->route.dr.dr_slid == OPA_LID_PERMISSIVE); } static enum smi_forward_action __smi_check_forward_dr_smp(u8 hop_ptr, u8 hop_cnt, u8 direction, bool dr_dlid_is_permissive, bool dr_slid_is_permissive) { if (!direction) { /* C14-9:2 -- intermediate hop */ if (hop_ptr && hop_ptr < hop_cnt) return IB_SMI_FORWARD; /* C14-9:3 -- at the end of the DR segment of path */ if (hop_ptr == hop_cnt) return (dr_dlid_is_permissive ? IB_SMI_SEND : IB_SMI_LOCAL); /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ if (hop_ptr == hop_cnt + 1) return IB_SMI_SEND; } else { /* C14-13:2 -- intermediate hop */ if (2 <= hop_ptr && hop_ptr <= hop_cnt) return IB_SMI_FORWARD; /* C14-13:3 -- at the end of the DR segment of path */ if (hop_ptr == 1) return (!dr_slid_is_permissive ? IB_SMI_SEND : IB_SMI_LOCAL); } return IB_SMI_LOCAL; } enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp) { return __smi_check_forward_dr_smp(smp->hop_ptr, smp->hop_cnt, ib_get_smp_direction(smp), smp->dr_dlid == IB_LID_PERMISSIVE, smp->dr_slid == IB_LID_PERMISSIVE); } enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp) { return __smi_check_forward_dr_smp(smp->hop_ptr, smp->hop_cnt, opa_get_smp_direction(smp), smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE, smp->route.dr.dr_slid == OPA_LID_PERMISSIVE); } /* * Return the forwarding port number from initial_path for outgoing SMP and * from return_path for returning SMP */ int smi_get_fwd_port(struct ib_smp *smp) { return (!ib_get_smp_direction(smp) ? smp->initial_path[smp->hop_ptr+1] : smp->return_path[smp->hop_ptr-1]); } /* * Return the forwarding port number from initial_path for outgoing SMP and * from return_path for returning SMP */ int opa_smi_get_fwd_port(struct opa_smp *smp) { return !opa_get_smp_direction(smp) ? smp->route.dr.initial_path[smp->hop_ptr+1] : smp->route.dr.return_path[smp->hop_ptr-1]; }
linux-master
drivers/infiniband/core/smi.c
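A user-space sketch of the forwarding decision taken by __smi_check_forward_dr_smp() in smi.c above, assuming simplified enum and parameter names; it reproduces only the C14-9 / C14-13 branch structure, without the IB/OPA header handling of the real callers.

#include <stdbool.h>
#include <stdio.h>

enum fwd_action { FWD_LOCAL, FWD_SEND, FWD_FORWARD };

static enum fwd_action check_forward_dr(unsigned int hop_ptr,
					unsigned int hop_cnt,
					bool returning,
					bool dlid_permissive,
					bool slid_permissive)
{
	if (!returning) {
		/* C14-9:2 -- intermediate hop of the outgoing path */
		if (hop_ptr && hop_ptr < hop_cnt)
			return FWD_FORWARD;
		/* C14-9:3 -- end of the directed-route segment */
		if (hop_ptr == hop_cnt)
			return dlid_permissive ? FWD_SEND : FWD_LOCAL;
		/* C14-9:4 -- hop_ptr == hop_cnt + 1: hand to SMA/SM */
		if (hop_ptr == hop_cnt + 1)
			return FWD_SEND;
	} else {
		/* C14-13:2 -- intermediate hop of the return path */
		if (2 <= hop_ptr && hop_ptr <= hop_cnt)
			return FWD_FORWARD;
		/* C14-13:3 -- end of the directed-route segment */
		if (hop_ptr == 1)
			return !slid_permissive ? FWD_SEND : FWD_LOCAL;
	}
	return FWD_LOCAL;
}

int main(void)
{
	/* Outgoing SMP in the middle of a 3-hop route gets forwarded. */
	printf("%d\n", check_forward_dr(1, 3, false, true, true));
	/* At the end of the segment with a permissive DLID it is sent. */
	printf("%d\n", check_forward_dr(3, 3, false, true, true));
	/* Returning SMP at hop_ptr == 1 with permissive SLID stays local. */
	printf("%d\n", check_forward_dr(1, 3, true, true, true));
	return 0;
}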
/* * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * Copyright (c) 2005 Network Appliance, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/rbtree.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/completion.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/sysctl.h> #include <rdma/iw_cm.h> #include <rdma/ib_addr.h> #include <rdma/iw_portmap.h> #include <rdma/rdma_netlink.h> #include "iwcm.h" MODULE_AUTHOR("Tom Tucker"); MODULE_DESCRIPTION("iWARP CM"); MODULE_LICENSE("Dual BSD/GPL"); static const char * const iwcm_rej_reason_strs[] = { [ECONNRESET] = "reset by remote host", [ECONNREFUSED] = "refused by remote application", [ETIMEDOUT] = "setup timeout", }; const char *__attribute_const__ iwcm_reject_msg(int reason) { size_t index; /* iWARP uses negative errnos */ index = -reason; if (index < ARRAY_SIZE(iwcm_rej_reason_strs) && iwcm_rej_reason_strs[index]) return iwcm_rej_reason_strs[index]; else return "unrecognized reason"; } EXPORT_SYMBOL(iwcm_reject_msg); static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = { [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb}, [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}, [RDMA_NL_IWPM_HELLO] = {.dump = iwpm_hello_cb} }; static struct workqueue_struct *iwcm_wq; struct iwcm_work { struct work_struct work; struct iwcm_id_private *cm_id; struct list_head list; struct iw_cm_event event; struct list_head free_list; }; static unsigned int default_backlog = 256; static struct ctl_table_header *iwcm_ctl_table_hdr; static 
struct ctl_table iwcm_ctl_table[] = { { .procname = "default_backlog", .data = &default_backlog, .maxlen = sizeof(default_backlog), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; /* * The following services provide a mechanism for pre-allocating iwcm_work * elements. The design pre-allocates them based on the cm_id type: * LISTENING IDS: Get enough elements preallocated to handle the * listen backlog. * ACTIVE IDS: 4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE * PASSIVE IDS: 3: ESTABLISHED, DISCONNECT, CLOSE * * Allocating them in connect and listen avoids having to deal * with allocation failures on the event upcall from the provider (which * is called in the interrupt context). * * One exception is when creating the cm_id for incoming connection requests. * There are two cases: * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If * the backlog is exceeded, then no more connection request events will * be processed. cm_event_handler() returns -ENOMEM in this case. Its up * to the provider to reject the connection request. * 2) in the connection request workqueue handler, cm_conn_req_handler(). * If work elements cannot be allocated for the new connect request cm_id, * then IWCM will call the provider reject method. This is ok since * cm_conn_req_handler() runs in the workqueue thread context. */ static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv) { struct iwcm_work *work; if (list_empty(&cm_id_priv->work_free_list)) return NULL; work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work, free_list); list_del_init(&work->free_list); return work; } static void put_work(struct iwcm_work *work) { list_add(&work->free_list, &work->cm_id->work_free_list); } static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv) { struct list_head *e, *tmp; list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) { list_del(e); kfree(list_entry(e, struct iwcm_work, free_list)); } } static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count) { struct iwcm_work *work; BUG_ON(!list_empty(&cm_id_priv->work_free_list)); while (count--) { work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL); if (!work) { dealloc_work_entries(cm_id_priv); return -ENOMEM; } work->cm_id = cm_id_priv; INIT_LIST_HEAD(&work->list); put_work(work); } return 0; } /* * Save private data from incoming connection requests to * iw_cm_event, so the low level driver doesn't have to. Adjust * the event ptr to point to the local copy. */ static int copy_private_data(struct iw_cm_event *event) { void *p; p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC); if (!p) return -ENOMEM; event->private_data = p; return 0; } static void free_cm_id(struct iwcm_id_private *cm_id_priv) { dealloc_work_entries(cm_id_priv); kfree(cm_id_priv); } /* * Release a reference on cm_id. If the last reference is being * released, free the cm_id and return 1. 
*/ static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv) { if (refcount_dec_and_test(&cm_id_priv->refcount)) { BUG_ON(!list_empty(&cm_id_priv->work_list)); free_cm_id(cm_id_priv); return 1; } return 0; } static void add_ref(struct iw_cm_id *cm_id) { struct iwcm_id_private *cm_id_priv; cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); refcount_inc(&cm_id_priv->refcount); } static void rem_ref(struct iw_cm_id *cm_id) { struct iwcm_id_private *cm_id_priv; cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); (void)iwcm_deref_id(cm_id_priv); } static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event); struct iw_cm_id *iw_create_cm_id(struct ib_device *device, iw_cm_handler cm_handler, void *context) { struct iwcm_id_private *cm_id_priv; cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL); if (!cm_id_priv) return ERR_PTR(-ENOMEM); cm_id_priv->state = IW_CM_STATE_IDLE; cm_id_priv->id.device = device; cm_id_priv->id.cm_handler = cm_handler; cm_id_priv->id.context = context; cm_id_priv->id.event_handler = cm_event_handler; cm_id_priv->id.add_ref = add_ref; cm_id_priv->id.rem_ref = rem_ref; spin_lock_init(&cm_id_priv->lock); refcount_set(&cm_id_priv->refcount, 1); init_waitqueue_head(&cm_id_priv->connect_wait); init_completion(&cm_id_priv->destroy_comp); INIT_LIST_HEAD(&cm_id_priv->work_list); INIT_LIST_HEAD(&cm_id_priv->work_free_list); return &cm_id_priv->id; } EXPORT_SYMBOL(iw_create_cm_id); static int iwcm_modify_qp_err(struct ib_qp *qp) { struct ib_qp_attr qp_attr; if (!qp) return -EINVAL; qp_attr.qp_state = IB_QPS_ERR; return ib_modify_qp(qp, &qp_attr, IB_QP_STATE); } /* * This is really the RDMAC CLOSING state. It is most similar to the * IB SQD QP state. */ static int iwcm_modify_qp_sqd(struct ib_qp *qp) { struct ib_qp_attr qp_attr; BUG_ON(qp == NULL); qp_attr.qp_state = IB_QPS_SQD; return ib_modify_qp(qp, &qp_attr, IB_QP_STATE); } /* * CM_ID <-- CLOSING * * Block if a passive or active connection is currently being processed. Then * process the event as follows: * - If we are ESTABLISHED, move to CLOSING and modify the QP state * based on the abrupt flag * - If the connection is already in the CLOSING or IDLE state, the peer is * disconnecting concurrently with us and we've already seen the * DISCONNECT event -- ignore the request and return 0 * - Disconnect on a listening endpoint returns -EINVAL */ int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt) { struct iwcm_id_private *cm_id_priv; unsigned long flags; int ret = 0; struct ib_qp *qp = NULL; cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); /* Wait if we're currently in a connect or accept downcall */ wait_event(cm_id_priv->connect_wait, !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags)); spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->state) { case IW_CM_STATE_ESTABLISHED: cm_id_priv->state = IW_CM_STATE_CLOSING; /* QP could be <nul> for user-mode client */ if (cm_id_priv->qp) qp = cm_id_priv->qp; else ret = -EINVAL; break; case IW_CM_STATE_LISTEN: ret = -EINVAL; break; case IW_CM_STATE_CLOSING: /* remote peer closed first */ case IW_CM_STATE_IDLE: /* accept or connect returned !0 */ break; case IW_CM_STATE_CONN_RECV: /* * App called disconnect before/without calling accept after * connect_request event delivered. 
*/ break; case IW_CM_STATE_CONN_SENT: /* Can only get here if wait above fails */ default: BUG(); } spin_unlock_irqrestore(&cm_id_priv->lock, flags); if (qp) { if (abrupt) ret = iwcm_modify_qp_err(qp); else ret = iwcm_modify_qp_sqd(qp); /* * If both sides are disconnecting the QP could * already be in ERR or SQD states */ ret = 0; } return ret; } EXPORT_SYMBOL(iw_cm_disconnect); /* * CM_ID <-- DESTROYING * * Clean up all resources associated with the connection and release * the initial reference taken by iw_create_cm_id. */ static void destroy_cm_id(struct iw_cm_id *cm_id) { struct iwcm_id_private *cm_id_priv; struct ib_qp *qp; unsigned long flags; cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); /* * Wait if we're currently in a connect or accept downcall. A * listening endpoint should never block here. */ wait_event(cm_id_priv->connect_wait, !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags)); /* * Since we're deleting the cm_id, drop any events that * might arrive before the last dereference. */ set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags); spin_lock_irqsave(&cm_id_priv->lock, flags); qp = cm_id_priv->qp; cm_id_priv->qp = NULL; switch (cm_id_priv->state) { case IW_CM_STATE_LISTEN: cm_id_priv->state = IW_CM_STATE_DESTROYING; spin_unlock_irqrestore(&cm_id_priv->lock, flags); /* destroy the listening endpoint */ cm_id->device->ops.iw_destroy_listen(cm_id); spin_lock_irqsave(&cm_id_priv->lock, flags); break; case IW_CM_STATE_ESTABLISHED: cm_id_priv->state = IW_CM_STATE_DESTROYING; spin_unlock_irqrestore(&cm_id_priv->lock, flags); /* Abrupt close of the connection */ (void)iwcm_modify_qp_err(qp); spin_lock_irqsave(&cm_id_priv->lock, flags); break; case IW_CM_STATE_IDLE: case IW_CM_STATE_CLOSING: cm_id_priv->state = IW_CM_STATE_DESTROYING; break; case IW_CM_STATE_CONN_RECV: /* * App called destroy before/without calling accept after * receiving connection request event notification or * returned non zero from the event callback function. * In either case, must tell the provider to reject. */ cm_id_priv->state = IW_CM_STATE_DESTROYING; spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_id->device->ops.iw_reject(cm_id, NULL, 0); spin_lock_irqsave(&cm_id_priv->lock, flags); break; case IW_CM_STATE_CONN_SENT: case IW_CM_STATE_DESTROYING: default: BUG(); break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); if (qp) cm_id_priv->id.device->ops.iw_rem_ref(qp); if (cm_id->mapped) { iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr); iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM); } (void)iwcm_deref_id(cm_id_priv); } /* * This function is only called by the application thread and cannot * be called by the event thread. The function will wait for all * references to be released on the cm_id and then kfree the cm_id * object. */ void iw_destroy_cm_id(struct iw_cm_id *cm_id) { destroy_cm_id(cm_id); } EXPORT_SYMBOL(iw_destroy_cm_id); /** * iw_cm_check_wildcard - If IP address is 0 then use original * @pm_addr: sockaddr containing the ip to check for wildcard * @cm_addr: sockaddr containing the actual IP address * @cm_outaddr: sockaddr to set IP addr which leaving port * * Checks the pm_addr for wildcard and then sets cm_outaddr's * IP to the actual (cm_addr). 
*/ static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr, struct sockaddr_storage *cm_addr, struct sockaddr_storage *cm_outaddr) { if (pm_addr->ss_family == AF_INET) { struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr; if (pm4_addr->sin_addr.s_addr == htonl(INADDR_ANY)) { struct sockaddr_in *cm4_addr = (struct sockaddr_in *)cm_addr; struct sockaddr_in *cm4_outaddr = (struct sockaddr_in *)cm_outaddr; cm4_outaddr->sin_addr = cm4_addr->sin_addr; } } else { struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr; if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) { struct sockaddr_in6 *cm6_addr = (struct sockaddr_in6 *)cm_addr; struct sockaddr_in6 *cm6_outaddr = (struct sockaddr_in6 *)cm_outaddr; cm6_outaddr->sin6_addr = cm6_addr->sin6_addr; } } } /** * iw_cm_map - Use portmapper to map the ports * @cm_id: connection manager pointer * @active: Indicates the active side when true * returns nonzero for error only if iwpm_create_mapinfo() fails * * Tries to add a mapping for a port using the Portmapper. If * successful in mapping the IP/Port it will check the remote * mapped IP address for a wildcard IP address and replace the * zero IP address with the remote_addr. */ static int iw_cm_map(struct iw_cm_id *cm_id, bool active) { const char *devname = dev_name(&cm_id->device->dev); const char *ifname = cm_id->device->iw_ifname; struct iwpm_dev_data pm_reg_msg = {}; struct iwpm_sa_data pm_msg; int status; if (strlen(devname) >= sizeof(pm_reg_msg.dev_name) || strlen(ifname) >= sizeof(pm_reg_msg.if_name)) return -EINVAL; cm_id->m_local_addr = cm_id->local_addr; cm_id->m_remote_addr = cm_id->remote_addr; strcpy(pm_reg_msg.dev_name, devname); strcpy(pm_reg_msg.if_name, ifname); if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) || !iwpm_valid_pid()) return 0; cm_id->mapped = true; pm_msg.loc_addr = cm_id->local_addr; pm_msg.rem_addr = cm_id->remote_addr; pm_msg.flags = (cm_id->device->iw_driver_flags & IW_F_NO_PORT_MAP) ? IWPM_FLAGS_NO_PORT_MAP : 0; if (active) status = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_IWCM); else status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM); if (!status) { cm_id->m_local_addr = pm_msg.mapped_loc_addr; if (active) { cm_id->m_remote_addr = pm_msg.mapped_rem_addr; iw_cm_check_wildcard(&pm_msg.mapped_rem_addr, &cm_id->remote_addr, &cm_id->m_remote_addr); } } return iwpm_create_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr, RDMA_NL_IWCM, pm_msg.flags); } /* * CM_ID <-- LISTEN * * Start listening for connect requests. Generates one CONNECT_REQUEST * event for each inbound connect request. */ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog) { struct iwcm_id_private *cm_id_priv; unsigned long flags; int ret; cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); if (!backlog) backlog = default_backlog; ret = alloc_work_entries(cm_id_priv, backlog); if (ret) return ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->state) { case IW_CM_STATE_IDLE: cm_id_priv->state = IW_CM_STATE_LISTEN; spin_unlock_irqrestore(&cm_id_priv->lock, flags); ret = iw_cm_map(cm_id, false); if (!ret) ret = cm_id->device->ops.iw_create_listen(cm_id, backlog); if (ret) cm_id_priv->state = IW_CM_STATE_IDLE; spin_lock_irqsave(&cm_id_priv->lock, flags); break; default: ret = -EINVAL; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(iw_cm_listen); /* * CM_ID <-- IDLE * * Rejects an inbound connection request. No events are generated. 
*/ int iw_cm_reject(struct iw_cm_id *cm_id, const void *private_data, u8 private_data_len) { struct iwcm_id_private *cm_id_priv; unsigned long flags; int ret; cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); wake_up_all(&cm_id_priv->connect_wait); return -EINVAL; } cm_id_priv->state = IW_CM_STATE_IDLE; spin_unlock_irqrestore(&cm_id_priv->lock, flags); ret = cm_id->device->ops.iw_reject(cm_id, private_data, private_data_len); clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); wake_up_all(&cm_id_priv->connect_wait); return ret; } EXPORT_SYMBOL(iw_cm_reject); /* * CM_ID <-- ESTABLISHED * * Accepts an inbound connection request and generates an ESTABLISHED * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block * until the ESTABLISHED event is received from the provider. */ int iw_cm_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) { struct iwcm_id_private *cm_id_priv; struct ib_qp *qp; unsigned long flags; int ret; cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); wake_up_all(&cm_id_priv->connect_wait); return -EINVAL; } /* Get the ib_qp given the QPN */ qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn); if (!qp) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); wake_up_all(&cm_id_priv->connect_wait); return -EINVAL; } cm_id->device->ops.iw_add_ref(qp); cm_id_priv->qp = qp; spin_unlock_irqrestore(&cm_id_priv->lock, flags); ret = cm_id->device->ops.iw_accept(cm_id, iw_param); if (ret) { /* An error on accept precludes provider events */ BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV); cm_id_priv->state = IW_CM_STATE_IDLE; spin_lock_irqsave(&cm_id_priv->lock, flags); qp = cm_id_priv->qp; cm_id_priv->qp = NULL; spin_unlock_irqrestore(&cm_id_priv->lock, flags); if (qp) cm_id->device->ops.iw_rem_ref(qp); clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); wake_up_all(&cm_id_priv->connect_wait); } return ret; } EXPORT_SYMBOL(iw_cm_accept); /* * Active Side: CM_ID <-- CONN_SENT * * If successful, results in the generation of a CONNECT_REPLY * event. iw_cm_disconnect and iw_cm_destroy will block until the * CONNECT_REPLY event is received from the provider. 
*/ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) { struct iwcm_id_private *cm_id_priv; int ret; unsigned long flags; struct ib_qp *qp = NULL; cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); ret = alloc_work_entries(cm_id_priv, 4); if (ret) return ret; set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id_priv->state != IW_CM_STATE_IDLE) { ret = -EINVAL; goto err; } /* Get the ib_qp given the QPN */ qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn); if (!qp) { ret = -EINVAL; goto err; } cm_id->device->ops.iw_add_ref(qp); cm_id_priv->qp = qp; cm_id_priv->state = IW_CM_STATE_CONN_SENT; spin_unlock_irqrestore(&cm_id_priv->lock, flags); ret = iw_cm_map(cm_id, true); if (!ret) ret = cm_id->device->ops.iw_connect(cm_id, iw_param); if (!ret) return 0; /* success */ spin_lock_irqsave(&cm_id_priv->lock, flags); qp = cm_id_priv->qp; cm_id_priv->qp = NULL; cm_id_priv->state = IW_CM_STATE_IDLE; err: spin_unlock_irqrestore(&cm_id_priv->lock, flags); if (qp) cm_id->device->ops.iw_rem_ref(qp); clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); wake_up_all(&cm_id_priv->connect_wait); return ret; } EXPORT_SYMBOL(iw_cm_connect); /* * Passive Side: new CM_ID <-- CONN_RECV * * Handles an inbound connect request. The function creates a new * iw_cm_id to represent the new connection and inherits the client * callback function and other attributes from the listening parent. * * The work item contains a pointer to the listen_cm_id and the event. The * listen_cm_id contains the client cm_handler, context and * device. These are copied when the device is cloned. The event * contains the new four tuple. * * An error on the child should not affect the parent, so this * function does not return a value. */ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv, struct iw_cm_event *iw_event) { unsigned long flags; struct iw_cm_id *cm_id; struct iwcm_id_private *cm_id_priv; int ret; /* * The provider should never generate a connection request * event with a bad status. */ BUG_ON(iw_event->status); cm_id = iw_create_cm_id(listen_id_priv->id.device, listen_id_priv->id.cm_handler, listen_id_priv->id.context); /* If the cm_id could not be created, ignore the request */ if (IS_ERR(cm_id)) goto out; cm_id->provider_data = iw_event->provider_data; cm_id->m_local_addr = iw_event->local_addr; cm_id->m_remote_addr = iw_event->remote_addr; cm_id->local_addr = listen_id_priv->id.local_addr; ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr, &iw_event->remote_addr, &cm_id->remote_addr, RDMA_NL_IWCM); if (ret) { cm_id->remote_addr = iw_event->remote_addr; } else { iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr, &iw_event->local_addr, &cm_id->local_addr); iw_event->local_addr = cm_id->local_addr; iw_event->remote_addr = cm_id->remote_addr; } cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); cm_id_priv->state = IW_CM_STATE_CONN_RECV; /* * We could be destroying the listening id. If so, ignore this * upcall. 
*/ spin_lock_irqsave(&listen_id_priv->lock, flags); if (listen_id_priv->state != IW_CM_STATE_LISTEN) { spin_unlock_irqrestore(&listen_id_priv->lock, flags); iw_cm_reject(cm_id, NULL, 0); iw_destroy_cm_id(cm_id); goto out; } spin_unlock_irqrestore(&listen_id_priv->lock, flags); ret = alloc_work_entries(cm_id_priv, 3); if (ret) { iw_cm_reject(cm_id, NULL, 0); iw_destroy_cm_id(cm_id); goto out; } /* Call the client CM handler */ ret = cm_id->cm_handler(cm_id, iw_event); if (ret) { iw_cm_reject(cm_id, NULL, 0); iw_destroy_cm_id(cm_id); } out: if (iw_event->private_data_len) kfree(iw_event->private_data); } /* * Passive Side: CM_ID <-- ESTABLISHED * * The provider generated an ESTABLISHED event which means that * the MPA negotion has completed successfully and we are now in MPA * FPDU mode. * * This event can only be received in the CONN_RECV state. If the * remote peer closed, the ESTABLISHED event would be received followed * by the CLOSE event. If the app closes, it will block until we wake * it up after processing this event. */ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv, struct iw_cm_event *iw_event) { unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); /* * We clear the CONNECT_WAIT bit here to allow the callback * function to call iw_cm_disconnect. Calling iw_destroy_cm_id * from a callback handler is not allowed. */ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV); cm_id_priv->state = IW_CM_STATE_ESTABLISHED; spin_unlock_irqrestore(&cm_id_priv->lock, flags); ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event); wake_up_all(&cm_id_priv->connect_wait); return ret; } /* * Active Side: CM_ID <-- ESTABLISHED * * The app has called connect and is waiting for the established event to * post it's requests to the server. This event will wake up anyone * blocked in iw_cm_disconnect or iw_destroy_id. */ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv, struct iw_cm_event *iw_event) { struct ib_qp *qp = NULL; unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); /* * Clear the connect wait bit so a callback function calling * iw_cm_disconnect will not wait and deadlock this thread */ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT); if (iw_event->status == 0) { cm_id_priv->id.m_local_addr = iw_event->local_addr; cm_id_priv->id.m_remote_addr = iw_event->remote_addr; iw_event->local_addr = cm_id_priv->id.local_addr; iw_event->remote_addr = cm_id_priv->id.remote_addr; cm_id_priv->state = IW_CM_STATE_ESTABLISHED; } else { /* REJECTED or RESET */ qp = cm_id_priv->qp; cm_id_priv->qp = NULL; cm_id_priv->state = IW_CM_STATE_IDLE; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); if (qp) cm_id_priv->id.device->ops.iw_rem_ref(qp); ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event); if (iw_event->private_data_len) kfree(iw_event->private_data); /* Wake up waiters on connect complete */ wake_up_all(&cm_id_priv->connect_wait); return ret; } /* * CM_ID <-- CLOSING * * If in the ESTABLISHED state, move to CLOSING. 
*/ static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv, struct iw_cm_event *iw_event) { unsigned long flags; spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED) cm_id_priv->state = IW_CM_STATE_CLOSING; spin_unlock_irqrestore(&cm_id_priv->lock, flags); } /* * CM_ID <-- IDLE * * If in the ESTBLISHED or CLOSING states, the QP will have have been * moved by the provider to the ERR state. Disassociate the CM_ID from * the QP, move to IDLE, and remove the 'connected' reference. * * If in some other state, the cm_id was destroyed asynchronously. * This is the last reference that will result in waking up * the app thread blocked in iw_destroy_cm_id. */ static int cm_close_handler(struct iwcm_id_private *cm_id_priv, struct iw_cm_event *iw_event) { struct ib_qp *qp; unsigned long flags; int ret = 0, notify_event = 0; spin_lock_irqsave(&cm_id_priv->lock, flags); qp = cm_id_priv->qp; cm_id_priv->qp = NULL; switch (cm_id_priv->state) { case IW_CM_STATE_ESTABLISHED: case IW_CM_STATE_CLOSING: cm_id_priv->state = IW_CM_STATE_IDLE; notify_event = 1; break; case IW_CM_STATE_DESTROYING: break; default: BUG(); } spin_unlock_irqrestore(&cm_id_priv->lock, flags); if (qp) cm_id_priv->id.device->ops.iw_rem_ref(qp); if (notify_event) ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event); return ret; } static int process_event(struct iwcm_id_private *cm_id_priv, struct iw_cm_event *iw_event) { int ret = 0; switch (iw_event->event) { case IW_CM_EVENT_CONNECT_REQUEST: cm_conn_req_handler(cm_id_priv, iw_event); break; case IW_CM_EVENT_CONNECT_REPLY: ret = cm_conn_rep_handler(cm_id_priv, iw_event); break; case IW_CM_EVENT_ESTABLISHED: ret = cm_conn_est_handler(cm_id_priv, iw_event); break; case IW_CM_EVENT_DISCONNECT: cm_disconnect_handler(cm_id_priv, iw_event); break; case IW_CM_EVENT_CLOSE: ret = cm_close_handler(cm_id_priv, iw_event); break; default: BUG(); } return ret; } /* * Process events on the work_list for the cm_id. If the callback * function requests that the cm_id be deleted, a flag is set in the * cm_id flags to indicate that when the last reference is * removed, the cm_id is to be destroyed. This is necessary to * distinguish between an object that will be destroyed by the app * thread asleep on the destroy_comp list vs. an object destroyed * here synchronously when the last reference is removed. */ static void cm_work_handler(struct work_struct *_work) { struct iwcm_work *work = container_of(_work, struct iwcm_work, work); struct iw_cm_event levent; struct iwcm_id_private *cm_id_priv = work->cm_id; unsigned long flags; int empty; int ret = 0; spin_lock_irqsave(&cm_id_priv->lock, flags); empty = list_empty(&cm_id_priv->work_list); while (!empty) { work = list_entry(cm_id_priv->work_list.next, struct iwcm_work, list); list_del_init(&work->list); empty = list_empty(&cm_id_priv->work_list); levent = work->event; put_work(work); spin_unlock_irqrestore(&cm_id_priv->lock, flags); if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) { ret = process_event(cm_id_priv, &levent); if (ret) destroy_cm_id(&cm_id_priv->id); } else pr_debug("dropping event %d\n", levent.event); if (iwcm_deref_id(cm_id_priv)) return; if (empty) return; spin_lock_irqsave(&cm_id_priv->lock, flags); } spin_unlock_irqrestore(&cm_id_priv->lock, flags); } /* * This function is called on interrupt context. Schedule events on * the iwcm_wq thread to allow callback functions to downcall into * the CM and/or block. Events are queued to a per-CM_ID * work_list. 
If this is the first event on the work_list, the work * element is also queued on the iwcm_wq thread. * * Each event holds a reference on the cm_id. Until the last posted * event has been delivered and processed, the cm_id cannot be * deleted. * * Returns: * 0 - the event was handled. * -ENOMEM - the event was not handled due to lack of resources. */ static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *iw_event) { struct iwcm_work *work; struct iwcm_id_private *cm_id_priv; unsigned long flags; int ret = 0; cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); work = get_work(cm_id_priv); if (!work) { ret = -ENOMEM; goto out; } INIT_WORK(&work->work, cm_work_handler); work->cm_id = cm_id_priv; work->event = *iw_event; if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST || work->event.event == IW_CM_EVENT_CONNECT_REPLY) && work->event.private_data_len) { ret = copy_private_data(&work->event); if (ret) { put_work(work); goto out; } } refcount_inc(&cm_id_priv->refcount); if (list_empty(&cm_id_priv->work_list)) { list_add_tail(&work->list, &cm_id_priv->work_list); queue_work(iwcm_wq, &work->work); } else list_add_tail(&work->list, &cm_id_priv->work_list); out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->state) { case IW_CM_STATE_IDLE: case IW_CM_STATE_CONN_SENT: case IW_CM_STATE_CONN_RECV: case IW_CM_STATE_ESTABLISHED: *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE| IB_ACCESS_REMOTE_READ; ret = 0; break; default: ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->state) { case IW_CM_STATE_IDLE: case IW_CM_STATE_CONN_SENT: case IW_CM_STATE_CONN_RECV: case IW_CM_STATE_ESTABLISHED: *qp_attr_mask = 0; ret = 0; break; default: ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { struct iwcm_id_private *cm_id_priv; int ret; cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); switch (qp_attr->qp_state) { case IB_QPS_INIT: case IB_QPS_RTR: ret = iwcm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask); break; case IB_QPS_RTS: ret = iwcm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask); break; default: ret = -EINVAL; break; } return ret; } EXPORT_SYMBOL(iw_cm_init_qp_attr); static int __init iw_cm_init(void) { int ret; ret = iwpm_init(RDMA_NL_IWCM); if (ret) return ret; iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0); if (!iwcm_wq) goto err_alloc; iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm", iwcm_ctl_table); if (!iwcm_ctl_table_hdr) { pr_err("iw_cm: couldn't register sysctl paths\n"); goto err_sysctl; } rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table); return 0; err_sysctl: destroy_workqueue(iwcm_wq); err_alloc: iwpm_exit(RDMA_NL_IWCM); return -ENOMEM; } static void __exit iw_cm_cleanup(void) { rdma_nl_unregister(RDMA_NL_IWCM); unregister_net_sysctl_table(iwcm_ctl_table_hdr); destroy_workqueue(iwcm_wq); iwpm_exit(RDMA_NL_IWCM); } 
MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_IWCM, 2);
module_init(iw_cm_init);
module_exit(iw_cm_cleanup);
linux-master
drivers/infiniband/core/iwcm.c
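/*
 * Illustrative sketch (not part of iwcm.c): how a passive-side consumer
 * might drive the iw_cm interface implemented above. The handler, the
 * example_listener structure and the QP number it carries are hypothetical;
 * only iw_create_cm_id(), iw_cm_listen(), iw_cm_accept() and
 * iw_destroy_cm_id() follow the functions in this file.
 */
#include <linux/err.h>
#include <linux/socket.h>
#include <rdma/iw_cm.h>

struct example_listener {
	u32 qpn;	/* QP the consumer created for inbound connections */
};

static int example_cm_handler(struct iw_cm_id *cm_id,
			      struct iw_cm_event *event)
{
	struct example_listener *listener = cm_id->context;
	struct iw_cm_conn_param param = {};

	switch (event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		/*
		 * Accept from the CONNECT_REQUEST upcall. A non-zero return
		 * (including a failed accept) makes the core reject and
		 * destroy this child cm_id.
		 */
		param.qpn = listener->qpn;
		param.ord = 1;
		param.ird = 1;
		return iw_cm_accept(cm_id, &param);
	case IW_CM_EVENT_ESTABLISHED:
		/* MPA negotiation finished; the QP is ready for FPDUs. */
		return 0;
	default:
		/* DISCONNECT/CLOSE and anything else: nothing to do here. */
		return 0;
	}
}

static int example_start_listen(struct ib_device *device,
				struct example_listener *listener,
				struct sockaddr_storage *laddr)
{
	struct iw_cm_id *cm_id;
	int ret;

	cm_id = iw_create_cm_id(device, example_cm_handler, listener);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id->local_addr = *laddr;	/* address:port to listen on */

	ret = iw_cm_listen(cm_id, 8);
	if (ret)
		iw_destroy_cm_id(cm_id);
	return ret;
}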
/* * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved. * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved. * Copyright (c) 2004-2007 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/slab.h> #include <linux/string.h> #include "agent.h" #include "smi.h" #include "mad_priv.h" #define SPFX "ib_agent: " struct ib_agent_port_private { struct list_head port_list; struct ib_mad_agent *agent[2]; }; static DEFINE_SPINLOCK(ib_agent_port_list_lock); static LIST_HEAD(ib_agent_port_list); static struct ib_agent_port_private * __ib_get_agent_port(const struct ib_device *device, int port_num) { struct ib_agent_port_private *entry; list_for_each_entry(entry, &ib_agent_port_list, port_list) { if (entry->agent[1]->device == device && entry->agent[1]->port_num == port_num) return entry; } return NULL; } static struct ib_agent_port_private * ib_get_agent_port(const struct ib_device *device, int port_num) { struct ib_agent_port_private *entry; unsigned long flags; spin_lock_irqsave(&ib_agent_port_list_lock, flags); entry = __ib_get_agent_port(device, port_num); spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); return entry; } void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *grh, const struct ib_wc *wc, const struct ib_device *device, int port_num, int qpn, size_t resp_mad_len, bool opa) { struct ib_agent_port_private *port_priv; struct ib_mad_agent *agent; struct ib_mad_send_buf *send_buf; struct ib_ah *ah; struct ib_mad_send_wr_private *mad_send_wr; if (rdma_cap_ib_switch(device)) port_priv = ib_get_agent_port(device, 0); else port_priv = ib_get_agent_port(device, port_num); if (!port_priv) { dev_err(&device->dev, "Unable to find port agent\n"); return; } agent = port_priv->agent[qpn]; ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num); if (IS_ERR(ah)) { dev_err(&device->dev, "ib_create_ah_from_wc error %ld\n", PTR_ERR(ah)); return; } if (opa && mad_hdr->base_version != OPA_MGMT_BASE_VERSION) resp_mad_len = IB_MGMT_MAD_SIZE; send_buf = ib_create_send_mad(agent, 
wc->src_qp, wc->pkey_index, 0, IB_MGMT_MAD_HDR, resp_mad_len - IB_MGMT_MAD_HDR, GFP_KERNEL, mad_hdr->base_version); if (IS_ERR(send_buf)) { dev_err(&device->dev, "ib_create_send_mad error\n"); goto err1; } memcpy(send_buf->mad, mad_hdr, resp_mad_len); send_buf->ah = ah; if (rdma_cap_ib_switch(device)) { mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, send_buf); mad_send_wr->send_wr.port_num = port_num; } if (ib_post_send_mad(send_buf, NULL)) { dev_err(&device->dev, "ib_post_send_mad error\n"); goto err2; } return; err2: ib_free_send_mad(send_buf); err1: rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE); } static void agent_send_handler(struct ib_mad_agent *mad_agent, struct ib_mad_send_wc *mad_send_wc) { rdma_destroy_ah(mad_send_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE); ib_free_send_mad(mad_send_wc->send_buf); } int ib_agent_port_open(struct ib_device *device, int port_num) { struct ib_agent_port_private *port_priv; unsigned long flags; int ret; /* Create new device info */ port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); if (!port_priv) { ret = -ENOMEM; goto error1; } if (rdma_cap_ib_smi(device, port_num)) { /* Obtain send only MAD agent for SMI QP */ port_priv->agent[0] = ib_register_mad_agent(device, port_num, IB_QPT_SMI, NULL, 0, &agent_send_handler, NULL, NULL, 0); if (IS_ERR(port_priv->agent[0])) { ret = PTR_ERR(port_priv->agent[0]); goto error2; } } /* Obtain send only MAD agent for GSI QP */ port_priv->agent[1] = ib_register_mad_agent(device, port_num, IB_QPT_GSI, NULL, 0, &agent_send_handler, NULL, NULL, 0); if (IS_ERR(port_priv->agent[1])) { ret = PTR_ERR(port_priv->agent[1]); goto error3; } spin_lock_irqsave(&ib_agent_port_list_lock, flags); list_add_tail(&port_priv->port_list, &ib_agent_port_list); spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); return 0; error3: if (port_priv->agent[0]) ib_unregister_mad_agent(port_priv->agent[0]); error2: kfree(port_priv); error1: return ret; } int ib_agent_port_close(struct ib_device *device, int port_num) { struct ib_agent_port_private *port_priv; unsigned long flags; spin_lock_irqsave(&ib_agent_port_list_lock, flags); port_priv = __ib_get_agent_port(device, port_num); if (port_priv == NULL) { spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); dev_err(&device->dev, "Port %d not found\n", port_num); return -ENODEV; } list_del(&port_priv->port_list); spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); ib_unregister_mad_agent(port_priv->agent[1]); if (port_priv->agent[0]) ib_unregister_mad_agent(port_priv->agent[0]); kfree(port_priv); return 0; }
linux-master
drivers/infiniband/core/agent.c
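/*
 * Illustrative sketch (not part of agent.c): the send-only MAD agent
 * registration pattern used above, written as a standalone consumer. No
 * struct ib_mad_reg_req and no recv_handler are supplied, so the agent can
 * only transmit MADs, and the send handler simply releases the address
 * handle and the send buffer, mirroring agent_send_handler(). The
 * "example_" names are hypothetical.
 */
#include <rdma/ib_mad.h>

static void example_mad_send_done(struct ib_mad_agent *agent,
				  struct ib_mad_send_wc *send_wc)
{
	/* The AH was attached to send_buf->ah before the MAD was posted. */
	rdma_destroy_ah(send_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
	ib_free_send_mad(send_wc->send_buf);
}

static struct ib_mad_agent *example_open_send_only_agent(struct ib_device *dev,
							 u32 port_num)
{
	/* GSI QP, no registration request: send-only, like agent[1] above. */
	return ib_register_mad_agent(dev, port_num, IB_QPT_GSI, NULL, 0,
				     example_mad_send_done, NULL, NULL, 0);
}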
/* * Copyright (c) 2016, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/file.h> #include <linux/anon_inodes.h> #include <linux/sched/mm.h> #include <rdma/ib_verbs.h> #include <rdma/uverbs_types.h> #include <linux/rcupdate.h> #include <rdma/uverbs_ioctl.h> #include <rdma/rdma_user_ioctl.h> #include "uverbs.h" #include "core_priv.h" #include "rdma_core.h" static void uverbs_uobject_free(struct kref *ref) { kfree_rcu(container_of(ref, struct ib_uobject, ref), rcu); } /* * In order to indicate we no longer needs this uobject, uverbs_uobject_put * is called. When the reference count is decreased, the uobject is freed. * For example, this is used when attaching a completion channel to a CQ. */ void uverbs_uobject_put(struct ib_uobject *uobject) { kref_put(&uobject->ref, uverbs_uobject_free); } EXPORT_SYMBOL(uverbs_uobject_put); static int uverbs_try_lock_object(struct ib_uobject *uobj, enum rdma_lookup_mode mode) { /* * When a shared access is required, we use a positive counter. Each * shared access request checks that the value != -1 and increment it. * Exclusive access is required for operations like write or destroy. * In exclusive access mode, we check that the counter is zero (nobody * claimed this object) and we set it to -1. Releasing a shared access * lock is done simply by decreasing the counter. As for exclusive * access locks, since only a single one of them is allowed * concurrently, setting the counter to zero is enough for releasing * this lock. */ switch (mode) { case UVERBS_LOOKUP_READ: return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ? -EBUSY : 0; case UVERBS_LOOKUP_WRITE: /* lock is exclusive */ return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY; case UVERBS_LOOKUP_DESTROY: return 0; } return 0; } static void assert_uverbs_usecnt(struct ib_uobject *uobj, enum rdma_lookup_mode mode) { #ifdef CONFIG_LOCKDEP switch (mode) { case UVERBS_LOOKUP_READ: WARN_ON(atomic_read(&uobj->usecnt) <= 0); break; case UVERBS_LOOKUP_WRITE: WARN_ON(atomic_read(&uobj->usecnt) != -1); break; case UVERBS_LOOKUP_DESTROY: break; } #endif } /* * This must be called with the hw_destroy_rwsem locked for read or write, * also the uobject itself must be locked for write. 
* * Upon return the HW object is guaranteed to be destroyed. * * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held, * however the type's allocat_commit function cannot have been called and the * uobject cannot be on the uobjects_lists * * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via * rdma_lookup_get_uobject) and the object is left in a state where the caller * needs to call rdma_lookup_put_uobject. * * For all other destroy modes this function internally unlocks the uobject * and consumes the kref on the uobj. */ static int uverbs_destroy_uobject(struct ib_uobject *uobj, enum rdma_remove_reason reason, struct uverbs_attr_bundle *attrs) { struct ib_uverbs_file *ufile = attrs->ufile; unsigned long flags; int ret; lockdep_assert_held(&ufile->hw_destroy_rwsem); assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE); if (reason == RDMA_REMOVE_ABORT) { WARN_ON(!list_empty(&uobj->list)); WARN_ON(!uobj->context); uobj->uapi_object->type_class->alloc_abort(uobj); } else if (uobj->object) { ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason, attrs); if (ret) /* Nothing to be done, wait till ucontext will clean it */ return ret; uobj->object = NULL; } uobj->context = NULL; /* * For DESTROY the usecnt is not changed, the caller is expected to * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR * handle. */ if (reason != RDMA_REMOVE_DESTROY) atomic_set(&uobj->usecnt, 0); else uobj->uapi_object->type_class->remove_handle(uobj); if (!list_empty(&uobj->list)) { spin_lock_irqsave(&ufile->uobjects_lock, flags); list_del_init(&uobj->list); spin_unlock_irqrestore(&ufile->uobjects_lock, flags); /* * Pairs with the get in rdma_alloc_commit_uobject(), could * destroy uobj. */ uverbs_uobject_put(uobj); } /* * When aborting the stack kref remains owned by the core code, and is * not transferred into the type. Pairs with the get in alloc_uobj */ if (reason == RDMA_REMOVE_ABORT) uverbs_uobject_put(uobj); return 0; } /* * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY * sequence. It should only be used from command callbacks. On success the * caller must pair this with uobj_put_destroy(). This * version requires the caller to have already obtained an * LOOKUP_DESTROY uobject kref. */ int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs) { struct ib_uverbs_file *ufile = attrs->ufile; int ret; down_read(&ufile->hw_destroy_rwsem); /* * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY. * This is because any other concurrent thread can still see the object * in the xarray due to RCU. Leaving it locked ensures nothing else will * touch it. */ ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE); if (ret) goto out_unlock; ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY, attrs); if (ret) { atomic_set(&uobj->usecnt, 0); goto out_unlock; } out_unlock: up_read(&ufile->hw_destroy_rwsem); return ret; } /* * uobj_get_destroy destroys the HW object and returns a handle to the uobj * with a NULL object pointer. The caller must pair this with * uobj_put_destroy(). 
*/ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj, u32 id, struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj; int ret; uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id, UVERBS_LOOKUP_DESTROY, attrs); if (IS_ERR(uobj)) return uobj; ret = uobj_destroy(uobj, attrs); if (ret) { rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY); return ERR_PTR(ret); } return uobj; } /* * Does both uobj_get_destroy() and uobj_put_destroy(). Returns 0 on success * (negative errno on failure). For use by callers that do not need the uobj. */ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id, struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj; uobj = __uobj_get_destroy(obj, id, attrs); if (IS_ERR(uobj)) return PTR_ERR(uobj); uobj_put_destroy(uobj); return 0; } /* alloc_uobj must be undone by uverbs_destroy_uobject() */ static struct ib_uobject *alloc_uobj(struct uverbs_attr_bundle *attrs, const struct uverbs_api_object *obj) { struct ib_uverbs_file *ufile = attrs->ufile; struct ib_uobject *uobj; if (!attrs->context) { struct ib_ucontext *ucontext = ib_uverbs_get_ucontext_file(ufile); if (IS_ERR(ucontext)) return ERR_CAST(ucontext); attrs->context = ucontext; } uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL); if (!uobj) return ERR_PTR(-ENOMEM); /* * user_handle should be filled by the handler, * The object is added to the list in the commit stage. */ uobj->ufile = ufile; uobj->context = attrs->context; INIT_LIST_HEAD(&uobj->list); uobj->uapi_object = obj; /* * Allocated objects start out as write locked to deny any other * syscalls from accessing them until they are committed. See * rdma_alloc_commit_uobject */ atomic_set(&uobj->usecnt, -1); kref_init(&uobj->ref); return uobj; } static int idr_add_uobj(struct ib_uobject *uobj) { /* * We start with allocating an idr pointing to NULL. This represents an * object which isn't initialized yet. We'll replace it later on with * the real object once we commit. */ return xa_alloc(&uobj->ufile->idr, &uobj->id, NULL, xa_limit_32b, GFP_KERNEL); } /* Returns the ib_uobject or an error. The caller should check for IS_ERR. */ static struct ib_uobject * lookup_get_idr_uobject(const struct uverbs_api_object *obj, struct ib_uverbs_file *ufile, s64 id, enum rdma_lookup_mode mode) { struct ib_uobject *uobj; if (id < 0 || id > ULONG_MAX) return ERR_PTR(-EINVAL); rcu_read_lock(); /* * The idr_find is guaranteed to return a pointer to something that * isn't freed yet, or NULL, as the free after idr_remove goes through * kfree_rcu(). However the object may still have been released and * kfree() could be called at any time. */ uobj = xa_load(&ufile->idr, id); if (!uobj || !kref_get_unless_zero(&uobj->ref)) uobj = ERR_PTR(-ENOENT); rcu_read_unlock(); return uobj; } static struct ib_uobject * lookup_get_fd_uobject(const struct uverbs_api_object *obj, struct ib_uverbs_file *ufile, s64 id, enum rdma_lookup_mode mode) { const struct uverbs_obj_fd_type *fd_type; struct file *f; struct ib_uobject *uobject; int fdno = id; if (fdno != id) return ERR_PTR(-EINVAL); if (mode != UVERBS_LOOKUP_READ) return ERR_PTR(-EOPNOTSUPP); if (!obj->type_attrs) return ERR_PTR(-EIO); fd_type = container_of(obj->type_attrs, struct uverbs_obj_fd_type, type); f = fget(fdno); if (!f) return ERR_PTR(-EBADF); uobject = f->private_data; /* * fget(id) ensures we are not currently running * uverbs_uobject_fd_release(), and the caller is expected to ensure * that release is never done while a call to lookup is possible. 
*/ if (f->f_op != fd_type->fops || uobject->ufile != ufile) { fput(f); return ERR_PTR(-EBADF); } uverbs_uobject_get(uobject); return uobject; } struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj, struct ib_uverbs_file *ufile, s64 id, enum rdma_lookup_mode mode, struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj; int ret; if (obj == ERR_PTR(-ENOMSG)) { /* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */ uobj = lookup_get_idr_uobject(NULL, ufile, id, mode); if (IS_ERR(uobj)) return uobj; } else { if (IS_ERR(obj)) return ERR_PTR(-EINVAL); uobj = obj->type_class->lookup_get(obj, ufile, id, mode); if (IS_ERR(uobj)) return uobj; if (uobj->uapi_object != obj) { ret = -EINVAL; goto free; } } /* * If we have been disassociated block every command except for * DESTROY based commands. */ if (mode != UVERBS_LOOKUP_DESTROY && !srcu_dereference(ufile->device->ib_dev, &ufile->device->disassociate_srcu)) { ret = -EIO; goto free; } ret = uverbs_try_lock_object(uobj, mode); if (ret) goto free; if (attrs) attrs->context = uobj->context; return uobj; free: uobj->uapi_object->type_class->lookup_put(uobj, mode); uverbs_uobject_put(uobj); return ERR_PTR(ret); } static struct ib_uobject * alloc_begin_idr_uobject(const struct uverbs_api_object *obj, struct uverbs_attr_bundle *attrs) { int ret; struct ib_uobject *uobj; uobj = alloc_uobj(attrs, obj); if (IS_ERR(uobj)) return uobj; ret = idr_add_uobj(uobj); if (ret) goto uobj_put; ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device, RDMACG_RESOURCE_HCA_OBJECT); if (ret) goto remove; return uobj; remove: xa_erase(&attrs->ufile->idr, uobj->id); uobj_put: uverbs_uobject_put(uobj); return ERR_PTR(ret); } static struct ib_uobject * alloc_begin_fd_uobject(const struct uverbs_api_object *obj, struct uverbs_attr_bundle *attrs) { const struct uverbs_obj_fd_type *fd_type; int new_fd; struct ib_uobject *uobj, *ret; struct file *filp; uobj = alloc_uobj(attrs, obj); if (IS_ERR(uobj)) return uobj; fd_type = container_of(obj->type_attrs, struct uverbs_obj_fd_type, type); if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release && fd_type->fops->release != &uverbs_async_event_release)) { ret = ERR_PTR(-EINVAL); goto err_fd; } new_fd = get_unused_fd_flags(O_CLOEXEC); if (new_fd < 0) { ret = ERR_PTR(new_fd); goto err_fd; } /* Note that uverbs_uobject_fd_release() is called during abort */ filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL, fd_type->flags); if (IS_ERR(filp)) { ret = ERR_CAST(filp); goto err_getfile; } uobj->object = filp; uobj->id = new_fd; return uobj; err_getfile: put_unused_fd(new_fd); err_fd: uverbs_uobject_put(uobj); return ret; } struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj, struct uverbs_attr_bundle *attrs) { struct ib_uverbs_file *ufile = attrs->ufile; struct ib_uobject *ret; if (IS_ERR(obj)) return ERR_PTR(-EINVAL); /* * The hw_destroy_rwsem is held across the entire object creation and * released during rdma_alloc_commit_uobject or * rdma_alloc_abort_uobject */ if (!down_read_trylock(&ufile->hw_destroy_rwsem)) return ERR_PTR(-EIO); ret = obj->type_class->alloc_begin(obj, attrs); if (IS_ERR(ret)) { up_read(&ufile->hw_destroy_rwsem); return ret; } return ret; } static void alloc_abort_idr_uobject(struct ib_uobject *uobj) { ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device, RDMACG_RESOURCE_HCA_OBJECT); xa_erase(&uobj->ufile->idr, uobj->id); } static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj, enum rdma_remove_reason why, struct 
uverbs_attr_bundle *attrs) { const struct uverbs_obj_idr_type *idr_type = container_of(uobj->uapi_object->type_attrs, struct uverbs_obj_idr_type, type); int ret = idr_type->destroy_object(uobj, why, attrs); if (ret) return ret; if (why == RDMA_REMOVE_ABORT) return 0; ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device, RDMACG_RESOURCE_HCA_OBJECT); return 0; } static void remove_handle_idr_uobject(struct ib_uobject *uobj) { xa_erase(&uobj->ufile->idr, uobj->id); /* Matches the kref in alloc_commit_idr_uobject */ uverbs_uobject_put(uobj); } static void alloc_abort_fd_uobject(struct ib_uobject *uobj) { struct file *filp = uobj->object; fput(filp); put_unused_fd(uobj->id); } static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { const struct uverbs_obj_fd_type *fd_type = container_of( uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type); fd_type->destroy_object(uobj, why); return 0; } static void remove_handle_fd_uobject(struct ib_uobject *uobj) { } static void alloc_commit_idr_uobject(struct ib_uobject *uobj) { struct ib_uverbs_file *ufile = uobj->ufile; void *old; /* * We already allocated this IDR with a NULL object, so * this shouldn't fail. * * NOTE: Storing the uobj transfers our kref on uobj to the XArray. * It will be put by remove_commit_idr_uobject() */ old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL); WARN_ON(old != NULL); } static void swap_idr_uobjects(struct ib_uobject *obj_old, struct ib_uobject *obj_new) { struct ib_uverbs_file *ufile = obj_old->ufile; void *old; /* * New must be an object that been allocated but not yet committed, this * moves the pre-committed state to obj_old, new still must be comitted. */ old = xa_cmpxchg(&ufile->idr, obj_old->id, obj_old, XA_ZERO_ENTRY, GFP_KERNEL); if (WARN_ON(old != obj_old)) return; swap(obj_old->id, obj_new->id); old = xa_cmpxchg(&ufile->idr, obj_old->id, NULL, obj_old, GFP_KERNEL); WARN_ON(old != NULL); } static void alloc_commit_fd_uobject(struct ib_uobject *uobj) { int fd = uobj->id; struct file *filp = uobj->object; /* Matching put will be done in uverbs_uobject_fd_release() */ kref_get(&uobj->ufile->ref); /* This shouldn't be used anymore. Use the file object instead */ uobj->id = 0; /* * NOTE: Once we install the file we loose ownership of our kref on * uobj. It will be put by uverbs_uobject_fd_release() */ filp->private_data = uobj; fd_install(fd, filp); } /* * In all cases rdma_alloc_commit_uobject() consumes the kref to uobj and the * caller can no longer assume uobj is valid. If this function fails it * destroys the uboject, including the attached HW object. */ void rdma_alloc_commit_uobject(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs) { struct ib_uverbs_file *ufile = attrs->ufile; /* kref is held so long as the uobj is on the uobj list. */ uverbs_uobject_get(uobj); spin_lock_irq(&ufile->uobjects_lock); list_add(&uobj->list, &ufile->uobjects); spin_unlock_irq(&ufile->uobjects_lock); /* matches atomic_set(-1) in alloc_uobj */ atomic_set(&uobj->usecnt, 0); /* alloc_commit consumes the uobj kref */ uobj->uapi_object->type_class->alloc_commit(uobj); /* Matches the down_read in rdma_alloc_begin_uobject */ up_read(&ufile->hw_destroy_rwsem); } /* * new_uobj will be assigned to the handle currently used by to_uobj, and * to_uobj will be destroyed. 
* * Upon return the caller must do: * rdma_alloc_commit_uobject(new_uobj) * uobj_put_destroy(to_uobj) * * to_uobj must have a write get but the put mode switches to destroy once * this is called. */ void rdma_assign_uobject(struct ib_uobject *to_uobj, struct ib_uobject *new_uobj, struct uverbs_attr_bundle *attrs) { assert_uverbs_usecnt(new_uobj, UVERBS_LOOKUP_WRITE); if (WARN_ON(to_uobj->uapi_object != new_uobj->uapi_object || !to_uobj->uapi_object->type_class->swap_uobjects)) return; to_uobj->uapi_object->type_class->swap_uobjects(to_uobj, new_uobj); /* * If this fails then the uobject is still completely valid (though with * a new ID) and we leak it until context close. */ uverbs_destroy_uobject(to_uobj, RDMA_REMOVE_DESTROY, attrs); } /* * This consumes the kref for uobj. It is up to the caller to unwind the HW * object and anything else connected to uobj before calling this. */ void rdma_alloc_abort_uobject(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs, bool hw_obj_valid) { struct ib_uverbs_file *ufile = uobj->ufile; int ret; if (hw_obj_valid) { ret = uobj->uapi_object->type_class->destroy_hw( uobj, RDMA_REMOVE_ABORT, attrs); /* * If the driver couldn't destroy the object then go ahead and * commit it. Leaking objects that can't be destroyed is only * done during FD close after the driver has a few more tries to * destroy it. */ if (WARN_ON(ret)) return rdma_alloc_commit_uobject(uobj, attrs); } uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs); /* Matches the down_read in rdma_alloc_begin_uobject */ up_read(&ufile->hw_destroy_rwsem); } static void lookup_put_idr_uobject(struct ib_uobject *uobj, enum rdma_lookup_mode mode) { } static void lookup_put_fd_uobject(struct ib_uobject *uobj, enum rdma_lookup_mode mode) { struct file *filp = uobj->object; WARN_ON(mode != UVERBS_LOOKUP_READ); /* * This indirectly calls uverbs_uobject_fd_release() and free the * object */ fput(filp); } void rdma_lookup_put_uobject(struct ib_uobject *uobj, enum rdma_lookup_mode mode) { assert_uverbs_usecnt(uobj, mode); /* * In order to unlock an object, either decrease its usecnt for * read access or zero it in case of exclusive access. See * uverbs_try_lock_object for locking schema information. */ switch (mode) { case UVERBS_LOOKUP_READ: atomic_dec(&uobj->usecnt); break; case UVERBS_LOOKUP_WRITE: atomic_set(&uobj->usecnt, 0); break; case UVERBS_LOOKUP_DESTROY: break; } uobj->uapi_object->type_class->lookup_put(uobj, mode); /* Pairs with the kref obtained by type->lookup_get */ uverbs_uobject_put(uobj); } void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile) { xa_init_flags(&ufile->idr, XA_FLAGS_ALLOC); } void release_ufile_idr_uobject(struct ib_uverbs_file *ufile) { struct ib_uobject *entry; unsigned long id; /* * At this point uverbs_cleanup_ufile() is guaranteed to have run, and * there are no HW objects left, however the xarray is still populated * with anything that has not been cleaned up by userspace. Since the * kref on ufile is 0, nothing is allowed to call lookup_get. 
* * This is an optimized equivalent to remove_handle_idr_uobject */ xa_for_each(&ufile->idr, id, entry) { WARN_ON(entry->object); uverbs_uobject_put(entry); } xa_destroy(&ufile->idr); } const struct uverbs_obj_type_class uverbs_idr_class = { .alloc_begin = alloc_begin_idr_uobject, .lookup_get = lookup_get_idr_uobject, .alloc_commit = alloc_commit_idr_uobject, .alloc_abort = alloc_abort_idr_uobject, .lookup_put = lookup_put_idr_uobject, .destroy_hw = destroy_hw_idr_uobject, .remove_handle = remove_handle_idr_uobject, .swap_uobjects = swap_idr_uobjects, }; EXPORT_SYMBOL(uverbs_idr_class); /* * Users of UVERBS_TYPE_ALLOC_FD should set this function as the struct * file_operations release method. */ int uverbs_uobject_fd_release(struct inode *inode, struct file *filp) { struct ib_uverbs_file *ufile; struct ib_uobject *uobj; /* * This can only happen if the fput came from alloc_abort_fd_uobject() */ if (!filp->private_data) return 0; uobj = filp->private_data; ufile = uobj->ufile; if (down_read_trylock(&ufile->hw_destroy_rwsem)) { struct uverbs_attr_bundle attrs = { .context = uobj->context, .ufile = ufile, }; /* * lookup_get_fd_uobject holds the kref on the struct file any * time a FD uobj is locked, which prevents this release * method from being invoked. Meaning we can always get the * write lock here, or we have a kernel bug. */ WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE)); uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE, &attrs); up_read(&ufile->hw_destroy_rwsem); } /* Matches the get in alloc_commit_fd_uobject() */ kref_put(&ufile->ref, ib_uverbs_release_file); /* Pairs with filp->private_data in alloc_begin_fd_uobject */ uverbs_uobject_put(uobj); return 0; } EXPORT_SYMBOL(uverbs_uobject_fd_release); /* * Drop the ucontext off the ufile and completely disconnect it from the * ib_device */ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile, enum rdma_remove_reason reason) { struct ib_ucontext *ucontext = ufile->ucontext; struct ib_device *ib_dev = ucontext->device; /* * If we are closing the FD then the user mmap VMAs must have * already been destroyed as they hold on to the filep, otherwise * they need to be zap'd. */ if (reason == RDMA_REMOVE_DRIVER_REMOVE) { uverbs_user_mmap_disassociate(ufile); if (ib_dev->ops.disassociate_ucontext) ib_dev->ops.disassociate_ucontext(ucontext); } ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE); rdma_restrack_del(&ucontext->res); ib_dev->ops.dealloc_ucontext(ucontext); WARN_ON(!xa_empty(&ucontext->mmap_xa)); kfree(ucontext); ufile->ucontext = NULL; } static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile, enum rdma_remove_reason reason) { struct ib_uobject *obj, *next_obj; int ret = -EINVAL; struct uverbs_attr_bundle attrs = { .ufile = ufile }; /* * This shouldn't run while executing other commands on this * context. Thus, the only thing we should take care of is * releasing a FD while traversing this list. The FD could be * closed and released from the _release fop of this FD. * In order to mitigate this, we add a lock. * We take and release the lock per traversal in order to let * other threads (which might still use the FDs) chance to run. */ list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) { attrs.context = obj->context; /* * if we hit this WARN_ON, that means we are * racing with a lookup_get. 
*/ WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE)); if (reason == RDMA_REMOVE_DRIVER_FAILURE) obj->object = NULL; if (!uverbs_destroy_uobject(obj, reason, &attrs)) ret = 0; else atomic_set(&obj->usecnt, 0); } if (reason == RDMA_REMOVE_DRIVER_FAILURE) { WARN_ON(!list_empty(&ufile->uobjects)); return 0; } return ret; } /* * Destroy the ucontext and every uobject associated with it. * * This is internally locked and can be called in parallel from multiple * contexts. */ void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile, enum rdma_remove_reason reason) { down_write(&ufile->hw_destroy_rwsem); /* * If a ucontext was never created then we can't have any uobjects to * cleanup, nothing to do. */ if (!ufile->ucontext) goto done; while (!list_empty(&ufile->uobjects) && !__uverbs_cleanup_ufile(ufile, reason)) { } if (WARN_ON(!list_empty(&ufile->uobjects))) __uverbs_cleanup_ufile(ufile, RDMA_REMOVE_DRIVER_FAILURE); ufile_destroy_ucontext(ufile, reason); done: up_write(&ufile->hw_destroy_rwsem); } const struct uverbs_obj_type_class uverbs_fd_class = { .alloc_begin = alloc_begin_fd_uobject, .lookup_get = lookup_get_fd_uobject, .alloc_commit = alloc_commit_fd_uobject, .alloc_abort = alloc_abort_fd_uobject, .lookup_put = lookup_put_fd_uobject, .destroy_hw = destroy_hw_fd_uobject, .remove_handle = remove_handle_fd_uobject, }; EXPORT_SYMBOL(uverbs_fd_class); struct ib_uobject * uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access, s64 id, struct uverbs_attr_bundle *attrs) { const struct uverbs_api_object *obj = uapi_get_object(attrs->ufile->device->uapi, object_id); switch (access) { case UVERBS_ACCESS_READ: return rdma_lookup_get_uobject(obj, attrs->ufile, id, UVERBS_LOOKUP_READ, attrs); case UVERBS_ACCESS_DESTROY: /* Actual destruction is done inside uverbs_handle_method */ return rdma_lookup_get_uobject(obj, attrs->ufile, id, UVERBS_LOOKUP_DESTROY, attrs); case UVERBS_ACCESS_WRITE: return rdma_lookup_get_uobject(obj, attrs->ufile, id, UVERBS_LOOKUP_WRITE, attrs); case UVERBS_ACCESS_NEW: return rdma_alloc_begin_uobject(obj, attrs); default: WARN_ON(true); return ERR_PTR(-EOPNOTSUPP); } } void uverbs_finalize_object(struct ib_uobject *uobj, enum uverbs_obj_access access, bool hw_obj_valid, bool commit, struct uverbs_attr_bundle *attrs) { /* * refcounts should be handled at the object level and not at the * uobject level. Refcounts of the objects themselves are done in * handlers. */ switch (access) { case UVERBS_ACCESS_READ: rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ); break; case UVERBS_ACCESS_WRITE: rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE); break; case UVERBS_ACCESS_DESTROY: if (uobj) rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY); break; case UVERBS_ACCESS_NEW: if (commit) rdma_alloc_commit_uobject(uobj, attrs); else rdma_alloc_abort_uobject(uobj, attrs, hw_obj_valid); break; default: WARN_ON(true); } }
linux-master
drivers/infiniband/core/rdma_core.c
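/*
 * Illustrative sketch (not part of rdma_core.c): the shared (read) lock
 * pattern implemented above, as a method handler might use it. The function
 * itself and what would be done with uobj->object are hypothetical; the
 * rdma_lookup_get_uobject()/rdma_lookup_put_uobject() pairing and the
 * UVERBS_LOOKUP_READ mode follow the code in this file.
 */
#include <linux/err.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>

static int example_inspect_uobject(const struct uverbs_api_object *api_obj,
				   struct uverbs_attr_bundle *attrs, s64 id)
{
	struct ib_uobject *uobj;

	/* Takes a kref on the uobject and a shared lock on success. */
	uobj = rdma_lookup_get_uobject(api_obj, attrs->ufile, id,
				       UVERBS_LOOKUP_READ, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * uobj->object points at the driver object (an ib_cq, ib_qp, ...)
	 * and is safe to read while the shared lock is held.
	 */

	/* Drops the shared lock and the kref taken by the lookup. */
	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
	return 0;
}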
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Trace points for core RDMA functions.
 *
 * Author: Chuck Lever <chuck.lever@oracle.com>
 *
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 */

#define CREATE_TRACE_POINTS

#include <trace/events/rdma_core.h>
linux-master
drivers/infiniband/core/trace.c
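/*
 * Illustrative sketch (not part of trace.c): the other half of the pattern
 * above. Events are declared once in a trace header with TRACE_EVENT(); a
 * single .c file, such as trace.c here, defines CREATE_TRACE_POINTS before
 * including that header to emit the tracepoint bodies, while every other
 * user just includes the header and calls trace_<event>(). The "example"
 * subsystem and event below are hypothetical and assume the header lives in
 * include/trace/events/ so the default include path applies.
 */

/* include/trace/events/example.h (hypothetical) */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM example

#if !defined(_TRACE_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EXAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(example_event,
	TP_PROTO(u32 id),
	TP_ARGS(id),
	TP_STRUCT__entry(
		__field(u32, id)
	),
	TP_fast_assign(
		__entry->id = id;
	),
	TP_printk("id=%u", __entry->id)
);

#endif /* _TRACE_EXAMPLE_H */

/* This include must stay outside the TRACE_HEADER_MULTI_READ guard. */
#include <trace/define_trace.h>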
/* * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 Intel Corporation. All rights reserved. * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2009 HNR Consulting. All rights reserved. * Copyright (c) 2014,2018 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/security.h> #include <linux/xarray.h> #include <rdma/ib_cache.h> #include "mad_priv.h" #include "core_priv.h" #include "mad_rmpp.h" #include "smi.h" #include "opa_smi.h" #include "agent.h" #define CREATE_TRACE_POINTS #include <trace/events/ib_mad.h> #ifdef CONFIG_TRACEPOINTS static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_qp_info *qp_info, struct trace_event_raw_ib_mad_send_template *entry) { struct ib_ud_wr *wr = &mad_send_wr->send_wr; struct rdma_ah_attr attr = {}; rdma_query_ah(wr->ah, &attr); /* These are common */ entry->sl = attr.sl; entry->rqpn = wr->remote_qpn; entry->rqkey = wr->remote_qkey; entry->dlid = rdma_ah_get_dlid(&attr); } #endif static int mad_sendq_size = IB_MAD_QP_SEND_SIZE; static int mad_recvq_size = IB_MAD_QP_RECV_SIZE; module_param_named(send_queue_size, mad_sendq_size, int, 0444); MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests"); module_param_named(recv_queue_size, mad_recvq_size, int, 0444); MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests"); static DEFINE_XARRAY_ALLOC1(ib_mad_clients); static u32 ib_mad_client_next; static struct list_head ib_mad_port_list; /* Port list lock */ static DEFINE_SPINLOCK(ib_mad_port_list_lock); /* Forward declarations */ static int method_in_use(struct ib_mad_mgmt_method_table **method, struct ib_mad_reg_req *mad_reg_req); static void remove_mad_reg_req(struct ib_mad_agent_private *priv); static struct ib_mad_agent_private *find_mad_agent( struct ib_mad_port_private *port_priv, const struct ib_mad_hdr *mad); static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, struct ib_mad_private *mad); static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); static void timeout_sends(struct work_struct 
*work); static void local_completions(struct work_struct *work); static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, struct ib_mad_agent_private *agent_priv, u8 mgmt_class); static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, struct ib_mad_agent_private *agent_priv); static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, struct ib_wc *wc); static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc); /* * Returns a ib_mad_port_private structure or NULL for a device/port * Assumes ib_mad_port_list_lock is being held */ static inline struct ib_mad_port_private * __ib_get_mad_port(struct ib_device *device, u32 port_num) { struct ib_mad_port_private *entry; list_for_each_entry(entry, &ib_mad_port_list, port_list) { if (entry->device == device && entry->port_num == port_num) return entry; } return NULL; } /* * Wrapper function to return a ib_mad_port_private structure or NULL * for a device/port */ static inline struct ib_mad_port_private * ib_get_mad_port(struct ib_device *device, u32 port_num) { struct ib_mad_port_private *entry; unsigned long flags; spin_lock_irqsave(&ib_mad_port_list_lock, flags); entry = __ib_get_mad_port(device, port_num); spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); return entry; } static inline u8 convert_mgmt_class(u8 mgmt_class) { /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */ return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ? 0 : mgmt_class; } static int get_spl_qp_index(enum ib_qp_type qp_type) { switch (qp_type) { case IB_QPT_SMI: return 0; case IB_QPT_GSI: return 1; default: return -1; } } static int vendor_class_index(u8 mgmt_class) { return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START; } static int is_vendor_class(u8 mgmt_class) { if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) || (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END)) return 0; return 1; } static int is_vendor_oui(char *oui) { if (oui[0] || oui[1] || oui[2]) return 1; return 0; } static int is_vendor_method_in_use( struct ib_mad_mgmt_vendor_class *vendor_class, struct ib_mad_reg_req *mad_reg_req) { struct ib_mad_mgmt_method_table *method; int i; for (i = 0; i < MAX_MGMT_OUI; i++) { if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) { method = vendor_class->method_table[i]; if (method) { if (method_in_use(&method, mad_reg_req)) return 1; else break; } } } return 0; } int ib_response_mad(const struct ib_mad_hdr *hdr) { return ((hdr->method & IB_MGMT_METHOD_RESP) || (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) || ((hdr->mgmt_class == IB_MGMT_CLASS_BM) && (hdr->attr_mod & IB_BM_ATTR_MOD_RESP))); } EXPORT_SYMBOL(ib_response_mad); /* * ib_register_mad_agent - Register to send/receive MADs * * Context: Process context. 
*/ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, u32 port_num, enum ib_qp_type qp_type, struct ib_mad_reg_req *mad_reg_req, u8 rmpp_version, ib_mad_send_handler send_handler, ib_mad_recv_handler recv_handler, void *context, u32 registration_flags) { struct ib_mad_port_private *port_priv; struct ib_mad_agent *ret = ERR_PTR(-EINVAL); struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_reg_req *reg_req = NULL; struct ib_mad_mgmt_class_table *class; struct ib_mad_mgmt_vendor_class_table *vendor; struct ib_mad_mgmt_vendor_class *vendor_class; struct ib_mad_mgmt_method_table *method; int ret2, qpn; u8 mgmt_class, vclass; if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) || (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num))) return ERR_PTR(-EPROTONOSUPPORT); /* Validate parameters */ qpn = get_spl_qp_index(qp_type); if (qpn == -1) { dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n", __func__, qp_type); goto error1; } if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) { dev_dbg_ratelimited(&device->dev, "%s: invalid RMPP Version %u\n", __func__, rmpp_version); goto error1; } /* Validate MAD registration request if supplied */ if (mad_reg_req) { if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) { dev_dbg_ratelimited(&device->dev, "%s: invalid Class Version %u\n", __func__, mad_reg_req->mgmt_class_version); goto error1; } if (!recv_handler) { dev_dbg_ratelimited(&device->dev, "%s: no recv_handler\n", __func__); goto error1; } if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) { /* * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only * one in this range currently allowed */ if (mad_reg_req->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { dev_dbg_ratelimited(&device->dev, "%s: Invalid Mgmt Class 0x%x\n", __func__, mad_reg_req->mgmt_class); goto error1; } } else if (mad_reg_req->mgmt_class == 0) { /* * Class 0 is reserved in IBA and is used for * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE */ dev_dbg_ratelimited(&device->dev, "%s: Invalid Mgmt Class 0\n", __func__); goto error1; } else if (is_vendor_class(mad_reg_req->mgmt_class)) { /* * If class is in "new" vendor range, * ensure supplied OUI is not zero */ if (!is_vendor_oui(mad_reg_req->oui)) { dev_dbg_ratelimited(&device->dev, "%s: No OUI specified for class 0x%x\n", __func__, mad_reg_req->mgmt_class); goto error1; } } /* Make sure class supplied is consistent with RMPP */ if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { if (rmpp_version) { dev_dbg_ratelimited(&device->dev, "%s: RMPP version for non-RMPP class 0x%x\n", __func__, mad_reg_req->mgmt_class); goto error1; } } /* Make sure class supplied is consistent with QP type */ if (qp_type == IB_QPT_SMI) { if ((mad_reg_req->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) && (mad_reg_req->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { dev_dbg_ratelimited(&device->dev, "%s: Invalid SM QP type: class 0x%x\n", __func__, mad_reg_req->mgmt_class); goto error1; } } else { if ((mad_reg_req->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || (mad_reg_req->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { dev_dbg_ratelimited(&device->dev, "%s: Invalid GS QP type: class 0x%x\n", __func__, mad_reg_req->mgmt_class); goto error1; } } } else { /* No registration request supplied */ if (!send_handler) goto error1; if (registration_flags & IB_MAD_USER_RMPP) goto error1; } /* Validate device and port */ port_priv = ib_get_mad_port(device, port_num); if (!port_priv) { dev_dbg_ratelimited(&device->dev, "%s: Invalid port %u\n", __func__, 
port_num); ret = ERR_PTR(-ENODEV); goto error1; } /* Verify the QP requested is supported. For example, Ethernet devices * will not have QP0. */ if (!port_priv->qp_info[qpn].qp) { dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n", __func__, qpn); ret = ERR_PTR(-EPROTONOSUPPORT); goto error1; } /* Allocate structures */ mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL); if (!mad_agent_priv) { ret = ERR_PTR(-ENOMEM); goto error1; } if (mad_reg_req) { reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL); if (!reg_req) { ret = ERR_PTR(-ENOMEM); goto error3; } } /* Now, fill in the various structures */ mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; mad_agent_priv->reg_req = reg_req; mad_agent_priv->agent.rmpp_version = rmpp_version; mad_agent_priv->agent.device = device; mad_agent_priv->agent.recv_handler = recv_handler; mad_agent_priv->agent.send_handler = send_handler; mad_agent_priv->agent.context = context; mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; mad_agent_priv->agent.port_num = port_num; mad_agent_priv->agent.flags = registration_flags; spin_lock_init(&mad_agent_priv->lock); INIT_LIST_HEAD(&mad_agent_priv->send_list); INIT_LIST_HEAD(&mad_agent_priv->wait_list); INIT_LIST_HEAD(&mad_agent_priv->done_list); INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends); INIT_LIST_HEAD(&mad_agent_priv->local_list); INIT_WORK(&mad_agent_priv->local_work, local_completions); refcount_set(&mad_agent_priv->refcount, 1); init_completion(&mad_agent_priv->comp); ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type); if (ret2) { ret = ERR_PTR(ret2); goto error4; } /* * The mlx4 driver uses the top byte to distinguish which virtual * function generated the MAD, so we must avoid using it. 
*/ ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid, mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1), &ib_mad_client_next, GFP_KERNEL); if (ret2 < 0) { ret = ERR_PTR(ret2); goto error5; } /* * Make sure MAD registration (if supplied) * is non overlapping with any existing ones */ spin_lock_irq(&port_priv->reg_lock); if (mad_reg_req) { mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class); if (!is_vendor_class(mgmt_class)) { class = port_priv->version[mad_reg_req-> mgmt_class_version].class; if (class) { method = class->method_table[mgmt_class]; if (method) { if (method_in_use(&method, mad_reg_req)) goto error6; } } ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, mgmt_class); } else { /* "New" vendor class range */ vendor = port_priv->version[mad_reg_req-> mgmt_class_version].vendor; if (vendor) { vclass = vendor_class_index(mgmt_class); vendor_class = vendor->vendor_class[vclass]; if (vendor_class) { if (is_vendor_method_in_use( vendor_class, mad_reg_req)) goto error6; } } ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv); } if (ret2) { ret = ERR_PTR(ret2); goto error6; } } spin_unlock_irq(&port_priv->reg_lock); trace_ib_mad_create_agent(mad_agent_priv); return &mad_agent_priv->agent; error6: spin_unlock_irq(&port_priv->reg_lock); xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid); error5: ib_mad_agent_security_cleanup(&mad_agent_priv->agent); error4: kfree(reg_req); error3: kfree(mad_agent_priv); error1: return ret; } EXPORT_SYMBOL(ib_register_mad_agent); static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv) { if (refcount_dec_and_test(&mad_agent_priv->refcount)) complete(&mad_agent_priv->comp); } static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) { struct ib_mad_port_private *port_priv; /* Note that we could still be handling received MADs */ trace_ib_mad_unregister_agent(mad_agent_priv); /* * Canceling all sends results in dropping received response * MADs, preventing us from queuing additional work */ cancel_mads(mad_agent_priv); port_priv = mad_agent_priv->qp_info->port_priv; cancel_delayed_work(&mad_agent_priv->timed_work); spin_lock_irq(&port_priv->reg_lock); remove_mad_reg_req(mad_agent_priv); spin_unlock_irq(&port_priv->reg_lock); xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid); flush_workqueue(port_priv->wq); deref_mad_agent(mad_agent_priv); wait_for_completion(&mad_agent_priv->comp); ib_cancel_rmpp_recvs(mad_agent_priv); ib_mad_agent_security_cleanup(&mad_agent_priv->agent); kfree(mad_agent_priv->reg_req); kfree_rcu(mad_agent_priv, rcu); } /* * ib_unregister_mad_agent - Unregisters a client from using MAD services * * Context: Process context. 
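 *
 * Note: this call blocks until every outstanding reference to the agent
 * (in-flight sends and receives, queued local completions) has been
 * dropped, so it should not be called from the agent's own handlers.
 *
 * For orientation only, a minimal hypothetical caller pairs the register
 * and unregister entry points roughly as follows; my_send_handler,
 * my_recv_handler and my_ctx are placeholders, not symbols of this file:
 *
 *	struct ib_mad_agent *agent;
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, NULL, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_ctx, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *	...
 *	ib_unregister_mad_agent(agent);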
*/ void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent) { struct ib_mad_agent_private *mad_agent_priv; mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, agent); unregister_mad_agent(mad_agent_priv); } EXPORT_SYMBOL(ib_unregister_mad_agent); static void dequeue_mad(struct ib_mad_list_head *mad_list) { struct ib_mad_queue *mad_queue; unsigned long flags; mad_queue = mad_list->mad_queue; spin_lock_irqsave(&mad_queue->lock, flags); list_del(&mad_list->list); mad_queue->count--; spin_unlock_irqrestore(&mad_queue->lock, flags); } static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid, u16 pkey_index, u32 port_num, struct ib_wc *wc) { memset(wc, 0, sizeof *wc); wc->wr_cqe = cqe; wc->status = IB_WC_SUCCESS; wc->opcode = IB_WC_RECV; wc->pkey_index = pkey_index; wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh); wc->src_qp = IB_QP0; wc->qp = qp; wc->slid = slid; wc->sl = 0; wc->dlid_path_bits = 0; wc->port_num = port_num; } static size_t mad_priv_size(const struct ib_mad_private *mp) { return sizeof(struct ib_mad_private) + mp->mad_size; } static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags) { size_t size = sizeof(struct ib_mad_private) + mad_size; struct ib_mad_private *ret = kzalloc(size, flags); if (ret) ret->mad_size = mad_size; return ret; } static size_t port_mad_size(const struct ib_mad_port_private *port_priv) { return rdma_max_mad_size(port_priv->device, port_priv->port_num); } static size_t mad_priv_dma_size(const struct ib_mad_private *mp) { return sizeof(struct ib_grh) + mp->mad_size; } /* * Return 0 if SMP is to be sent * Return 1 if SMP was consumed locally (whether or not solicited) * Return < 0 if error */ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_send_wr_private *mad_send_wr) { int ret = 0; struct ib_smp *smp = mad_send_wr->send_buf.mad; struct opa_smp *opa_smp = (struct opa_smp *)smp; unsigned long flags; struct ib_mad_local_private *local; struct ib_mad_private *mad_priv; struct ib_mad_port_private *port_priv; struct ib_mad_agent_private *recv_mad_agent = NULL; struct ib_device *device = mad_agent_priv->agent.device; u32 port_num; struct ib_wc mad_wc; struct ib_ud_wr *send_wr = &mad_send_wr->send_wr; size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); u16 out_mad_pkey_index = 0; u16 drslid; bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, mad_agent_priv->qp_info->port_priv->port_num); if (rdma_cap_ib_switch(device) && smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) port_num = send_wr->port_num; else port_num = mad_agent_priv->agent.port_num; /* * Directed route handling starts if the initial LID routed part of * a request or the ending LID routed part of a response is empty. * If we are at the start of the LID routed part, don't update the * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec. */ if (opa && smp->class_version == OPA_SM_CLASS_VERSION) { u32 opa_drslid; trace_ib_mad_handle_out_opa_smi(opa_smp); if ((opa_get_smp_direction(opa_smp) ? 
opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) == OPA_LID_PERMISSIVE && opa_smi_handle_dr_smp_send(opa_smp, rdma_cap_ib_switch(device), port_num) == IB_SMI_DISCARD) { ret = -EINVAL; dev_err(&device->dev, "OPA Invalid directed route\n"); goto out; } opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid); if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) && opa_drslid & 0xffff0000) { ret = -EINVAL; dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n", opa_drslid); goto out; } drslid = (u16)(opa_drslid & 0x0000ffff); /* Check to post send on QP or process locally */ if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD && opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD) goto out; } else { trace_ib_mad_handle_out_ib_smi(smp); if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == IB_LID_PERMISSIVE && smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) == IB_SMI_DISCARD) { ret = -EINVAL; dev_err(&device->dev, "Invalid directed route\n"); goto out; } drslid = be16_to_cpu(smp->dr_slid); /* Check to post send on QP or process locally */ if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD && smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD) goto out; } local = kmalloc(sizeof *local, GFP_ATOMIC); if (!local) { ret = -ENOMEM; goto out; } local->mad_priv = NULL; local->recv_mad_agent = NULL; mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC); if (!mad_priv) { ret = -ENOMEM; kfree(local); goto out; } build_smp_wc(mad_agent_priv->agent.qp, send_wr->wr.wr_cqe, drslid, send_wr->pkey_index, send_wr->port_num, &mad_wc); if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) { mad_wc.byte_len = mad_send_wr->send_buf.hdr_len + mad_send_wr->send_buf.data_len + sizeof(struct ib_grh); } /* No GRH for DR SMP */ ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL, (const struct ib_mad *)smp, (struct ib_mad *)mad_priv->mad, &mad_size, &out_mad_pkey_index); switch (ret) { case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) && mad_agent_priv->agent.recv_handler) { local->mad_priv = mad_priv; local->recv_mad_agent = mad_agent_priv; /* * Reference MAD agent until receive * side of local completion handled */ refcount_inc(&mad_agent_priv->refcount); } else kfree(mad_priv); break; case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED: kfree(mad_priv); break; case IB_MAD_RESULT_SUCCESS: /* Treat like an incoming receive MAD */ port_priv = ib_get_mad_port(mad_agent_priv->agent.device, mad_agent_priv->agent.port_num); if (port_priv) { memcpy(mad_priv->mad, smp, mad_priv->mad_size); recv_mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)mad_priv->mad); } if (!port_priv || !recv_mad_agent) { /* * No receiving agent so drop packet and * generate send completion. 
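 * The buffer is freed immediately below; local_completions() will still
 * invoke the send handler for this work request.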
*/ kfree(mad_priv); break; } local->mad_priv = mad_priv; local->recv_mad_agent = recv_mad_agent; break; default: kfree(mad_priv); kfree(local); ret = -EINVAL; goto out; } local->mad_send_wr = mad_send_wr; if (opa) { local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index; local->return_wc_byte_len = mad_size; } /* Reference MAD agent until send side of local completion handled */ refcount_inc(&mad_agent_priv->refcount); /* Queue local completion to local list */ spin_lock_irqsave(&mad_agent_priv->lock, flags); list_add_tail(&local->completion_list, &mad_agent_priv->local_list); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); queue_work(mad_agent_priv->qp_info->port_priv->wq, &mad_agent_priv->local_work); ret = 1; out: return ret; } static int get_pad_size(int hdr_len, int data_len, size_t mad_size) { int seg_size, pad; seg_size = mad_size - hdr_len; if (data_len && seg_size) { pad = seg_size - data_len % seg_size; return pad == seg_size ? 0 : pad; } else return seg_size; } static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_segment *s, *t; list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) { list_del(&s->list); kfree(s); } } static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, size_t mad_size, gfp_t gfp_mask) { struct ib_mad_send_buf *send_buf = &send_wr->send_buf; struct ib_rmpp_mad *rmpp_mad = send_buf->mad; struct ib_rmpp_segment *seg = NULL; int left, seg_size, pad; send_buf->seg_size = mad_size - send_buf->hdr_len; send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR; seg_size = send_buf->seg_size; pad = send_wr->pad; /* Allocate data segments. */ for (left = send_buf->data_len + pad; left > 0; left -= seg_size) { seg = kmalloc(sizeof(*seg) + seg_size, gfp_mask); if (!seg) { free_send_rmpp_list(send_wr); return -ENOMEM; } seg->num = ++send_buf->seg_count; list_add_tail(&seg->list, &send_wr->rmpp_list); } /* Zero any padding */ if (pad) memset(seg->data + seg_size - pad, 0, pad); rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv-> agent.rmpp_version; rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA; ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); send_wr->cur_seg = container_of(send_wr->rmpp_list.next, struct ib_rmpp_segment, list); send_wr->last_ack_seg = send_wr->cur_seg; return 0; } int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent) { return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP); } EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent); struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent, u32 remote_qpn, u16 pkey_index, int rmpp_active, int hdr_len, int data_len, gfp_t gfp_mask, u8 base_version) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_wr_private *mad_send_wr; int pad, message_size, ret, size; void *buf; size_t mad_size; bool opa; mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, agent); opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num); if (opa && base_version == OPA_MGMT_BASE_VERSION) mad_size = sizeof(struct opa_mad); else mad_size = sizeof(struct ib_mad); pad = get_pad_size(hdr_len, data_len, mad_size); message_size = hdr_len + data_len + pad; if (ib_mad_kernel_rmpp_agent(mad_agent)) { if (!rmpp_active && message_size > mad_size) return ERR_PTR(-EINVAL); } else if (rmpp_active || message_size > mad_size) return ERR_PTR(-EINVAL); size = rmpp_active ? 
hdr_len : mad_size; buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask); if (!buf) return ERR_PTR(-ENOMEM); mad_send_wr = buf + size; INIT_LIST_HEAD(&mad_send_wr->rmpp_list); mad_send_wr->send_buf.mad = buf; mad_send_wr->send_buf.hdr_len = hdr_len; mad_send_wr->send_buf.data_len = data_len; mad_send_wr->pad = pad; mad_send_wr->mad_agent_priv = mad_agent_priv; mad_send_wr->sg_list[0].length = hdr_len; mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey; /* OPA MADs don't have to be the full 2048 bytes */ if (opa && base_version == OPA_MGMT_BASE_VERSION && data_len < mad_size - hdr_len) mad_send_wr->sg_list[1].length = data_len; else mad_send_wr->sg_list[1].length = mad_size - hdr_len; mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey; mad_send_wr->mad_list.cqe.done = ib_mad_send_done; mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list; mad_send_wr->send_wr.wr.num_sge = 2; mad_send_wr->send_wr.wr.opcode = IB_WR_SEND; mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED; mad_send_wr->send_wr.remote_qpn = remote_qpn; mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY; mad_send_wr->send_wr.pkey_index = pkey_index; if (rmpp_active) { ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask); if (ret) { kfree(buf); return ERR_PTR(ret); } } mad_send_wr->send_buf.mad_agent = mad_agent; refcount_inc(&mad_agent_priv->refcount); return &mad_send_wr->send_buf; } EXPORT_SYMBOL(ib_create_send_mad); int ib_get_mad_data_offset(u8 mgmt_class) { if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) return IB_MGMT_SA_HDR; else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || (mgmt_class == IB_MGMT_CLASS_BIS)) return IB_MGMT_DEVICE_HDR; else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) return IB_MGMT_VENDOR_HDR; else return IB_MGMT_MAD_HDR; } EXPORT_SYMBOL(ib_get_mad_data_offset); int ib_is_mad_class_rmpp(u8 mgmt_class) { if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) || (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || (mgmt_class == IB_MGMT_CLASS_BIS) || ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))) return 1; return 0; } EXPORT_SYMBOL(ib_is_mad_class_rmpp); void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num) { struct ib_mad_send_wr_private *mad_send_wr; struct list_head *list; mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, send_buf); list = &mad_send_wr->cur_seg->list; if (mad_send_wr->cur_seg->num < seg_num) { list_for_each_entry(mad_send_wr->cur_seg, list, list) if (mad_send_wr->cur_seg->num == seg_num) break; } else if (mad_send_wr->cur_seg->num > seg_num) { list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list) if (mad_send_wr->cur_seg->num == seg_num) break; } return mad_send_wr->cur_seg->data; } EXPORT_SYMBOL(ib_get_rmpp_segment); static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr) { if (mad_send_wr->send_buf.seg_count) return ib_get_rmpp_segment(&mad_send_wr->send_buf, mad_send_wr->seg_num); else return mad_send_wr->send_buf.mad + mad_send_wr->send_buf.hdr_len; } void ib_free_send_mad(struct ib_mad_send_buf *send_buf) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_wr_private *mad_send_wr; mad_agent_priv = container_of(send_buf->mad_agent, struct ib_mad_agent_private, agent); mad_send_wr = container_of(send_buf, struct 
ib_mad_send_wr_private, send_buf); free_send_rmpp_list(mad_send_wr); kfree(send_buf->mad); deref_mad_agent(mad_agent_priv); } EXPORT_SYMBOL(ib_free_send_mad); int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_mad_qp_info *qp_info; struct list_head *list; struct ib_mad_agent *mad_agent; struct ib_sge *sge; unsigned long flags; int ret; /* Set WR ID to find mad_send_wr upon completion */ qp_info = mad_send_wr->mad_agent_priv->qp_info; mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; mad_send_wr->mad_list.cqe.done = ib_mad_send_done; mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; mad_agent = mad_send_wr->send_buf.mad_agent; sge = mad_send_wr->sg_list; sge[0].addr = ib_dma_map_single(mad_agent->device, mad_send_wr->send_buf.mad, sge[0].length, DMA_TO_DEVICE); if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr))) return -ENOMEM; mad_send_wr->header_mapping = sge[0].addr; sge[1].addr = ib_dma_map_single(mad_agent->device, ib_get_payload(mad_send_wr), sge[1].length, DMA_TO_DEVICE); if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) { ib_dma_unmap_single(mad_agent->device, mad_send_wr->header_mapping, sge[0].length, DMA_TO_DEVICE); return -ENOMEM; } mad_send_wr->payload_mapping = sge[1].addr; spin_lock_irqsave(&qp_info->send_queue.lock, flags); if (qp_info->send_queue.count < qp_info->send_queue.max_active) { trace_ib_mad_ib_send_mad(mad_send_wr, qp_info); ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr, NULL); list = &qp_info->send_queue.list; } else { ret = 0; list = &qp_info->overflow_list; } if (!ret) { qp_info->send_queue.count++; list_add_tail(&mad_send_wr->mad_list.list, list); } spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); if (ret) { ib_dma_unmap_single(mad_agent->device, mad_send_wr->header_mapping, sge[0].length, DMA_TO_DEVICE); ib_dma_unmap_single(mad_agent->device, mad_send_wr->payload_mapping, sge[1].length, DMA_TO_DEVICE); } return ret; } /* * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated * with the registered client */ int ib_post_send_mad(struct ib_mad_send_buf *send_buf, struct ib_mad_send_buf **bad_send_buf) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_buf *next_send_buf; struct ib_mad_send_wr_private *mad_send_wr; unsigned long flags; int ret = -EINVAL; /* Walk list of send WRs and post each on send list */ for (; send_buf; send_buf = next_send_buf) { mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, send_buf); mad_agent_priv = mad_send_wr->mad_agent_priv; ret = ib_mad_enforce_security(mad_agent_priv, mad_send_wr->send_wr.pkey_index); if (ret) goto error; if (!send_buf->mad_agent->send_handler || (send_buf->timeout_ms && !send_buf->mad_agent->recv_handler)) { ret = -EINVAL; goto error; } if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) { if (mad_agent_priv->agent.rmpp_version) { ret = -EINVAL; goto error; } } /* * Save pointer to next work request to post in case the * current one completes, and the user modifies the work * request associated with the completion */ next_send_buf = send_buf->next; mad_send_wr->send_wr.ah = send_buf->ah; if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { ret = handle_outgoing_dr_smp(mad_agent_priv, mad_send_wr); if (ret < 0) /* error */ goto error; else if (ret == 1) /* locally consumed */ continue; } mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid; /* Timeout will be updated after send completes */ 
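		/*
		 * The value stored below is a relative interval in jiffies;
		 * wait_for_response() turns it into an absolute deadline once
		 * the send work request completes.  A zero timeout means no
		 * response is expected, so the request completes as soon as
		 * the send itself does.
		 */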
mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms); mad_send_wr->max_retries = send_buf->retries; mad_send_wr->retries_left = send_buf->retries; send_buf->retries = 0; /* Reference for work request to QP + response */ mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); mad_send_wr->status = IB_WC_SUCCESS; /* Reference MAD agent until send completes */ refcount_inc(&mad_agent_priv->refcount); spin_lock_irqsave(&mad_agent_priv->lock, flags); list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->send_list); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { ret = ib_send_rmpp_mad(mad_send_wr); if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED) ret = ib_send_mad(mad_send_wr); } else ret = ib_send_mad(mad_send_wr); if (ret < 0) { /* Fail send request */ spin_lock_irqsave(&mad_agent_priv->lock, flags); list_del(&mad_send_wr->agent_list); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); deref_mad_agent(mad_agent_priv); goto error; } } return 0; error: if (bad_send_buf) *bad_send_buf = send_buf; return ret; } EXPORT_SYMBOL(ib_post_send_mad); /* * ib_free_recv_mad - Returns data buffers used to receive * a MAD to the access layer */ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc) { struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf; struct ib_mad_private_header *mad_priv_hdr; struct ib_mad_private *priv; struct list_head free_list; INIT_LIST_HEAD(&free_list); list_splice_init(&mad_recv_wc->rmpp_list, &free_list); list_for_each_entry_safe(mad_recv_buf, temp_recv_buf, &free_list, list) { mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc, recv_buf); mad_priv_hdr = container_of(mad_recv_wc, struct ib_mad_private_header, recv_wc); priv = container_of(mad_priv_hdr, struct ib_mad_private, header); kfree(priv); } } EXPORT_SYMBOL(ib_free_recv_mad); static int method_in_use(struct ib_mad_mgmt_method_table **method, struct ib_mad_reg_req *mad_reg_req) { int i; for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) { if ((*method)->agent[i]) { pr_err("Method %d already in use\n", i); return -EINVAL; } } return 0; } static int allocate_method_table(struct ib_mad_mgmt_method_table **method) { /* Allocate management method table */ *method = kzalloc(sizeof **method, GFP_ATOMIC); return (*method) ? 0 : (-ENOMEM); } /* * Check to see if there are any methods still in use */ static int check_method_table(struct ib_mad_mgmt_method_table *method) { int i; for (i = 0; i < IB_MGMT_MAX_METHODS; i++) if (method->agent[i]) return 1; return 0; } /* * Check to see if there are any method tables for this class still in use */ static int check_class_table(struct ib_mad_mgmt_class_table *class) { int i; for (i = 0; i < MAX_MGMT_CLASS; i++) if (class->method_table[i]) return 1; return 0; } static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class) { int i; for (i = 0; i < MAX_MGMT_OUI; i++) if (vendor_class->method_table[i]) return 1; return 0; } static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class, const char *oui) { int i; for (i = 0; i < MAX_MGMT_OUI; i++) /* Is there matching OUI for this vendor class ? 
*/ if (!memcmp(vendor_class->oui[i], oui, 3)) return i; return -1; } static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor) { int i; for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++) if (vendor->vendor_class[i]) return 1; return 0; } static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method, struct ib_mad_agent_private *agent) { int i; /* Remove any methods for this mad agent */ for (i = 0; i < IB_MGMT_MAX_METHODS; i++) if (method->agent[i] == agent) method->agent[i] = NULL; } static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, struct ib_mad_agent_private *agent_priv, u8 mgmt_class) { struct ib_mad_port_private *port_priv; struct ib_mad_mgmt_class_table **class; struct ib_mad_mgmt_method_table **method; int i, ret; port_priv = agent_priv->qp_info->port_priv; class = &port_priv->version[mad_reg_req->mgmt_class_version].class; if (!*class) { /* Allocate management class table for "new" class version */ *class = kzalloc(sizeof **class, GFP_ATOMIC); if (!*class) { ret = -ENOMEM; goto error1; } /* Allocate method table for this management class */ method = &(*class)->method_table[mgmt_class]; if ((ret = allocate_method_table(method))) goto error2; } else { method = &(*class)->method_table[mgmt_class]; if (!*method) { /* Allocate method table for this management class */ if ((ret = allocate_method_table(method))) goto error1; } } /* Now, make sure methods are not already in use */ if (method_in_use(method, mad_reg_req)) goto error3; /* Finally, add in methods being registered */ for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) (*method)->agent[i] = agent_priv; return 0; error3: /* Remove any methods for this mad agent */ remove_methods_mad_agent(*method, agent_priv); /* Now, check to see if there are any methods in use */ if (!check_method_table(*method)) { /* If not, release management method table */ kfree(*method); *method = NULL; } ret = -EINVAL; goto error1; error2: kfree(*class); *class = NULL; error1: return ret; } static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, struct ib_mad_agent_private *agent_priv) { struct ib_mad_port_private *port_priv; struct ib_mad_mgmt_vendor_class_table **vendor_table; struct ib_mad_mgmt_vendor_class_table *vendor = NULL; struct ib_mad_mgmt_vendor_class *vendor_class = NULL; struct ib_mad_mgmt_method_table **method; int i, ret = -ENOMEM; u8 vclass; /* "New" vendor (with OUI) class */ vclass = vendor_class_index(mad_reg_req->mgmt_class); port_priv = agent_priv->qp_info->port_priv; vendor_table = &port_priv->version[ mad_reg_req->mgmt_class_version].vendor; if (!*vendor_table) { /* Allocate mgmt vendor class table for "new" class version */ vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); if (!vendor) goto error1; *vendor_table = vendor; } if (!(*vendor_table)->vendor_class[vclass]) { /* Allocate table for this management vendor class */ vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); if (!vendor_class) goto error2; (*vendor_table)->vendor_class[vclass] = vendor_class; } for (i = 0; i < MAX_MGMT_OUI; i++) { /* Is there matching OUI for this vendor class ? */ if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i], mad_reg_req->oui, 3)) { method = &(*vendor_table)->vendor_class[ vclass]->method_table[i]; if (!*method) goto error3; goto check_in_use; } } for (i = 0; i < MAX_MGMT_OUI; i++) { /* OUI slot available ? 
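 * A slot whose three OUI bytes are still zero is unclaimed and may be
 * used for this registration.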
*/ if (!is_vendor_oui((*vendor_table)->vendor_class[ vclass]->oui[i])) { method = &(*vendor_table)->vendor_class[ vclass]->method_table[i]; /* Allocate method table for this OUI */ if (!*method) { ret = allocate_method_table(method); if (ret) goto error3; } memcpy((*vendor_table)->vendor_class[vclass]->oui[i], mad_reg_req->oui, 3); goto check_in_use; } } dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n"); goto error3; check_in_use: /* Now, make sure methods are not already in use */ if (method_in_use(method, mad_reg_req)) goto error4; /* Finally, add in methods being registered */ for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) (*method)->agent[i] = agent_priv; return 0; error4: /* Remove any methods for this mad agent */ remove_methods_mad_agent(*method, agent_priv); /* Now, check to see if there are any methods in use */ if (!check_method_table(*method)) { /* If not, release management method table */ kfree(*method); *method = NULL; } ret = -EINVAL; error3: if (vendor_class) { (*vendor_table)->vendor_class[vclass] = NULL; kfree(vendor_class); } error2: if (vendor) { *vendor_table = NULL; kfree(vendor); } error1: return ret; } static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) { struct ib_mad_port_private *port_priv; struct ib_mad_mgmt_class_table *class; struct ib_mad_mgmt_method_table *method; struct ib_mad_mgmt_vendor_class_table *vendor; struct ib_mad_mgmt_vendor_class *vendor_class; int index; u8 mgmt_class; /* * Was MAD registration request supplied * with original registration ? */ if (!agent_priv->reg_req) goto out; port_priv = agent_priv->qp_info->port_priv; mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class); class = port_priv->version[ agent_priv->reg_req->mgmt_class_version].class; if (!class) goto vendor_check; method = class->method_table[mgmt_class]; if (method) { /* Remove any methods for this mad agent */ remove_methods_mad_agent(method, agent_priv); /* Now, check to see if there are any methods still in use */ if (!check_method_table(method)) { /* If not, release management method table */ kfree(method); class->method_table[mgmt_class] = NULL; /* Any management classes left ? */ if (!check_class_table(class)) { /* If not, release management class table */ kfree(class); port_priv->version[ agent_priv->reg_req-> mgmt_class_version].class = NULL; } } } vendor_check: if (!is_vendor_class(mgmt_class)) goto out; /* normalize mgmt_class to vendor range 2 */ mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class); vendor = port_priv->version[ agent_priv->reg_req->mgmt_class_version].vendor; if (!vendor) goto out; vendor_class = vendor->vendor_class[mgmt_class]; if (vendor_class) { index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui); if (index < 0) goto out; method = vendor_class->method_table[index]; if (method) { /* Remove any methods for this mad agent */ remove_methods_mad_agent(method, agent_priv); /* * Now, check to see if there are * any methods still in use */ if (!check_method_table(method)) { /* If not, release management method table */ kfree(method); vendor_class->method_table[index] = NULL; memset(vendor_class->oui[index], 0, 3); /* Any OUIs left ? */ if (!check_vendor_class(vendor_class)) { /* If not, release vendor class table */ kfree(vendor_class); vendor->vendor_class[mgmt_class] = NULL; /* Any other vendor classes left ? */ if (!check_vendor_table(vendor)) { kfree(vendor); port_priv->version[ agent_priv->reg_req-> mgmt_class_version]. 
vendor = NULL; } } } } } out: return; } static struct ib_mad_agent_private * find_mad_agent(struct ib_mad_port_private *port_priv, const struct ib_mad_hdr *mad_hdr) { struct ib_mad_agent_private *mad_agent = NULL; unsigned long flags; if (ib_response_mad(mad_hdr)) { u32 hi_tid; /* * Routing is based on high 32 bits of transaction ID * of MAD. */ hi_tid = be64_to_cpu(mad_hdr->tid) >> 32; rcu_read_lock(); mad_agent = xa_load(&ib_mad_clients, hi_tid); if (mad_agent && !refcount_inc_not_zero(&mad_agent->refcount)) mad_agent = NULL; rcu_read_unlock(); } else { struct ib_mad_mgmt_class_table *class; struct ib_mad_mgmt_method_table *method; struct ib_mad_mgmt_vendor_class_table *vendor; struct ib_mad_mgmt_vendor_class *vendor_class; const struct ib_vendor_mad *vendor_mad; int index; spin_lock_irqsave(&port_priv->reg_lock, flags); /* * Routing is based on version, class, and method * For "newer" vendor MADs, also based on OUI */ if (mad_hdr->class_version >= MAX_MGMT_VERSION) goto out; if (!is_vendor_class(mad_hdr->mgmt_class)) { class = port_priv->version[ mad_hdr->class_version].class; if (!class) goto out; if (convert_mgmt_class(mad_hdr->mgmt_class) >= ARRAY_SIZE(class->method_table)) goto out; method = class->method_table[convert_mgmt_class( mad_hdr->mgmt_class)]; if (method) mad_agent = method->agent[mad_hdr->method & ~IB_MGMT_METHOD_RESP]; } else { vendor = port_priv->version[ mad_hdr->class_version].vendor; if (!vendor) goto out; vendor_class = vendor->vendor_class[vendor_class_index( mad_hdr->mgmt_class)]; if (!vendor_class) goto out; /* Find matching OUI */ vendor_mad = (const struct ib_vendor_mad *)mad_hdr; index = find_vendor_oui(vendor_class, vendor_mad->oui); if (index == -1) goto out; method = vendor_class->method_table[index]; if (method) { mad_agent = method->agent[mad_hdr->method & ~IB_MGMT_METHOD_RESP]; } } if (mad_agent) refcount_inc(&mad_agent->refcount); out: spin_unlock_irqrestore(&port_priv->reg_lock, flags); } if (mad_agent && !mad_agent->agent.recv_handler) { dev_notice(&port_priv->device->dev, "No receive handler for client %p on port %u\n", &mad_agent->agent, port_priv->port_num); deref_mad_agent(mad_agent); mad_agent = NULL; } return mad_agent; } static int validate_mad(const struct ib_mad_hdr *mad_hdr, const struct ib_mad_qp_info *qp_info, bool opa) { int valid = 0; u32 qp_num = qp_info->qp->qp_num; /* Make sure MAD base version is understood */ if (mad_hdr->base_version != IB_MGMT_BASE_VERSION && (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) { pr_err("MAD received with unsupported base version %u %s\n", mad_hdr->base_version, opa ? 
"(opa)" : ""); goto out; } /* Filter SMI packets sent to other than QP0 */ if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { if (qp_num == 0) valid = 1; } else { /* CM attributes other than ClassPortInfo only use Send method */ if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) && (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) && (mad_hdr->method != IB_MGMT_METHOD_SEND)) goto out; /* Filter GSI packets sent to QP0 */ if (qp_num != 0) valid = 1; } out: return valid; } static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv, const struct ib_mad_hdr *mad_hdr) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *)mad_hdr; return !mad_agent_priv->agent.rmpp_version || !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) || !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE) || (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); } static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, const struct ib_mad_recv_wc *rwc) { return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == rwc->recv_buf.mad->mad_hdr.mgmt_class; } static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, const struct ib_mad_send_wr_private *wr, const struct ib_mad_recv_wc *rwc) { struct rdma_ah_attr attr; u8 send_resp, rcv_resp; union ib_gid sgid; struct ib_device *device = mad_agent_priv->agent.device; u32 port_num = mad_agent_priv->agent.port_num; u8 lmc; bool has_grh; send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr); if (send_resp == rcv_resp) /* both requests, or both responses. GIDs different */ return 0; if (rdma_query_ah(wr->send_buf.ah, &attr)) /* Assume not equal, to avoid false positives. */ return 0; has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH); if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH)) /* one has GID, other does not. Assume different */ return 0; if (!send_resp && rcv_resp) { /* is request/response. */ if (!has_grh) { if (ib_get_cached_lmc(device, port_num, &lmc)) return 0; return (!lmc || !((rdma_ah_get_path_bits(&attr) ^ rwc->wc->dlid_path_bits) & ((1 << lmc) - 1))); } else { const struct ib_global_route *grh = rdma_ah_read_grh(&attr); if (rdma_query_gid(device, port_num, grh->sgid_index, &sgid)) return 0; return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw, 16); } } if (!has_grh) return rdma_ah_get_dlid(&attr) == rwc->wc->slid; else return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw, rwc->recv_buf.grh->sgid.raw, 16); } static inline int is_direct(u8 class) { return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE); } struct ib_mad_send_wr_private* ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, const struct ib_mad_recv_wc *wc) { struct ib_mad_send_wr_private *wr; const struct ib_mad_hdr *mad_hdr; mad_hdr = &wc->recv_buf.mad->mad_hdr; list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { if ((wr->tid == mad_hdr->tid) && rcv_has_same_class(wr, wc) && /* * Don't check GID for direct routed MADs. * These might have permissive LIDs. */ (is_direct(mad_hdr->mgmt_class) || rcv_has_same_gid(mad_agent_priv, wr, wc))) return (wr->status == IB_WC_SUCCESS) ? 
wr : NULL; } /* * It's possible to receive the response before we've * been notified that the send has completed */ list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) && wr->tid == mad_hdr->tid && wr->timeout && rcv_has_same_class(wr, wc) && /* * Don't check GID for direct routed MADs. * These might have permissive LIDs. */ (is_direct(mad_hdr->mgmt_class) || rcv_has_same_gid(mad_agent_priv, wr, wc))) /* Verify request has not been canceled */ return (wr->status == IB_WC_SUCCESS) ? wr : NULL; } return NULL; } void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) { mad_send_wr->timeout = 0; if (mad_send_wr->refcount == 1) list_move_tail(&mad_send_wr->agent_list, &mad_send_wr->mad_agent_priv->done_list); } static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc mad_send_wc; unsigned long flags; int ret; INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); ret = ib_mad_enforce_security(mad_agent_priv, mad_recv_wc->wc->pkey_index); if (ret) { ib_free_recv_mad(mad_recv_wc); deref_mad_agent(mad_agent_priv); return; } list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, mad_recv_wc); if (!mad_recv_wc) { deref_mad_agent(mad_agent_priv); return; } } /* Complete corresponding request */ if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { spin_lock_irqsave(&mad_agent_priv->lock, flags); mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); if (!mad_send_wr) { spin_unlock_irqrestore(&mad_agent_priv->lock, flags); if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class) && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) { /* user rmpp is in effect * and this is an active RMPP MAD */ mad_agent_priv->agent.recv_handler( &mad_agent_priv->agent, NULL, mad_recv_wc); deref_mad_agent(mad_agent_priv); } else { /* not user rmpp, revert to normal behavior and * drop the mad */ ib_free_recv_mad(mad_recv_wc); deref_mad_agent(mad_agent_priv); return; } } else { ib_mark_mad_done(mad_send_wr); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); /* Defined behavior is to complete response before request */ mad_agent_priv->agent.recv_handler( &mad_agent_priv->agent, &mad_send_wr->send_buf, mad_recv_wc); deref_mad_agent(mad_agent_priv); mad_send_wc.status = IB_WC_SUCCESS; mad_send_wc.vendor_err = 0; mad_send_wc.send_buf = &mad_send_wr->send_buf; ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); } } else { mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL, mad_recv_wc); deref_mad_agent(mad_agent_priv); } } static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, const struct ib_mad_qp_info *qp_info, const struct ib_wc *wc, u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response) { enum smi_forward_action retsmi; struct ib_smp *smp = (struct ib_smp *)recv->mad; trace_ib_mad_handle_ib_smi(smp); if (smi_handle_dr_smp_recv(smp, rdma_cap_ib_switch(port_priv->device), port_num, port_priv->device->phys_port_cnt) == IB_SMI_DISCARD) return IB_SMI_DISCARD; retsmi = smi_check_forward_dr_smp(smp); if (retsmi == IB_SMI_LOCAL) return IB_SMI_HANDLE; if (retsmi == IB_SMI_SEND) { /* don't forward */ if 
(smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(port_priv->device), port_num) == IB_SMI_DISCARD) return IB_SMI_DISCARD; if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) return IB_SMI_DISCARD; } else if (rdma_cap_ib_switch(port_priv->device)) { /* forward case for switches */ memcpy(response, recv, mad_priv_size(response)); response->header.recv_wc.wc = &response->header.wc; response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; response->header.recv_wc.recv_buf.grh = &response->grh; agent_send_response((const struct ib_mad_hdr *)response->mad, &response->grh, wc, port_priv->device, smi_get_fwd_port(smp), qp_info->qp->qp_num, response->mad_size, false); return IB_SMI_DISCARD; } return IB_SMI_HANDLE; } static bool generate_unmatched_resp(const struct ib_mad_private *recv, struct ib_mad_private *response, size_t *resp_len, bool opa) { const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad; struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad; if (recv_hdr->method == IB_MGMT_METHOD_GET || recv_hdr->method == IB_MGMT_METHOD_SET) { memcpy(response, recv, mad_priv_size(response)); response->header.recv_wc.wc = &response->header.wc; response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; response->header.recv_wc.recv_buf.grh = &response->grh; resp_hdr->method = IB_MGMT_METHOD_GET_RESP; resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) resp_hdr->status |= IB_SMP_DIRECTION; if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) { if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) *resp_len = opa_get_smp_header_size( (struct opa_smp *)recv->mad); else *resp_len = sizeof(struct ib_mad_hdr); } return true; } else { return false; } } static enum smi_action handle_opa_smi(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info, struct ib_wc *wc, u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response) { enum smi_forward_action retsmi; struct opa_smp *smp = (struct opa_smp *)recv->mad; trace_ib_mad_handle_opa_smi(smp); if (opa_smi_handle_dr_smp_recv(smp, rdma_cap_ib_switch(port_priv->device), port_num, port_priv->device->phys_port_cnt) == IB_SMI_DISCARD) return IB_SMI_DISCARD; retsmi = opa_smi_check_forward_dr_smp(smp); if (retsmi == IB_SMI_LOCAL) return IB_SMI_HANDLE; if (retsmi == IB_SMI_SEND) { /* don't forward */ if (opa_smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(port_priv->device), port_num) == IB_SMI_DISCARD) return IB_SMI_DISCARD; if (opa_smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) return IB_SMI_DISCARD; } else if (rdma_cap_ib_switch(port_priv->device)) { /* forward case for switches */ memcpy(response, recv, mad_priv_size(response)); response->header.recv_wc.wc = &response->header.wc; response->header.recv_wc.recv_buf.opa_mad = (struct opa_mad *)response->mad; response->header.recv_wc.recv_buf.grh = &response->grh; agent_send_response((const struct ib_mad_hdr *)response->mad, &response->grh, wc, port_priv->device, opa_smi_get_fwd_port(smp), qp_info->qp->qp_num, recv->header.wc.byte_len, true); return IB_SMI_DISCARD; } return IB_SMI_HANDLE; } static enum smi_action handle_smi(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info, struct ib_wc *wc, u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response, bool opa) { struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr 
*)recv->mad; if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && mad_hdr->class_version == OPA_SM_CLASS_VERSION) return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, response); return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); } static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct ib_mad_port_private *port_priv = cq->cq_context; struct ib_mad_list_head *mad_list = container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); struct ib_mad_qp_info *qp_info; struct ib_mad_private_header *mad_priv_hdr; struct ib_mad_private *recv, *response = NULL; struct ib_mad_agent_private *mad_agent; u32 port_num; int ret = IB_MAD_RESULT_SUCCESS; size_t mad_size; u16 resp_mad_pkey_index = 0; bool opa; if (list_empty_careful(&port_priv->port_list)) return; if (wc->status != IB_WC_SUCCESS) { /* * Receive errors indicate that the QP has entered the error * state - error handling/shutdown code will cleanup */ return; } qp_info = mad_list->mad_queue->qp_info; dequeue_mad(mad_list); opa = rdma_cap_opa_mad(qp_info->port_priv->device, qp_info->port_priv->port_num); mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, mad_list); recv = container_of(mad_priv_hdr, struct ib_mad_private, header); ib_dma_unmap_single(port_priv->device, recv->header.mapping, mad_priv_dma_size(recv), DMA_FROM_DEVICE); /* Setup MAD receive work completion from "normal" work completion */ recv->header.wc = *wc; recv->header.recv_wc.wc = &recv->header.wc; if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) { recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh); recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); } else { recv->header.recv_wc.mad_len = sizeof(struct ib_mad); recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); } recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad; recv->header.recv_wc.recv_buf.grh = &recv->grh; /* Validate MAD */ if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) goto out; trace_ib_mad_recv_done_handler(qp_info, wc, (struct ib_mad_hdr *)recv->mad); mad_size = recv->mad_size; response = alloc_mad_private(mad_size, GFP_KERNEL); if (!response) goto out; if (rdma_cap_ib_switch(port_priv->device)) port_num = wc->port_num; else port_num = port_priv->port_num; if (((struct ib_mad_hdr *)recv->mad)->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { if (handle_smi(port_priv, qp_info, wc, port_num, recv, response, opa) == IB_SMI_DISCARD) goto out; } /* Give driver "right of first refusal" on incoming MAD */ if (port_priv->device->ops.process_mad) { ret = port_priv->device->ops.process_mad( port_priv->device, 0, port_priv->port_num, wc, &recv->grh, (const struct ib_mad *)recv->mad, (struct ib_mad *)response->mad, &mad_size, &resp_mad_pkey_index); if (opa) wc->pkey_index = resp_mad_pkey_index; if (ret & IB_MAD_RESULT_SUCCESS) { if (ret & IB_MAD_RESULT_CONSUMED) goto out; if (ret & IB_MAD_RESULT_REPLY) { agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, port_priv->device, port_num, qp_info->qp->qp_num, mad_size, opa); goto out; } } } mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); if (mad_agent) { trace_ib_mad_recv_done_agent(mad_agent); ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); /* * recv is freed up in error cases in ib_mad_complete_recv * or via recv_handler in ib_mad_complete_recv() */ recv = NULL; } else if ((ret & IB_MAD_RESULT_SUCCESS) && generate_unmatched_resp(recv, response, 
&mad_size, opa)) { agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, port_priv->device, port_num, qp_info->qp->qp_num, mad_size, opa); } out: /* Post another receive request for this QP */ if (response) { ib_mad_post_receive_mads(qp_info, response); kfree(recv); } else ib_mad_post_receive_mads(qp_info, recv); } static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) { struct ib_mad_send_wr_private *mad_send_wr; unsigned long delay; if (list_empty(&mad_agent_priv->wait_list)) { cancel_delayed_work(&mad_agent_priv->timed_work); } else { mad_send_wr = list_entry(mad_agent_priv->wait_list.next, struct ib_mad_send_wr_private, agent_list); if (time_after(mad_agent_priv->timeout, mad_send_wr->timeout)) { mad_agent_priv->timeout = mad_send_wr->timeout; delay = mad_send_wr->timeout - jiffies; if ((long)delay <= 0) delay = 1; mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, &mad_agent_priv->timed_work, delay); } } } static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_wr_private *temp_mad_send_wr; struct list_head *list_item; unsigned long delay; mad_agent_priv = mad_send_wr->mad_agent_priv; list_del(&mad_send_wr->agent_list); delay = mad_send_wr->timeout; mad_send_wr->timeout += jiffies; if (delay) { list_for_each_prev(list_item, &mad_agent_priv->wait_list) { temp_mad_send_wr = list_entry(list_item, struct ib_mad_send_wr_private, agent_list); if (time_after(mad_send_wr->timeout, temp_mad_send_wr->timeout)) break; } } else { list_item = &mad_agent_priv->wait_list; } list_add(&mad_send_wr->agent_list, list_item); /* Reschedule a work item if we have a shorter timeout */ if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, &mad_agent_priv->timed_work, delay); } void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, unsigned long timeout_ms) { mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); wait_for_response(mad_send_wr); } /* * Process a send work completion */ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_send_wc *mad_send_wc) { struct ib_mad_agent_private *mad_agent_priv; unsigned long flags; int ret; mad_agent_priv = mad_send_wr->mad_agent_priv; spin_lock_irqsave(&mad_agent_priv->lock, flags); if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); if (ret == IB_RMPP_RESULT_CONSUMED) goto done; } else ret = IB_RMPP_RESULT_UNHANDLED; if (mad_send_wc->status != IB_WC_SUCCESS && mad_send_wr->status == IB_WC_SUCCESS) { mad_send_wr->status = mad_send_wc->status; mad_send_wr->refcount -= (mad_send_wr->timeout > 0); } if (--mad_send_wr->refcount > 0) { if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && mad_send_wr->status == IB_WC_SUCCESS) { wait_for_response(mad_send_wr); } goto done; } /* Remove send from MAD agent and notify client of completion */ list_del(&mad_send_wr->agent_list); adjust_timeout(mad_agent_priv); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); if (mad_send_wr->status != IB_WC_SUCCESS) mad_send_wc->status = mad_send_wr->status; if (ret == IB_RMPP_RESULT_INTERNAL) ib_rmpp_send_handler(mad_send_wc); else mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, mad_send_wc); /* Release reference on agent taken when sending */ deref_mad_agent(mad_agent_priv); return; done: spin_unlock_irqrestore(&mad_agent_priv->lock, flags); } static void 
ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc) { struct ib_mad_port_private *port_priv = cq->cq_context; struct ib_mad_list_head *mad_list = container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; struct ib_mad_qp_info *qp_info; struct ib_mad_queue *send_queue; struct ib_mad_send_wc mad_send_wc; unsigned long flags; int ret; if (list_empty_careful(&port_priv->port_list)) return; if (wc->status != IB_WC_SUCCESS) { if (!ib_mad_send_error(port_priv, wc)) return; } mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, mad_list); send_queue = mad_list->mad_queue; qp_info = send_queue->qp_info; trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv); trace_ib_mad_send_done_handler(mad_send_wr, wc); retry: ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, mad_send_wr->header_mapping, mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, mad_send_wr->payload_mapping, mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); queued_send_wr = NULL; spin_lock_irqsave(&send_queue->lock, flags); list_del(&mad_list->list); /* Move queued send to the send queue */ if (send_queue->count-- > send_queue->max_active) { mad_list = container_of(qp_info->overflow_list.next, struct ib_mad_list_head, list); queued_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, mad_list); list_move_tail(&mad_list->list, &send_queue->list); } spin_unlock_irqrestore(&send_queue->lock, flags); mad_send_wc.send_buf = &mad_send_wr->send_buf; mad_send_wc.status = wc->status; mad_send_wc.vendor_err = wc->vendor_err; ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); if (queued_send_wr) { trace_ib_mad_send_done_resend(queued_send_wr, qp_info); ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, NULL); if (ret) { dev_err(&port_priv->device->dev, "ib_post_send failed: %d\n", ret); mad_send_wr = queued_send_wr; wc->status = IB_WC_LOC_QP_OP_ERR; goto retry; } } } static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_list_head *mad_list; unsigned long flags; spin_lock_irqsave(&qp_info->send_queue.lock, flags); list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, mad_list); mad_send_wr->retry = 1; } spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); } static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, struct ib_wc *wc) { struct ib_mad_list_head *mad_list = container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info; struct ib_mad_send_wr_private *mad_send_wr; int ret; /* * Send errors will transition the QP to SQE - move * QP to RTS and repost flushed work requests */ mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, mad_list); if (wc->status == IB_WC_WR_FLUSH_ERR) { if (mad_send_wr->retry) { /* Repost send */ mad_send_wr->retry = 0; trace_ib_mad_error_handler(mad_send_wr, qp_info); ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, NULL); if (!ret) return false; } } else { struct ib_qp_attr *attr; /* Transition QP to RTS and fail offending send */ attr = kmalloc(sizeof *attr, GFP_KERNEL); if (attr) { attr->qp_state = IB_QPS_RTS; attr->cur_qp_state = IB_QPS_SQE; ret = ib_modify_qp(qp_info->qp, attr, IB_QP_STATE | IB_QP_CUR_STATE); kfree(attr); if (ret) dev_err(&port_priv->device->dev, "%s - ib_modify_qp to RTS: %d\n", __func__, 
ret); else mark_sends_for_retry(qp_info); } } return true; } static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) { unsigned long flags; struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; struct ib_mad_send_wc mad_send_wc; struct list_head cancel_list; INIT_LIST_HEAD(&cancel_list); spin_lock_irqsave(&mad_agent_priv->lock, flags); list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, &mad_agent_priv->send_list, agent_list) { if (mad_send_wr->status == IB_WC_SUCCESS) { mad_send_wr->status = IB_WC_WR_FLUSH_ERR; mad_send_wr->refcount -= (mad_send_wr->timeout > 0); } } /* Empty wait list to prevent receives from finding a request */ list_splice_init(&mad_agent_priv->wait_list, &cancel_list); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); /* Report all cancelled requests */ mad_send_wc.status = IB_WC_WR_FLUSH_ERR; mad_send_wc.vendor_err = 0; list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, &cancel_list, agent_list) { mad_send_wc.send_buf = &mad_send_wr->send_buf; list_del(&mad_send_wr->agent_list); mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, &mad_send_wc); deref_mad_agent(mad_agent_priv); } } static struct ib_mad_send_wr_private* find_send_wr(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_send_buf *send_buf) { struct ib_mad_send_wr_private *mad_send_wr; list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, agent_list) { if (&mad_send_wr->send_buf == send_buf) return mad_send_wr; } list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, agent_list) { if (is_rmpp_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && &mad_send_wr->send_buf == send_buf) return mad_send_wr; } return NULL; } int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_wr_private *mad_send_wr; unsigned long flags; int active; if (!send_buf) return -EINVAL; mad_agent_priv = container_of(send_buf->mad_agent, struct ib_mad_agent_private, agent); spin_lock_irqsave(&mad_agent_priv->lock, flags); mad_send_wr = find_send_wr(mad_agent_priv, send_buf); if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { spin_unlock_irqrestore(&mad_agent_priv->lock, flags); return -EINVAL; } active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); if (!timeout_ms) { mad_send_wr->status = IB_WC_WR_FLUSH_ERR; mad_send_wr->refcount -= (mad_send_wr->timeout > 0); } mad_send_wr->send_buf.timeout_ms = timeout_ms; if (active) mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); else ib_reset_mad_timeout(mad_send_wr, timeout_ms); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); return 0; } EXPORT_SYMBOL(ib_modify_mad); static void local_completions(struct work_struct *work) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_local_private *local; struct ib_mad_agent_private *recv_mad_agent; unsigned long flags; int free_mad; struct ib_wc wc; struct ib_mad_send_wc mad_send_wc; bool opa; mad_agent_priv = container_of(work, struct ib_mad_agent_private, local_work); opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, mad_agent_priv->qp_info->port_priv->port_num); spin_lock_irqsave(&mad_agent_priv->lock, flags); while (!list_empty(&mad_agent_priv->local_list)) { local = list_entry(mad_agent_priv->local_list.next, struct ib_mad_local_private, completion_list); list_del(&local->completion_list); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); free_mad = 0; if (local->mad_priv) { u8 base_version; recv_mad_agent = local->recv_mad_agent; if (!recv_mad_agent) { 
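			/*
			 * handle_outgoing_dr_smp() only queues a mad_priv
			 * together with a receiving agent, so this branch is
			 * defensive: drop the buffer and still report the
			 * send completion below.
			 */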
dev_err(&mad_agent_priv->agent.device->dev, "No receive MAD agent for local completion\n"); free_mad = 1; goto local_send_completion; } /* * Defined behavior is to complete response * before request */ build_smp_wc(recv_mad_agent->agent.qp, local->mad_send_wr->send_wr.wr.wr_cqe, be16_to_cpu(IB_LID_PERMISSIVE), local->mad_send_wr->send_wr.pkey_index, recv_mad_agent->agent.port_num, &wc); local->mad_priv->header.recv_wc.wc = &wc; base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version; if (opa && base_version == OPA_MGMT_BASE_VERSION) { local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len; local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); } else { local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad); local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); } INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); list_add(&local->mad_priv->header.recv_wc.recv_buf.list, &local->mad_priv->header.recv_wc.rmpp_list); local->mad_priv->header.recv_wc.recv_buf.grh = NULL; local->mad_priv->header.recv_wc.recv_buf.mad = (struct ib_mad *)local->mad_priv->mad; recv_mad_agent->agent.recv_handler( &recv_mad_agent->agent, &local->mad_send_wr->send_buf, &local->mad_priv->header.recv_wc); spin_lock_irqsave(&recv_mad_agent->lock, flags); deref_mad_agent(recv_mad_agent); spin_unlock_irqrestore(&recv_mad_agent->lock, flags); } local_send_completion: /* Complete send */ mad_send_wc.status = IB_WC_SUCCESS; mad_send_wc.vendor_err = 0; mad_send_wc.send_buf = &local->mad_send_wr->send_buf; mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, &mad_send_wc); spin_lock_irqsave(&mad_agent_priv->lock, flags); deref_mad_agent(mad_agent_priv); if (free_mad) kfree(local->mad_priv); kfree(local); } spin_unlock_irqrestore(&mad_agent_priv->lock, flags); } static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) { int ret; if (!mad_send_wr->retries_left) return -ETIMEDOUT; mad_send_wr->retries_left--; mad_send_wr->send_buf.retries++; mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { ret = ib_retry_rmpp(mad_send_wr); switch (ret) { case IB_RMPP_RESULT_UNHANDLED: ret = ib_send_mad(mad_send_wr); break; case IB_RMPP_RESULT_CONSUMED: ret = 0; break; default: ret = -ECOMM; break; } } else ret = ib_send_mad(mad_send_wr); if (!ret) { mad_send_wr->refcount++; list_add_tail(&mad_send_wr->agent_list, &mad_send_wr->mad_agent_priv->send_list); } return ret; } static void timeout_sends(struct work_struct *work) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc mad_send_wc; unsigned long flags, delay; mad_agent_priv = container_of(work, struct ib_mad_agent_private, timed_work.work); mad_send_wc.vendor_err = 0; spin_lock_irqsave(&mad_agent_priv->lock, flags); while (!list_empty(&mad_agent_priv->wait_list)) { mad_send_wr = list_entry(mad_agent_priv->wait_list.next, struct ib_mad_send_wr_private, agent_list); if (time_after(mad_send_wr->timeout, jiffies)) { delay = mad_send_wr->timeout - jiffies; if ((long)delay <= 0) delay = 1; queue_delayed_work(mad_agent_priv->qp_info-> port_priv->wq, &mad_agent_priv->timed_work, delay); break; } list_del(&mad_send_wr->agent_list); if (mad_send_wr->status == IB_WC_SUCCESS && !retry_send(mad_send_wr)) continue; spin_unlock_irqrestore(&mad_agent_priv->lock, flags); if (mad_send_wr->status == IB_WC_SUCCESS) mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; else 
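			/* canceled or failed earlier: report the recorded status */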
mad_send_wc.status = mad_send_wr->status; mad_send_wc.send_buf = &mad_send_wr->send_buf; mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, &mad_send_wc); deref_mad_agent(mad_agent_priv); spin_lock_irqsave(&mad_agent_priv->lock, flags); } spin_unlock_irqrestore(&mad_agent_priv->lock, flags); } /* * Allocate receive MADs and post receive WRs for them */ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, struct ib_mad_private *mad) { unsigned long flags; int post, ret; struct ib_mad_private *mad_priv; struct ib_sge sg_list; struct ib_recv_wr recv_wr; struct ib_mad_queue *recv_queue = &qp_info->recv_queue; /* Initialize common scatter list fields */ sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; /* Initialize common receive WR fields */ recv_wr.next = NULL; recv_wr.sg_list = &sg_list; recv_wr.num_sge = 1; do { /* Allocate and map receive buffer */ if (mad) { mad_priv = mad; mad = NULL; } else { mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), GFP_ATOMIC); if (!mad_priv) { ret = -ENOMEM; break; } } sg_list.length = mad_priv_dma_size(mad_priv); sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, &mad_priv->grh, mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr))) { kfree(mad_priv); ret = -ENOMEM; break; } mad_priv->header.mapping = sg_list.addr; mad_priv->header.mad_list.mad_queue = recv_queue; mad_priv->header.mad_list.cqe.done = ib_mad_recv_done; recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; /* Post receive WR */ spin_lock_irqsave(&recv_queue->lock, flags); post = (++recv_queue->count < recv_queue->max_active); list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); spin_unlock_irqrestore(&recv_queue->lock, flags); ret = ib_post_recv(qp_info->qp, &recv_wr, NULL); if (ret) { spin_lock_irqsave(&recv_queue->lock, flags); list_del(&mad_priv->header.mad_list.list); recv_queue->count--; spin_unlock_irqrestore(&recv_queue->lock, flags); ib_dma_unmap_single(qp_info->port_priv->device, mad_priv->header.mapping, mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE); kfree(mad_priv); dev_err(&qp_info->port_priv->device->dev, "ib_post_recv failed: %d\n", ret); break; } } while (post); return ret; } /* * Return all the posted receive MADs */ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) { struct ib_mad_private_header *mad_priv_hdr; struct ib_mad_private *recv; struct ib_mad_list_head *mad_list; if (!qp_info->qp) return; while (!list_empty(&qp_info->recv_queue.list)) { mad_list = list_entry(qp_info->recv_queue.list.next, struct ib_mad_list_head, list); mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, mad_list); recv = container_of(mad_priv_hdr, struct ib_mad_private, header); /* Remove from posted receive MAD list */ list_del(&mad_list->list); ib_dma_unmap_single(qp_info->port_priv->device, recv->header.mapping, mad_priv_dma_size(recv), DMA_FROM_DEVICE); kfree(recv); } qp_info->recv_queue.count = 0; } /* * Start the port */ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) { int ret, i; struct ib_qp_attr *attr; struct ib_qp *qp; u16 pkey_index; attr = kmalloc(sizeof *attr, GFP_KERNEL); if (!attr) return -ENOMEM; ret = ib_find_pkey(port_priv->device, port_priv->port_num, IB_DEFAULT_PKEY_FULL, &pkey_index); if (ret) pkey_index = 0; for (i = 0; i < IB_MAD_QPS_CORE; i++) { qp = port_priv->qp_info[i].qp; if (!qp) continue; /* * PKey index for QP1 is irrelevant but * one is needed for the Reset to Init transition */ 
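/*
 * Drive each special QP through the standard RESET -> INIT -> RTR ->
 * RTS sequence; for QP0/QP1 only the state, P_Key index, Q_Key and
 * send queue PSN need to be supplied.
 */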
attr->qp_state = IB_QPS_INIT; attr->pkey_index = pkey_index; attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY); if (ret) { dev_err(&port_priv->device->dev, "Couldn't change QP%d state to INIT: %d\n", i, ret); goto out; } attr->qp_state = IB_QPS_RTR; ret = ib_modify_qp(qp, attr, IB_QP_STATE); if (ret) { dev_err(&port_priv->device->dev, "Couldn't change QP%d state to RTR: %d\n", i, ret); goto out; } attr->qp_state = IB_QPS_RTS; attr->sq_psn = IB_MAD_SEND_Q_PSN; ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); if (ret) { dev_err(&port_priv->device->dev, "Couldn't change QP%d state to RTS: %d\n", i, ret); goto out; } } ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); if (ret) { dev_err(&port_priv->device->dev, "Failed to request completion notification: %d\n", ret); goto out; } for (i = 0; i < IB_MAD_QPS_CORE; i++) { if (!port_priv->qp_info[i].qp) continue; ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); if (ret) { dev_err(&port_priv->device->dev, "Couldn't post receive WRs\n"); goto out; } } out: kfree(attr); return ret; } static void qp_event_handler(struct ib_event *event, void *qp_context) { struct ib_mad_qp_info *qp_info = qp_context; /* It's worse than that! He's dead, Jim! */ dev_err(&qp_info->port_priv->device->dev, "Fatal error (%d) on MAD QP (%u)\n", event->event, qp_info->qp->qp_num); } static void init_mad_queue(struct ib_mad_qp_info *qp_info, struct ib_mad_queue *mad_queue) { mad_queue->qp_info = qp_info; mad_queue->count = 0; spin_lock_init(&mad_queue->lock); INIT_LIST_HEAD(&mad_queue->list); } static void init_mad_qp(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info) { qp_info->port_priv = port_priv; init_mad_queue(qp_info, &qp_info->send_queue); init_mad_queue(qp_info, &qp_info->recv_queue); INIT_LIST_HEAD(&qp_info->overflow_list); } static int create_mad_qp(struct ib_mad_qp_info *qp_info, enum ib_qp_type qp_type) { struct ib_qp_init_attr qp_init_attr; int ret; memset(&qp_init_attr, 0, sizeof qp_init_attr); qp_init_attr.send_cq = qp_info->port_priv->cq; qp_init_attr.recv_cq = qp_info->port_priv->cq; qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; qp_init_attr.cap.max_send_wr = mad_sendq_size; qp_init_attr.cap.max_recv_wr = mad_recvq_size; qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; qp_init_attr.qp_type = qp_type; qp_init_attr.port_num = qp_info->port_priv->port_num; qp_init_attr.qp_context = qp_info; qp_init_attr.event_handler = qp_event_handler; qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); if (IS_ERR(qp_info->qp)) { dev_err(&qp_info->port_priv->device->dev, "Couldn't create ib_mad QP%d\n", get_spl_qp_index(qp_type)); ret = PTR_ERR(qp_info->qp); goto error; } /* Use minimum queue sizes unless the CQ is resized */ qp_info->send_queue.max_active = mad_sendq_size; qp_info->recv_queue.max_active = mad_recvq_size; return 0; error: return ret; } static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) { if (!qp_info->qp) return; ib_destroy_qp(qp_info->qp); } /* * Open the port * Create the QP, PD, MR, and CQ if needed */ static int ib_mad_port_open(struct ib_device *device, u32 port_num) { int ret, cq_size; struct ib_mad_port_private *port_priv; unsigned long flags; char name[sizeof "ib_mad123"]; int has_smi; if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE)) return -EFAULT; if (WARN_ON(rdma_cap_opa_mad(device, port_num) && rdma_max_mad_size(device, 
port_num) < OPA_MGMT_MAD_SIZE)) return -EFAULT; /* Create new device info */ port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); if (!port_priv) return -ENOMEM; port_priv->device = device; port_priv->port_num = port_num; spin_lock_init(&port_priv->reg_lock); init_mad_qp(port_priv, &port_priv->qp_info[0]); init_mad_qp(port_priv, &port_priv->qp_info[1]); cq_size = mad_sendq_size + mad_recvq_size; has_smi = rdma_cap_ib_smi(device, port_num); if (has_smi) cq_size *= 2; port_priv->pd = ib_alloc_pd(device, 0); if (IS_ERR(port_priv->pd)) { dev_err(&device->dev, "Couldn't create ib_mad PD\n"); ret = PTR_ERR(port_priv->pd); goto error3; } port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, IB_POLL_UNBOUND_WORKQUEUE); if (IS_ERR(port_priv->cq)) { dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); ret = PTR_ERR(port_priv->cq); goto error4; } if (has_smi) { ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); if (ret) goto error6; } ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); if (ret) goto error7; snprintf(name, sizeof(name), "ib_mad%u", port_num); port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); if (!port_priv->wq) { ret = -ENOMEM; goto error8; } spin_lock_irqsave(&ib_mad_port_list_lock, flags); list_add_tail(&port_priv->port_list, &ib_mad_port_list); spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); ret = ib_mad_port_start(port_priv); if (ret) { dev_err(&device->dev, "Couldn't start port\n"); goto error9; } return 0; error9: spin_lock_irqsave(&ib_mad_port_list_lock, flags); list_del_init(&port_priv->port_list); spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); destroy_workqueue(port_priv->wq); error8: destroy_mad_qp(&port_priv->qp_info[1]); error7: destroy_mad_qp(&port_priv->qp_info[0]); error6: ib_free_cq(port_priv->cq); cleanup_recv_queue(&port_priv->qp_info[1]); cleanup_recv_queue(&port_priv->qp_info[0]); error4: ib_dealloc_pd(port_priv->pd); error3: kfree(port_priv); return ret; } /* * Close the port * If there are no classes using the port, free the port * resources (CQ, MR, PD, QP) and remove the port's info structure */ static int ib_mad_port_close(struct ib_device *device, u32 port_num) { struct ib_mad_port_private *port_priv; unsigned long flags; spin_lock_irqsave(&ib_mad_port_list_lock, flags); port_priv = __ib_get_mad_port(device, port_num); if (port_priv == NULL) { spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); dev_err(&device->dev, "Port %u not found\n", port_num); return -ENODEV; } list_del_init(&port_priv->port_list); spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); destroy_workqueue(port_priv->wq); destroy_mad_qp(&port_priv->qp_info[1]); destroy_mad_qp(&port_priv->qp_info[0]); ib_free_cq(port_priv->cq); ib_dealloc_pd(port_priv->pd); cleanup_recv_queue(&port_priv->qp_info[1]); cleanup_recv_queue(&port_priv->qp_info[0]); /* XXX: Handle deallocation of MAD registration tables */ kfree(port_priv); return 0; } static int ib_mad_init_device(struct ib_device *device) { int start, i; unsigned int count = 0; int ret; start = rdma_start_port(device); for (i = start; i <= rdma_end_port(device); i++) { if (!rdma_cap_ib_mad(device, i)) continue; ret = ib_mad_port_open(device, i); if (ret) { dev_err(&device->dev, "Couldn't open port %d\n", i); goto error; } ret = ib_agent_port_open(device, i); if (ret) { dev_err(&device->dev, "Couldn't open port %d for agents\n", i); goto error_agent; } count++; } if (!count) return -EOPNOTSUPP; return 0; error_agent: if (ib_mad_port_close(device, i)) dev_err(&device->dev, "Couldn't close 
port %d\n", i); error: while (--i >= start) { if (!rdma_cap_ib_mad(device, i)) continue; if (ib_agent_port_close(device, i)) dev_err(&device->dev, "Couldn't close port %d for agents\n", i); if (ib_mad_port_close(device, i)) dev_err(&device->dev, "Couldn't close port %d\n", i); } return ret; } static void ib_mad_remove_device(struct ib_device *device, void *client_data) { unsigned int i; rdma_for_each_port (device, i) { if (!rdma_cap_ib_mad(device, i)) continue; if (ib_agent_port_close(device, i)) dev_err(&device->dev, "Couldn't close port %u for agents\n", i); if (ib_mad_port_close(device, i)) dev_err(&device->dev, "Couldn't close port %u\n", i); } } static struct ib_client mad_client = { .name = "mad", .add = ib_mad_init_device, .remove = ib_mad_remove_device }; int ib_mad_init(void) { mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); INIT_LIST_HEAD(&ib_mad_port_list); if (ib_register_client(&mad_client)) { pr_err("Couldn't register ib_mad client\n"); return -EINVAL; } return 0; } void ib_mad_cleanup(void) { ib_unregister_client(&mad_client); }
linux-master
drivers/infiniband/core/mad.c
/* * Copyright (c) 2014 Chelsio, Inc. All rights reserved. * Copyright (c) 2014 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "iwpm_util.h" #define IWPM_MAPINFO_HASH_SIZE 512 #define IWPM_MAPINFO_HASH_MASK (IWPM_MAPINFO_HASH_SIZE - 1) #define IWPM_REMINFO_HASH_SIZE 64 #define IWPM_REMINFO_HASH_MASK (IWPM_REMINFO_HASH_SIZE - 1) #define IWPM_MSG_SIZE 512 static LIST_HEAD(iwpm_nlmsg_req_list); static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock); static struct hlist_head *iwpm_hash_bucket; static DEFINE_SPINLOCK(iwpm_mapinfo_lock); static struct hlist_head *iwpm_reminfo_bucket; static DEFINE_SPINLOCK(iwpm_reminfo_lock); static struct iwpm_admin_data iwpm_admin; /** * iwpm_init - Allocate resources for the iwarp port mapper * @nl_client: The index of the netlink client * * Should be called when network interface goes up. */ int iwpm_init(u8 nl_client) { iwpm_hash_bucket = kcalloc(IWPM_MAPINFO_HASH_SIZE, sizeof(struct hlist_head), GFP_KERNEL); if (!iwpm_hash_bucket) return -ENOMEM; iwpm_reminfo_bucket = kcalloc(IWPM_REMINFO_HASH_SIZE, sizeof(struct hlist_head), GFP_KERNEL); if (!iwpm_reminfo_bucket) { kfree(iwpm_hash_bucket); return -ENOMEM; } iwpm_set_registration(nl_client, IWPM_REG_UNDEF); pr_debug("%s: Mapinfo and reminfo tables are created\n", __func__); return 0; } static void free_hash_bucket(void); static void free_reminfo_bucket(void); /** * iwpm_exit - Deallocate resources for the iwarp port mapper * @nl_client: The index of the netlink client * * Should be called when network interface goes down. 
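 * Frees the mapinfo and reminfo hash tables and resets the client
 * registration to IWPM_REG_UNDEF.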
*/ int iwpm_exit(u8 nl_client) { free_hash_bucket(); free_reminfo_bucket(); pr_debug("%s: Resources are destroyed\n", __func__); iwpm_set_registration(nl_client, IWPM_REG_UNDEF); return 0; } static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *, struct sockaddr_storage *); /** * iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address * info in a hash table * @local_sockaddr: Local ip/tcp address * @mapped_sockaddr: Mapped local ip/tcp address * @nl_client: The index of the netlink client * @map_flags: IWPM mapping flags */ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, struct sockaddr_storage *mapped_sockaddr, u8 nl_client, u32 map_flags) { struct hlist_head *hash_bucket_head = NULL; struct iwpm_mapping_info *map_info; unsigned long flags; int ret = -EINVAL; map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL); if (!map_info) return -ENOMEM; memcpy(&map_info->local_sockaddr, local_sockaddr, sizeof(struct sockaddr_storage)); memcpy(&map_info->mapped_sockaddr, mapped_sockaddr, sizeof(struct sockaddr_storage)); map_info->nl_client = nl_client; map_info->map_flags = map_flags; spin_lock_irqsave(&iwpm_mapinfo_lock, flags); if (iwpm_hash_bucket) { hash_bucket_head = get_mapinfo_hash_bucket( &map_info->local_sockaddr, &map_info->mapped_sockaddr); if (hash_bucket_head) { hlist_add_head(&map_info->hlist_node, hash_bucket_head); ret = 0; } } spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); if (!hash_bucket_head) kfree(map_info); return ret; } /** * iwpm_remove_mapinfo - Remove local and mapped IPv4/IPv6 address * info from the hash table * @local_sockaddr: Local ip/tcp address * @mapped_local_addr: Mapped local ip/tcp address * * Returns err code if mapping info is not found in the hash table, * otherwise returns 0 */ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr, struct sockaddr_storage *mapped_local_addr) { struct hlist_node *tmp_hlist_node; struct hlist_head *hash_bucket_head; struct iwpm_mapping_info *map_info = NULL; unsigned long flags; int ret = -EINVAL; spin_lock_irqsave(&iwpm_mapinfo_lock, flags); if (iwpm_hash_bucket) { hash_bucket_head = get_mapinfo_hash_bucket( local_sockaddr, mapped_local_addr); if (!hash_bucket_head) goto remove_mapinfo_exit; hlist_for_each_entry_safe(map_info, tmp_hlist_node, hash_bucket_head, hlist_node) { if (!iwpm_compare_sockaddr(&map_info->mapped_sockaddr, mapped_local_addr)) { hlist_del_init(&map_info->hlist_node); kfree(map_info); ret = 0; break; } } } remove_mapinfo_exit: spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); return ret; } static void free_hash_bucket(void) { struct hlist_node *tmp_hlist_node; struct iwpm_mapping_info *map_info; unsigned long flags; int i; /* remove all the mapinfo data from the list */ spin_lock_irqsave(&iwpm_mapinfo_lock, flags); for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) { hlist_for_each_entry_safe(map_info, tmp_hlist_node, &iwpm_hash_bucket[i], hlist_node) { hlist_del_init(&map_info->hlist_node); kfree(map_info); } } /* free the hash list */ kfree(iwpm_hash_bucket); iwpm_hash_bucket = NULL; spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); } static void free_reminfo_bucket(void) { struct hlist_node *tmp_hlist_node; struct iwpm_remote_info *rem_info; unsigned long flags; int i; /* remove all the remote info from the list */ spin_lock_irqsave(&iwpm_reminfo_lock, flags); for (i = 0; i < IWPM_REMINFO_HASH_SIZE; i++) { hlist_for_each_entry_safe(rem_info, tmp_hlist_node, &iwpm_reminfo_bucket[i], hlist_node) { hlist_del_init(&rem_info->hlist_node); 
kfree(rem_info); } } /* free the hash list */ kfree(iwpm_reminfo_bucket); iwpm_reminfo_bucket = NULL; spin_unlock_irqrestore(&iwpm_reminfo_lock, flags); } static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage *, struct sockaddr_storage *); void iwpm_add_remote_info(struct iwpm_remote_info *rem_info) { struct hlist_head *hash_bucket_head; unsigned long flags; spin_lock_irqsave(&iwpm_reminfo_lock, flags); if (iwpm_reminfo_bucket) { hash_bucket_head = get_reminfo_hash_bucket( &rem_info->mapped_loc_sockaddr, &rem_info->mapped_rem_sockaddr); if (hash_bucket_head) hlist_add_head(&rem_info->hlist_node, hash_bucket_head); } spin_unlock_irqrestore(&iwpm_reminfo_lock, flags); } /** * iwpm_get_remote_info - Get the remote connecting peer address info * * @mapped_loc_addr: Mapped local address of the listening peer * @mapped_rem_addr: Mapped remote address of the connecting peer * @remote_addr: To store the remote address of the connecting peer * @nl_client: The index of the netlink client * * The remote address info is retrieved and provided to the client in * the remote_addr. After that it is removed from the hash table */ int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr, struct sockaddr_storage *mapped_rem_addr, struct sockaddr_storage *remote_addr, u8 nl_client) { struct hlist_node *tmp_hlist_node; struct hlist_head *hash_bucket_head; struct iwpm_remote_info *rem_info = NULL; unsigned long flags; int ret = -EINVAL; spin_lock_irqsave(&iwpm_reminfo_lock, flags); if (iwpm_reminfo_bucket) { hash_bucket_head = get_reminfo_hash_bucket( mapped_loc_addr, mapped_rem_addr); if (!hash_bucket_head) goto get_remote_info_exit; hlist_for_each_entry_safe(rem_info, tmp_hlist_node, hash_bucket_head, hlist_node) { if (!iwpm_compare_sockaddr(&rem_info->mapped_loc_sockaddr, mapped_loc_addr) && !iwpm_compare_sockaddr(&rem_info->mapped_rem_sockaddr, mapped_rem_addr)) { memcpy(remote_addr, &rem_info->remote_sockaddr, sizeof(struct sockaddr_storage)); iwpm_print_sockaddr(remote_addr, "get_remote_info: Remote sockaddr:"); hlist_del_init(&rem_info->hlist_node); kfree(rem_info); ret = 0; break; } } } get_remote_info_exit: spin_unlock_irqrestore(&iwpm_reminfo_lock, flags); return ret; } struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq, u8 nl_client, gfp_t gfp) { struct iwpm_nlmsg_request *nlmsg_request; unsigned long flags; nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp); if (!nlmsg_request) return NULL; spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags); list_add_tail(&nlmsg_request->inprocess_list, &iwpm_nlmsg_req_list); spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags); kref_init(&nlmsg_request->kref); kref_get(&nlmsg_request->kref); nlmsg_request->nlmsg_seq = nlmsg_seq; nlmsg_request->nl_client = nl_client; nlmsg_request->request_done = 0; nlmsg_request->err_code = 0; sema_init(&nlmsg_request->sem, 1); down(&nlmsg_request->sem); return nlmsg_request; } void iwpm_free_nlmsg_request(struct kref *kref) { struct iwpm_nlmsg_request *nlmsg_request; unsigned long flags; nlmsg_request = container_of(kref, struct iwpm_nlmsg_request, kref); spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags); list_del_init(&nlmsg_request->inprocess_list); spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags); if (!nlmsg_request->request_done) pr_debug("%s Freeing incomplete nlmsg request (seq = %u).\n", __func__, nlmsg_request->nlmsg_seq); kfree(nlmsg_request); } struct iwpm_nlmsg_request *iwpm_find_nlmsg_request(__u32 echo_seq) { struct iwpm_nlmsg_request *nlmsg_request; struct 
iwpm_nlmsg_request *found_request = NULL; unsigned long flags; spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags); list_for_each_entry(nlmsg_request, &iwpm_nlmsg_req_list, inprocess_list) { if (nlmsg_request->nlmsg_seq == echo_seq) { found_request = nlmsg_request; kref_get(&nlmsg_request->kref); break; } } spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags); return found_request; } int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request) { int ret; ret = down_timeout(&nlmsg_request->sem, IWPM_NL_TIMEOUT); if (ret) { ret = -EINVAL; pr_info("%s: Timeout %d sec for netlink request (seq = %u)\n", __func__, (IWPM_NL_TIMEOUT/HZ), nlmsg_request->nlmsg_seq); } else { ret = nlmsg_request->err_code; } kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request); return ret; } int iwpm_get_nlmsg_seq(void) { return atomic_inc_return(&iwpm_admin.nlmsg_seq); } /* valid client */ u32 iwpm_get_registration(u8 nl_client) { return iwpm_admin.reg_list[nl_client]; } /* valid client */ void iwpm_set_registration(u8 nl_client, u32 reg) { iwpm_admin.reg_list[nl_client] = reg; } /* valid client */ u32 iwpm_check_registration(u8 nl_client, u32 reg) { return (iwpm_get_registration(nl_client) & reg); } int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr, struct sockaddr_storage *b_sockaddr) { if (a_sockaddr->ss_family != b_sockaddr->ss_family) return 1; if (a_sockaddr->ss_family == AF_INET) { struct sockaddr_in *a4_sockaddr = (struct sockaddr_in *)a_sockaddr; struct sockaddr_in *b4_sockaddr = (struct sockaddr_in *)b_sockaddr; if (!memcmp(&a4_sockaddr->sin_addr, &b4_sockaddr->sin_addr, sizeof(struct in_addr)) && a4_sockaddr->sin_port == b4_sockaddr->sin_port) return 0; } else if (a_sockaddr->ss_family == AF_INET6) { struct sockaddr_in6 *a6_sockaddr = (struct sockaddr_in6 *)a_sockaddr; struct sockaddr_in6 *b6_sockaddr = (struct sockaddr_in6 *)b_sockaddr; if (!memcmp(&a6_sockaddr->sin6_addr, &b6_sockaddr->sin6_addr, sizeof(struct in6_addr)) && a6_sockaddr->sin6_port == b6_sockaddr->sin6_port) return 0; } else { pr_err("%s: Invalid sockaddr family\n", __func__); } return 1; } struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh, int nl_client) { struct sk_buff *skb = NULL; skb = dev_alloc_skb(IWPM_MSG_SIZE); if (!skb) goto create_nlmsg_exit; if (!(ibnl_put_msg(skb, nlh, 0, 0, nl_client, nl_op, NLM_F_REQUEST))) { pr_warn("%s: Unable to put the nlmsg header\n", __func__); dev_kfree_skb(skb); skb = NULL; } create_nlmsg_exit: return skb; } int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max, const struct nla_policy *nlmsg_policy, struct nlattr *nltb[], const char *msg_type) { int nlh_len = 0; int ret; const char *err_str = ""; ret = nlmsg_validate_deprecated(cb->nlh, nlh_len, policy_max - 1, nlmsg_policy, NULL); if (ret) { err_str = "Invalid attribute"; goto parse_nlmsg_error; } ret = nlmsg_parse_deprecated(cb->nlh, nlh_len, nltb, policy_max - 1, nlmsg_policy, NULL); if (ret) { err_str = "Unable to parse the nlmsg"; goto parse_nlmsg_error; } ret = iwpm_validate_nlmsg_attr(nltb, policy_max); if (ret) { err_str = "Invalid NULL attribute"; goto parse_nlmsg_error; } return 0; parse_nlmsg_error: pr_warn("%s: %s (msg type %s ret = %d)\n", __func__, err_str, msg_type, ret); return ret; } void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg) { struct sockaddr_in6 *sockaddr_v6; struct sockaddr_in *sockaddr_v4; switch (sockaddr->ss_family) { case AF_INET: sockaddr_v4 = (struct sockaddr_in *)sockaddr; pr_debug("%s IPV4 %pI4: %u(0x%04X)\n", msg, &sockaddr_v4->sin_addr, 
ntohs(sockaddr_v4->sin_port), ntohs(sockaddr_v4->sin_port)); break; case AF_INET6: sockaddr_v6 = (struct sockaddr_in6 *)sockaddr; pr_debug("%s IPV6 %pI6: %u(0x%04X)\n", msg, &sockaddr_v6->sin6_addr, ntohs(sockaddr_v6->sin6_port), ntohs(sockaddr_v6->sin6_port)); break; default: break; } } static u32 iwpm_ipv6_jhash(struct sockaddr_in6 *ipv6_sockaddr) { u32 ipv6_hash = jhash(&ipv6_sockaddr->sin6_addr, sizeof(struct in6_addr), 0); u32 hash = jhash_2words(ipv6_hash, (__force u32) ipv6_sockaddr->sin6_port, 0); return hash; } static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr) { u32 ipv4_hash = jhash(&ipv4_sockaddr->sin_addr, sizeof(struct in_addr), 0); u32 hash = jhash_2words(ipv4_hash, (__force u32) ipv4_sockaddr->sin_port, 0); return hash; } static int get_hash_bucket(struct sockaddr_storage *a_sockaddr, struct sockaddr_storage *b_sockaddr, u32 *hash) { u32 a_hash, b_hash; if (a_sockaddr->ss_family == AF_INET) { a_hash = iwpm_ipv4_jhash((struct sockaddr_in *) a_sockaddr); b_hash = iwpm_ipv4_jhash((struct sockaddr_in *) b_sockaddr); } else if (a_sockaddr->ss_family == AF_INET6) { a_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) a_sockaddr); b_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) b_sockaddr); } else { pr_err("%s: Invalid sockaddr family\n", __func__); return -EINVAL; } if (a_hash == b_hash) /* if port mapper isn't available */ *hash = a_hash; else *hash = jhash_2words(a_hash, b_hash, 0); return 0; } static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *local_sockaddr, struct sockaddr_storage *mapped_sockaddr) { u32 hash; int ret; ret = get_hash_bucket(local_sockaddr, mapped_sockaddr, &hash); if (ret) return NULL; return &iwpm_hash_bucket[hash & IWPM_MAPINFO_HASH_MASK]; } static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage *mapped_loc_sockaddr, struct sockaddr_storage *mapped_rem_sockaddr) { u32 hash; int ret; ret = get_hash_bucket(mapped_loc_sockaddr, mapped_rem_sockaddr, &hash); if (ret) return NULL; return &iwpm_reminfo_bucket[hash & IWPM_REMINFO_HASH_MASK]; } static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid) { struct sk_buff *skb = NULL; struct nlmsghdr *nlh; u32 msg_seq; const char *err_str = ""; int ret = -EINVAL; skb = iwpm_create_nlmsg(RDMA_NL_IWPM_MAPINFO_NUM, &nlh, nl_client); if (!skb) { err_str = "Unable to create a nlmsg"; goto mapinfo_num_error; } nlh->nlmsg_seq = iwpm_get_nlmsg_seq(); msg_seq = 0; err_str = "Unable to put attribute of mapinfo number nlmsg"; ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_MAPINFO_SEQ); if (ret) goto mapinfo_num_error; ret = ibnl_put_attr(skb, nlh, sizeof(u32), &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM); if (ret) goto mapinfo_num_error; nlmsg_end(skb, nlh); ret = rdma_nl_unicast(&init_net, skb, iwpm_pid); if (ret) { skb = NULL; err_str = "Unable to send a nlmsg"; goto mapinfo_num_error; } pr_debug("%s: Sent mapping number = %u\n", __func__, mapping_num); return 0; mapinfo_num_error: pr_info("%s: %s\n", __func__, err_str); dev_kfree_skb(skb); return ret; } static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid) { struct nlmsghdr *nlh = NULL; int ret = 0; if (!skb) return ret; if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client, RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) { pr_warn("%s Unable to put NLMSG_DONE\n", __func__); dev_kfree_skb(skb); return -ENOMEM; } nlh->nlmsg_type = NLMSG_DONE; ret = rdma_nl_unicast(&init_net, skb, iwpm_pid); if (ret) pr_warn("%s Unable to send a nlmsg\n", __func__); return ret; } int iwpm_send_mapinfo(u8 nl_client, 
int iwpm_pid) { struct iwpm_mapping_info *map_info; struct sk_buff *skb = NULL; struct nlmsghdr *nlh; int skb_num = 0, mapping_num = 0; int i = 0, nlmsg_bytes = 0; unsigned long flags; const char *err_str = ""; int ret; skb = dev_alloc_skb(NLMSG_GOODSIZE); if (!skb) { ret = -ENOMEM; err_str = "Unable to allocate skb"; goto send_mapping_info_exit; } skb_num++; spin_lock_irqsave(&iwpm_mapinfo_lock, flags); ret = -EINVAL; for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) { hlist_for_each_entry(map_info, &iwpm_hash_bucket[i], hlist_node) { if (map_info->nl_client != nl_client) continue; nlh = NULL; if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client, RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) { ret = -ENOMEM; err_str = "Unable to put the nlmsg header"; goto send_mapping_info_unlock; } err_str = "Unable to put attribute of the nlmsg"; ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage), &map_info->local_sockaddr, IWPM_NLA_MAPINFO_LOCAL_ADDR); if (ret) goto send_mapping_info_unlock; ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage), &map_info->mapped_sockaddr, IWPM_NLA_MAPINFO_MAPPED_ADDR); if (ret) goto send_mapping_info_unlock; if (iwpm_ulib_version > IWPM_UABI_VERSION_MIN) { ret = ibnl_put_attr(skb, nlh, sizeof(u32), &map_info->map_flags, IWPM_NLA_MAPINFO_FLAGS); if (ret) goto send_mapping_info_unlock; } nlmsg_end(skb, nlh); iwpm_print_sockaddr(&map_info->local_sockaddr, "send_mapping_info: Local sockaddr:"); iwpm_print_sockaddr(&map_info->mapped_sockaddr, "send_mapping_info: Mapped local sockaddr:"); mapping_num++; nlmsg_bytes += nlh->nlmsg_len; /* check if all mappings can fit in one skb */ if (NLMSG_GOODSIZE - nlmsg_bytes < nlh->nlmsg_len * 2) { /* and leave room for NLMSG_DONE */ nlmsg_bytes = 0; skb_num++; spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); /* send the skb */ ret = send_nlmsg_done(skb, nl_client, iwpm_pid); skb = NULL; if (ret) { err_str = "Unable to send map info"; goto send_mapping_info_exit; } if (skb_num == IWPM_MAPINFO_SKB_COUNT) { ret = -ENOMEM; err_str = "Insufficient skbs for map info"; goto send_mapping_info_exit; } skb = dev_alloc_skb(NLMSG_GOODSIZE); if (!skb) { ret = -ENOMEM; err_str = "Unable to allocate skb"; goto send_mapping_info_exit; } spin_lock_irqsave(&iwpm_mapinfo_lock, flags); } } } send_mapping_info_unlock: spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); send_mapping_info_exit: if (ret) { pr_warn("%s: %s (ret = %d)\n", __func__, err_str, ret); dev_kfree_skb(skb); return ret; } send_nlmsg_done(skb, nl_client, iwpm_pid); return send_mapinfo_num(mapping_num, nl_client, iwpm_pid); } int iwpm_mapinfo_available(void) { unsigned long flags; int full_bucket = 0, i = 0; spin_lock_irqsave(&iwpm_mapinfo_lock, flags); if (iwpm_hash_bucket) { for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) { if (!hlist_empty(&iwpm_hash_bucket[i])) { full_bucket = 1; break; } } } spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); return full_bucket; } int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version) { struct sk_buff *skb = NULL; struct nlmsghdr *nlh; const char *err_str; int ret = -EINVAL; skb = iwpm_create_nlmsg(RDMA_NL_IWPM_HELLO, &nlh, nl_client); if (!skb) { err_str = "Unable to create a nlmsg"; goto hello_num_error; } nlh->nlmsg_seq = iwpm_get_nlmsg_seq(); err_str = "Unable to put attribute of abi_version into nlmsg"; ret = ibnl_put_attr(skb, nlh, sizeof(u16), &abi_version, IWPM_NLA_HELLO_ABI_VERSION); if (ret) goto hello_num_error; nlmsg_end(skb, nlh); ret = rdma_nl_unicast(&init_net, skb, iwpm_pid); if (ret) { skb = NULL; err_str = "Unable to send a 
nlmsg"; goto hello_num_error; } pr_debug("%s: Sent hello abi_version = %u\n", __func__, abi_version); return 0; hello_num_error: pr_info("%s: %s\n", __func__, err_str); dev_kfree_skb(skb); return ret; }
linux-master
drivers/infiniband/core/iwpm_util.c
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "core_priv.h" #include <linux/slab.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <rdma/ib_mad.h> #include <rdma/ib_pma.h> #include <rdma/ib_cache.h> #include <rdma/rdma_counter.h> #include <rdma/ib_sysfs.h> struct port_table_attribute { struct ib_port_attribute attr; char name[8]; int index; __be16 attr_id; }; struct gid_attr_group { struct ib_port *port; struct kobject kobj; struct attribute_group groups[2]; const struct attribute_group *groups_list[3]; struct port_table_attribute attrs_list[]; }; struct ib_port { struct kobject kobj; struct ib_device *ibdev; struct gid_attr_group *gid_attr_group; struct hw_stats_port_data *hw_stats_data; struct attribute_group groups[3]; const struct attribute_group *groups_list[5]; u32 port_num; struct port_table_attribute attrs_list[]; }; struct hw_stats_device_attribute { struct device_attribute attr; ssize_t (*show)(struct ib_device *ibdev, struct rdma_hw_stats *stats, unsigned int index, unsigned int port_num, char *buf); ssize_t (*store)(struct ib_device *ibdev, struct rdma_hw_stats *stats, unsigned int index, unsigned int port_num, const char *buf, size_t count); }; struct hw_stats_port_attribute { struct ib_port_attribute attr; ssize_t (*show)(struct ib_device *ibdev, struct rdma_hw_stats *stats, unsigned int index, unsigned int port_num, char *buf); ssize_t (*store)(struct ib_device *ibdev, struct rdma_hw_stats *stats, unsigned int index, unsigned int port_num, const char *buf, size_t count); }; struct hw_stats_device_data { struct attribute_group group; struct rdma_hw_stats *stats; struct hw_stats_device_attribute attrs[]; }; struct hw_stats_port_data { struct rdma_hw_stats *stats; struct hw_stats_port_attribute attrs[]; }; static ssize_t port_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct ib_port_attribute *port_attr = container_of(attr, struct ib_port_attribute, attr); struct ib_port *p = container_of(kobj, struct ib_port, kobj); if (!port_attr->show) return -EIO; return 
port_attr->show(p->ibdev, p->port_num, port_attr, buf); } static ssize_t port_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct ib_port_attribute *port_attr = container_of(attr, struct ib_port_attribute, attr); struct ib_port *p = container_of(kobj, struct ib_port, kobj); if (!port_attr->store) return -EIO; return port_attr->store(p->ibdev, p->port_num, port_attr, buf, count); } struct ib_device *ib_port_sysfs_get_ibdev_kobj(struct kobject *kobj, u32 *port_num) { struct ib_port *port = container_of(kobj, struct ib_port, kobj); *port_num = port->port_num; return port->ibdev; } EXPORT_SYMBOL(ib_port_sysfs_get_ibdev_kobj); static const struct sysfs_ops port_sysfs_ops = { .show = port_attr_show, .store = port_attr_store }; static ssize_t hw_stat_device_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hw_stats_device_attribute *stat_attr = container_of(attr, struct hw_stats_device_attribute, attr); struct ib_device *ibdev = container_of(dev, struct ib_device, dev); return stat_attr->show(ibdev, ibdev->hw_stats_data->stats, stat_attr - ibdev->hw_stats_data->attrs, 0, buf); } static ssize_t hw_stat_device_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hw_stats_device_attribute *stat_attr = container_of(attr, struct hw_stats_device_attribute, attr); struct ib_device *ibdev = container_of(dev, struct ib_device, dev); return stat_attr->store(ibdev, ibdev->hw_stats_data->stats, stat_attr - ibdev->hw_stats_data->attrs, 0, buf, count); } static ssize_t hw_stat_port_show(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, char *buf) { struct hw_stats_port_attribute *stat_attr = container_of(attr, struct hw_stats_port_attribute, attr); struct ib_port *port = ibdev->port_data[port_num].sysfs; return stat_attr->show(ibdev, port->hw_stats_data->stats, stat_attr - port->hw_stats_data->attrs, port->port_num, buf); } static ssize_t hw_stat_port_store(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, const char *buf, size_t count) { struct hw_stats_port_attribute *stat_attr = container_of(attr, struct hw_stats_port_attribute, attr); struct ib_port *port = ibdev->port_data[port_num].sysfs; return stat_attr->store(ibdev, port->hw_stats_data->stats, stat_attr - port->hw_stats_data->attrs, port->port_num, buf, count); } static ssize_t gid_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct ib_port_attribute *port_attr = container_of(attr, struct ib_port_attribute, attr); struct ib_port *p = container_of(kobj, struct gid_attr_group, kobj)->port; if (!port_attr->show) return -EIO; return port_attr->show(p->ibdev, p->port_num, port_attr, buf); } static const struct sysfs_ops gid_attr_sysfs_ops = { .show = gid_attr_show }; static ssize_t state_show(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; static const char *state_name[] = { [IB_PORT_NOP] = "NOP", [IB_PORT_DOWN] = "DOWN", [IB_PORT_INIT] = "INIT", [IB_PORT_ARMED] = "ARMED", [IB_PORT_ACTIVE] = "ACTIVE", [IB_PORT_ACTIVE_DEFER] = "ACTIVE_DEFER" }; ret = ib_query_port(ibdev, port_num, &attr); if (ret) return ret; return sysfs_emit(buf, "%d: %s\n", attr.state, attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ? 
state_name[attr.state] : "UNKNOWN"); } static ssize_t lid_show(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(ibdev, port_num, &attr); if (ret) return ret; return sysfs_emit(buf, "0x%x\n", attr.lid); } static ssize_t lid_mask_count_show(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(ibdev, port_num, &attr); if (ret) return ret; return sysfs_emit(buf, "%u\n", attr.lmc); } static ssize_t sm_lid_show(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(ibdev, port_num, &attr); if (ret) return ret; return sysfs_emit(buf, "0x%x\n", attr.sm_lid); } static ssize_t sm_sl_show(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(ibdev, port_num, &attr); if (ret) return ret; return sysfs_emit(buf, "%u\n", attr.sm_sl); } static ssize_t cap_mask_show(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(ibdev, port_num, &attr); if (ret) return ret; return sysfs_emit(buf, "0x%08x\n", attr.port_cap_flags); } static ssize_t rate_show(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *unused, char *buf) { struct ib_port_attr attr; char *speed = ""; int rate; /* in deci-Gb/sec */ ssize_t ret; ret = ib_query_port(ibdev, port_num, &attr); if (ret) return ret; switch (attr.active_speed) { case IB_SPEED_DDR: speed = " DDR"; rate = 50; break; case IB_SPEED_QDR: speed = " QDR"; rate = 100; break; case IB_SPEED_FDR10: speed = " FDR10"; rate = 100; break; case IB_SPEED_FDR: speed = " FDR"; rate = 140; break; case IB_SPEED_EDR: speed = " EDR"; rate = 250; break; case IB_SPEED_HDR: speed = " HDR"; rate = 500; break; case IB_SPEED_NDR: speed = " NDR"; rate = 1000; break; case IB_SPEED_SDR: default: /* default to SDR for invalid rates */ speed = " SDR"; rate = 25; break; } rate *= ib_width_enum_to_int(attr.active_width); if (rate < 0) return -EINVAL; return sysfs_emit(buf, "%d%s Gb/sec (%dX%s)\n", rate / 10, rate % 10 ? 
".5" : "", ib_width_enum_to_int(attr.active_width), speed); } static const char *phys_state_to_str(enum ib_port_phys_state phys_state) { static const char *phys_state_str[] = { "<unknown>", "Sleep", "Polling", "Disabled", "PortConfigurationTraining", "LinkUp", "LinkErrorRecovery", "Phy Test", }; if (phys_state < ARRAY_SIZE(phys_state_str)) return phys_state_str[phys_state]; return "<unknown>"; } static ssize_t phys_state_show(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *unused, char *buf) { struct ib_port_attr attr; ssize_t ret; ret = ib_query_port(ibdev, port_num, &attr); if (ret) return ret; return sysfs_emit(buf, "%u: %s\n", attr.phys_state, phys_state_to_str(attr.phys_state)); } static ssize_t link_layer_show(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *unused, char *buf) { const char *output; switch (rdma_port_get_link_layer(ibdev, port_num)) { case IB_LINK_LAYER_INFINIBAND: output = "InfiniBand"; break; case IB_LINK_LAYER_ETHERNET: output = "Ethernet"; break; default: output = "Unknown"; break; } return sysfs_emit(buf, "%s\n", output); } static IB_PORT_ATTR_RO(state); static IB_PORT_ATTR_RO(lid); static IB_PORT_ATTR_RO(lid_mask_count); static IB_PORT_ATTR_RO(sm_lid); static IB_PORT_ATTR_RO(sm_sl); static IB_PORT_ATTR_RO(cap_mask); static IB_PORT_ATTR_RO(rate); static IB_PORT_ATTR_RO(phys_state); static IB_PORT_ATTR_RO(link_layer); static struct attribute *port_default_attrs[] = { &ib_port_attr_state.attr, &ib_port_attr_lid.attr, &ib_port_attr_lid_mask_count.attr, &ib_port_attr_sm_lid.attr, &ib_port_attr_sm_sl.attr, &ib_port_attr_cap_mask.attr, &ib_port_attr_rate.attr, &ib_port_attr_phys_state.attr, &ib_port_attr_link_layer.attr, NULL }; ATTRIBUTE_GROUPS(port_default); static ssize_t print_ndev(const struct ib_gid_attr *gid_attr, char *buf) { struct net_device *ndev; int ret = -EINVAL; rcu_read_lock(); ndev = rcu_dereference(gid_attr->ndev); if (ndev) ret = sysfs_emit(buf, "%s\n", ndev->name); rcu_read_unlock(); return ret; } static ssize_t print_gid_type(const struct ib_gid_attr *gid_attr, char *buf) { return sysfs_emit(buf, "%s\n", ib_cache_gid_type_str(gid_attr->gid_type)); } static ssize_t _show_port_gid_attr( struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, char *buf, ssize_t (*print)(const struct ib_gid_attr *gid_attr, char *buf)) { struct port_table_attribute *tab_attr = container_of(attr, struct port_table_attribute, attr); const struct ib_gid_attr *gid_attr; ssize_t ret; gid_attr = rdma_get_gid_attr(ibdev, port_num, tab_attr->index); if (IS_ERR(gid_attr)) /* -EINVAL is returned for user space compatibility reasons. */ return -EINVAL; ret = print(gid_attr, buf); rdma_put_gid_attr(gid_attr); return ret; } static ssize_t show_port_gid(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, char *buf) { struct port_table_attribute *tab_attr = container_of(attr, struct port_table_attribute, attr); const struct ib_gid_attr *gid_attr; int len; gid_attr = rdma_get_gid_attr(ibdev, port_num, tab_attr->index); if (IS_ERR(gid_attr)) { const union ib_gid zgid = {}; /* If reading GID fails, it is likely due to GID entry being * empty (invalid) or reserved GID in the table. User space * expects to read GID table entries as long as it given index * is within GID table size. Administrative/debugging tool * fails to query rest of the GID entries if it hits error * while querying a GID of the given index. To avoid user * space throwing such error on fail to read gid, return zero * GID as before. 
This maintains backward compatibility. */ return sysfs_emit(buf, "%pI6\n", zgid.raw); } len = sysfs_emit(buf, "%pI6\n", gid_attr->gid.raw); rdma_put_gid_attr(gid_attr); return len; } static ssize_t show_port_gid_attr_ndev(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, char *buf) { return _show_port_gid_attr(ibdev, port_num, attr, buf, print_ndev); } static ssize_t show_port_gid_attr_gid_type(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, char *buf) { return _show_port_gid_attr(ibdev, port_num, attr, buf, print_gid_type); } static ssize_t show_port_pkey(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, char *buf) { struct port_table_attribute *tab_attr = container_of(attr, struct port_table_attribute, attr); u16 pkey; int ret; ret = ib_query_pkey(ibdev, port_num, tab_attr->index, &pkey); if (ret) return ret; return sysfs_emit(buf, "0x%04x\n", pkey); } #define PORT_PMA_ATTR(_name, _counter, _width, _offset) \ struct port_table_attribute port_pma_attr_##_name = { \ .attr = __ATTR(_name, S_IRUGO, show_pma_counter, NULL), \ .index = (_offset) | ((_width) << 16) | ((_counter) << 24), \ .attr_id = IB_PMA_PORT_COUNTERS, \ } #define PORT_PMA_ATTR_EXT(_name, _width, _offset) \ struct port_table_attribute port_pma_attr_ext_##_name = { \ .attr = __ATTR(_name, S_IRUGO, show_pma_counter, NULL), \ .index = (_offset) | ((_width) << 16), \ .attr_id = IB_PMA_PORT_COUNTERS_EXT, \ } /* * Get a Perfmgmt MAD block of data. * Returns error code or the number of bytes retrieved. */ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr, void *data, int offset, size_t size) { struct ib_mad *in_mad; struct ib_mad *out_mad; size_t mad_size = sizeof(*out_mad); u16 out_mad_pkey_index = 0; ssize_t ret; if (!dev->ops.process_mad) return -ENOSYS; in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); out_mad = kzalloc(sizeof(*out_mad), GFP_KERNEL); if (!in_mad || !out_mad) { ret = -ENOMEM; goto out; } in_mad->mad_hdr.base_version = 1; in_mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_PERF_MGMT; in_mad->mad_hdr.class_version = 1; in_mad->mad_hdr.method = IB_MGMT_METHOD_GET; in_mad->mad_hdr.attr_id = attr; if (attr != IB_PMA_CLASS_PORT_INFO) in_mad->data[41] = port_num; /* PortSelect field */ if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY, port_num, NULL, NULL, in_mad, out_mad, &mad_size, &out_mad_pkey_index) & (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) != (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) { ret = -EINVAL; goto out; } memcpy(data, out_mad->data + offset, size); ret = size; out: kfree(in_mad); kfree(out_mad); return ret; } static ssize_t show_pma_counter(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, char *buf) { struct port_table_attribute *tab_attr = container_of(attr, struct port_table_attribute, attr); int offset = tab_attr->index & 0xffff; int width = (tab_attr->index >> 16) & 0xff; int ret; u8 data[8]; int len; ret = get_perf_mad(ibdev, port_num, tab_attr->attr_id, &data, 40 + offset / 8, sizeof(data)); if (ret < 0) return ret; switch (width) { case 4: len = sysfs_emit(buf, "%d\n", (*data >> (4 - (offset % 8))) & 0xf); break; case 8: len = sysfs_emit(buf, "%u\n", *data); break; case 16: len = sysfs_emit(buf, "%u\n", be16_to_cpup((__be16 *)data)); break; case 32: len = sysfs_emit(buf, "%u\n", be32_to_cpup((__be32 *)data)); break; case 64: len = sysfs_emit(buf, "%llu\n", be64_to_cpup((__be64 *)data)); break; default: len = 0; break; } return len; } static PORT_PMA_ATTR(symbol_error , 0, 16, 32); static 
PORT_PMA_ATTR(link_error_recovery , 1, 8, 48); static PORT_PMA_ATTR(link_downed , 2, 8, 56); static PORT_PMA_ATTR(port_rcv_errors , 3, 16, 64); static PORT_PMA_ATTR(port_rcv_remote_physical_errors, 4, 16, 80); static PORT_PMA_ATTR(port_rcv_switch_relay_errors , 5, 16, 96); static PORT_PMA_ATTR(port_xmit_discards , 6, 16, 112); static PORT_PMA_ATTR(port_xmit_constraint_errors , 7, 8, 128); static PORT_PMA_ATTR(port_rcv_constraint_errors , 8, 8, 136); static PORT_PMA_ATTR(local_link_integrity_errors , 9, 4, 152); static PORT_PMA_ATTR(excessive_buffer_overrun_errors, 10, 4, 156); static PORT_PMA_ATTR(VL15_dropped , 11, 16, 176); static PORT_PMA_ATTR(port_xmit_data , 12, 32, 192); static PORT_PMA_ATTR(port_rcv_data , 13, 32, 224); static PORT_PMA_ATTR(port_xmit_packets , 14, 32, 256); static PORT_PMA_ATTR(port_rcv_packets , 15, 32, 288); static PORT_PMA_ATTR(port_xmit_wait , 0, 32, 320); /* * Counters added by extended set */ static PORT_PMA_ATTR_EXT(port_xmit_data , 64, 64); static PORT_PMA_ATTR_EXT(port_rcv_data , 64, 128); static PORT_PMA_ATTR_EXT(port_xmit_packets , 64, 192); static PORT_PMA_ATTR_EXT(port_rcv_packets , 64, 256); static PORT_PMA_ATTR_EXT(unicast_xmit_packets , 64, 320); static PORT_PMA_ATTR_EXT(unicast_rcv_packets , 64, 384); static PORT_PMA_ATTR_EXT(multicast_xmit_packets , 64, 448); static PORT_PMA_ATTR_EXT(multicast_rcv_packets , 64, 512); static struct attribute *pma_attrs[] = { &port_pma_attr_symbol_error.attr.attr, &port_pma_attr_link_error_recovery.attr.attr, &port_pma_attr_link_downed.attr.attr, &port_pma_attr_port_rcv_errors.attr.attr, &port_pma_attr_port_rcv_remote_physical_errors.attr.attr, &port_pma_attr_port_rcv_switch_relay_errors.attr.attr, &port_pma_attr_port_xmit_discards.attr.attr, &port_pma_attr_port_xmit_constraint_errors.attr.attr, &port_pma_attr_port_rcv_constraint_errors.attr.attr, &port_pma_attr_local_link_integrity_errors.attr.attr, &port_pma_attr_excessive_buffer_overrun_errors.attr.attr, &port_pma_attr_VL15_dropped.attr.attr, &port_pma_attr_port_xmit_data.attr.attr, &port_pma_attr_port_rcv_data.attr.attr, &port_pma_attr_port_xmit_packets.attr.attr, &port_pma_attr_port_rcv_packets.attr.attr, &port_pma_attr_port_xmit_wait.attr.attr, NULL }; static struct attribute *pma_attrs_ext[] = { &port_pma_attr_symbol_error.attr.attr, &port_pma_attr_link_error_recovery.attr.attr, &port_pma_attr_link_downed.attr.attr, &port_pma_attr_port_rcv_errors.attr.attr, &port_pma_attr_port_rcv_remote_physical_errors.attr.attr, &port_pma_attr_port_rcv_switch_relay_errors.attr.attr, &port_pma_attr_port_xmit_discards.attr.attr, &port_pma_attr_port_xmit_constraint_errors.attr.attr, &port_pma_attr_port_rcv_constraint_errors.attr.attr, &port_pma_attr_local_link_integrity_errors.attr.attr, &port_pma_attr_excessive_buffer_overrun_errors.attr.attr, &port_pma_attr_VL15_dropped.attr.attr, &port_pma_attr_ext_port_xmit_data.attr.attr, &port_pma_attr_ext_port_rcv_data.attr.attr, &port_pma_attr_ext_port_xmit_packets.attr.attr, &port_pma_attr_port_xmit_wait.attr.attr, &port_pma_attr_ext_port_rcv_packets.attr.attr, &port_pma_attr_ext_unicast_rcv_packets.attr.attr, &port_pma_attr_ext_unicast_xmit_packets.attr.attr, &port_pma_attr_ext_multicast_rcv_packets.attr.attr, &port_pma_attr_ext_multicast_xmit_packets.attr.attr, NULL }; static struct attribute *pma_attrs_noietf[] = { &port_pma_attr_symbol_error.attr.attr, &port_pma_attr_link_error_recovery.attr.attr, &port_pma_attr_link_downed.attr.attr, &port_pma_attr_port_rcv_errors.attr.attr, 
&port_pma_attr_port_rcv_remote_physical_errors.attr.attr, &port_pma_attr_port_rcv_switch_relay_errors.attr.attr, &port_pma_attr_port_xmit_discards.attr.attr, &port_pma_attr_port_xmit_constraint_errors.attr.attr, &port_pma_attr_port_rcv_constraint_errors.attr.attr, &port_pma_attr_local_link_integrity_errors.attr.attr, &port_pma_attr_excessive_buffer_overrun_errors.attr.attr, &port_pma_attr_VL15_dropped.attr.attr, &port_pma_attr_ext_port_xmit_data.attr.attr, &port_pma_attr_ext_port_rcv_data.attr.attr, &port_pma_attr_ext_port_xmit_packets.attr.attr, &port_pma_attr_ext_port_rcv_packets.attr.attr, &port_pma_attr_port_xmit_wait.attr.attr, NULL }; static const struct attribute_group pma_group = { .name = "counters", .attrs = pma_attrs }; static const struct attribute_group pma_group_ext = { .name = "counters", .attrs = pma_attrs_ext }; static const struct attribute_group pma_group_noietf = { .name = "counters", .attrs = pma_attrs_noietf }; static void ib_port_release(struct kobject *kobj) { struct ib_port *port = container_of(kobj, struct ib_port, kobj); int i; for (i = 0; i != ARRAY_SIZE(port->groups); i++) kfree(port->groups[i].attrs); if (port->hw_stats_data) rdma_free_hw_stats_struct(port->hw_stats_data->stats); kfree(port->hw_stats_data); kvfree(port); } static void ib_port_gid_attr_release(struct kobject *kobj) { struct gid_attr_group *gid_attr_group = container_of(kobj, struct gid_attr_group, kobj); int i; for (i = 0; i != ARRAY_SIZE(gid_attr_group->groups); i++) kfree(gid_attr_group->groups[i].attrs); kfree(gid_attr_group); } static struct kobj_type port_type = { .release = ib_port_release, .sysfs_ops = &port_sysfs_ops, .default_groups = port_default_groups, }; static struct kobj_type gid_attr_type = { .sysfs_ops = &gid_attr_sysfs_ops, .release = ib_port_gid_attr_release }; /* * Figure out which counter table to use depending on * the device capabilities. 
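 * The PMA ClassPortInfo CapabilityMask decides this: the extended
 * 64-bit counters when IB_PMA_CLASS_CAP_EXT_WIDTH is set, the extended
 * set without the IETF counters for IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF,
 * otherwise the basic 32-bit counters.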
*/ static const struct attribute_group *get_counter_table(struct ib_device *dev, int port_num) { struct ib_class_port_info cpi; if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO, &cpi, 40, sizeof(cpi)) >= 0) { if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH) /* We have extended counters */ return &pma_group_ext; if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF) /* But not the IETF ones */ return &pma_group_noietf; } /* Fall back to normal counters */ return &pma_group; } static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats, u32 port_num, int index) { int ret; if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan)) return 0; ret = dev->ops.get_hw_stats(dev, stats, port_num, index); if (ret < 0) return ret; if (ret == stats->num_counters) stats->timestamp = jiffies; return 0; } static int print_hw_stat(struct ib_device *dev, int port_num, struct rdma_hw_stats *stats, int index, char *buf) { u64 v = rdma_counter_get_hwstat_value(dev, port_num, index); return sysfs_emit(buf, "%llu\n", stats->value[index] + v); } static ssize_t show_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, unsigned int index, unsigned int port_num, char *buf) { int ret; mutex_lock(&stats->lock); ret = update_hw_stats(ibdev, stats, port_num, index); if (ret) goto unlock; ret = print_hw_stat(ibdev, port_num, stats, index, buf); unlock: mutex_unlock(&stats->lock); return ret; } static ssize_t show_stats_lifespan(struct ib_device *ibdev, struct rdma_hw_stats *stats, unsigned int index, unsigned int port_num, char *buf) { int msecs; mutex_lock(&stats->lock); msecs = jiffies_to_msecs(stats->lifespan); mutex_unlock(&stats->lock); return sysfs_emit(buf, "%d\n", msecs); } static ssize_t set_stats_lifespan(struct ib_device *ibdev, struct rdma_hw_stats *stats, unsigned int index, unsigned int port_num, const char *buf, size_t count) { int msecs; int jiffies; int ret; ret = kstrtoint(buf, 10, &msecs); if (ret) return ret; if (msecs < 0 || msecs > 10000) return -EINVAL; jiffies = msecs_to_jiffies(msecs); mutex_lock(&stats->lock); stats->lifespan = jiffies; mutex_unlock(&stats->lock); return count; } static struct hw_stats_device_data * alloc_hw_stats_device(struct ib_device *ibdev) { struct hw_stats_device_data *data; struct rdma_hw_stats *stats; if (!ibdev->ops.alloc_hw_device_stats) return ERR_PTR(-EOPNOTSUPP); stats = ibdev->ops.alloc_hw_device_stats(ibdev); if (!stats) return ERR_PTR(-ENOMEM); if (!stats->descs || stats->num_counters <= 0) goto err_free_stats; /* * Two extra attribue elements here, one for the lifespan entry and * one to NULL terminate the list for the sysfs core code */ data = kzalloc(struct_size(data, attrs, stats->num_counters + 1), GFP_KERNEL); if (!data) goto err_free_stats; data->group.attrs = kcalloc(stats->num_counters + 2, sizeof(*data->group.attrs), GFP_KERNEL); if (!data->group.attrs) goto err_free_data; data->group.name = "hw_counters"; data->stats = stats; return data; err_free_data: kfree(data); err_free_stats: rdma_free_hw_stats_struct(stats); return ERR_PTR(-ENOMEM); } void ib_device_release_hw_stats(struct hw_stats_device_data *data) { kfree(data->group.attrs); rdma_free_hw_stats_struct(data->stats); kfree(data); } int ib_setup_device_attrs(struct ib_device *ibdev) { struct hw_stats_device_attribute *attr; struct hw_stats_device_data *data; bool opstat_skipped = false; int i, ret, pos = 0; data = alloc_hw_stats_device(ibdev); if (IS_ERR(data)) { if (PTR_ERR(data) == -EOPNOTSUPP) return 0; return PTR_ERR(data); } 
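/*
 * Read the full counter set once up front so the sysfs files start
 * from a complete, timestamped snapshot; optional counters are skipped
 * when the attribute list is built below.
 */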
ibdev->hw_stats_data = data; ret = ibdev->ops.get_hw_stats(ibdev, data->stats, 0, data->stats->num_counters); if (ret != data->stats->num_counters) { if (WARN_ON(ret >= 0)) return -EINVAL; return ret; } data->stats->timestamp = jiffies; for (i = 0; i < data->stats->num_counters; i++) { if (data->stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) { opstat_skipped = true; continue; } WARN_ON(opstat_skipped); attr = &data->attrs[pos]; sysfs_attr_init(&attr->attr.attr); attr->attr.attr.name = data->stats->descs[i].name; attr->attr.attr.mode = 0444; attr->attr.show = hw_stat_device_show; attr->show = show_hw_stats; data->group.attrs[pos] = &attr->attr.attr; pos++; } attr = &data->attrs[pos]; sysfs_attr_init(&attr->attr.attr); attr->attr.attr.name = "lifespan"; attr->attr.attr.mode = 0644; attr->attr.show = hw_stat_device_show; attr->show = show_stats_lifespan; attr->attr.store = hw_stat_device_store; attr->store = set_stats_lifespan; data->group.attrs[pos] = &attr->attr.attr; for (i = 0; i != ARRAY_SIZE(ibdev->groups); i++) if (!ibdev->groups[i]) { ibdev->groups[i] = &data->group; return 0; } WARN(true, "struct ib_device->groups is too small"); return -EINVAL; } static struct hw_stats_port_data * alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group) { struct ib_device *ibdev = port->ibdev; struct hw_stats_port_data *data; struct rdma_hw_stats *stats; if (!ibdev->ops.alloc_hw_port_stats) return ERR_PTR(-EOPNOTSUPP); stats = ibdev->ops.alloc_hw_port_stats(port->ibdev, port->port_num); if (!stats) return ERR_PTR(-ENOMEM); if (!stats->descs || stats->num_counters <= 0) goto err_free_stats; /* * Two extra attribue elements here, one for the lifespan entry and * one to NULL terminate the list for the sysfs core code */ data = kzalloc(struct_size(data, attrs, stats->num_counters + 1), GFP_KERNEL); if (!data) goto err_free_stats; group->attrs = kcalloc(stats->num_counters + 2, sizeof(*group->attrs), GFP_KERNEL); if (!group->attrs) goto err_free_data; group->name = "hw_counters"; data->stats = stats; return data; err_free_data: kfree(data); err_free_stats: rdma_free_hw_stats_struct(stats); return ERR_PTR(-ENOMEM); } static int setup_hw_port_stats(struct ib_port *port, struct attribute_group *group) { struct hw_stats_port_attribute *attr; struct hw_stats_port_data *data; bool opstat_skipped = false; int i, ret, pos = 0; data = alloc_hw_stats_port(port, group); if (IS_ERR(data)) return PTR_ERR(data); ret = port->ibdev->ops.get_hw_stats(port->ibdev, data->stats, port->port_num, data->stats->num_counters); if (ret != data->stats->num_counters) { if (WARN_ON(ret >= 0)) return -EINVAL; return ret; } data->stats->timestamp = jiffies; for (i = 0; i < data->stats->num_counters; i++) { if (data->stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) { opstat_skipped = true; continue; } WARN_ON(opstat_skipped); attr = &data->attrs[pos]; sysfs_attr_init(&attr->attr.attr); attr->attr.attr.name = data->stats->descs[i].name; attr->attr.attr.mode = 0444; attr->attr.show = hw_stat_port_show; attr->show = show_hw_stats; group->attrs[pos] = &attr->attr.attr; pos++; } attr = &data->attrs[pos]; sysfs_attr_init(&attr->attr.attr); attr->attr.attr.name = "lifespan"; attr->attr.attr.mode = 0644; attr->attr.show = hw_stat_port_show; attr->show = show_stats_lifespan; attr->attr.store = hw_stat_port_store; attr->store = set_stats_lifespan; group->attrs[pos] = &attr->attr.attr; port->hw_stats_data = data; return 0; } struct rdma_hw_stats *ib_get_hw_stats_port(struct ib_device *ibdev, u32 port_num) { if (!ibdev->port_data || 
!rdma_is_port_valid(ibdev, port_num) || !ibdev->port_data[port_num].sysfs->hw_stats_data) return NULL; return ibdev->port_data[port_num].sysfs->hw_stats_data->stats; } static int alloc_port_table_group(const char *name, struct attribute_group *group, struct port_table_attribute *attrs, size_t num, ssize_t (*show)(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *, char *buf)) { struct attribute **attr_list; int i; attr_list = kcalloc(num + 1, sizeof(*attr_list), GFP_KERNEL); if (!attr_list) return -ENOMEM; for (i = 0; i < num; i++) { struct port_table_attribute *element = &attrs[i]; if (snprintf(element->name, sizeof(element->name), "%d", i) >= sizeof(element->name)) goto err; sysfs_attr_init(&element->attr.attr); element->attr.attr.name = element->name; element->attr.attr.mode = 0444; element->attr.show = show; element->index = i; attr_list[i] = &element->attr.attr; } group->name = name; group->attrs = attr_list; return 0; err: kfree(attr_list); return -EINVAL; } /* * Create the sysfs: * ibp0s9/ports/XX/gid_attrs/{ndevs,types}/YYY * YYY is the gid table index in decimal */ static int setup_gid_attrs(struct ib_port *port, const struct ib_port_attr *attr) { struct gid_attr_group *gid_attr_group; int ret; gid_attr_group = kzalloc(struct_size(gid_attr_group, attrs_list, attr->gid_tbl_len * 2), GFP_KERNEL); if (!gid_attr_group) return -ENOMEM; gid_attr_group->port = port; kobject_init(&gid_attr_group->kobj, &gid_attr_type); ret = alloc_port_table_group("ndevs", &gid_attr_group->groups[0], gid_attr_group->attrs_list, attr->gid_tbl_len, show_port_gid_attr_ndev); if (ret) goto err_put; gid_attr_group->groups_list[0] = &gid_attr_group->groups[0]; ret = alloc_port_table_group( "types", &gid_attr_group->groups[1], gid_attr_group->attrs_list + attr->gid_tbl_len, attr->gid_tbl_len, show_port_gid_attr_gid_type); if (ret) goto err_put; gid_attr_group->groups_list[1] = &gid_attr_group->groups[1]; ret = kobject_add(&gid_attr_group->kobj, &port->kobj, "gid_attrs"); if (ret) goto err_put; ret = sysfs_create_groups(&gid_attr_group->kobj, gid_attr_group->groups_list); if (ret) goto err_del; port->gid_attr_group = gid_attr_group; return 0; err_del: kobject_del(&gid_attr_group->kobj); err_put: kobject_put(&gid_attr_group->kobj); return ret; } static void destroy_gid_attrs(struct ib_port *port) { struct gid_attr_group *gid_attr_group = port->gid_attr_group; if (!gid_attr_group) return; sysfs_remove_groups(&gid_attr_group->kobj, gid_attr_group->groups_list); kobject_del(&gid_attr_group->kobj); kobject_put(&gid_attr_group->kobj); } /* * Create the sysfs: * ibp0s9/ports/XX/{gids,pkeys,counters}/YYY */ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num, const struct ib_port_attr *attr) { struct ib_device *device = rdma_device_to_ibdev(&coredev->dev); bool is_full_dev = &device->coredev == coredev; const struct attribute_group **cur_group; struct ib_port *p; int ret; p = kvzalloc(struct_size(p, attrs_list, attr->gid_tbl_len + attr->pkey_tbl_len), GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); p->ibdev = device; p->port_num = port_num; kobject_init(&p->kobj, &port_type); if (device->port_data && is_full_dev) device->port_data[port_num].sysfs = p; cur_group = p->groups_list; ret = alloc_port_table_group("gids", &p->groups[0], p->attrs_list, attr->gid_tbl_len, show_port_gid); if (ret) goto err_put; *cur_group++ = &p->groups[0]; if (attr->pkey_tbl_len) { ret = alloc_port_table_group("pkeys", &p->groups[1], p->attrs_list + attr->gid_tbl_len, attr->pkey_tbl_len, show_port_pkey); if 
(ret) goto err_put; *cur_group++ = &p->groups[1]; } /* * If port == 0, it means hw_counters are per device and not per * port, so holder should be device. Therefore skip per port * counter initialization. */ if (port_num && is_full_dev) { ret = setup_hw_port_stats(p, &p->groups[2]); if (ret && ret != -EOPNOTSUPP) goto err_put; if (!ret) *cur_group++ = &p->groups[2]; } if (device->ops.process_mad && is_full_dev) *cur_group++ = get_counter_table(device, port_num); ret = kobject_add(&p->kobj, coredev->ports_kobj, "%d", port_num); if (ret) goto err_put; ret = sysfs_create_groups(&p->kobj, p->groups_list); if (ret) goto err_del; if (is_full_dev) { ret = sysfs_create_groups(&p->kobj, device->ops.port_groups); if (ret) goto err_groups; } list_add_tail(&p->kobj.entry, &coredev->port_list); return p; err_groups: sysfs_remove_groups(&p->kobj, p->groups_list); err_del: kobject_del(&p->kobj); err_put: if (device->port_data && is_full_dev) device->port_data[port_num].sysfs = NULL; kobject_put(&p->kobj); return ERR_PTR(ret); } static void destroy_port(struct ib_core_device *coredev, struct ib_port *port) { bool is_full_dev = &port->ibdev->coredev == coredev; list_del(&port->kobj.entry); if (is_full_dev) sysfs_remove_groups(&port->kobj, port->ibdev->ops.port_groups); sysfs_remove_groups(&port->kobj, port->groups_list); kobject_del(&port->kobj); if (port->ibdev->port_data && port->ibdev->port_data[port->port_num].sysfs == port) port->ibdev->port_data[port->port_num].sysfs = NULL; kobject_put(&port->kobj); } static const char *node_type_string(int node_type) { switch (node_type) { case RDMA_NODE_IB_CA: return "CA"; case RDMA_NODE_IB_SWITCH: return "switch"; case RDMA_NODE_IB_ROUTER: return "router"; case RDMA_NODE_RNIC: return "RNIC"; case RDMA_NODE_USNIC: return "usNIC"; case RDMA_NODE_USNIC_UDP: return "usNIC UDP"; case RDMA_NODE_UNSPECIFIED: return "unspecified"; } return "<unknown>"; } static ssize_t node_type_show(struct device *device, struct device_attribute *attr, char *buf) { struct ib_device *dev = rdma_device_to_ibdev(device); return sysfs_emit(buf, "%u: %s\n", dev->node_type, node_type_string(dev->node_type)); } static DEVICE_ATTR_RO(node_type); static ssize_t sys_image_guid_show(struct device *device, struct device_attribute *dev_attr, char *buf) { struct ib_device *dev = rdma_device_to_ibdev(device); __be16 *guid = (__be16 *)&dev->attrs.sys_image_guid; return sysfs_emit(buf, "%04x:%04x:%04x:%04x\n", be16_to_cpu(guid[0]), be16_to_cpu(guid[1]), be16_to_cpu(guid[2]), be16_to_cpu(guid[3])); } static DEVICE_ATTR_RO(sys_image_guid); static ssize_t node_guid_show(struct device *device, struct device_attribute *attr, char *buf) { struct ib_device *dev = rdma_device_to_ibdev(device); __be16 *node_guid = (__be16 *)&dev->node_guid; return sysfs_emit(buf, "%04x:%04x:%04x:%04x\n", be16_to_cpu(node_guid[0]), be16_to_cpu(node_guid[1]), be16_to_cpu(node_guid[2]), be16_to_cpu(node_guid[3])); } static DEVICE_ATTR_RO(node_guid); static ssize_t node_desc_show(struct device *device, struct device_attribute *attr, char *buf) { struct ib_device *dev = rdma_device_to_ibdev(device); return sysfs_emit(buf, "%.64s\n", dev->node_desc); } static ssize_t node_desc_store(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { struct ib_device *dev = rdma_device_to_ibdev(device); struct ib_device_modify desc = {}; int ret; if (!dev->ops.modify_device) return -EOPNOTSUPP; memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX)); ret = ib_modify_device(dev, 
IB_DEVICE_MODIFY_NODE_DESC, &desc); if (ret) return ret; return count; } static DEVICE_ATTR_RW(node_desc); static ssize_t fw_ver_show(struct device *device, struct device_attribute *attr, char *buf) { struct ib_device *dev = rdma_device_to_ibdev(device); char version[IB_FW_VERSION_NAME_MAX] = {}; ib_get_device_fw_str(dev, version); return sysfs_emit(buf, "%s\n", version); } static DEVICE_ATTR_RO(fw_ver); static struct attribute *ib_dev_attrs[] = { &dev_attr_node_type.attr, &dev_attr_node_guid.attr, &dev_attr_sys_image_guid.attr, &dev_attr_fw_ver.attr, &dev_attr_node_desc.attr, NULL, }; const struct attribute_group ib_dev_attr_group = { .attrs = ib_dev_attrs, }; void ib_free_port_attrs(struct ib_core_device *coredev) { struct kobject *p, *t; list_for_each_entry_safe(p, t, &coredev->port_list, entry) { struct ib_port *port = container_of(p, struct ib_port, kobj); destroy_gid_attrs(port); destroy_port(coredev, port); } kobject_put(coredev->ports_kobj); } int ib_setup_port_attrs(struct ib_core_device *coredev) { struct ib_device *device = rdma_device_to_ibdev(&coredev->dev); u32 port_num; int ret; coredev->ports_kobj = kobject_create_and_add("ports", &coredev->dev.kobj); if (!coredev->ports_kobj) return -ENOMEM; rdma_for_each_port (device, port_num) { struct ib_port_attr attr; struct ib_port *port; ret = ib_query_port(device, port_num, &attr); if (ret) goto err_put; port = setup_port(coredev, port_num, &attr); if (IS_ERR(port)) { ret = PTR_ERR(port); goto err_put; } ret = setup_gid_attrs(port, &attr); if (ret) goto err_put; } return 0; err_put: ib_free_port_attrs(coredev); return ret; } /** * ib_port_register_client_groups - Add an ib_client's attributes to the port * * @ibdev: IB device to add counters * @port_num: valid port number * @groups: Group list of attributes * * Do not use. Only for legacy sysfs compatibility. */ int ib_port_register_client_groups(struct ib_device *ibdev, u32 port_num, const struct attribute_group **groups) { return sysfs_create_groups(&ibdev->port_data[port_num].sysfs->kobj, groups); } EXPORT_SYMBOL(ib_port_register_client_groups); void ib_port_unregister_client_groups(struct ib_device *ibdev, u32 port_num, const struct attribute_group **groups) { return sysfs_remove_groups(&ibdev->port_data[port_num].sysfs->kobj, groups); } EXPORT_SYMBOL(ib_port_unregister_client_groups);
linux-master
drivers/infiniband/core/sysfs.c
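In sysfs.c above, every hw_counters attribute read goes through update_hw_stats(), which only calls the driver's get_hw_stats() hook when the cached snapshot is older than the per-structure lifespan; set_stats_lifespan() lets userspace tune that window between 0 and 10000 ms through the "lifespan" file. Below is a minimal userspace C sketch of that refresh-on-expiry caching pattern; the names hw_stats_cache and fetch_from_hw are illustrative stand-ins, not kernel APIs.

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Illustrative stand-ins -- not kernel structures or APIs. */
struct hw_stats_cache {
	uint64_t value;        /* cached counter value              */
	int64_t timestamp_ms;  /* when the cache was last refreshed */
	int64_t lifespan_ms;   /* how long a snapshot stays valid   */
};

static int64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Pretend driver callback: in the kernel this role is played by ops.get_hw_stats(). */
static uint64_t fetch_from_hw(void)
{
	static uint64_t hw_counter;

	return ++hw_counter;
}

/* Refresh only when the cached snapshot has outlived its lifespan,
 * mirroring the time_is_after_eq_jiffies() check in update_hw_stats(). */
static uint64_t read_counter(struct hw_stats_cache *c)
{
	int64_t now = now_ms();

	if (now - c->timestamp_ms >= c->lifespan_ms) {
		c->value = fetch_from_hw();
		c->timestamp_ms = now;
	}
	return c->value;
}

int main(void)
{
	struct hw_stats_cache c = { .lifespan_ms = 10 };

	printf("%llu\n", (unsigned long long)read_counter(&c));
	printf("%llu\n", (unsigned long long)read_counter(&c)); /* served from cache */
	return 0;
}

Because the lifespan lives in the shared rdma_hw_stats structure, all counters exposed in one hw_counters directory share a single refresh window.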
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. */ #include <linux/overflow.h> #include <rdma/uverbs_std_types.h> #include "rdma_core.h" #include "uverbs.h" #include <rdma/uverbs_ioctl.h> #include <rdma/opa_addr.h> #include <rdma/ib_cache.h> /* * This ioctl method allows calling any defined write or write_ex * handler. This essentially replaces the hdr/ex_hdr system with the ioctl * marshalling, and brings the non-ex path into the same marshalling as the ex * path. */ static int UVERBS_HANDLER(UVERBS_METHOD_INVOKE_WRITE)( struct uverbs_attr_bundle *attrs) { struct uverbs_api *uapi = attrs->ufile->device->uapi; const struct uverbs_api_write_method *method_elm; u32 cmd; int rc; rc = uverbs_get_const(&cmd, attrs, UVERBS_ATTR_WRITE_CMD); if (rc) return rc; method_elm = uapi_get_method(uapi, cmd); if (IS_ERR(method_elm)) return PTR_ERR(method_elm); uverbs_fill_udata(attrs, &attrs->ucore, UVERBS_ATTR_CORE_IN, UVERBS_ATTR_CORE_OUT); if (attrs->ucore.inlen < method_elm->req_size || attrs->ucore.outlen < method_elm->resp_size) return -ENOSPC; attrs->uobject = NULL; rc = method_elm->handler(attrs); if (attrs->uobject) uverbs_finalize_object(attrs->uobject, UVERBS_ACCESS_NEW, true, !rc, attrs); return rc; } DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_INVOKE_WRITE, UVERBS_ATTR_CONST_IN(UVERBS_ATTR_WRITE_CMD, enum ib_uverbs_write_cmds, UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CORE_IN, UVERBS_ATTR_MIN_SIZE(sizeof(u32)), UA_OPTIONAL), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CORE_OUT, UVERBS_ATTR_MIN_SIZE(0), UA_OPTIONAL), UVERBS_ATTR_UHW()); static uint32_t * gather_objects_handle(struct ib_uverbs_file *ufile, const struct uverbs_api_object *uapi_object, struct uverbs_attr_bundle *attrs, ssize_t out_len, u64 *total) { u64 max_count = out_len / sizeof(u32); struct ib_uobject *obj; u64 count = 0; u32 *handles; /* Allocated memory that cannot page out where we gather * all object ids under a spin_lock. 
*/ handles = uverbs_zalloc(attrs, out_len); if (IS_ERR(handles)) return handles; spin_lock_irq(&ufile->uobjects_lock); list_for_each_entry(obj, &ufile->uobjects, list) { u32 obj_id = obj->id; if (obj->uapi_object != uapi_object) continue; if (count >= max_count) break; handles[count] = obj_id; count++; } spin_unlock_irq(&ufile->uobjects_lock); *total = count; return handles; } static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)( struct uverbs_attr_bundle *attrs) { const struct uverbs_api_object *uapi_object; ssize_t out_len; u64 total = 0; u16 object_id; u32 *handles; int ret; out_len = uverbs_attr_get_len(attrs, UVERBS_ATTR_INFO_HANDLES_LIST); if (out_len <= 0 || (out_len % sizeof(u32) != 0)) return -EINVAL; ret = uverbs_get_const(&object_id, attrs, UVERBS_ATTR_INFO_OBJECT_ID); if (ret) return ret; uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id); if (IS_ERR(uapi_object)) return PTR_ERR(uapi_object); handles = gather_objects_handle(attrs->ufile, uapi_object, attrs, out_len, &total); if (IS_ERR(handles)) return PTR_ERR(handles); ret = uverbs_copy_to(attrs, UVERBS_ATTR_INFO_HANDLES_LIST, handles, sizeof(u32) * total); if (ret) goto err; ret = uverbs_copy_to(attrs, UVERBS_ATTR_INFO_TOTAL_HANDLES, &total, sizeof(total)); err: return ret; } void copy_port_attr_to_resp(struct ib_port_attr *attr, struct ib_uverbs_query_port_resp *resp, struct ib_device *ib_dev, u8 port_num) { resp->state = attr->state; resp->max_mtu = attr->max_mtu; resp->active_mtu = attr->active_mtu; resp->gid_tbl_len = attr->gid_tbl_len; resp->port_cap_flags = make_port_cap_flags(attr); resp->max_msg_sz = attr->max_msg_sz; resp->bad_pkey_cntr = attr->bad_pkey_cntr; resp->qkey_viol_cntr = attr->qkey_viol_cntr; resp->pkey_tbl_len = attr->pkey_tbl_len; if (rdma_is_grh_required(ib_dev, port_num)) resp->flags |= IB_UVERBS_QPF_GRH_REQUIRED; if (rdma_cap_opa_ah(ib_dev, port_num)) { resp->lid = OPA_TO_IB_UCAST_LID(attr->lid); resp->sm_lid = OPA_TO_IB_UCAST_LID(attr->sm_lid); } else { resp->lid = ib_lid_cpu16(attr->lid); resp->sm_lid = ib_lid_cpu16(attr->sm_lid); } resp->lmc = attr->lmc; resp->max_vl_num = attr->max_vl_num; resp->sm_sl = attr->sm_sl; resp->subnet_timeout = attr->subnet_timeout; resp->init_type_reply = attr->init_type_reply; resp->active_width = attr->active_width; /* This ABI needs to be extended to provide any speed more than IB_SPEED_NDR */ resp->active_speed = min_t(u16, attr->active_speed, IB_SPEED_NDR); resp->phys_state = attr->phys_state; resp->link_layer = rdma_port_get_link_layer(ib_dev, port_num); } static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)( struct uverbs_attr_bundle *attrs) { struct ib_device *ib_dev; struct ib_port_attr attr = {}; struct ib_uverbs_query_port_resp_ex resp = {}; struct ib_ucontext *ucontext; int ret; u8 port_num; ucontext = ib_uverbs_get_ucontext(attrs); if (IS_ERR(ucontext)) return PTR_ERR(ucontext); ib_dev = ucontext->device; /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. 
*/ if (!ib_dev->ops.query_port) return -EOPNOTSUPP; ret = uverbs_get_const(&port_num, attrs, UVERBS_ATTR_QUERY_PORT_PORT_NUM); if (ret) return ret; ret = ib_query_port(ib_dev, port_num, &attr); if (ret) return ret; copy_port_attr_to_resp(&attr, &resp.legacy_resp, ib_dev, port_num); resp.port_cap_flags2 = attr.port_cap_flags2; return uverbs_copy_to_struct_or_zero(attrs, UVERBS_ATTR_QUERY_PORT_RESP, &resp, sizeof(resp)); } static int UVERBS_HANDLER(UVERBS_METHOD_GET_CONTEXT)( struct uverbs_attr_bundle *attrs) { u32 num_comp = attrs->ufile->device->num_comp_vectors; u64 core_support = IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS; int ret; ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS, &num_comp, sizeof(num_comp)); if (IS_UVERBS_COPY_ERR(ret)) return ret; ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT, &core_support, sizeof(core_support)); if (IS_UVERBS_COPY_ERR(ret)) return ret; ret = ib_alloc_ucontext(attrs); if (ret) return ret; ret = ib_init_ucontext(attrs); if (ret) { kfree(attrs->context); attrs->context = NULL; return ret; } return 0; } static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_CONTEXT)( struct uverbs_attr_bundle *attrs) { u64 core_support = IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS; struct ib_ucontext *ucontext; struct ib_device *ib_dev; u32 num_comp; int ret; ucontext = ib_uverbs_get_ucontext(attrs); if (IS_ERR(ucontext)) return PTR_ERR(ucontext); ib_dev = ucontext->device; if (!ib_dev->ops.query_ucontext) return -EOPNOTSUPP; num_comp = attrs->ufile->device->num_comp_vectors; ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS, &num_comp, sizeof(num_comp)); if (IS_UVERBS_COPY_ERR(ret)) return ret; ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT, &core_support, sizeof(core_support)); if (IS_UVERBS_COPY_ERR(ret)) return ret; return ucontext->device->ops.query_ucontext(ucontext, attrs); } static int copy_gid_entries_to_user(struct uverbs_attr_bundle *attrs, struct ib_uverbs_gid_entry *entries, size_t num_entries, size_t user_entry_size) { const struct uverbs_attr *attr; void __user *user_entries; size_t copy_len; int ret; int i; if (user_entry_size == sizeof(*entries)) { ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES, entries, sizeof(*entries) * num_entries); return ret; } copy_len = min_t(size_t, user_entry_size, sizeof(*entries)); attr = uverbs_attr_get(attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES); if (IS_ERR(attr)) return PTR_ERR(attr); user_entries = u64_to_user_ptr(attr->ptr_attr.data); for (i = 0; i < num_entries; i++) { if (copy_to_user(user_entries, entries, copy_len)) return -EFAULT; if (user_entry_size > sizeof(*entries)) { if (clear_user(user_entries + sizeof(*entries), user_entry_size - sizeof(*entries))) return -EFAULT; } entries++; user_entries += user_entry_size; } return uverbs_output_written(attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES); } static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)( struct uverbs_attr_bundle *attrs) { struct ib_uverbs_gid_entry *entries; struct ib_ucontext *ucontext; struct ib_device *ib_dev; size_t user_entry_size; ssize_t num_entries; int max_entries; u32 flags; int ret; ret = uverbs_get_flags32(&flags, attrs, UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, 0); if (ret) return ret; ret = uverbs_get_const(&user_entry_size, attrs, UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE); if (ret) return ret; if (!user_entry_size) return -EINVAL; max_entries = uverbs_attr_ptr_get_array_size( attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES, user_entry_size); if 
(max_entries <= 0) return max_entries ?: -EINVAL; ucontext = ib_uverbs_get_ucontext(attrs); if (IS_ERR(ucontext)) return PTR_ERR(ucontext); ib_dev = ucontext->device; entries = uverbs_kcalloc(attrs, max_entries, sizeof(*entries)); if (IS_ERR(entries)) return PTR_ERR(entries); num_entries = rdma_query_gid_table(ib_dev, entries, max_entries); if (num_entries < 0) return -EINVAL; ret = copy_gid_entries_to_user(attrs, entries, num_entries, user_entry_size); if (ret) return ret; ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES, &num_entries, sizeof(num_entries)); return ret; } static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_ENTRY)( struct uverbs_attr_bundle *attrs) { struct ib_uverbs_gid_entry entry = {}; const struct ib_gid_attr *gid_attr; struct ib_ucontext *ucontext; struct ib_device *ib_dev; struct net_device *ndev; u32 gid_index; u32 port_num; u32 flags; int ret; ret = uverbs_get_flags32(&flags, attrs, UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, 0); if (ret) return ret; ret = uverbs_get_const(&port_num, attrs, UVERBS_ATTR_QUERY_GID_ENTRY_PORT); if (ret) return ret; ret = uverbs_get_const(&gid_index, attrs, UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX); if (ret) return ret; ucontext = ib_uverbs_get_ucontext(attrs); if (IS_ERR(ucontext)) return PTR_ERR(ucontext); ib_dev = ucontext->device; if (!rdma_is_port_valid(ib_dev, port_num)) return -EINVAL; gid_attr = rdma_get_gid_attr(ib_dev, port_num, gid_index); if (IS_ERR(gid_attr)) return PTR_ERR(gid_attr); memcpy(&entry.gid, &gid_attr->gid, sizeof(gid_attr->gid)); entry.gid_index = gid_attr->index; entry.port_num = gid_attr->port_num; entry.gid_type = gid_attr->gid_type; rcu_read_lock(); ndev = rdma_read_gid_attr_ndev_rcu(gid_attr); if (IS_ERR(ndev)) { if (PTR_ERR(ndev) != -ENODEV) { ret = PTR_ERR(ndev); rcu_read_unlock(); goto out; } } else { entry.netdev_ifindex = ndev->ifindex; } rcu_read_unlock(); ret = uverbs_copy_to_struct_or_zero( attrs, UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY, &entry, sizeof(entry)); out: rdma_put_gid_attr(gid_attr); return ret; } DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_GET_CONTEXT, UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS, UVERBS_ATTR_TYPE(u32), UA_OPTIONAL), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT, UVERBS_ATTR_TYPE(u64), UA_OPTIONAL), UVERBS_ATTR_UHW()); DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_QUERY_CONTEXT, UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS, UVERBS_ATTR_TYPE(u32), UA_OPTIONAL), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT, UVERBS_ATTR_TYPE(u64), UA_OPTIONAL)); DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_INFO_HANDLES, /* Also includes any device specific object ids */ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_INFO_OBJECT_ID, enum uverbs_default_objects, UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_INFO_TOTAL_HANDLES, UVERBS_ATTR_TYPE(u32), UA_OPTIONAL), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_INFO_HANDLES_LIST, UVERBS_ATTR_MIN_SIZE(sizeof(u32)), UA_OPTIONAL)); DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_QUERY_PORT, UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_PORT_PORT_NUM, u8, UA_MANDATORY), UVERBS_ATTR_PTR_OUT( UVERBS_ATTR_QUERY_PORT_RESP, UVERBS_ATTR_STRUCT(struct ib_uverbs_query_port_resp_ex, reserved), UA_MANDATORY)); DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_QUERY_GID_TABLE, UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE, u64, UA_MANDATORY), UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, u32, UA_OPTIONAL), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES, UVERBS_ATTR_MIN_SIZE(0), UA_MANDATORY), 
UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES, UVERBS_ATTR_TYPE(u64), UA_MANDATORY)); DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_QUERY_GID_ENTRY, UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_ENTRY_PORT, u32, UA_MANDATORY), UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX, u32, UA_MANDATORY), UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, u32, UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY, UVERBS_ATTR_STRUCT(struct ib_uverbs_gid_entry, netdev_ifindex), UA_MANDATORY)); DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE, &UVERBS_METHOD(UVERBS_METHOD_GET_CONTEXT), &UVERBS_METHOD(UVERBS_METHOD_INVOKE_WRITE), &UVERBS_METHOD(UVERBS_METHOD_INFO_HANDLES), &UVERBS_METHOD(UVERBS_METHOD_QUERY_PORT), &UVERBS_METHOD(UVERBS_METHOD_QUERY_CONTEXT), &UVERBS_METHOD(UVERBS_METHOD_QUERY_GID_TABLE), &UVERBS_METHOD(UVERBS_METHOD_QUERY_GID_ENTRY)); const struct uapi_definition uverbs_def_obj_device[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DEVICE), {}, };
linux-master
drivers/infiniband/core/uverbs_std_types_device.c
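copy_gid_entries_to_user() in uverbs_std_types_device.c above has to tolerate a userspace entry size that differs from the kernel's struct ib_uverbs_gid_entry: each element is copied with the smaller of the two sizes, and any extra bytes the caller reserved are cleared so newer userspace sees zeroed rather than stale fields. The sketch below is a minimal userspace model of that per-element truncate-or-zero-extend copy, using memcpy()/memset() in place of copy_to_user()/clear_user(); the entry_v1 layout and function names are assumptions for illustration only.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Illustrative "kernel-side" entry; the real one is struct ib_uverbs_gid_entry. */
struct entry_v1 {
	uint64_t gid_hi;
	uint64_t gid_lo;
	uint32_t gid_index;
	uint32_t port_num;
};

/*
 * Copy num entries into a caller buffer whose per-entry stride may differ
 * from sizeof(struct entry_v1).  Shorter strides truncate each entry;
 * longer strides get their tail zeroed, the same ABI-extension pattern
 * copy_gid_entries_to_user() implements with copy_to_user() + clear_user().
 */
static void copy_entries(void *dst, size_t dst_stride,
			 const struct entry_v1 *src, size_t num)
{
	size_t copy_len = dst_stride < sizeof(*src) ? dst_stride : sizeof(*src);
	unsigned char *out = dst;
	size_t i;

	for (i = 0; i < num; i++) {
		memcpy(out, &src[i], copy_len);
		if (dst_stride > sizeof(*src))
			memset(out + sizeof(*src), 0,
			       dst_stride - sizeof(*src));
		out += dst_stride;
	}
}

int main(void)
{
	struct entry_v1 src[2] = {
		{ .gid_index = 0, .port_num = 1 },
		{ .gid_index = 1, .port_num = 1 },
	};
	unsigned char user_buf[2 * 32];	/* caller advertises 32-byte entries */

	copy_entries(user_buf, 32, src, 2);
	/* Bytes past sizeof(struct entry_v1) were zero-filled, not left stale. */
	printf("trailing byte after entry 0: %u\n", user_buf[24]);
	return 0;
}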
/* * Copyright (c) 2006 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/random.h> #include <rdma/ib_cache.h> #include "sa.h" static int mcast_add_one(struct ib_device *device); static void mcast_remove_one(struct ib_device *device, void *client_data); static struct ib_client mcast_client = { .name = "ib_multicast", .add = mcast_add_one, .remove = mcast_remove_one }; static struct ib_sa_client sa_client; static struct workqueue_struct *mcast_wq; static union ib_gid mgid0; struct mcast_device; struct mcast_port { struct mcast_device *dev; spinlock_t lock; struct rb_root table; refcount_t refcount; struct completion comp; u32 port_num; }; struct mcast_device { struct ib_device *device; struct ib_event_handler event_handler; int start_port; int end_port; struct mcast_port port[]; }; enum mcast_state { MCAST_JOINING, MCAST_MEMBER, MCAST_ERROR, }; enum mcast_group_state { MCAST_IDLE, MCAST_BUSY, MCAST_GROUP_ERROR, MCAST_PKEY_EVENT }; enum { MCAST_INVALID_PKEY_INDEX = 0xFFFF }; struct mcast_member; struct mcast_group { struct ib_sa_mcmember_rec rec; struct rb_node node; struct mcast_port *port; spinlock_t lock; struct work_struct work; struct list_head pending_list; struct list_head active_list; struct mcast_member *last_join; int members[NUM_JOIN_MEMBERSHIP_TYPES]; atomic_t refcount; enum mcast_group_state state; struct ib_sa_query *query; u16 pkey_index; u8 leave_state; int retries; }; struct mcast_member { struct ib_sa_multicast multicast; struct ib_sa_client *client; struct mcast_group *group; struct list_head list; enum mcast_state state; refcount_t refcount; struct completion comp; }; static void join_handler(int status, struct ib_sa_mcmember_rec *rec, void *context); static void leave_handler(int status, struct ib_sa_mcmember_rec *rec, void *context); static struct mcast_group *mcast_find(struct mcast_port *port, union ib_gid *mgid) { struct rb_node *node = port->table.rb_node; struct mcast_group *group; int ret; while (node) { group = rb_entry(node, struct mcast_group, node); ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid); if 
(!ret) return group; if (ret < 0) node = node->rb_left; else node = node->rb_right; } return NULL; } static struct mcast_group *mcast_insert(struct mcast_port *port, struct mcast_group *group, int allow_duplicates) { struct rb_node **link = &port->table.rb_node; struct rb_node *parent = NULL; struct mcast_group *cur_group; int ret; while (*link) { parent = *link; cur_group = rb_entry(parent, struct mcast_group, node); ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw, sizeof group->rec.mgid); if (ret < 0) link = &(*link)->rb_left; else if (ret > 0) link = &(*link)->rb_right; else if (allow_duplicates) link = &(*link)->rb_left; else return cur_group; } rb_link_node(&group->node, parent, link); rb_insert_color(&group->node, &port->table); return NULL; } static void deref_port(struct mcast_port *port) { if (refcount_dec_and_test(&port->refcount)) complete(&port->comp); } static void release_group(struct mcast_group *group) { struct mcast_port *port = group->port; unsigned long flags; spin_lock_irqsave(&port->lock, flags); if (atomic_dec_and_test(&group->refcount)) { rb_erase(&group->node, &port->table); spin_unlock_irqrestore(&port->lock, flags); kfree(group); deref_port(port); } else spin_unlock_irqrestore(&port->lock, flags); } static void deref_member(struct mcast_member *member) { if (refcount_dec_and_test(&member->refcount)) complete(&member->comp); } static void queue_join(struct mcast_member *member) { struct mcast_group *group = member->group; unsigned long flags; spin_lock_irqsave(&group->lock, flags); list_add_tail(&member->list, &group->pending_list); if (group->state == MCAST_IDLE) { group->state = MCAST_BUSY; atomic_inc(&group->refcount); queue_work(mcast_wq, &group->work); } spin_unlock_irqrestore(&group->lock, flags); } /* * A multicast group has four types of members: full member, non member, * sendonly non member and sendonly full member. * We need to keep track of the number of members of each * type based on their join state. Adjust the number of members the belong to * the specified join states. */ static void adjust_membership(struct mcast_group *group, u8 join_state, int inc) { int i; for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1) if (join_state & 0x1) group->members[i] += inc; } /* * If a multicast group has zero members left for a particular join state, but * the group is still a member with the SA, we need to leave that join state. * Determine which join states we still belong to, but that do not have any * active members. 
*/ static u8 get_leave_state(struct mcast_group *group) { u8 leave_state = 0; int i; for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++) if (!group->members[i]) leave_state |= (0x1 << i); return leave_state & group->rec.join_state; } static int check_selector(ib_sa_comp_mask comp_mask, ib_sa_comp_mask selector_mask, ib_sa_comp_mask value_mask, u8 selector, u8 src_value, u8 dst_value) { int err; if (!(comp_mask & selector_mask) || !(comp_mask & value_mask)) return 0; switch (selector) { case IB_SA_GT: err = (src_value <= dst_value); break; case IB_SA_LT: err = (src_value >= dst_value); break; case IB_SA_EQ: err = (src_value != dst_value); break; default: err = 0; break; } return err; } static int cmp_rec(struct ib_sa_mcmember_rec *src, struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask) { /* MGID must already match */ if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID && memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid)) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid) return -EINVAL; if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR, IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector, src->mtu, dst->mtu)) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS && src->traffic_class != dst->traffic_class) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey) return -EINVAL; if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR, IB_SA_MCMEMBER_REC_RATE, dst->rate_selector, src->rate, dst->rate)) return -EINVAL; if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR, IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME, dst->packet_life_time_selector, src->packet_life_time, dst->packet_life_time)) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL && src->flow_label != dst->flow_label) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT && src->hop_limit != dst->hop_limit) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope) return -EINVAL; /* join_state checked separately, proxy_join ignored */ return 0; } static int send_join(struct mcast_group *group, struct mcast_member *member) { struct mcast_port *port = group->port; int ret; group->last_join = member; ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device, port->port_num, IB_MGMT_METHOD_SET, &member->multicast.rec, member->multicast.comp_mask, 3000, GFP_KERNEL, join_handler, group, &group->query); return (ret > 0) ? 0 : ret; } static int send_leave(struct mcast_group *group, u8 leave_state) { struct mcast_port *port = group->port; struct ib_sa_mcmember_rec rec; int ret; rec = group->rec; rec.join_state = leave_state; group->leave_state = leave_state; ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device, port->port_num, IB_SA_METHOD_DELETE, &rec, IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE, 3000, GFP_KERNEL, leave_handler, group, &group->query); return (ret > 0) ? 
0 : ret; } static void join_group(struct mcast_group *group, struct mcast_member *member, u8 join_state) { member->state = MCAST_MEMBER; adjust_membership(group, join_state, 1); group->rec.join_state |= join_state; member->multicast.rec = group->rec; member->multicast.rec.join_state = join_state; list_move(&member->list, &group->active_list); } static int fail_join(struct mcast_group *group, struct mcast_member *member, int status) { spin_lock_irq(&group->lock); list_del_init(&member->list); spin_unlock_irq(&group->lock); return member->multicast.callback(status, &member->multicast); } static void process_group_error(struct mcast_group *group) { struct mcast_member *member; int ret = 0; u16 pkey_index; if (group->state == MCAST_PKEY_EVENT) ret = ib_find_pkey(group->port->dev->device, group->port->port_num, be16_to_cpu(group->rec.pkey), &pkey_index); spin_lock_irq(&group->lock); if (group->state == MCAST_PKEY_EVENT && !ret && group->pkey_index == pkey_index) goto out; while (!list_empty(&group->active_list)) { member = list_entry(group->active_list.next, struct mcast_member, list); refcount_inc(&member->refcount); list_del_init(&member->list); adjust_membership(group, member->multicast.rec.join_state, -1); member->state = MCAST_ERROR; spin_unlock_irq(&group->lock); ret = member->multicast.callback(-ENETRESET, &member->multicast); deref_member(member); if (ret) ib_sa_free_multicast(&member->multicast); spin_lock_irq(&group->lock); } group->rec.join_state = 0; out: group->state = MCAST_BUSY; spin_unlock_irq(&group->lock); } static void mcast_work_handler(struct work_struct *work) { struct mcast_group *group; struct mcast_member *member; struct ib_sa_multicast *multicast; int status, ret; u8 join_state; group = container_of(work, typeof(*group), work); retest: spin_lock_irq(&group->lock); while (!list_empty(&group->pending_list) || (group->state != MCAST_BUSY)) { if (group->state != MCAST_BUSY) { spin_unlock_irq(&group->lock); process_group_error(group); goto retest; } member = list_entry(group->pending_list.next, struct mcast_member, list); multicast = &member->multicast; join_state = multicast->rec.join_state; refcount_inc(&member->refcount); if (join_state == (group->rec.join_state & join_state)) { status = cmp_rec(&group->rec, &multicast->rec, multicast->comp_mask); if (!status) join_group(group, member, join_state); else list_del_init(&member->list); spin_unlock_irq(&group->lock); ret = multicast->callback(status, multicast); } else { spin_unlock_irq(&group->lock); status = send_join(group, member); if (!status) { deref_member(member); return; } ret = fail_join(group, member, status); } deref_member(member); if (ret) ib_sa_free_multicast(&member->multicast); spin_lock_irq(&group->lock); } join_state = get_leave_state(group); if (join_state) { group->rec.join_state &= ~join_state; spin_unlock_irq(&group->lock); if (send_leave(group, join_state)) goto retest; } else { group->state = MCAST_IDLE; spin_unlock_irq(&group->lock); release_group(group); } } /* * Fail a join request if it is still active - at the head of the pending queue. 
*/ static void process_join_error(struct mcast_group *group, int status) { struct mcast_member *member; int ret; spin_lock_irq(&group->lock); member = list_entry(group->pending_list.next, struct mcast_member, list); if (group->last_join == member) { refcount_inc(&member->refcount); list_del_init(&member->list); spin_unlock_irq(&group->lock); ret = member->multicast.callback(status, &member->multicast); deref_member(member); if (ret) ib_sa_free_multicast(&member->multicast); } else spin_unlock_irq(&group->lock); } static void join_handler(int status, struct ib_sa_mcmember_rec *rec, void *context) { struct mcast_group *group = context; u16 pkey_index = MCAST_INVALID_PKEY_INDEX; if (status) process_join_error(group, status); else { int mgids_changed, is_mgid0; if (ib_find_pkey(group->port->dev->device, group->port->port_num, be16_to_cpu(rec->pkey), &pkey_index)) pkey_index = MCAST_INVALID_PKEY_INDEX; spin_lock_irq(&group->port->lock); if (group->state == MCAST_BUSY && group->pkey_index == MCAST_INVALID_PKEY_INDEX) group->pkey_index = pkey_index; mgids_changed = memcmp(&rec->mgid, &group->rec.mgid, sizeof(group->rec.mgid)); group->rec = *rec; if (mgids_changed) { rb_erase(&group->node, &group->port->table); is_mgid0 = !memcmp(&mgid0, &group->rec.mgid, sizeof(mgid0)); mcast_insert(group->port, group, is_mgid0); } spin_unlock_irq(&group->port->lock); } mcast_work_handler(&group->work); } static void leave_handler(int status, struct ib_sa_mcmember_rec *rec, void *context) { struct mcast_group *group = context; if (status && group->retries > 0 && !send_leave(group, group->leave_state)) group->retries--; else mcast_work_handler(&group->work); } static struct mcast_group *acquire_group(struct mcast_port *port, union ib_gid *mgid, gfp_t gfp_mask) { struct mcast_group *group, *cur_group; unsigned long flags; int is_mgid0; is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0); if (!is_mgid0) { spin_lock_irqsave(&port->lock, flags); group = mcast_find(port, mgid); if (group) goto found; spin_unlock_irqrestore(&port->lock, flags); } group = kzalloc(sizeof *group, gfp_mask); if (!group) return NULL; group->retries = 3; group->port = port; group->rec.mgid = *mgid; group->pkey_index = MCAST_INVALID_PKEY_INDEX; INIT_LIST_HEAD(&group->pending_list); INIT_LIST_HEAD(&group->active_list); INIT_WORK(&group->work, mcast_work_handler); spin_lock_init(&group->lock); spin_lock_irqsave(&port->lock, flags); cur_group = mcast_insert(port, group, is_mgid0); if (cur_group) { kfree(group); group = cur_group; } else refcount_inc(&port->refcount); found: atomic_inc(&group->refcount); spin_unlock_irqrestore(&port->lock, flags); return group; } /* * We serialize all join requests to a single group to make our lives much * easier. Otherwise, two users could try to join the same group * simultaneously, with different configurations, one could leave while the * join is in progress, etc., which makes locking around error recovery * difficult. 
*/ struct ib_sa_multicast * ib_sa_join_multicast(struct ib_sa_client *client, struct ib_device *device, u32 port_num, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, gfp_t gfp_mask, int (*callback)(int status, struct ib_sa_multicast *multicast), void *context) { struct mcast_device *dev; struct mcast_member *member; struct ib_sa_multicast *multicast; int ret; dev = ib_get_client_data(device, &mcast_client); if (!dev) return ERR_PTR(-ENODEV); member = kmalloc(sizeof *member, gfp_mask); if (!member) return ERR_PTR(-ENOMEM); ib_sa_client_get(client); member->client = client; member->multicast.rec = *rec; member->multicast.comp_mask = comp_mask; member->multicast.callback = callback; member->multicast.context = context; init_completion(&member->comp); refcount_set(&member->refcount, 1); member->state = MCAST_JOINING; member->group = acquire_group(&dev->port[port_num - dev->start_port], &rec->mgid, gfp_mask); if (!member->group) { ret = -ENOMEM; goto err; } /* * The user will get the multicast structure in their callback. They * could then free the multicast structure before we can return from * this routine. So we save the pointer to return before queuing * any callback. */ multicast = &member->multicast; queue_join(member); return multicast; err: ib_sa_client_put(client); kfree(member); return ERR_PTR(ret); } EXPORT_SYMBOL(ib_sa_join_multicast); void ib_sa_free_multicast(struct ib_sa_multicast *multicast) { struct mcast_member *member; struct mcast_group *group; member = container_of(multicast, struct mcast_member, multicast); group = member->group; spin_lock_irq(&group->lock); if (member->state == MCAST_MEMBER) adjust_membership(group, multicast->rec.join_state, -1); list_del_init(&member->list); if (group->state == MCAST_IDLE) { group->state = MCAST_BUSY; spin_unlock_irq(&group->lock); /* Continue to hold reference on group until callback */ queue_work(mcast_wq, &group->work); } else { spin_unlock_irq(&group->lock); release_group(group); } deref_member(member); wait_for_completion(&member->comp); ib_sa_client_put(member->client); kfree(member); } EXPORT_SYMBOL(ib_sa_free_multicast); int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num, union ib_gid *mgid, struct ib_sa_mcmember_rec *rec) { struct mcast_device *dev; struct mcast_port *port; struct mcast_group *group; unsigned long flags; int ret = 0; dev = ib_get_client_data(device, &mcast_client); if (!dev) return -ENODEV; port = &dev->port[port_num - dev->start_port]; spin_lock_irqsave(&port->lock, flags); group = mcast_find(port, mgid); if (group) *rec = group->rec; else ret = -EADDRNOTAVAIL; spin_unlock_irqrestore(&port->lock, flags); return ret; } EXPORT_SYMBOL(ib_sa_get_mcmember_rec); /** * ib_init_ah_from_mcmember - Initialize AH attribute from multicast * member record and gid of the device. * @device: RDMA device * @port_num: Port of the rdma device to consider * @rec: Multicast member record to use * @ndev: Optional netdevice, applicable only for RoCE * @gid_type: GID type to consider * @ah_attr: AH attribute to fillup on successful completion * * ib_init_ah_from_mcmember() initializes AH attribute based on multicast * member record and other device properties. On success the caller is * responsible to call rdma_destroy_ah_attr on the ah_attr. Returns 0 on * success or appropriate error code. 
* */ int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num, struct ib_sa_mcmember_rec *rec, struct net_device *ndev, enum ib_gid_type gid_type, struct rdma_ah_attr *ah_attr) { const struct ib_gid_attr *sgid_attr; /* GID table is not based on the netdevice for IB link layer, * so ignore ndev during search. */ if (rdma_protocol_ib(device, port_num)) ndev = NULL; else if (!rdma_protocol_roce(device, port_num)) return -EINVAL; sgid_attr = rdma_find_gid_by_port(device, &rec->port_gid, gid_type, port_num, ndev); if (IS_ERR(sgid_attr)) return PTR_ERR(sgid_attr); memset(ah_attr, 0, sizeof(*ah_attr)); ah_attr->type = rdma_ah_find_type(device, port_num); rdma_ah_set_dlid(ah_attr, be16_to_cpu(rec->mlid)); rdma_ah_set_sl(ah_attr, rec->sl); rdma_ah_set_port_num(ah_attr, port_num); rdma_ah_set_static_rate(ah_attr, rec->rate); rdma_move_grh_sgid_attr(ah_attr, &rec->mgid, be32_to_cpu(rec->flow_label), rec->hop_limit, rec->traffic_class, sgid_attr); return 0; } EXPORT_SYMBOL(ib_init_ah_from_mcmember); static void mcast_groups_event(struct mcast_port *port, enum mcast_group_state state) { struct mcast_group *group; struct rb_node *node; unsigned long flags; spin_lock_irqsave(&port->lock, flags); for (node = rb_first(&port->table); node; node = rb_next(node)) { group = rb_entry(node, struct mcast_group, node); spin_lock(&group->lock); if (group->state == MCAST_IDLE) { atomic_inc(&group->refcount); queue_work(mcast_wq, &group->work); } if (group->state != MCAST_GROUP_ERROR) group->state = state; spin_unlock(&group->lock); } spin_unlock_irqrestore(&port->lock, flags); } static void mcast_event_handler(struct ib_event_handler *handler, struct ib_event *event) { struct mcast_device *dev; int index; dev = container_of(handler, struct mcast_device, event_handler); if (!rdma_cap_ib_mcast(dev->device, event->element.port_num)) return; index = event->element.port_num - dev->start_port; switch (event->event) { case IB_EVENT_PORT_ERR: case IB_EVENT_LID_CHANGE: case IB_EVENT_CLIENT_REREGISTER: mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR); break; case IB_EVENT_PKEY_CHANGE: mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT); break; default: break; } } static int mcast_add_one(struct ib_device *device) { struct mcast_device *dev; struct mcast_port *port; int i; int count = 0; dev = kmalloc(struct_size(dev, port, device->phys_port_cnt), GFP_KERNEL); if (!dev) return -ENOMEM; dev->start_port = rdma_start_port(device); dev->end_port = rdma_end_port(device); for (i = 0; i <= dev->end_port - dev->start_port; i++) { if (!rdma_cap_ib_mcast(device, dev->start_port + i)) continue; port = &dev->port[i]; port->dev = dev; port->port_num = dev->start_port + i; spin_lock_init(&port->lock); port->table = RB_ROOT; init_completion(&port->comp); refcount_set(&port->refcount, 1); ++count; } if (!count) { kfree(dev); return -EOPNOTSUPP; } dev->device = device; ib_set_client_data(device, &mcast_client, dev); INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler); ib_register_event_handler(&dev->event_handler); return 0; } static void mcast_remove_one(struct ib_device *device, void *client_data) { struct mcast_device *dev = client_data; struct mcast_port *port; int i; ib_unregister_event_handler(&dev->event_handler); flush_workqueue(mcast_wq); for (i = 0; i <= dev->end_port - dev->start_port; i++) { if (rdma_cap_ib_mcast(device, dev->start_port + i)) { port = &dev->port[i]; deref_port(port); wait_for_completion(&port->comp); } } kfree(dev); } int mcast_init(void) { int ret; mcast_wq = 
alloc_ordered_workqueue("ib_mcast", WQ_MEM_RECLAIM);
	if (!mcast_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	ret = ib_register_client(&mcast_client);
	if (ret)
		goto err;
	return 0;
err:
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
	return ret;
}

void mcast_cleanup(void)
{
	ib_unregister_client(&mcast_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
}

linux-master
drivers/infiniband/core/multicast.c
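multicast.c above tracks group membership per join state: rec.join_state is a bitmask with one bit for each of the four membership types (full member, non member, send-only non member, send-only full member), adjust_membership() adds or removes a member under every bit that is set, and get_leave_state() returns the bits the group still holds with the SA even though no local member needs them, so a single SA leave drops exactly those states. A small self-contained C model of that bookkeeping follows; it mirrors the constants and helpers in the file but is a sketch, not the kernel code.

#include <stdio.h>
#include <stdint.h>

#define NUM_JOIN_MEMBERSHIP_TYPES 4	/* one bit per membership type */

struct group_model {
	uint8_t join_state;			/* bits currently joined with the SA */
	int members[NUM_JOIN_MEMBERSHIP_TYPES];	/* local members per bit */
};

/* Mirror of adjust_membership(): walk the join_state bits and
 * add/subtract one member for every bit that is set. */
static void adjust_membership(struct group_model *g, uint8_t join_state, int inc)
{
	int i;

	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1)
		if (join_state & 0x1)
			g->members[i] += inc;
}

/* Mirror of get_leave_state(): bits that are joined at the SA but no
 * longer have any local members must be left. */
static uint8_t get_leave_state(const struct group_model *g)
{
	uint8_t leave_state = 0;
	int i;

	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++)
		if (!g->members[i])
			leave_state |= (uint8_t)(0x1 << i);

	return leave_state & g->join_state;
}

int main(void)
{
	struct group_model g = { 0 };

	/* Two members join: one full member (bit 0), one send-only (bit 2). */
	adjust_membership(&g, 0x1, 1);
	adjust_membership(&g, 0x4, 1);
	g.join_state = 0x1 | 0x4;

	/* The send-only member leaves; only bit 2 must be left at the SA. */
	adjust_membership(&g, 0x4, -1);
	printf("leave_state = 0x%x\n", get_leave_state(&g));
	return 0;
}

The example prints 0x4: once the last send-only member detaches, that is the only join state the group reports for the SA leave, while the still-populated full-member state is kept.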
/* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. * Copyright (c) 2005 PathScale, Inc. All rights reserved. * Copyright (c) 2006 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/file.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/uaccess.h> #include <rdma/uverbs_types.h> #include <rdma/uverbs_std_types.h> #include "rdma_core.h" #include "uverbs.h" #include "core_priv.h" /* * Copy a response to userspace. If the provided 'resp' is larger than the * user buffer it is silently truncated. If the user provided a larger buffer * then the trailing portion is zero filled. * * These semantics are intended to support future extension of the output * structures. */ static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp, size_t resp_len) { int ret; if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) return uverbs_copy_to_struct_or_zero( attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len); if (copy_to_user(attrs->ucore.outbuf, resp, min(attrs->ucore.outlen, resp_len))) return -EFAULT; if (resp_len < attrs->ucore.outlen) { /* * Zero fill any extra memory that user * space might have provided. */ ret = clear_user(attrs->ucore.outbuf + resp_len, attrs->ucore.outlen - resp_len); if (ret) return -EFAULT; } return 0; } /* * Copy a request from userspace. If the provided 'req' is larger than the * user buffer then the user buffer is zero extended into the 'req'. If 'req' * is smaller than the user buffer then the uncopied bytes in the user buffer * must be zero. */ static int uverbs_request(struct uverbs_attr_bundle *attrs, void *req, size_t req_len) { if (copy_from_user(req, attrs->ucore.inbuf, min(attrs->ucore.inlen, req_len))) return -EFAULT; if (attrs->ucore.inlen < req_len) { memset(req + attrs->ucore.inlen, 0, req_len - attrs->ucore.inlen); } else if (attrs->ucore.inlen > req_len) { if (!ib_is_buffer_cleared(attrs->ucore.inbuf + req_len, attrs->ucore.inlen - req_len)) return -EOPNOTSUPP; } return 0; } /* * Generate the value for the 'response_length' protocol used by write_ex. * This is the number of bytes the kernel actually wrote. 
Userspace can use * this to detect what structure members in the response the kernel * understood. */ static u32 uverbs_response_length(struct uverbs_attr_bundle *attrs, size_t resp_len) { return min_t(size_t, attrs->ucore.outlen, resp_len); } /* * The iterator version of the request interface is for handlers that need to * step over a flex array at the end of a command header. */ struct uverbs_req_iter { const void __user *cur; const void __user *end; }; static int uverbs_request_start(struct uverbs_attr_bundle *attrs, struct uverbs_req_iter *iter, void *req, size_t req_len) { if (attrs->ucore.inlen < req_len) return -ENOSPC; if (copy_from_user(req, attrs->ucore.inbuf, req_len)) return -EFAULT; iter->cur = attrs->ucore.inbuf + req_len; iter->end = attrs->ucore.inbuf + attrs->ucore.inlen; return 0; } static int uverbs_request_next(struct uverbs_req_iter *iter, void *val, size_t len) { if (iter->cur + len > iter->end) return -ENOSPC; if (copy_from_user(val, iter->cur, len)) return -EFAULT; iter->cur += len; return 0; } static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter, size_t len) { const void __user *res = iter->cur; if (iter->cur + len > iter->end) return (void __force __user *)ERR_PTR(-ENOSPC); iter->cur += len; return res; } static int uverbs_request_finish(struct uverbs_req_iter *iter) { if (!ib_is_buffer_cleared(iter->cur, iter->end - iter->cur)) return -EOPNOTSUPP; return 0; } /* * When calling a destroy function during an error unwind we need to pass in * the udata that is sanitized of all user arguments. Ie from the driver * perspective it looks like no udata was passed. */ struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs) { attrs->driver_udata = (struct ib_udata){}; return &attrs->driver_udata; } static struct ib_uverbs_completion_event_file * _ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL, fd, attrs); if (IS_ERR(uobj)) return (void *)uobj; uverbs_uobject_get(uobj); uobj_put_read(uobj); return container_of(uobj, struct ib_uverbs_completion_event_file, uobj); } #define ib_uverbs_lookup_comp_file(_fd, _ufile) \ _ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile) int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_file *ufile = attrs->ufile; struct ib_ucontext *ucontext; struct ib_device *ib_dev; ib_dev = srcu_dereference(ufile->device->ib_dev, &ufile->device->disassociate_srcu); if (!ib_dev) return -EIO; ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext); if (!ucontext) return -ENOMEM; ucontext->device = ib_dev; ucontext->ufile = ufile; xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC); rdma_restrack_new(&ucontext->res, RDMA_RESTRACK_CTX); rdma_restrack_set_name(&ucontext->res, NULL); attrs->context = ucontext; return 0; } int ib_init_ucontext(struct uverbs_attr_bundle *attrs) { struct ib_ucontext *ucontext = attrs->context; struct ib_uverbs_file *file = attrs->ufile; int ret; if (!down_read_trylock(&file->hw_destroy_rwsem)) return -EIO; mutex_lock(&file->ucontext_lock); if (file->ucontext) { ret = -EINVAL; goto err; } ret = ib_rdmacg_try_charge(&ucontext->cg_obj, ucontext->device, RDMACG_RESOURCE_HCA_HANDLE); if (ret) goto err; ret = ucontext->device->ops.alloc_ucontext(ucontext, &attrs->driver_udata); if (ret) goto err_uncharge; rdma_restrack_add(&ucontext->res); /* * Make sure that ib_uverbs_get_ucontext() sees the pointer update * only after all writes to setup the ucontext have completed */ 
smp_store_release(&file->ucontext, ucontext); mutex_unlock(&file->ucontext_lock); up_read(&file->hw_destroy_rwsem); return 0; err_uncharge: ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device, RDMACG_RESOURCE_HCA_HANDLE); err: mutex_unlock(&file->ucontext_lock); up_read(&file->hw_destroy_rwsem); return ret; } static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_get_context_resp resp; struct ib_uverbs_get_context cmd; struct ib_device *ib_dev; struct ib_uobject *uobj; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; ret = ib_alloc_ucontext(attrs); if (ret) return ret; uobj = uobj_alloc(UVERBS_OBJECT_ASYNC_EVENT, attrs, &ib_dev); if (IS_ERR(uobj)) { ret = PTR_ERR(uobj); goto err_ucontext; } resp = (struct ib_uverbs_get_context_resp){ .num_comp_vectors = attrs->ufile->device->num_comp_vectors, .async_fd = uobj->id, }; ret = uverbs_response(attrs, &resp, sizeof(resp)); if (ret) goto err_uobj; ret = ib_init_ucontext(attrs); if (ret) goto err_uobj; ib_uverbs_init_async_event_file( container_of(uobj, struct ib_uverbs_async_event_file, uobj)); rdma_alloc_commit_uobject(uobj, attrs); return 0; err_uobj: rdma_alloc_abort_uobject(uobj, attrs, false); err_ucontext: rdma_restrack_put(&attrs->context->res); kfree(attrs->context); attrs->context = NULL; return ret; } static void copy_query_dev_fields(struct ib_ucontext *ucontext, struct ib_uverbs_query_device_resp *resp, struct ib_device_attr *attr) { struct ib_device *ib_dev = ucontext->device; resp->fw_ver = attr->fw_ver; resp->node_guid = ib_dev->node_guid; resp->sys_image_guid = attr->sys_image_guid; resp->max_mr_size = attr->max_mr_size; resp->page_size_cap = attr->page_size_cap; resp->vendor_id = attr->vendor_id; resp->vendor_part_id = attr->vendor_part_id; resp->hw_ver = attr->hw_ver; resp->max_qp = attr->max_qp; resp->max_qp_wr = attr->max_qp_wr; resp->device_cap_flags = lower_32_bits(attr->device_cap_flags); resp->max_sge = min(attr->max_send_sge, attr->max_recv_sge); resp->max_sge_rd = attr->max_sge_rd; resp->max_cq = attr->max_cq; resp->max_cqe = attr->max_cqe; resp->max_mr = attr->max_mr; resp->max_pd = attr->max_pd; resp->max_qp_rd_atom = attr->max_qp_rd_atom; resp->max_ee_rd_atom = attr->max_ee_rd_atom; resp->max_res_rd_atom = attr->max_res_rd_atom; resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom; resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom; resp->atomic_cap = attr->atomic_cap; resp->max_ee = attr->max_ee; resp->max_rdd = attr->max_rdd; resp->max_mw = attr->max_mw; resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp; resp->max_raw_ethy_qp = attr->max_raw_ethy_qp; resp->max_mcast_grp = attr->max_mcast_grp; resp->max_mcast_qp_attach = attr->max_mcast_qp_attach; resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach; resp->max_ah = attr->max_ah; resp->max_srq = attr->max_srq; resp->max_srq_wr = attr->max_srq_wr; resp->max_srq_sge = attr->max_srq_sge; resp->max_pkeys = attr->max_pkeys; resp->local_ca_ack_delay = attr->local_ca_ack_delay; resp->phys_port_cnt = min_t(u32, ib_dev->phys_port_cnt, U8_MAX); } static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_query_device cmd; struct ib_uverbs_query_device_resp resp; struct ib_ucontext *ucontext; int ret; ucontext = ib_uverbs_get_ucontext(attrs); if (IS_ERR(ucontext)) return PTR_ERR(ucontext); ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; memset(&resp, 0, sizeof resp); copy_query_dev_fields(ucontext, &resp, &ucontext->device->attrs); return 
uverbs_response(attrs, &resp, sizeof(resp)); } static int ib_uverbs_query_port(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_query_port cmd; struct ib_uverbs_query_port_resp resp; struct ib_port_attr attr; int ret; struct ib_ucontext *ucontext; struct ib_device *ib_dev; ucontext = ib_uverbs_get_ucontext(attrs); if (IS_ERR(ucontext)) return PTR_ERR(ucontext); ib_dev = ucontext->device; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; ret = ib_query_port(ib_dev, cmd.port_num, &attr); if (ret) return ret; memset(&resp, 0, sizeof resp); copy_port_attr_to_resp(&attr, &resp, ib_dev, cmd.port_num); return uverbs_response(attrs, &resp, sizeof(resp)); } static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_alloc_pd_resp resp = {}; struct ib_uverbs_alloc_pd cmd; struct ib_uobject *uobj; struct ib_pd *pd; int ret; struct ib_device *ib_dev; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev); if (IS_ERR(uobj)) return PTR_ERR(uobj); pd = rdma_zalloc_drv_obj(ib_dev, ib_pd); if (!pd) { ret = -ENOMEM; goto err; } pd->device = ib_dev; pd->uobject = uobj; atomic_set(&pd->usecnt, 0); rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD); rdma_restrack_set_name(&pd->res, NULL); ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata); if (ret) goto err_alloc; rdma_restrack_add(&pd->res); uobj->object = pd; uobj_finalize_uobj_create(uobj, attrs); resp.pd_handle = uobj->id; return uverbs_response(attrs, &resp, sizeof(resp)); err_alloc: rdma_restrack_put(&pd->res); kfree(pd); err: uobj_alloc_abort(uobj, attrs); return ret; } static int ib_uverbs_dealloc_pd(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_dealloc_pd cmd; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, attrs); } struct xrcd_table_entry { struct rb_node node; struct ib_xrcd *xrcd; struct inode *inode; }; static int xrcd_table_insert(struct ib_uverbs_device *dev, struct inode *inode, struct ib_xrcd *xrcd) { struct xrcd_table_entry *entry, *scan; struct rb_node **p = &dev->xrcd_tree.rb_node; struct rb_node *parent = NULL; entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) return -ENOMEM; entry->xrcd = xrcd; entry->inode = inode; while (*p) { parent = *p; scan = rb_entry(parent, struct xrcd_table_entry, node); if (inode < scan->inode) { p = &(*p)->rb_left; } else if (inode > scan->inode) { p = &(*p)->rb_right; } else { kfree(entry); return -EEXIST; } } rb_link_node(&entry->node, parent, p); rb_insert_color(&entry->node, &dev->xrcd_tree); igrab(inode); return 0; } static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev, struct inode *inode) { struct xrcd_table_entry *entry; struct rb_node *p = dev->xrcd_tree.rb_node; while (p) { entry = rb_entry(p, struct xrcd_table_entry, node); if (inode < entry->inode) p = p->rb_left; else if (inode > entry->inode) p = p->rb_right; else return entry; } return NULL; } static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode) { struct xrcd_table_entry *entry; entry = xrcd_table_search(dev, inode); if (!entry) return NULL; return entry->xrcd; } static void xrcd_table_delete(struct ib_uverbs_device *dev, struct inode *inode) { struct xrcd_table_entry *entry; entry = xrcd_table_search(dev, inode); if (entry) { iput(inode); rb_erase(&entry->node, &dev->xrcd_tree); kfree(entry); } } static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs) { struct 
ib_uverbs_device *ibudev = attrs->ufile->device; struct ib_uverbs_open_xrcd_resp resp = {}; struct ib_uverbs_open_xrcd cmd; struct ib_uxrcd_object *obj; struct ib_xrcd *xrcd = NULL; struct inode *inode = NULL; int new_xrcd = 0; struct ib_device *ib_dev; struct fd f = {}; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; mutex_lock(&ibudev->xrcd_tree_mutex); if (cmd.fd != -1) { /* search for file descriptor */ f = fdget(cmd.fd); if (!f.file) { ret = -EBADF; goto err_tree_mutex_unlock; } inode = file_inode(f.file); xrcd = find_xrcd(ibudev, inode); if (!xrcd && !(cmd.oflags & O_CREAT)) { /* no file descriptor. Need CREATE flag */ ret = -EAGAIN; goto err_tree_mutex_unlock; } if (xrcd && cmd.oflags & O_EXCL) { ret = -EINVAL; goto err_tree_mutex_unlock; } } obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, attrs, &ib_dev); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto err_tree_mutex_unlock; } if (!xrcd) { xrcd = ib_alloc_xrcd_user(ib_dev, inode, &attrs->driver_udata); if (IS_ERR(xrcd)) { ret = PTR_ERR(xrcd); goto err; } new_xrcd = 1; } atomic_set(&obj->refcnt, 0); obj->uobject.object = xrcd; if (inode) { if (new_xrcd) { /* create new inode/xrcd table entry */ ret = xrcd_table_insert(ibudev, inode, xrcd); if (ret) goto err_dealloc_xrcd; } atomic_inc(&xrcd->usecnt); } if (f.file) fdput(f); mutex_unlock(&ibudev->xrcd_tree_mutex); uobj_finalize_uobj_create(&obj->uobject, attrs); resp.xrcd_handle = obj->uobject.id; return uverbs_response(attrs, &resp, sizeof(resp)); err_dealloc_xrcd: ib_dealloc_xrcd_user(xrcd, uverbs_get_cleared_udata(attrs)); err: uobj_alloc_abort(&obj->uobject, attrs); err_tree_mutex_unlock: if (f.file) fdput(f); mutex_unlock(&ibudev->xrcd_tree_mutex); return ret; } static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_close_xrcd cmd; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs); } int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { struct inode *inode; int ret; struct ib_uverbs_device *dev = attrs->ufile->device; inode = xrcd->inode; if (inode && !atomic_dec_and_test(&xrcd->usecnt)) return 0; ret = ib_dealloc_xrcd_user(xrcd, &attrs->driver_udata); if (ret) { atomic_inc(&xrcd->usecnt); return ret; } if (inode) xrcd_table_delete(dev, inode); return 0; } static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_reg_mr_resp resp = {}; struct ib_uverbs_reg_mr cmd; struct ib_uobject *uobj; struct ib_pd *pd; struct ib_mr *mr; int ret; struct ib_device *ib_dev; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)) return -EINVAL; uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev); if (IS_ERR(uobj)) return PTR_ERR(uobj); ret = ib_check_mr_access(ib_dev, cmd.access_flags); if (ret) goto err_free; pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); if (!pd) { ret = -EINVAL; goto err_free; } mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va, cmd.access_flags, &attrs->driver_udata); if (IS_ERR(mr)) { ret = PTR_ERR(mr); goto err_put; } mr->device = pd->device; mr->pd = pd; mr->type = IB_MR_TYPE_USER; mr->dm = NULL; mr->sig_attrs = NULL; mr->uobject = uobj; atomic_inc(&pd->usecnt); mr->iova = cmd.hca_va; mr->length = cmd.length; rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); 
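	/*
	 * Register the new MR with restrack so it is visible to resource
	 * tracking (e.g. the iproute2 "rdma resource show mr" output); a
	 * NULL name marks it as a user-owned resource rather than a
	 * kernel-created one.
	 */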
rdma_restrack_set_name(&mr->res, NULL); rdma_restrack_add(&mr->res); uobj->object = mr; uobj_put_obj_read(pd); uobj_finalize_uobj_create(uobj, attrs); resp.lkey = mr->lkey; resp.rkey = mr->rkey; resp.mr_handle = uobj->id; return uverbs_response(attrs, &resp, sizeof(resp)); err_put: uobj_put_obj_read(pd); err_free: uobj_alloc_abort(uobj, attrs); return ret; } static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_rereg_mr cmd; struct ib_uverbs_rereg_mr_resp resp; struct ib_mr *mr; int ret; struct ib_uobject *uobj; struct ib_uobject *new_uobj; struct ib_device *ib_dev; struct ib_pd *orig_pd; struct ib_pd *new_pd; struct ib_mr *new_mr; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; if (!cmd.flags) return -EINVAL; if (cmd.flags & ~IB_MR_REREG_SUPPORTED) return -EOPNOTSUPP; if ((cmd.flags & IB_MR_REREG_TRANS) && (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)) return -EINVAL; uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, attrs); if (IS_ERR(uobj)) return PTR_ERR(uobj); mr = uobj->object; if (mr->dm) { ret = -EINVAL; goto put_uobjs; } if (cmd.flags & IB_MR_REREG_ACCESS) { ret = ib_check_mr_access(mr->device, cmd.access_flags); if (ret) goto put_uobjs; } orig_pd = mr->pd; if (cmd.flags & IB_MR_REREG_PD) { new_pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); if (!new_pd) { ret = -EINVAL; goto put_uobjs; } } else { new_pd = mr->pd; } /* * The driver might create a new HW object as part of the rereg, we need * to have a uobject ready to hold it. */ new_uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev); if (IS_ERR(new_uobj)) { ret = PTR_ERR(new_uobj); goto put_uobj_pd; } new_mr = ib_dev->ops.rereg_user_mr(mr, cmd.flags, cmd.start, cmd.length, cmd.hca_va, cmd.access_flags, new_pd, &attrs->driver_udata); if (IS_ERR(new_mr)) { ret = PTR_ERR(new_mr); goto put_new_uobj; } if (new_mr) { new_mr->device = new_pd->device; new_mr->pd = new_pd; new_mr->type = IB_MR_TYPE_USER; new_mr->uobject = uobj; atomic_inc(&new_pd->usecnt); new_uobj->object = new_mr; rdma_restrack_new(&new_mr->res, RDMA_RESTRACK_MR); rdma_restrack_set_name(&new_mr->res, NULL); rdma_restrack_add(&new_mr->res); /* * The new uobj for the new HW object is put into the same spot * in the IDR and the old uobj & HW object is deleted. 
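		 * Because the handle slot is reused, userspace keeps
		 * referring to the re-registered MR by the same mr_handle
		 * value.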
*/ rdma_assign_uobject(uobj, new_uobj, attrs); rdma_alloc_commit_uobject(new_uobj, attrs); uobj_put_destroy(uobj); new_uobj = NULL; uobj = NULL; mr = new_mr; } else { if (cmd.flags & IB_MR_REREG_PD) { atomic_dec(&orig_pd->usecnt); mr->pd = new_pd; atomic_inc(&new_pd->usecnt); } if (cmd.flags & IB_MR_REREG_TRANS) { mr->iova = cmd.hca_va; mr->length = cmd.length; } } memset(&resp, 0, sizeof(resp)); resp.lkey = mr->lkey; resp.rkey = mr->rkey; ret = uverbs_response(attrs, &resp, sizeof(resp)); put_new_uobj: if (new_uobj) uobj_alloc_abort(new_uobj, attrs); put_uobj_pd: if (cmd.flags & IB_MR_REREG_PD) uobj_put_obj_read(new_pd); put_uobjs: if (uobj) uobj_put_write(uobj); return ret; } static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_dereg_mr cmd; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; return uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, attrs); } static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_alloc_mw cmd; struct ib_uverbs_alloc_mw_resp resp = {}; struct ib_uobject *uobj; struct ib_pd *pd; struct ib_mw *mw; int ret; struct ib_device *ib_dev; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; uobj = uobj_alloc(UVERBS_OBJECT_MW, attrs, &ib_dev); if (IS_ERR(uobj)) return PTR_ERR(uobj); pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); if (!pd) { ret = -EINVAL; goto err_free; } if (cmd.mw_type != IB_MW_TYPE_1 && cmd.mw_type != IB_MW_TYPE_2) { ret = -EINVAL; goto err_put; } mw = rdma_zalloc_drv_obj(ib_dev, ib_mw); if (!mw) { ret = -ENOMEM; goto err_put; } mw->device = ib_dev; mw->pd = pd; mw->uobject = uobj; mw->type = cmd.mw_type; ret = pd->device->ops.alloc_mw(mw, &attrs->driver_udata); if (ret) goto err_alloc; atomic_inc(&pd->usecnt); uobj->object = mw; uobj_put_obj_read(pd); uobj_finalize_uobj_create(uobj, attrs); resp.rkey = mw->rkey; resp.mw_handle = uobj->id; return uverbs_response(attrs, &resp, sizeof(resp)); err_alloc: kfree(mw); err_put: uobj_put_obj_read(pd); err_free: uobj_alloc_abort(uobj, attrs); return ret; } static int ib_uverbs_dealloc_mw(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_dealloc_mw cmd; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, attrs); } static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_create_comp_channel cmd; struct ib_uverbs_create_comp_channel_resp resp; struct ib_uobject *uobj; struct ib_uverbs_completion_event_file *ev_file; struct ib_device *ib_dev; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, attrs, &ib_dev); if (IS_ERR(uobj)) return PTR_ERR(uobj); ev_file = container_of(uobj, struct ib_uverbs_completion_event_file, uobj); ib_uverbs_init_event_queue(&ev_file->ev_queue); uobj_finalize_uobj_create(uobj, attrs); resp.fd = uobj->id; return uverbs_response(attrs, &resp, sizeof(resp)); } static int create_cq(struct uverbs_attr_bundle *attrs, struct ib_uverbs_ex_create_cq *cmd) { struct ib_ucq_object *obj; struct ib_uverbs_completion_event_file *ev_file = NULL; struct ib_cq *cq; int ret; struct ib_uverbs_ex_create_cq_resp resp = {}; struct ib_cq_init_attr attr = {}; struct ib_device *ib_dev; if (cmd->comp_vector >= attrs->ufile->device->num_comp_vectors) return -EINVAL; obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, attrs, &ib_dev); if (IS_ERR(obj)) return PTR_ERR(obj); if 
(cmd->comp_channel >= 0) { ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, attrs); if (IS_ERR(ev_file)) { ret = PTR_ERR(ev_file); goto err; } } obj->uevent.uobject.user_handle = cmd->user_handle; INIT_LIST_HEAD(&obj->comp_list); INIT_LIST_HEAD(&obj->uevent.event_list); attr.cqe = cmd->cqe; attr.comp_vector = cmd->comp_vector; attr.flags = cmd->flags; cq = rdma_zalloc_drv_obj(ib_dev, ib_cq); if (!cq) { ret = -ENOMEM; goto err_file; } cq->device = ib_dev; cq->uobject = obj; cq->comp_handler = ib_uverbs_comp_handler; cq->event_handler = ib_uverbs_cq_event_handler; cq->cq_context = ev_file ? &ev_file->ev_queue : NULL; atomic_set(&cq->usecnt, 0); rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); rdma_restrack_set_name(&cq->res, NULL); ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata); if (ret) goto err_free; rdma_restrack_add(&cq->res); obj->uevent.uobject.object = cq; obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file); if (obj->uevent.event_file) uverbs_uobject_get(&obj->uevent.event_file->uobj); uobj_finalize_uobj_create(&obj->uevent.uobject, attrs); resp.base.cq_handle = obj->uevent.uobject.id; resp.base.cqe = cq->cqe; resp.response_length = uverbs_response_length(attrs, sizeof(resp)); return uverbs_response(attrs, &resp, sizeof(resp)); err_free: rdma_restrack_put(&cq->res); kfree(cq); err_file: if (ev_file) ib_uverbs_release_ucq(ev_file, obj); err: uobj_alloc_abort(&obj->uevent.uobject, attrs); return ret; } static int ib_uverbs_create_cq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_create_cq cmd; struct ib_uverbs_ex_create_cq cmd_ex; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; memset(&cmd_ex, 0, sizeof(cmd_ex)); cmd_ex.user_handle = cmd.user_handle; cmd_ex.cqe = cmd.cqe; cmd_ex.comp_vector = cmd.comp_vector; cmd_ex.comp_channel = cmd.comp_channel; return create_cq(attrs, &cmd_ex); } static int ib_uverbs_ex_create_cq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_ex_create_cq cmd; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; if (cmd.comp_mask) return -EINVAL; if (cmd.reserved) return -EINVAL; return create_cq(attrs, &cmd); } static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_resize_cq cmd; struct ib_uverbs_resize_cq_resp resp = {}; struct ib_cq *cq; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); if (!cq) return -EINVAL; ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata); if (ret) goto out; resp.cqe = cq->cqe; ret = uverbs_response(attrs, &resp, sizeof(resp)); out: rdma_lookup_put_uobject(&cq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); return ret; } static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest, struct ib_wc *wc) { struct ib_uverbs_wc tmp; tmp.wr_id = wc->wr_id; tmp.status = wc->status; tmp.opcode = wc->opcode; tmp.vendor_err = wc->vendor_err; tmp.byte_len = wc->byte_len; tmp.ex.imm_data = wc->ex.imm_data; tmp.qp_num = wc->qp->qp_num; tmp.src_qp = wc->src_qp; tmp.wc_flags = wc->wc_flags; tmp.pkey_index = wc->pkey_index; if (rdma_cap_opa_ah(ib_dev, wc->port_num)) tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid); else tmp.slid = ib_lid_cpu16(wc->slid); tmp.sl = wc->sl; tmp.dlid_path_bits = wc->dlid_path_bits; tmp.port_num = wc->port_num; tmp.reserved = 0; if (copy_to_user(dest, &tmp, sizeof tmp)) return -EFAULT; return 0; } static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_poll_cq 
cmd; struct ib_uverbs_poll_cq_resp resp; u8 __user *header_ptr; u8 __user *data_ptr; struct ib_cq *cq; struct ib_wc wc; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); if (!cq) return -EINVAL; /* we copy a struct ib_uverbs_poll_cq_resp to user space */ header_ptr = attrs->ucore.outbuf; data_ptr = header_ptr + sizeof resp; memset(&resp, 0, sizeof resp); while (resp.count < cmd.ne) { ret = ib_poll_cq(cq, 1, &wc); if (ret < 0) goto out_put; if (!ret) break; ret = copy_wc_to_user(cq->device, data_ptr, &wc); if (ret) goto out_put; data_ptr += sizeof(struct ib_uverbs_wc); ++resp.count; } if (copy_to_user(header_ptr, &resp, sizeof resp)) { ret = -EFAULT; goto out_put; } ret = 0; if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT); out_put: rdma_lookup_put_uobject(&cq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); return ret; } static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_req_notify_cq cmd; struct ib_cq *cq; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); if (!cq) return -EINVAL; ib_req_notify_cq(cq, cmd.solicited_only ? IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); rdma_lookup_put_uobject(&cq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); return 0; } static int ib_uverbs_destroy_cq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_destroy_cq cmd; struct ib_uverbs_destroy_cq_resp resp; struct ib_uobject *uobj; struct ib_ucq_object *obj; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); if (IS_ERR(uobj)) return PTR_ERR(uobj); obj = container_of(uobj, struct ib_ucq_object, uevent.uobject); memset(&resp, 0, sizeof(resp)); resp.comp_events_reported = obj->comp_events_reported; resp.async_events_reported = obj->uevent.events_reported; uobj_put_destroy(uobj); return uverbs_response(attrs, &resp, sizeof(resp)); } static int create_qp(struct uverbs_attr_bundle *attrs, struct ib_uverbs_ex_create_qp *cmd) { struct ib_uqp_object *obj; struct ib_device *device; struct ib_pd *pd = NULL; struct ib_xrcd *xrcd = NULL; struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT); struct ib_cq *scq = NULL, *rcq = NULL; struct ib_srq *srq = NULL; struct ib_qp *qp; struct ib_qp_init_attr attr = {}; struct ib_uverbs_ex_create_qp_resp resp = {}; int ret; struct ib_rwq_ind_table *ind_tbl = NULL; bool has_sq = true; struct ib_device *ib_dev; switch (cmd->qp_type) { case IB_QPT_RAW_PACKET: if (!capable(CAP_NET_RAW)) return -EPERM; break; case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_UD: case IB_QPT_XRC_INI: case IB_QPT_XRC_TGT: case IB_QPT_DRIVER: break; default: return -EINVAL; } obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs, &ib_dev); if (IS_ERR(obj)) return PTR_ERR(obj); obj->uxrcd = NULL; obj->uevent.uobject.user_handle = cmd->user_handle; mutex_init(&obj->mcast_lock); if (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE) { ind_tbl = uobj_get_obj_read(rwq_ind_table, UVERBS_OBJECT_RWQ_IND_TBL, cmd->rwq_ind_tbl_handle, attrs); if (!ind_tbl) { ret = -EINVAL; goto err_put; } attr.rwq_ind_tbl = ind_tbl; } if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) { ret = -EINVAL; goto err_put; } if (ind_tbl && !cmd->max_send_wr) has_sq = false; if (cmd->qp_type == IB_QPT_XRC_TGT) { xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, 
cmd->pd_handle, attrs); if (IS_ERR(xrcd_uobj)) { ret = -EINVAL; goto err_put; } xrcd = (struct ib_xrcd *)xrcd_uobj->object; if (!xrcd) { ret = -EINVAL; goto err_put; } device = xrcd->device; } else { if (cmd->qp_type == IB_QPT_XRC_INI) { cmd->max_recv_wr = 0; cmd->max_recv_sge = 0; } else { if (cmd->is_srq) { srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd->srq_handle, attrs); if (!srq || srq->srq_type == IB_SRQT_XRC) { ret = -EINVAL; goto err_put; } } if (!ind_tbl) { if (cmd->recv_cq_handle != cmd->send_cq_handle) { rcq = uobj_get_obj_read( cq, UVERBS_OBJECT_CQ, cmd->recv_cq_handle, attrs); if (!rcq) { ret = -EINVAL; goto err_put; } } } } if (has_sq) scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->send_cq_handle, attrs); if (!ind_tbl && cmd->qp_type != IB_QPT_XRC_INI) rcq = rcq ?: scq; pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs); if (!pd || (!scq && has_sq)) { ret = -EINVAL; goto err_put; } device = pd->device; } attr.event_handler = ib_uverbs_qp_event_handler; attr.send_cq = scq; attr.recv_cq = rcq; attr.srq = srq; attr.xrcd = xrcd; attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; attr.qp_type = cmd->qp_type; attr.cap.max_send_wr = cmd->max_send_wr; attr.cap.max_recv_wr = cmd->max_recv_wr; attr.cap.max_send_sge = cmd->max_send_sge; attr.cap.max_recv_sge = cmd->max_recv_sge; attr.cap.max_inline_data = cmd->max_inline_data; INIT_LIST_HEAD(&obj->uevent.event_list); INIT_LIST_HEAD(&obj->mcast_list); attr.create_flags = cmd->create_flags; if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK | IB_QP_CREATE_CROSS_CHANNEL | IB_QP_CREATE_MANAGED_SEND | IB_QP_CREATE_MANAGED_RECV | IB_QP_CREATE_SCATTER_FCS | IB_QP_CREATE_CVLAN_STRIPPING | IB_QP_CREATE_SOURCE_QPN | IB_QP_CREATE_PCI_WRITE_END_PADDING)) { ret = -EINVAL; goto err_put; } if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) { if (!capable(CAP_NET_RAW)) { ret = -EPERM; goto err_put; } attr.source_qpn = cmd->source_qpn; } qp = ib_create_qp_user(device, pd, &attr, &attrs->driver_udata, obj, KBUILD_MODNAME); if (IS_ERR(qp)) { ret = PTR_ERR(qp); goto err_put; } ib_qp_usecnt_inc(qp); obj->uevent.uobject.object = qp; obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file); if (obj->uevent.event_file) uverbs_uobject_get(&obj->uevent.event_file->uobj); if (xrcd) { obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); atomic_inc(&obj->uxrcd->refcnt); uobj_put_read(xrcd_uobj); } if (pd) uobj_put_obj_read(pd); if (scq) rdma_lookup_put_uobject(&scq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); if (rcq && rcq != scq) rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); if (srq) rdma_lookup_put_uobject(&srq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); if (ind_tbl) uobj_put_obj_read(ind_tbl); uobj_finalize_uobj_create(&obj->uevent.uobject, attrs); resp.base.qpn = qp->qp_num; resp.base.qp_handle = obj->uevent.uobject.id; resp.base.max_recv_sge = attr.cap.max_recv_sge; resp.base.max_send_sge = attr.cap.max_send_sge; resp.base.max_recv_wr = attr.cap.max_recv_wr; resp.base.max_send_wr = attr.cap.max_send_wr; resp.base.max_inline_data = attr.cap.max_inline_data; resp.response_length = uverbs_response_length(attrs, sizeof(resp)); return uverbs_response(attrs, &resp, sizeof(resp)); err_put: if (!IS_ERR(xrcd_uobj)) uobj_put_read(xrcd_uobj); if (pd) uobj_put_obj_read(pd); if (scq) rdma_lookup_put_uobject(&scq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); if (rcq && rcq != scq) rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject, 
UVERBS_LOOKUP_READ); if (srq) rdma_lookup_put_uobject(&srq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); if (ind_tbl) uobj_put_obj_read(ind_tbl); uobj_alloc_abort(&obj->uevent.uobject, attrs); return ret; } static int ib_uverbs_create_qp(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_create_qp cmd; struct ib_uverbs_ex_create_qp cmd_ex; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; memset(&cmd_ex, 0, sizeof(cmd_ex)); cmd_ex.user_handle = cmd.user_handle; cmd_ex.pd_handle = cmd.pd_handle; cmd_ex.send_cq_handle = cmd.send_cq_handle; cmd_ex.recv_cq_handle = cmd.recv_cq_handle; cmd_ex.srq_handle = cmd.srq_handle; cmd_ex.max_send_wr = cmd.max_send_wr; cmd_ex.max_recv_wr = cmd.max_recv_wr; cmd_ex.max_send_sge = cmd.max_send_sge; cmd_ex.max_recv_sge = cmd.max_recv_sge; cmd_ex.max_inline_data = cmd.max_inline_data; cmd_ex.sq_sig_all = cmd.sq_sig_all; cmd_ex.qp_type = cmd.qp_type; cmd_ex.is_srq = cmd.is_srq; return create_qp(attrs, &cmd_ex); } static int ib_uverbs_ex_create_qp(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_ex_create_qp cmd; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK) return -EINVAL; if (cmd.reserved) return -EINVAL; return create_qp(attrs, &cmd); } static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_create_qp_resp resp = {}; struct ib_uverbs_open_qp cmd; struct ib_uqp_object *obj; struct ib_xrcd *xrcd; struct ib_qp *qp; struct ib_qp_open_attr attr = {}; int ret; struct ib_uobject *xrcd_uobj; struct ib_device *ib_dev; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs, &ib_dev); if (IS_ERR(obj)) return PTR_ERR(obj); xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, attrs); if (IS_ERR(xrcd_uobj)) { ret = -EINVAL; goto err_put; } xrcd = (struct ib_xrcd *)xrcd_uobj->object; if (!xrcd) { ret = -EINVAL; goto err_xrcd; } attr.event_handler = ib_uverbs_qp_event_handler; attr.qp_num = cmd.qpn; attr.qp_type = cmd.qp_type; INIT_LIST_HEAD(&obj->uevent.event_list); INIT_LIST_HEAD(&obj->mcast_list); qp = ib_open_qp(xrcd, &attr); if (IS_ERR(qp)) { ret = PTR_ERR(qp); goto err_xrcd; } obj->uevent.uobject.object = qp; obj->uevent.uobject.user_handle = cmd.user_handle; obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); atomic_inc(&obj->uxrcd->refcnt); qp->uobject = obj; uobj_put_read(xrcd_uobj); uobj_finalize_uobj_create(&obj->uevent.uobject, attrs); resp.qpn = qp->qp_num; resp.qp_handle = obj->uevent.uobject.id; return uverbs_response(attrs, &resp, sizeof(resp)); err_xrcd: uobj_put_read(xrcd_uobj); err_put: uobj_alloc_abort(&obj->uevent.uobject, attrs); return ret; } static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr, struct rdma_ah_attr *rdma_attr) { const struct ib_global_route *grh; uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr); uverb_attr->sl = rdma_ah_get_sl(rdma_attr); uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr); uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr); uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) & IB_AH_GRH); if (uverb_attr->is_global) { grh = rdma_ah_read_grh(rdma_attr); memcpy(uverb_attr->dgid, grh->dgid.raw, 16); uverb_attr->flow_label = grh->flow_label; uverb_attr->sgid_index = grh->sgid_index; uverb_attr->hop_limit = grh->hop_limit; uverb_attr->traffic_class = grh->traffic_class; } uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr); } 
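/*
 * Handlers such as ib_uverbs_query_qp() below are reached through the legacy
 * write() ABI on the uverbs char device: userspace (typically libibverbs)
 * writes a struct ib_uverbs_cmd_hdr followed by the command struct, and the
 * response is copied back to the buffer named in the command. A rough,
 * illustration-only sketch of the caller side for query_qp (field and size
 * handling simplified; see libibverbs for the real marshalling code):
 *
 *	struct {
 *		struct ib_uverbs_cmd_hdr hdr;
 *		struct ib_uverbs_query_qp cmd;
 *	} req = {
 *		.hdr = {
 *			.command   = IB_USER_VERBS_CMD_QUERY_QP,
 *			.in_words  = sizeof(req) / 4,
 *			.out_words = sizeof(struct ib_uverbs_query_qp_resp) / 4,
 *		},
 *		.cmd = {
 *			.response  = (uintptr_t)&resp,
 *			.qp_handle = qp_handle,
 *			.attr_mask = attr_mask,
 *		},
 *	};
 *
 *	if (write(uverbs_fd, &req, sizeof(req)) != sizeof(req))
 *		return errno;
 */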
static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_query_qp cmd; struct ib_uverbs_query_qp_resp resp; struct ib_qp *qp; struct ib_qp_attr *attr; struct ib_qp_init_attr *init_attr; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; attr = kmalloc(sizeof *attr, GFP_KERNEL); init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL); if (!attr || !init_attr) { ret = -ENOMEM; goto out; } qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); if (!qp) { ret = -EINVAL; goto out; } ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr); rdma_lookup_put_uobject(&qp->uobject->uevent.uobject, UVERBS_LOOKUP_READ); if (ret) goto out; memset(&resp, 0, sizeof resp); resp.qp_state = attr->qp_state; resp.cur_qp_state = attr->cur_qp_state; resp.path_mtu = attr->path_mtu; resp.path_mig_state = attr->path_mig_state; resp.qkey = attr->qkey; resp.rq_psn = attr->rq_psn; resp.sq_psn = attr->sq_psn; resp.dest_qp_num = attr->dest_qp_num; resp.qp_access_flags = attr->qp_access_flags; resp.pkey_index = attr->pkey_index; resp.alt_pkey_index = attr->alt_pkey_index; resp.sq_draining = attr->sq_draining; resp.max_rd_atomic = attr->max_rd_atomic; resp.max_dest_rd_atomic = attr->max_dest_rd_atomic; resp.min_rnr_timer = attr->min_rnr_timer; resp.port_num = attr->port_num; resp.timeout = attr->timeout; resp.retry_cnt = attr->retry_cnt; resp.rnr_retry = attr->rnr_retry; resp.alt_port_num = attr->alt_port_num; resp.alt_timeout = attr->alt_timeout; copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr); copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr); resp.max_send_wr = init_attr->cap.max_send_wr; resp.max_recv_wr = init_attr->cap.max_recv_wr; resp.max_send_sge = init_attr->cap.max_send_sge; resp.max_recv_sge = init_attr->cap.max_recv_sge; resp.max_inline_data = init_attr->cap.max_inline_data; resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR; ret = uverbs_response(attrs, &resp, sizeof(resp)); out: kfree(attr); kfree(init_attr); return ret; } /* Remove ignored fields set in the attribute mask */ static int modify_qp_mask(enum ib_qp_type qp_type, int mask) { switch (qp_type) { case IB_QPT_XRC_INI: return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER); case IB_QPT_XRC_TGT: return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY); default: return mask; } } static void copy_ah_attr_from_uverbs(struct ib_device *dev, struct rdma_ah_attr *rdma_attr, struct ib_uverbs_qp_dest *uverb_attr) { rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num); if (uverb_attr->is_global) { rdma_ah_set_grh(rdma_attr, NULL, uverb_attr->flow_label, uverb_attr->sgid_index, uverb_attr->hop_limit, uverb_attr->traffic_class); rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid); } else { rdma_ah_set_ah_flags(rdma_attr, 0); } rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid); rdma_ah_set_sl(rdma_attr, uverb_attr->sl); rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits); rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate); rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num); rdma_ah_set_make_grd(rdma_attr, false); } static int modify_qp(struct uverbs_attr_bundle *attrs, struct ib_uverbs_ex_modify_qp *cmd) { struct ib_qp_attr *attr; struct ib_qp *qp; int ret; attr = kzalloc(sizeof(*attr), GFP_KERNEL); if (!attr) return -ENOMEM; qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, attrs); if (!qp) { ret = -EINVAL; goto out; } if ((cmd->base.attr_mask & IB_QP_PORT) && !rdma_is_port_valid(qp->device, cmd->base.port_num)) { 
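		/* Primary port number is outside the device's port range. */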
ret = -EINVAL; goto release_qp; } if ((cmd->base.attr_mask & IB_QP_AV)) { if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) { ret = -EINVAL; goto release_qp; } if (cmd->base.attr_mask & IB_QP_STATE && cmd->base.qp_state == IB_QPS_RTR) { /* We are in INIT->RTR TRANSITION (if we are not, * this transition will be rejected in subsequent checks). * In the INIT->RTR transition, we cannot have IB_QP_PORT set, * but the IB_QP_STATE flag is required. * * Since kernel 3.14 (commit dbf727de7440), the uverbs driver, * when IB_QP_AV is set, has required inclusion of a valid * port number in the primary AV. (AVs are created and handled * differently for infiniband and ethernet (RoCE) ports). * * Check the port number included in the primary AV against * the port number in the qp struct, which was set (and saved) * in the RST->INIT transition. */ if (cmd->base.dest.port_num != qp->real_qp->port) { ret = -EINVAL; goto release_qp; } } else { /* We are in SQD->SQD. (If we are not, this transition will * be rejected later in the verbs layer checks). * Check for both IB_QP_PORT and IB_QP_AV, these can be set * together in the SQD->SQD transition. * * If only IP_QP_AV was set, add in IB_QP_PORT as well (the * verbs layer driver does not track primary port changes * resulting from path migration. Thus, in SQD, if the primary * AV is modified, the primary port should also be modified). * * Note that in this transition, the IB_QP_STATE flag * is not allowed. */ if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT)) == (IB_QP_AV | IB_QP_PORT)) && cmd->base.port_num != cmd->base.dest.port_num) { ret = -EINVAL; goto release_qp; } if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT)) == IB_QP_AV) { cmd->base.attr_mask |= IB_QP_PORT; cmd->base.port_num = cmd->base.dest.port_num; } } } if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) || !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) || cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) { ret = -EINVAL; goto release_qp; } if ((cmd->base.attr_mask & IB_QP_CUR_STATE && cmd->base.cur_qp_state > IB_QPS_ERR) || (cmd->base.attr_mask & IB_QP_STATE && cmd->base.qp_state > IB_QPS_ERR)) { ret = -EINVAL; goto release_qp; } if (cmd->base.attr_mask & IB_QP_STATE) attr->qp_state = cmd->base.qp_state; if (cmd->base.attr_mask & IB_QP_CUR_STATE) attr->cur_qp_state = cmd->base.cur_qp_state; if (cmd->base.attr_mask & IB_QP_PATH_MTU) attr->path_mtu = cmd->base.path_mtu; if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE) attr->path_mig_state = cmd->base.path_mig_state; if (cmd->base.attr_mask & IB_QP_QKEY) { if (cmd->base.qkey & IB_QP_SET_QKEY && !capable(CAP_NET_RAW)) { ret = -EPERM; goto release_qp; } attr->qkey = cmd->base.qkey; } if (cmd->base.attr_mask & IB_QP_RQ_PSN) attr->rq_psn = cmd->base.rq_psn; if (cmd->base.attr_mask & IB_QP_SQ_PSN) attr->sq_psn = cmd->base.sq_psn; if (cmd->base.attr_mask & IB_QP_DEST_QPN) attr->dest_qp_num = cmd->base.dest_qp_num; if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS) attr->qp_access_flags = cmd->base.qp_access_flags; if (cmd->base.attr_mask & IB_QP_PKEY_INDEX) attr->pkey_index = cmd->base.pkey_index; if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify; if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC) attr->max_rd_atomic = cmd->base.max_rd_atomic; if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic; if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER) 
attr->min_rnr_timer = cmd->base.min_rnr_timer; if (cmd->base.attr_mask & IB_QP_PORT) attr->port_num = cmd->base.port_num; if (cmd->base.attr_mask & IB_QP_TIMEOUT) attr->timeout = cmd->base.timeout; if (cmd->base.attr_mask & IB_QP_RETRY_CNT) attr->retry_cnt = cmd->base.retry_cnt; if (cmd->base.attr_mask & IB_QP_RNR_RETRY) attr->rnr_retry = cmd->base.rnr_retry; if (cmd->base.attr_mask & IB_QP_ALT_PATH) { attr->alt_port_num = cmd->base.alt_port_num; attr->alt_timeout = cmd->base.alt_timeout; attr->alt_pkey_index = cmd->base.alt_pkey_index; } if (cmd->base.attr_mask & IB_QP_RATE_LIMIT) attr->rate_limit = cmd->rate_limit; if (cmd->base.attr_mask & IB_QP_AV) copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr, &cmd->base.dest); if (cmd->base.attr_mask & IB_QP_ALT_PATH) copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr, &cmd->base.alt_dest); ret = ib_modify_qp_with_udata(qp, attr, modify_qp_mask(qp->qp_type, cmd->base.attr_mask), &attrs->driver_udata); release_qp: rdma_lookup_put_uobject(&qp->uobject->uevent.uobject, UVERBS_LOOKUP_READ); out: kfree(attr); return ret; } static int ib_uverbs_modify_qp(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_ex_modify_qp cmd; int ret; ret = uverbs_request(attrs, &cmd.base, sizeof(cmd.base)); if (ret) return ret; if (cmd.base.attr_mask & ~IB_QP_ATTR_STANDARD_BITS) return -EOPNOTSUPP; return modify_qp(attrs, &cmd); } static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_ex_modify_qp cmd; struct ib_uverbs_ex_modify_qp_resp resp = { .response_length = uverbs_response_length(attrs, sizeof(resp)) }; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; /* * Last bit is reserved for extending the attr_mask by * using another field. */ if (cmd.base.attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT)) return -EOPNOTSUPP; ret = modify_qp(attrs, &cmd); if (ret) return ret; return uverbs_response(attrs, &resp, sizeof(resp)); } static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_destroy_qp cmd; struct ib_uverbs_destroy_qp_resp resp; struct ib_uobject *uobj; struct ib_uqp_object *obj; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, attrs); if (IS_ERR(uobj)) return PTR_ERR(uobj); obj = container_of(uobj, struct ib_uqp_object, uevent.uobject); memset(&resp, 0, sizeof(resp)); resp.events_reported = obj->uevent.events_reported; uobj_put_destroy(uobj); return uverbs_response(attrs, &resp, sizeof(resp)); } static void *alloc_wr(size_t wr_size, __u32 num_sge) { if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof(struct ib_sge))) / sizeof(struct ib_sge)) return NULL; return kmalloc(ALIGN(wr_size, sizeof(struct ib_sge)) + num_sge * sizeof(struct ib_sge), GFP_KERNEL); } static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_post_send cmd; struct ib_uverbs_post_send_resp resp; struct ib_uverbs_send_wr *user_wr; struct ib_send_wr *wr = NULL, *last, *next; const struct ib_send_wr *bad_wr; struct ib_qp *qp; int i, sg_ind; int is_ud; int ret, ret2; size_t next_size; const struct ib_sge __user *sgls; const void __user *wqes; struct uverbs_req_iter iter; ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd)); if (ret) return ret; wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count); if (IS_ERR(wqes)) return PTR_ERR(wqes); sgls = uverbs_request_next_ptr( &iter, cmd.sge_count * sizeof(struct ib_uverbs_sge)); if (IS_ERR(sgls)) return PTR_ERR(sgls); ret = 
uverbs_request_finish(&iter); if (ret) return ret; user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL); if (!user_wr) return -ENOMEM; qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); if (!qp) { ret = -EINVAL; goto out; } is_ud = qp->qp_type == IB_QPT_UD; sg_ind = 0; last = NULL; for (i = 0; i < cmd.wr_count; ++i) { if (copy_from_user(user_wr, wqes + i * cmd.wqe_size, cmd.wqe_size)) { ret = -EFAULT; goto out_put; } if (user_wr->num_sge + sg_ind > cmd.sge_count) { ret = -EINVAL; goto out_put; } if (is_ud) { struct ib_ud_wr *ud; if (user_wr->opcode != IB_WR_SEND && user_wr->opcode != IB_WR_SEND_WITH_IMM) { ret = -EINVAL; goto out_put; } next_size = sizeof(*ud); ud = alloc_wr(next_size, user_wr->num_sge); if (!ud) { ret = -ENOMEM; goto out_put; } ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH, user_wr->wr.ud.ah, attrs); if (!ud->ah) { kfree(ud); ret = -EINVAL; goto out_put; } ud->remote_qpn = user_wr->wr.ud.remote_qpn; ud->remote_qkey = user_wr->wr.ud.remote_qkey; next = &ud->wr; } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM || user_wr->opcode == IB_WR_RDMA_WRITE || user_wr->opcode == IB_WR_RDMA_READ) { struct ib_rdma_wr *rdma; next_size = sizeof(*rdma); rdma = alloc_wr(next_size, user_wr->num_sge); if (!rdma) { ret = -ENOMEM; goto out_put; } rdma->remote_addr = user_wr->wr.rdma.remote_addr; rdma->rkey = user_wr->wr.rdma.rkey; next = &rdma->wr; } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { struct ib_atomic_wr *atomic; next_size = sizeof(*atomic); atomic = alloc_wr(next_size, user_wr->num_sge); if (!atomic) { ret = -ENOMEM; goto out_put; } atomic->remote_addr = user_wr->wr.atomic.remote_addr; atomic->compare_add = user_wr->wr.atomic.compare_add; atomic->swap = user_wr->wr.atomic.swap; atomic->rkey = user_wr->wr.atomic.rkey; next = &atomic->wr; } else if (user_wr->opcode == IB_WR_SEND || user_wr->opcode == IB_WR_SEND_WITH_IMM || user_wr->opcode == IB_WR_SEND_WITH_INV) { next_size = sizeof(*next); next = alloc_wr(next_size, user_wr->num_sge); if (!next) { ret = -ENOMEM; goto out_put; } } else { ret = -EINVAL; goto out_put; } if (user_wr->opcode == IB_WR_SEND_WITH_IMM || user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { next->ex.imm_data = (__be32 __force) user_wr->ex.imm_data; } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) { next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey; } if (!last) wr = next; else last->next = next; last = next; next->next = NULL; next->wr_id = user_wr->wr_id; next->num_sge = user_wr->num_sge; next->opcode = user_wr->opcode; next->send_flags = user_wr->send_flags; if (next->num_sge) { next->sg_list = (void *) next + ALIGN(next_size, sizeof(struct ib_sge)); if (copy_from_user(next->sg_list, sgls + sg_ind, next->num_sge * sizeof(struct ib_sge))) { ret = -EFAULT; goto out_put; } sg_ind += next->num_sge; } else next->sg_list = NULL; } resp.bad_wr = 0; ret = qp->device->ops.post_send(qp->real_qp, wr, &bad_wr); if (ret) for (next = wr; next; next = next->next) { ++resp.bad_wr; if (next == bad_wr) break; } ret2 = uverbs_response(attrs, &resp, sizeof(resp)); if (ret2) ret = ret2; out_put: rdma_lookup_put_uobject(&qp->uobject->uevent.uobject, UVERBS_LOOKUP_READ); while (wr) { if (is_ud && ud_wr(wr)->ah) uobj_put_obj_read(ud_wr(wr)->ah); next = wr->next; kfree(wr); wr = next; } out: kfree(user_wr); return ret; } static struct ib_recv_wr * ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count, u32 wqe_size, u32 sge_count) { struct ib_uverbs_recv_wr *user_wr; struct ib_recv_wr *wr = 
NULL, *last, *next; int sg_ind; int i; int ret; const struct ib_sge __user *sgls; const void __user *wqes; if (wqe_size < sizeof(struct ib_uverbs_recv_wr)) return ERR_PTR(-EINVAL); wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count); if (IS_ERR(wqes)) return ERR_CAST(wqes); sgls = uverbs_request_next_ptr( iter, sge_count * sizeof(struct ib_uverbs_sge)); if (IS_ERR(sgls)) return ERR_CAST(sgls); ret = uverbs_request_finish(iter); if (ret) return ERR_PTR(ret); user_wr = kmalloc(wqe_size, GFP_KERNEL); if (!user_wr) return ERR_PTR(-ENOMEM); sg_ind = 0; last = NULL; for (i = 0; i < wr_count; ++i) { if (copy_from_user(user_wr, wqes + i * wqe_size, wqe_size)) { ret = -EFAULT; goto err; } if (user_wr->num_sge + sg_ind > sge_count) { ret = -EINVAL; goto err; } if (user_wr->num_sge >= (U32_MAX - ALIGN(sizeof(*next), sizeof(struct ib_sge))) / sizeof(struct ib_sge)) { ret = -EINVAL; goto err; } next = kmalloc(ALIGN(sizeof(*next), sizeof(struct ib_sge)) + user_wr->num_sge * sizeof(struct ib_sge), GFP_KERNEL); if (!next) { ret = -ENOMEM; goto err; } if (!last) wr = next; else last->next = next; last = next; next->next = NULL; next->wr_id = user_wr->wr_id; next->num_sge = user_wr->num_sge; if (next->num_sge) { next->sg_list = (void *)next + ALIGN(sizeof(*next), sizeof(struct ib_sge)); if (copy_from_user(next->sg_list, sgls + sg_ind, next->num_sge * sizeof(struct ib_sge))) { ret = -EFAULT; goto err; } sg_ind += next->num_sge; } else next->sg_list = NULL; } kfree(user_wr); return wr; err: kfree(user_wr); while (wr) { next = wr->next; kfree(wr); wr = next; } return ERR_PTR(ret); } static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_post_recv cmd; struct ib_uverbs_post_recv_resp resp; struct ib_recv_wr *wr, *next; const struct ib_recv_wr *bad_wr; struct ib_qp *qp; int ret, ret2; struct uverbs_req_iter iter; ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd)); if (ret) return ret; wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size, cmd.sge_count); if (IS_ERR(wr)) return PTR_ERR(wr); qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); if (!qp) { ret = -EINVAL; goto out; } resp.bad_wr = 0; ret = qp->device->ops.post_recv(qp->real_qp, wr, &bad_wr); rdma_lookup_put_uobject(&qp->uobject->uevent.uobject, UVERBS_LOOKUP_READ); if (ret) { for (next = wr; next; next = next->next) { ++resp.bad_wr; if (next == bad_wr) break; } } ret2 = uverbs_response(attrs, &resp, sizeof(resp)); if (ret2) ret = ret2; out: while (wr) { next = wr->next; kfree(wr); wr = next; } return ret; } static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_post_srq_recv cmd; struct ib_uverbs_post_srq_recv_resp resp; struct ib_recv_wr *wr, *next; const struct ib_recv_wr *bad_wr; struct ib_srq *srq; int ret, ret2; struct uverbs_req_iter iter; ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd)); if (ret) return ret; wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size, cmd.sge_count); if (IS_ERR(wr)) return PTR_ERR(wr); srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); if (!srq) { ret = -EINVAL; goto out; } resp.bad_wr = 0; ret = srq->device->ops.post_srq_recv(srq, wr, &bad_wr); rdma_lookup_put_uobject(&srq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); if (ret) for (next = wr; next; next = next->next) { ++resp.bad_wr; if (next == bad_wr) break; } ret2 = uverbs_response(attrs, &resp, sizeof(resp)); if (ret2) ret = ret2; out: while (wr) { next = wr->next; kfree(wr); wr = next; } return ret; } static int 
ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_create_ah cmd; struct ib_uverbs_create_ah_resp resp; struct ib_uobject *uobj; struct ib_pd *pd; struct ib_ah *ah; struct rdma_ah_attr attr = {}; int ret; struct ib_device *ib_dev; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; uobj = uobj_alloc(UVERBS_OBJECT_AH, attrs, &ib_dev); if (IS_ERR(uobj)) return PTR_ERR(uobj); if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) { ret = -EINVAL; goto err; } pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); if (!pd) { ret = -EINVAL; goto err; } attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num); rdma_ah_set_make_grd(&attr, false); rdma_ah_set_dlid(&attr, cmd.attr.dlid); rdma_ah_set_sl(&attr, cmd.attr.sl); rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits); rdma_ah_set_static_rate(&attr, cmd.attr.static_rate); rdma_ah_set_port_num(&attr, cmd.attr.port_num); if (cmd.attr.is_global) { rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label, cmd.attr.grh.sgid_index, cmd.attr.grh.hop_limit, cmd.attr.grh.traffic_class); rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid); } else { rdma_ah_set_ah_flags(&attr, 0); } ah = rdma_create_user_ah(pd, &attr, &attrs->driver_udata); if (IS_ERR(ah)) { ret = PTR_ERR(ah); goto err_put; } ah->uobject = uobj; uobj->user_handle = cmd.user_handle; uobj->object = ah; uobj_put_obj_read(pd); uobj_finalize_uobj_create(uobj, attrs); resp.ah_handle = uobj->id; return uverbs_response(attrs, &resp, sizeof(resp)); err_put: uobj_put_obj_read(pd); err: uobj_alloc_abort(uobj, attrs); return ret; } static int ib_uverbs_destroy_ah(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_destroy_ah cmd; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, attrs); } static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_attach_mcast cmd; struct ib_qp *qp; struct ib_uqp_object *obj; struct ib_uverbs_mcast_entry *mcast; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); if (!qp) return -EINVAL; obj = qp->uobject; mutex_lock(&obj->mcast_lock); list_for_each_entry(mcast, &obj->mcast_list, list) if (cmd.mlid == mcast->lid && !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { ret = 0; goto out_put; } mcast = kmalloc(sizeof *mcast, GFP_KERNEL); if (!mcast) { ret = -ENOMEM; goto out_put; } mcast->lid = cmd.mlid; memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw); ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid); if (!ret) list_add_tail(&mcast->list, &obj->mcast_list); else kfree(mcast); out_put: mutex_unlock(&obj->mcast_lock); rdma_lookup_put_uobject(&qp->uobject->uevent.uobject, UVERBS_LOOKUP_READ); return ret; } static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_detach_mcast cmd; struct ib_uqp_object *obj; struct ib_qp *qp; struct ib_uverbs_mcast_entry *mcast; int ret; bool found = false; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); if (!qp) return -EINVAL; obj = qp->uobject; mutex_lock(&obj->mcast_lock); list_for_each_entry(mcast, &obj->mcast_list, list) if (cmd.mlid == mcast->lid && !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { list_del(&mcast->list); kfree(mcast); found = true; break; } if (!found) { ret = -EINVAL; goto out_put; } ret = ib_detach_mcast(qp, (union ib_gid 
*)cmd.gid, cmd.mlid); out_put: mutex_unlock(&obj->mcast_lock); rdma_lookup_put_uobject(&qp->uobject->uevent.uobject, UVERBS_LOOKUP_READ); return ret; } struct ib_uflow_resources *flow_resources_alloc(size_t num_specs) { struct ib_uflow_resources *resources; resources = kzalloc(sizeof(*resources), GFP_KERNEL); if (!resources) return NULL; if (!num_specs) goto out; resources->counters = kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL); resources->collection = kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL); if (!resources->counters || !resources->collection) goto err; out: resources->max = num_specs; return resources; err: kfree(resources->counters); kfree(resources); return NULL; } EXPORT_SYMBOL(flow_resources_alloc); void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res) { unsigned int i; if (!uflow_res) return; for (i = 0; i < uflow_res->collection_num; i++) atomic_dec(&uflow_res->collection[i]->usecnt); for (i = 0; i < uflow_res->counters_num; i++) atomic_dec(&uflow_res->counters[i]->usecnt); kfree(uflow_res->collection); kfree(uflow_res->counters); kfree(uflow_res); } EXPORT_SYMBOL(ib_uverbs_flow_resources_free); void flow_resources_add(struct ib_uflow_resources *uflow_res, enum ib_flow_spec_type type, void *ibobj) { WARN_ON(uflow_res->num >= uflow_res->max); switch (type) { case IB_FLOW_SPEC_ACTION_HANDLE: atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt); uflow_res->collection[uflow_res->collection_num++] = (struct ib_flow_action *)ibobj; break; case IB_FLOW_SPEC_ACTION_COUNT: atomic_inc(&((struct ib_counters *)ibobj)->usecnt); uflow_res->counters[uflow_res->counters_num++] = (struct ib_counters *)ibobj; break; default: WARN_ON(1); } uflow_res->num++; } EXPORT_SYMBOL(flow_resources_add); static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs, struct ib_uverbs_flow_spec *kern_spec, union ib_flow_spec *ib_spec, struct ib_uflow_resources *uflow_res) { ib_spec->type = kern_spec->type; switch (ib_spec->type) { case IB_FLOW_SPEC_ACTION_TAG: if (kern_spec->flow_tag.size != sizeof(struct ib_uverbs_flow_spec_action_tag)) return -EINVAL; ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag); ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id; break; case IB_FLOW_SPEC_ACTION_DROP: if (kern_spec->drop.size != sizeof(struct ib_uverbs_flow_spec_action_drop)) return -EINVAL; ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop); break; case IB_FLOW_SPEC_ACTION_HANDLE: if (kern_spec->action.size != sizeof(struct ib_uverbs_flow_spec_action_handle)) return -EOPNOTSUPP; ib_spec->action.act = uobj_get_obj_read(flow_action, UVERBS_OBJECT_FLOW_ACTION, kern_spec->action.handle, attrs); if (!ib_spec->action.act) return -EINVAL; ib_spec->action.size = sizeof(struct ib_flow_spec_action_handle); flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE, ib_spec->action.act); uobj_put_obj_read(ib_spec->action.act); break; case IB_FLOW_SPEC_ACTION_COUNT: if (kern_spec->flow_count.size != sizeof(struct ib_uverbs_flow_spec_action_count)) return -EINVAL; ib_spec->flow_count.counters = uobj_get_obj_read(counters, UVERBS_OBJECT_COUNTERS, kern_spec->flow_count.handle, attrs); if (!ib_spec->flow_count.counters) return -EINVAL; ib_spec->flow_count.size = sizeof(struct ib_flow_spec_action_count); flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_COUNT, ib_spec->flow_count.counters); uobj_put_obj_read(ib_spec->flow_count.counters); break; default: return -EINVAL; } return 0; } static ssize_t spec_filter_size(const void *kern_spec_filter, 
u16 kern_filter_size, u16 ib_real_filter_sz) { /* * User space filter structures must be 64 bit aligned, otherwise this * may pass, but we won't handle additional new attributes. */ if (kern_filter_size > ib_real_filter_sz) { if (memchr_inv(kern_spec_filter + ib_real_filter_sz, 0, kern_filter_size - ib_real_filter_sz)) return -EINVAL; return ib_real_filter_sz; } return kern_filter_size; } int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type, const void *kern_spec_mask, const void *kern_spec_val, size_t kern_filter_sz, union ib_flow_spec *ib_spec) { ssize_t actual_filter_sz; ssize_t ib_filter_sz; /* User flow spec size must be aligned to 4 bytes */ if (kern_filter_sz != ALIGN(kern_filter_sz, 4)) return -EINVAL; ib_spec->type = type; if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL)) return -EINVAL; switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) { case IB_FLOW_SPEC_ETH: ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); if (actual_filter_sz <= 0) return -EINVAL; ib_spec->size = sizeof(struct ib_flow_spec_eth); memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz); memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz); break; case IB_FLOW_SPEC_IPV4: ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); if (actual_filter_sz <= 0) return -EINVAL; ib_spec->size = sizeof(struct ib_flow_spec_ipv4); memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz); memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz); break; case IB_FLOW_SPEC_IPV6: ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); if (actual_filter_sz <= 0) return -EINVAL; ib_spec->size = sizeof(struct ib_flow_spec_ipv6); memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz); memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz); if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) || (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20)) return -EINVAL; break; case IB_FLOW_SPEC_TCP: case IB_FLOW_SPEC_UDP: ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); if (actual_filter_sz <= 0) return -EINVAL; ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp); memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz); memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz); break; case IB_FLOW_SPEC_VXLAN_TUNNEL: ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); if (actual_filter_sz <= 0) return -EINVAL; ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel); memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz); memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz); if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) || (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24)) return -EINVAL; break; case IB_FLOW_SPEC_ESP: ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); if (actual_filter_sz <= 0) return -EINVAL; ib_spec->esp.size = sizeof(struct ib_flow_spec_esp); memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz); memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz); break; case 
IB_FLOW_SPEC_GRE: ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); if (actual_filter_sz <= 0) return -EINVAL; ib_spec->gre.size = sizeof(struct ib_flow_spec_gre); memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz); memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz); break; case IB_FLOW_SPEC_MPLS: ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); if (actual_filter_sz <= 0) return -EINVAL; ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls); memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz); memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz); break; default: return -EINVAL; } return 0; } static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec, union ib_flow_spec *ib_spec) { size_t kern_filter_sz; void *kern_spec_mask; void *kern_spec_val; if (check_sub_overflow((size_t)kern_spec->hdr.size, sizeof(struct ib_uverbs_flow_spec_hdr), &kern_filter_sz)) return -EINVAL; kern_filter_sz /= 2; kern_spec_val = (void *)kern_spec + sizeof(struct ib_uverbs_flow_spec_hdr); kern_spec_mask = kern_spec_val + kern_filter_sz; return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type, kern_spec_mask, kern_spec_val, kern_filter_sz, ib_spec); } static int kern_spec_to_ib_spec(struct uverbs_attr_bundle *attrs, struct ib_uverbs_flow_spec *kern_spec, union ib_flow_spec *ib_spec, struct ib_uflow_resources *uflow_res) { if (kern_spec->reserved) return -EINVAL; if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG) return kern_spec_to_ib_spec_action(attrs, kern_spec, ib_spec, uflow_res); else return kern_spec_to_ib_spec_filter(kern_spec, ib_spec); } static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_ex_create_wq cmd; struct ib_uverbs_ex_create_wq_resp resp = {}; struct ib_uwq_object *obj; int err = 0; struct ib_cq *cq; struct ib_pd *pd; struct ib_wq *wq; struct ib_wq_init_attr wq_init_attr = {}; struct ib_device *ib_dev; err = uverbs_request(attrs, &cmd, sizeof(cmd)); if (err) return err; if (cmd.comp_mask) return -EOPNOTSUPP; obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, attrs, &ib_dev); if (IS_ERR(obj)) return PTR_ERR(obj); pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); if (!pd) { err = -EINVAL; goto err_uobj; } cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); if (!cq) { err = -EINVAL; goto err_put_pd; } wq_init_attr.cq = cq; wq_init_attr.max_sge = cmd.max_sge; wq_init_attr.max_wr = cmd.max_wr; wq_init_attr.wq_type = cmd.wq_type; wq_init_attr.event_handler = ib_uverbs_wq_event_handler; wq_init_attr.create_flags = cmd.create_flags; INIT_LIST_HEAD(&obj->uevent.event_list); obj->uevent.uobject.user_handle = cmd.user_handle; wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata); if (IS_ERR(wq)) { err = PTR_ERR(wq); goto err_put_cq; } wq->uobject = obj; obj->uevent.uobject.object = wq; wq->wq_type = wq_init_attr.wq_type; wq->cq = cq; wq->pd = pd; wq->device = pd->device; atomic_set(&wq->usecnt, 0); atomic_inc(&pd->usecnt); atomic_inc(&cq->usecnt); obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file); if (obj->uevent.event_file) uverbs_uobject_get(&obj->uevent.event_file->uobj); uobj_put_obj_read(pd); rdma_lookup_put_uobject(&cq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); uobj_finalize_uobj_create(&obj->uevent.uobject, attrs); resp.wq_handle = 
obj->uevent.uobject.id; resp.max_sge = wq_init_attr.max_sge; resp.max_wr = wq_init_attr.max_wr; resp.wqn = wq->wq_num; resp.response_length = uverbs_response_length(attrs, sizeof(resp)); return uverbs_response(attrs, &resp, sizeof(resp)); err_put_cq: rdma_lookup_put_uobject(&cq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); err_put_pd: uobj_put_obj_read(pd); err_uobj: uobj_alloc_abort(&obj->uevent.uobject, attrs); return err; } static int ib_uverbs_ex_destroy_wq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_ex_destroy_wq cmd; struct ib_uverbs_ex_destroy_wq_resp resp = {}; struct ib_uobject *uobj; struct ib_uwq_object *obj; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; if (cmd.comp_mask) return -EOPNOTSUPP; resp.response_length = uverbs_response_length(attrs, sizeof(resp)); uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, attrs); if (IS_ERR(uobj)) return PTR_ERR(uobj); obj = container_of(uobj, struct ib_uwq_object, uevent.uobject); resp.events_reported = obj->uevent.events_reported; uobj_put_destroy(uobj); return uverbs_response(attrs, &resp, sizeof(resp)); } static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_ex_modify_wq cmd; struct ib_wq *wq; struct ib_wq_attr wq_attr = {}; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; if (!cmd.attr_mask) return -EINVAL; if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS)) return -EINVAL; wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs); if (!wq) return -EINVAL; if (cmd.attr_mask & IB_WQ_FLAGS) { wq_attr.flags = cmd.flags; wq_attr.flags_mask = cmd.flags_mask; } if (cmd.attr_mask & IB_WQ_CUR_STATE) { if (cmd.curr_wq_state > IB_WQS_ERR) return -EINVAL; wq_attr.curr_wq_state = cmd.curr_wq_state; } else { wq_attr.curr_wq_state = wq->state; } if (cmd.attr_mask & IB_WQ_STATE) { if (cmd.wq_state > IB_WQS_ERR) return -EINVAL; wq_attr.wq_state = cmd.wq_state; } else { wq_attr.wq_state = wq_attr.curr_wq_state; } ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask, &attrs->driver_udata); rdma_lookup_put_uobject(&wq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); return ret; } static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_ex_create_rwq_ind_table cmd; struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {}; struct ib_uobject *uobj; int err; struct ib_rwq_ind_table_init_attr init_attr = {}; struct ib_rwq_ind_table *rwq_ind_tbl; struct ib_wq **wqs = NULL; u32 *wqs_handles = NULL; struct ib_wq *wq = NULL; int i, num_read_wqs; u32 num_wq_handles; struct uverbs_req_iter iter; struct ib_device *ib_dev; err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd)); if (err) return err; if (cmd.comp_mask) return -EOPNOTSUPP; if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE) return -EINVAL; num_wq_handles = 1 << cmd.log_ind_tbl_size; wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles), GFP_KERNEL); if (!wqs_handles) return -ENOMEM; err = uverbs_request_next(&iter, wqs_handles, num_wq_handles * sizeof(__u32)); if (err) goto err_free; err = uverbs_request_finish(&iter); if (err) goto err_free; wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL); if (!wqs) { err = -ENOMEM; goto err_free; } for (num_read_wqs = 0; num_read_wqs < num_wq_handles; num_read_wqs++) { wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, wqs_handles[num_read_wqs], attrs); if (!wq) { err = -EINVAL; goto put_wqs; } wqs[num_read_wqs] = wq; atomic_inc(&wqs[num_read_wqs]->usecnt); } uobj = 
uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, attrs, &ib_dev); if (IS_ERR(uobj)) { err = PTR_ERR(uobj); goto put_wqs; } rwq_ind_tbl = rdma_zalloc_drv_obj(ib_dev, ib_rwq_ind_table); if (!rwq_ind_tbl) { err = -ENOMEM; goto err_uobj; } init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size; init_attr.ind_tbl = wqs; rwq_ind_tbl->ind_tbl = wqs; rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size; rwq_ind_tbl->uobject = uobj; uobj->object = rwq_ind_tbl; rwq_ind_tbl->device = ib_dev; atomic_set(&rwq_ind_tbl->usecnt, 0); err = ib_dev->ops.create_rwq_ind_table(rwq_ind_tbl, &init_attr, &attrs->driver_udata); if (err) goto err_create; for (i = 0; i < num_wq_handles; i++) rdma_lookup_put_uobject(&wqs[i]->uobject->uevent.uobject, UVERBS_LOOKUP_READ); kfree(wqs_handles); uobj_finalize_uobj_create(uobj, attrs); resp.ind_tbl_handle = uobj->id; resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num; resp.response_length = uverbs_response_length(attrs, sizeof(resp)); return uverbs_response(attrs, &resp, sizeof(resp)); err_create: kfree(rwq_ind_tbl); err_uobj: uobj_alloc_abort(uobj, attrs); put_wqs: for (i = 0; i < num_read_wqs; i++) { rdma_lookup_put_uobject(&wqs[i]->uobject->uevent.uobject, UVERBS_LOOKUP_READ); atomic_dec(&wqs[i]->usecnt); } err_free: kfree(wqs_handles); kfree(wqs); return err; } static int ib_uverbs_ex_destroy_rwq_ind_table(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_ex_destroy_rwq_ind_table cmd; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; if (cmd.comp_mask) return -EOPNOTSUPP; return uobj_perform_destroy(UVERBS_OBJECT_RWQ_IND_TBL, cmd.ind_tbl_handle, attrs); } static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_create_flow cmd; struct ib_uverbs_create_flow_resp resp = {}; struct ib_uobject *uobj; struct ib_flow *flow_id; struct ib_uverbs_flow_attr *kern_flow_attr; struct ib_flow_attr *flow_attr; struct ib_qp *qp; struct ib_uflow_resources *uflow_res; struct ib_uverbs_flow_spec_hdr *kern_spec; struct uverbs_req_iter iter; int err; void *ib_spec; int i; struct ib_device *ib_dev; err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd)); if (err) return err; if (cmd.comp_mask) return -EINVAL; if (!capable(CAP_NET_RAW)) return -EPERM; if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED) return -EINVAL; if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) || (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT))) return -EINVAL; if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS) return -EINVAL; if (cmd.flow_attr.size > (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) return -EINVAL; if (cmd.flow_attr.reserved[0] || cmd.flow_attr.reserved[1]) return -EINVAL; if (cmd.flow_attr.num_of_specs) { kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, GFP_KERNEL); if (!kern_flow_attr) return -ENOMEM; *kern_flow_attr = cmd.flow_attr; err = uverbs_request_next(&iter, &kern_flow_attr->flow_specs, cmd.flow_attr.size); if (err) goto err_free_attr; } else { kern_flow_attr = &cmd.flow_attr; } err = uverbs_request_finish(&iter); if (err) goto err_free_attr; uobj = uobj_alloc(UVERBS_OBJECT_FLOW, attrs, &ib_dev); if (IS_ERR(uobj)) { err = PTR_ERR(uobj); goto err_free_attr; } if (!rdma_is_port_valid(uobj->context->device, cmd.flow_attr.port)) { err = -EINVAL; goto err_uobj; } qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); if (!qp) { err = -EINVAL; goto err_uobj; } if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) 
{ err = -EINVAL; goto err_put; } flow_attr = kzalloc(struct_size(flow_attr, flows, cmd.flow_attr.num_of_specs), GFP_KERNEL); if (!flow_attr) { err = -ENOMEM; goto err_put; } uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs); if (!uflow_res) { err = -ENOMEM; goto err_free_flow_attr; } flow_attr->type = kern_flow_attr->type; flow_attr->priority = kern_flow_attr->priority; flow_attr->num_of_specs = kern_flow_attr->num_of_specs; flow_attr->port = kern_flow_attr->port; flow_attr->flags = kern_flow_attr->flags; flow_attr->size = sizeof(*flow_attr); kern_spec = kern_flow_attr->flow_specs; ib_spec = flow_attr + 1; for (i = 0; i < flow_attr->num_of_specs && cmd.flow_attr.size >= sizeof(*kern_spec) && cmd.flow_attr.size >= kern_spec->size; i++) { err = kern_spec_to_ib_spec( attrs, (struct ib_uverbs_flow_spec *)kern_spec, ib_spec, uflow_res); if (err) goto err_free; flow_attr->size += ((union ib_flow_spec *) ib_spec)->size; cmd.flow_attr.size -= kern_spec->size; kern_spec = ((void *)kern_spec) + kern_spec->size; ib_spec += ((union ib_flow_spec *) ib_spec)->size; } if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { pr_warn("create flow failed, flow %d: %u bytes left from uverb cmd\n", i, cmd.flow_attr.size); err = -EINVAL; goto err_free; } flow_id = qp->device->ops.create_flow(qp, flow_attr, &attrs->driver_udata); if (IS_ERR(flow_id)) { err = PTR_ERR(flow_id); goto err_free; } ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res); rdma_lookup_put_uobject(&qp->uobject->uevent.uobject, UVERBS_LOOKUP_READ); kfree(flow_attr); if (cmd.flow_attr.num_of_specs) kfree(kern_flow_attr); uobj_finalize_uobj_create(uobj, attrs); resp.flow_handle = uobj->id; return uverbs_response(attrs, &resp, sizeof(resp)); err_free: ib_uverbs_flow_resources_free(uflow_res); err_free_flow_attr: kfree(flow_attr); err_put: rdma_lookup_put_uobject(&qp->uobject->uevent.uobject, UVERBS_LOOKUP_READ); err_uobj: uobj_alloc_abort(uobj, attrs); err_free_attr: if (cmd.flow_attr.num_of_specs) kfree(kern_flow_attr); return err; } static int ib_uverbs_ex_destroy_flow(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_destroy_flow cmd; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; if (cmd.comp_mask) return -EINVAL; return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, attrs); } static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs, struct ib_uverbs_create_xsrq *cmd, struct ib_udata *udata) { struct ib_uverbs_create_srq_resp resp = {}; struct ib_usrq_object *obj; struct ib_pd *pd; struct ib_srq *srq; struct ib_srq_init_attr attr; int ret; struct ib_uobject *xrcd_uobj; struct ib_device *ib_dev; obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, attrs, &ib_dev); if (IS_ERR(obj)) return PTR_ERR(obj); if (cmd->srq_type == IB_SRQT_TM) attr.ext.tag_matching.max_num_tags = cmd->max_num_tags; if (cmd->srq_type == IB_SRQT_XRC) { xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle, attrs); if (IS_ERR(xrcd_uobj)) { ret = -EINVAL; goto err; } attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object; if (!attr.ext.xrc.xrcd) { ret = -EINVAL; goto err_put_xrcd; } obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); atomic_inc(&obj->uxrcd->refcnt); } if (ib_srq_has_cq(cmd->srq_type)) { attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->cq_handle, attrs); if (!attr.ext.cq) { ret = -EINVAL; goto err_put_xrcd; } } pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs); if (!pd) { ret = -EINVAL; goto err_put_cq; } attr.event_handler 
= ib_uverbs_srq_event_handler; attr.srq_type = cmd->srq_type; attr.attr.max_wr = cmd->max_wr; attr.attr.max_sge = cmd->max_sge; attr.attr.srq_limit = cmd->srq_limit; INIT_LIST_HEAD(&obj->uevent.event_list); obj->uevent.uobject.user_handle = cmd->user_handle; srq = ib_create_srq_user(pd, &attr, obj, udata); if (IS_ERR(srq)) { ret = PTR_ERR(srq); goto err_put_pd; } obj->uevent.uobject.object = srq; obj->uevent.uobject.user_handle = cmd->user_handle; obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file); if (obj->uevent.event_file) uverbs_uobject_get(&obj->uevent.event_file->uobj); if (cmd->srq_type == IB_SRQT_XRC) resp.srqn = srq->ext.xrc.srq_num; if (cmd->srq_type == IB_SRQT_XRC) uobj_put_read(xrcd_uobj); if (ib_srq_has_cq(cmd->srq_type)) rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); uobj_put_obj_read(pd); uobj_finalize_uobj_create(&obj->uevent.uobject, attrs); resp.srq_handle = obj->uevent.uobject.id; resp.max_wr = attr.attr.max_wr; resp.max_sge = attr.attr.max_sge; return uverbs_response(attrs, &resp, sizeof(resp)); err_put_pd: uobj_put_obj_read(pd); err_put_cq: if (ib_srq_has_cq(cmd->srq_type)) rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); err_put_xrcd: if (cmd->srq_type == IB_SRQT_XRC) { atomic_dec(&obj->uxrcd->refcnt); uobj_put_read(xrcd_uobj); } err: uobj_alloc_abort(&obj->uevent.uobject, attrs); return ret; } static int ib_uverbs_create_srq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_create_srq cmd; struct ib_uverbs_create_xsrq xcmd; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; memset(&xcmd, 0, sizeof(xcmd)); xcmd.response = cmd.response; xcmd.user_handle = cmd.user_handle; xcmd.srq_type = IB_SRQT_BASIC; xcmd.pd_handle = cmd.pd_handle; xcmd.max_wr = cmd.max_wr; xcmd.max_sge = cmd.max_sge; xcmd.srq_limit = cmd.srq_limit; return __uverbs_create_xsrq(attrs, &xcmd, &attrs->driver_udata); } static int ib_uverbs_create_xsrq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_create_xsrq cmd; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; return __uverbs_create_xsrq(attrs, &cmd, &attrs->driver_udata); } static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_modify_srq cmd; struct ib_srq *srq; struct ib_srq_attr attr; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); if (!srq) return -EINVAL; attr.max_wr = cmd.max_wr; attr.srq_limit = cmd.srq_limit; ret = srq->device->ops.modify_srq(srq, &attr, cmd.attr_mask, &attrs->driver_udata); rdma_lookup_put_uobject(&srq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); return ret; } static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_query_srq cmd; struct ib_uverbs_query_srq_resp resp; struct ib_srq_attr attr; struct ib_srq *srq; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); if (!srq) return -EINVAL; ret = ib_query_srq(srq, &attr); rdma_lookup_put_uobject(&srq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); if (ret) return ret; memset(&resp, 0, sizeof resp); resp.max_wr = attr.max_wr; resp.max_sge = attr.max_sge; resp.srq_limit = attr.srq_limit; return uverbs_response(attrs, &resp, sizeof(resp)); } static int ib_uverbs_destroy_srq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_destroy_srq cmd; struct ib_uverbs_destroy_srq_resp 
resp; struct ib_uobject *uobj; struct ib_uevent_object *obj; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); if (IS_ERR(uobj)) return PTR_ERR(uobj); obj = container_of(uobj, struct ib_uevent_object, uobject); memset(&resp, 0, sizeof(resp)); resp.events_reported = obj->events_reported; uobj_put_destroy(uobj); return uverbs_response(attrs, &resp, sizeof(resp)); } static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_ex_query_device_resp resp = {}; struct ib_uverbs_ex_query_device cmd; struct ib_device_attr attr = {0}; struct ib_ucontext *ucontext; struct ib_device *ib_dev; int err; ucontext = ib_uverbs_get_ucontext(attrs); if (IS_ERR(ucontext)) return PTR_ERR(ucontext); ib_dev = ucontext->device; err = uverbs_request(attrs, &cmd, sizeof(cmd)); if (err) return err; if (cmd.comp_mask) return -EINVAL; if (cmd.reserved) return -EINVAL; err = ib_dev->ops.query_device(ib_dev, &attr, &attrs->driver_udata); if (err) return err; copy_query_dev_fields(ucontext, &resp.base, &attr); resp.odp_caps.general_caps = attr.odp_caps.general_caps; resp.odp_caps.per_transport_caps.rc_odp_caps = attr.odp_caps.per_transport_caps.rc_odp_caps; resp.odp_caps.per_transport_caps.uc_odp_caps = attr.odp_caps.per_transport_caps.uc_odp_caps; resp.odp_caps.per_transport_caps.ud_odp_caps = attr.odp_caps.per_transport_caps.ud_odp_caps; resp.xrc_odp_caps = attr.odp_caps.per_transport_caps.xrc_odp_caps; resp.timestamp_mask = attr.timestamp_mask; resp.hca_core_clock = attr.hca_core_clock; resp.device_cap_flags_ex = attr.device_cap_flags; resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts; resp.rss_caps.max_rwq_indirection_tables = attr.rss_caps.max_rwq_indirection_tables; resp.rss_caps.max_rwq_indirection_table_size = attr.rss_caps.max_rwq_indirection_table_size; resp.max_wq_type_rq = attr.max_wq_type_rq; resp.raw_packet_caps = attr.raw_packet_caps; resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size; resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags; resp.tm_caps.max_ops = attr.tm_caps.max_ops; resp.tm_caps.max_sge = attr.tm_caps.max_sge; resp.tm_caps.flags = attr.tm_caps.flags; resp.cq_moderation_caps.max_cq_moderation_count = attr.cq_caps.max_cq_moderation_count; resp.cq_moderation_caps.max_cq_moderation_period = attr.cq_caps.max_cq_moderation_period; resp.max_dm_size = attr.max_dm_size; resp.response_length = uverbs_response_length(attrs, sizeof(resp)); return uverbs_response(attrs, &resp, sizeof(resp)); } static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_ex_modify_cq cmd; struct ib_cq *cq; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; if (!cmd.attr_mask || cmd.reserved) return -EINVAL; if (cmd.attr_mask > IB_CQ_MODERATE) return -EOPNOTSUPP; cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); if (!cq) return -EINVAL; ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period); rdma_lookup_put_uobject(&cq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); return ret; } /* * Describe the input structs for write(). Some write methods have an input * only struct, most have an input and output. If the struct has an output then * the 'response' u64 must be the first field in the request structure. * * If udata is present then both the request and response structs have a * trailing driver_data flex array. In this case the size of the base struct * cannot be changed. 
*/ #define UAPI_DEF_WRITE_IO(req, resp) \ .write.has_resp = 1 + \ BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) + \ BUILD_BUG_ON_ZERO(sizeof_field(req, response) != \ sizeof(u64)), \ .write.req_size = sizeof(req), .write.resp_size = sizeof(resp) #define UAPI_DEF_WRITE_I(req) .write.req_size = sizeof(req) #define UAPI_DEF_WRITE_UDATA_IO(req, resp) \ UAPI_DEF_WRITE_IO(req, resp), \ .write.has_udata = \ 1 + \ BUILD_BUG_ON_ZERO(offsetof(req, driver_data) != \ sizeof(req)) + \ BUILD_BUG_ON_ZERO(offsetof(resp, driver_data) != \ sizeof(resp)) #define UAPI_DEF_WRITE_UDATA_I(req) \ UAPI_DEF_WRITE_I(req), \ .write.has_udata = \ 1 + BUILD_BUG_ON_ZERO(offsetof(req, driver_data) != \ sizeof(req)) /* * The _EX versions are for use with WRITE_EX and allow the last struct member * to be specified. Buffers that do not include that member will be rejected. */ #define UAPI_DEF_WRITE_IO_EX(req, req_last_member, resp, resp_last_member) \ .write.has_resp = 1, \ .write.req_size = offsetofend(req, req_last_member), \ .write.resp_size = offsetofend(resp, resp_last_member) #define UAPI_DEF_WRITE_I_EX(req, req_last_member) \ .write.req_size = offsetofend(req, req_last_member) const struct uapi_definition uverbs_def_write_intf[] = { DECLARE_UVERBS_OBJECT( UVERBS_OBJECT_AH, DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_AH, ib_uverbs_create_ah, UAPI_DEF_WRITE_UDATA_IO( struct ib_uverbs_create_ah, struct ib_uverbs_create_ah_resp)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_DESTROY_AH, ib_uverbs_destroy_ah, UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_ah)), UAPI_DEF_OBJ_NEEDS_FN(create_user_ah), UAPI_DEF_OBJ_NEEDS_FN(destroy_ah)), DECLARE_UVERBS_OBJECT( UVERBS_OBJECT_COMP_CHANNEL, DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL, ib_uverbs_create_comp_channel, UAPI_DEF_WRITE_IO( struct ib_uverbs_create_comp_channel, struct ib_uverbs_create_comp_channel_resp))), DECLARE_UVERBS_OBJECT( UVERBS_OBJECT_CQ, DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_CQ, ib_uverbs_create_cq, UAPI_DEF_WRITE_UDATA_IO( struct ib_uverbs_create_cq, struct ib_uverbs_create_cq_resp), UAPI_DEF_METHOD_NEEDS_FN(create_cq)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_DESTROY_CQ, ib_uverbs_destroy_cq, UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_cq, struct ib_uverbs_destroy_cq_resp), UAPI_DEF_METHOD_NEEDS_FN(destroy_cq)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_POLL_CQ, ib_uverbs_poll_cq, UAPI_DEF_WRITE_IO(struct ib_uverbs_poll_cq, struct ib_uverbs_poll_cq_resp), UAPI_DEF_METHOD_NEEDS_FN(poll_cq)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_REQ_NOTIFY_CQ, ib_uverbs_req_notify_cq, UAPI_DEF_WRITE_I(struct ib_uverbs_req_notify_cq), UAPI_DEF_METHOD_NEEDS_FN(req_notify_cq)), DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_RESIZE_CQ, ib_uverbs_resize_cq, UAPI_DEF_WRITE_UDATA_IO( struct ib_uverbs_resize_cq, struct ib_uverbs_resize_cq_resp), UAPI_DEF_METHOD_NEEDS_FN(resize_cq)), DECLARE_UVERBS_WRITE_EX( IB_USER_VERBS_EX_CMD_CREATE_CQ, ib_uverbs_ex_create_cq, UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_cq, reserved, struct ib_uverbs_ex_create_cq_resp, response_length), UAPI_DEF_METHOD_NEEDS_FN(create_cq)), DECLARE_UVERBS_WRITE_EX( IB_USER_VERBS_EX_CMD_MODIFY_CQ, ib_uverbs_ex_modify_cq, UAPI_DEF_WRITE_I(struct ib_uverbs_ex_modify_cq), UAPI_DEF_METHOD_NEEDS_FN(modify_cq))), DECLARE_UVERBS_OBJECT( UVERBS_OBJECT_DEVICE, DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_GET_CONTEXT, ib_uverbs_get_context, UAPI_DEF_WRITE_UDATA_IO( struct ib_uverbs_get_context, struct ib_uverbs_get_context_resp)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_QUERY_DEVICE, ib_uverbs_query_device, 
UAPI_DEF_WRITE_IO(struct ib_uverbs_query_device, struct ib_uverbs_query_device_resp)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_QUERY_PORT, ib_uverbs_query_port, UAPI_DEF_WRITE_IO(struct ib_uverbs_query_port, struct ib_uverbs_query_port_resp), UAPI_DEF_METHOD_NEEDS_FN(query_port)), DECLARE_UVERBS_WRITE_EX( IB_USER_VERBS_EX_CMD_QUERY_DEVICE, ib_uverbs_ex_query_device, UAPI_DEF_WRITE_IO_EX( struct ib_uverbs_ex_query_device, reserved, struct ib_uverbs_ex_query_device_resp, response_length), UAPI_DEF_METHOD_NEEDS_FN(query_device)), UAPI_DEF_OBJ_NEEDS_FN(alloc_ucontext), UAPI_DEF_OBJ_NEEDS_FN(dealloc_ucontext)), DECLARE_UVERBS_OBJECT( UVERBS_OBJECT_FLOW, DECLARE_UVERBS_WRITE_EX( IB_USER_VERBS_EX_CMD_CREATE_FLOW, ib_uverbs_ex_create_flow, UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_create_flow, flow_attr, struct ib_uverbs_create_flow_resp, flow_handle), UAPI_DEF_METHOD_NEEDS_FN(create_flow)), DECLARE_UVERBS_WRITE_EX( IB_USER_VERBS_EX_CMD_DESTROY_FLOW, ib_uverbs_ex_destroy_flow, UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_flow), UAPI_DEF_METHOD_NEEDS_FN(destroy_flow))), DECLARE_UVERBS_OBJECT( UVERBS_OBJECT_MR, DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_DEREG_MR, ib_uverbs_dereg_mr, UAPI_DEF_WRITE_I(struct ib_uverbs_dereg_mr), UAPI_DEF_METHOD_NEEDS_FN(dereg_mr)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_REG_MR, ib_uverbs_reg_mr, UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_reg_mr, struct ib_uverbs_reg_mr_resp), UAPI_DEF_METHOD_NEEDS_FN(reg_user_mr)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_REREG_MR, ib_uverbs_rereg_mr, UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_rereg_mr, struct ib_uverbs_rereg_mr_resp), UAPI_DEF_METHOD_NEEDS_FN(rereg_user_mr))), DECLARE_UVERBS_OBJECT( UVERBS_OBJECT_MW, DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_ALLOC_MW, ib_uverbs_alloc_mw, UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_mw, struct ib_uverbs_alloc_mw_resp), UAPI_DEF_METHOD_NEEDS_FN(alloc_mw)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_DEALLOC_MW, ib_uverbs_dealloc_mw, UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_mw), UAPI_DEF_METHOD_NEEDS_FN(dealloc_mw))), DECLARE_UVERBS_OBJECT( UVERBS_OBJECT_PD, DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_ALLOC_PD, ib_uverbs_alloc_pd, UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_pd, struct ib_uverbs_alloc_pd_resp), UAPI_DEF_METHOD_NEEDS_FN(alloc_pd)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_DEALLOC_PD, ib_uverbs_dealloc_pd, UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_pd), UAPI_DEF_METHOD_NEEDS_FN(dealloc_pd))), DECLARE_UVERBS_OBJECT( UVERBS_OBJECT_QP, DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_ATTACH_MCAST, ib_uverbs_attach_mcast, UAPI_DEF_WRITE_I(struct ib_uverbs_attach_mcast), UAPI_DEF_METHOD_NEEDS_FN(attach_mcast), UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)), DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_QP, ib_uverbs_create_qp, UAPI_DEF_WRITE_UDATA_IO( struct ib_uverbs_create_qp, struct ib_uverbs_create_qp_resp), UAPI_DEF_METHOD_NEEDS_FN(create_qp)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_DESTROY_QP, ib_uverbs_destroy_qp, UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_qp, struct ib_uverbs_destroy_qp_resp), UAPI_DEF_METHOD_NEEDS_FN(destroy_qp)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_DETACH_MCAST, ib_uverbs_detach_mcast, UAPI_DEF_WRITE_I(struct ib_uverbs_detach_mcast), UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_MODIFY_QP, ib_uverbs_modify_qp, UAPI_DEF_WRITE_I(struct ib_uverbs_modify_qp), UAPI_DEF_METHOD_NEEDS_FN(modify_qp)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_POST_RECV, ib_uverbs_post_recv, UAPI_DEF_WRITE_IO(struct ib_uverbs_post_recv, struct ib_uverbs_post_recv_resp), 
UAPI_DEF_METHOD_NEEDS_FN(post_recv)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_POST_SEND, ib_uverbs_post_send, UAPI_DEF_WRITE_IO(struct ib_uverbs_post_send, struct ib_uverbs_post_send_resp), UAPI_DEF_METHOD_NEEDS_FN(post_send)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_QUERY_QP, ib_uverbs_query_qp, UAPI_DEF_WRITE_IO(struct ib_uverbs_query_qp, struct ib_uverbs_query_qp_resp), UAPI_DEF_METHOD_NEEDS_FN(query_qp)), DECLARE_UVERBS_WRITE_EX( IB_USER_VERBS_EX_CMD_CREATE_QP, ib_uverbs_ex_create_qp, UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_qp, comp_mask, struct ib_uverbs_ex_create_qp_resp, response_length), UAPI_DEF_METHOD_NEEDS_FN(create_qp)), DECLARE_UVERBS_WRITE_EX( IB_USER_VERBS_EX_CMD_MODIFY_QP, ib_uverbs_ex_modify_qp, UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_modify_qp, base, struct ib_uverbs_ex_modify_qp_resp, response_length), UAPI_DEF_METHOD_NEEDS_FN(modify_qp))), DECLARE_UVERBS_OBJECT( UVERBS_OBJECT_RWQ_IND_TBL, DECLARE_UVERBS_WRITE_EX( IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL, ib_uverbs_ex_create_rwq_ind_table, UAPI_DEF_WRITE_IO_EX( struct ib_uverbs_ex_create_rwq_ind_table, log_ind_tbl_size, struct ib_uverbs_ex_create_rwq_ind_table_resp, ind_tbl_num), UAPI_DEF_METHOD_NEEDS_FN(create_rwq_ind_table)), DECLARE_UVERBS_WRITE_EX( IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL, ib_uverbs_ex_destroy_rwq_ind_table, UAPI_DEF_WRITE_I( struct ib_uverbs_ex_destroy_rwq_ind_table), UAPI_DEF_METHOD_NEEDS_FN(destroy_rwq_ind_table))), DECLARE_UVERBS_OBJECT( UVERBS_OBJECT_WQ, DECLARE_UVERBS_WRITE_EX( IB_USER_VERBS_EX_CMD_CREATE_WQ, ib_uverbs_ex_create_wq, UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_wq, max_sge, struct ib_uverbs_ex_create_wq_resp, wqn), UAPI_DEF_METHOD_NEEDS_FN(create_wq)), DECLARE_UVERBS_WRITE_EX( IB_USER_VERBS_EX_CMD_DESTROY_WQ, ib_uverbs_ex_destroy_wq, UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_destroy_wq, wq_handle, struct ib_uverbs_ex_destroy_wq_resp, reserved), UAPI_DEF_METHOD_NEEDS_FN(destroy_wq)), DECLARE_UVERBS_WRITE_EX( IB_USER_VERBS_EX_CMD_MODIFY_WQ, ib_uverbs_ex_modify_wq, UAPI_DEF_WRITE_I_EX(struct ib_uverbs_ex_modify_wq, curr_wq_state), UAPI_DEF_METHOD_NEEDS_FN(modify_wq))), DECLARE_UVERBS_OBJECT( UVERBS_OBJECT_SRQ, DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_SRQ, ib_uverbs_create_srq, UAPI_DEF_WRITE_UDATA_IO( struct ib_uverbs_create_srq, struct ib_uverbs_create_srq_resp), UAPI_DEF_METHOD_NEEDS_FN(create_srq)), DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_XSRQ, ib_uverbs_create_xsrq, UAPI_DEF_WRITE_UDATA_IO( struct ib_uverbs_create_xsrq, struct ib_uverbs_create_srq_resp), UAPI_DEF_METHOD_NEEDS_FN(create_srq)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_DESTROY_SRQ, ib_uverbs_destroy_srq, UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_srq, struct ib_uverbs_destroy_srq_resp), UAPI_DEF_METHOD_NEEDS_FN(destroy_srq)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_MODIFY_SRQ, ib_uverbs_modify_srq, UAPI_DEF_WRITE_UDATA_I(struct ib_uverbs_modify_srq), UAPI_DEF_METHOD_NEEDS_FN(modify_srq)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_POST_SRQ_RECV, ib_uverbs_post_srq_recv, UAPI_DEF_WRITE_IO(struct ib_uverbs_post_srq_recv, struct ib_uverbs_post_srq_recv_resp), UAPI_DEF_METHOD_NEEDS_FN(post_srq_recv)), DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_QUERY_SRQ, ib_uverbs_query_srq, UAPI_DEF_WRITE_IO(struct ib_uverbs_query_srq, struct ib_uverbs_query_srq_resp), UAPI_DEF_METHOD_NEEDS_FN(query_srq))), DECLARE_UVERBS_OBJECT( UVERBS_OBJECT_XRCD, DECLARE_UVERBS_WRITE( IB_USER_VERBS_CMD_CLOSE_XRCD, ib_uverbs_close_xrcd, UAPI_DEF_WRITE_I(struct ib_uverbs_close_xrcd)), DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_QP, 
ib_uverbs_open_qp, UAPI_DEF_WRITE_UDATA_IO( struct ib_uverbs_open_qp, struct ib_uverbs_create_qp_resp)), DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_XRCD, ib_uverbs_open_xrcd, UAPI_DEF_WRITE_UDATA_IO( struct ib_uverbs_open_xrcd, struct ib_uverbs_open_xrcd_resp)), UAPI_DEF_OBJ_NEEDS_FN(alloc_xrcd), UAPI_DEF_OBJ_NEEDS_FN(dealloc_xrcd)), {}, };
linux-master
drivers/infiniband/core/uverbs_cmd.c
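Editor's note: the flow spec handling in uverbs_cmd.c above relies on a fixed buffer layout. After the ib_uverbs_flow_spec_hdr, the remaining bytes are split into two equal halves (the match value first, then the mask), and spec_filter_size() only accepts a filter larger than the kernel's known struct if every trailing byte is zero. The standalone sketch below is my own illustration of that arithmetic, not kernel code; the struct and helper names are hypothetical stand-ins.

/*
 * Hypothetical standalone sketch (not kernel code): mirrors the layout that
 * kern_spec_to_ib_spec_filter() relies on, value half followed by mask half,
 * and the trailing-zero rule enforced by spec_filter_size() when user space
 * hands in a filter larger than the kernel's known struct size.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

struct spec_hdr {                      /* stand-in for ib_uverbs_flow_spec_hdr */
	uint32_t type;
	uint16_t size;                 /* header + value half + mask half      */
	uint16_t reserved;
};

struct demo_spec {                     /* hypothetical wire image              */
	struct spec_hdr hdr;
	uint8_t val[8];                /* match value                          */
	uint8_t mask[8];               /* mask, always the same size as val    */
};

/* Accept a larger-than-known filter only if the unknown tail is all zero. */
static ssize_t filter_size(const uint8_t *filter, size_t kern_sz, size_t real_sz)
{
	size_t i;

	if (kern_sz > real_sz) {
		for (i = real_sz; i < kern_sz; i++)
			if (filter[i])          /* like memchr_inv(..., 0, ...) */
				return -1;      /* the kernel returns -EINVAL   */
		return real_sz;
	}
	return kern_sz;
}

int main(void)
{
	struct demo_spec spec = {
		.hdr = { .type = 0x20 /* arbitrary type for the demo */,
			 .size = sizeof(struct demo_spec) },
	};
	/* Each half is (hdr.size - sizeof(hdr)) / 2, exactly as the handler
	 * computes kern_filter_sz before pointing at the value and mask. */
	size_t half = (spec.hdr.size - sizeof(spec.hdr)) / 2;

	printf("half = %zu bytes, usable = %zd (pretending real_sz is 6)\n",
	       half, filter_size(spec.val, half, 6));
	return 0;
}

Built with any C99 compiler, this prints the half size and the usable filter size the same way kern_spec_to_ib_spec_filter() and spec_filter_size() derive them for a 24-byte spec.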
/* * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/errno.h> #include <linux/string.h> #include <linux/export.h> #include <linux/if_ether.h> #include <linux/ip.h> #include <rdma/ib_pack.h> #define STRUCT_FIELD(header, field) \ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ .struct_size_bytes = sizeof_field(struct ib_unpacked_ ## header, field), \ .field_name = #header ":" #field static const struct ib_field lrh_table[] = { { STRUCT_FIELD(lrh, virtual_lane), .offset_words = 0, .offset_bits = 0, .size_bits = 4 }, { STRUCT_FIELD(lrh, link_version), .offset_words = 0, .offset_bits = 4, .size_bits = 4 }, { STRUCT_FIELD(lrh, service_level), .offset_words = 0, .offset_bits = 8, .size_bits = 4 }, { RESERVED, .offset_words = 0, .offset_bits = 12, .size_bits = 2 }, { STRUCT_FIELD(lrh, link_next_header), .offset_words = 0, .offset_bits = 14, .size_bits = 2 }, { STRUCT_FIELD(lrh, destination_lid), .offset_words = 0, .offset_bits = 16, .size_bits = 16 }, { RESERVED, .offset_words = 1, .offset_bits = 0, .size_bits = 5 }, { STRUCT_FIELD(lrh, packet_length), .offset_words = 1, .offset_bits = 5, .size_bits = 11 }, { STRUCT_FIELD(lrh, source_lid), .offset_words = 1, .offset_bits = 16, .size_bits = 16 } }; static const struct ib_field eth_table[] = { { STRUCT_FIELD(eth, dmac_h), .offset_words = 0, .offset_bits = 0, .size_bits = 32 }, { STRUCT_FIELD(eth, dmac_l), .offset_words = 1, .offset_bits = 0, .size_bits = 16 }, { STRUCT_FIELD(eth, smac_h), .offset_words = 1, .offset_bits = 16, .size_bits = 16 }, { STRUCT_FIELD(eth, smac_l), .offset_words = 2, .offset_bits = 0, .size_bits = 32 }, { STRUCT_FIELD(eth, type), .offset_words = 3, .offset_bits = 0, .size_bits = 16 } }; static const struct ib_field vlan_table[] = { { STRUCT_FIELD(vlan, tag), .offset_words = 0, .offset_bits = 0, .size_bits = 16 }, { STRUCT_FIELD(vlan, type), .offset_words = 0, .offset_bits = 16, .size_bits = 16 } }; static const struct ib_field ip4_table[] = { { STRUCT_FIELD(ip4, ver), .offset_words = 0, .offset_bits = 0, .size_bits = 4 }, { STRUCT_FIELD(ip4, hdr_len), .offset_words = 0, .offset_bits = 4, .size_bits = 4 }, { STRUCT_FIELD(ip4, tos), .offset_words = 0, 
.offset_bits = 8, .size_bits = 8 }, { STRUCT_FIELD(ip4, tot_len), .offset_words = 0, .offset_bits = 16, .size_bits = 16 }, { STRUCT_FIELD(ip4, id), .offset_words = 1, .offset_bits = 0, .size_bits = 16 }, { STRUCT_FIELD(ip4, frag_off), .offset_words = 1, .offset_bits = 16, .size_bits = 16 }, { STRUCT_FIELD(ip4, ttl), .offset_words = 2, .offset_bits = 0, .size_bits = 8 }, { STRUCT_FIELD(ip4, protocol), .offset_words = 2, .offset_bits = 8, .size_bits = 8 }, { STRUCT_FIELD(ip4, check), .offset_words = 2, .offset_bits = 16, .size_bits = 16 }, { STRUCT_FIELD(ip4, saddr), .offset_words = 3, .offset_bits = 0, .size_bits = 32 }, { STRUCT_FIELD(ip4, daddr), .offset_words = 4, .offset_bits = 0, .size_bits = 32 } }; static const struct ib_field udp_table[] = { { STRUCT_FIELD(udp, sport), .offset_words = 0, .offset_bits = 0, .size_bits = 16 }, { STRUCT_FIELD(udp, dport), .offset_words = 0, .offset_bits = 16, .size_bits = 16 }, { STRUCT_FIELD(udp, length), .offset_words = 1, .offset_bits = 0, .size_bits = 16 }, { STRUCT_FIELD(udp, csum), .offset_words = 1, .offset_bits = 16, .size_bits = 16 } }; static const struct ib_field grh_table[] = { { STRUCT_FIELD(grh, ip_version), .offset_words = 0, .offset_bits = 0, .size_bits = 4 }, { STRUCT_FIELD(grh, traffic_class), .offset_words = 0, .offset_bits = 4, .size_bits = 8 }, { STRUCT_FIELD(grh, flow_label), .offset_words = 0, .offset_bits = 12, .size_bits = 20 }, { STRUCT_FIELD(grh, payload_length), .offset_words = 1, .offset_bits = 0, .size_bits = 16 }, { STRUCT_FIELD(grh, next_header), .offset_words = 1, .offset_bits = 16, .size_bits = 8 }, { STRUCT_FIELD(grh, hop_limit), .offset_words = 1, .offset_bits = 24, .size_bits = 8 }, { STRUCT_FIELD(grh, source_gid), .offset_words = 2, .offset_bits = 0, .size_bits = 128 }, { STRUCT_FIELD(grh, destination_gid), .offset_words = 6, .offset_bits = 0, .size_bits = 128 } }; static const struct ib_field bth_table[] = { { STRUCT_FIELD(bth, opcode), .offset_words = 0, .offset_bits = 0, .size_bits = 8 }, { STRUCT_FIELD(bth, solicited_event), .offset_words = 0, .offset_bits = 8, .size_bits = 1 }, { STRUCT_FIELD(bth, mig_req), .offset_words = 0, .offset_bits = 9, .size_bits = 1 }, { STRUCT_FIELD(bth, pad_count), .offset_words = 0, .offset_bits = 10, .size_bits = 2 }, { STRUCT_FIELD(bth, transport_header_version), .offset_words = 0, .offset_bits = 12, .size_bits = 4 }, { STRUCT_FIELD(bth, pkey), .offset_words = 0, .offset_bits = 16, .size_bits = 16 }, { RESERVED, .offset_words = 1, .offset_bits = 0, .size_bits = 8 }, { STRUCT_FIELD(bth, destination_qpn), .offset_words = 1, .offset_bits = 8, .size_bits = 24 }, { STRUCT_FIELD(bth, ack_req), .offset_words = 2, .offset_bits = 0, .size_bits = 1 }, { RESERVED, .offset_words = 2, .offset_bits = 1, .size_bits = 7 }, { STRUCT_FIELD(bth, psn), .offset_words = 2, .offset_bits = 8, .size_bits = 24 } }; static const struct ib_field deth_table[] = { { STRUCT_FIELD(deth, qkey), .offset_words = 0, .offset_bits = 0, .size_bits = 32 }, { RESERVED, .offset_words = 1, .offset_bits = 0, .size_bits = 8 }, { STRUCT_FIELD(deth, source_qpn), .offset_words = 1, .offset_bits = 8, .size_bits = 24 } }; __sum16 ib_ud_ip4_csum(struct ib_ud_header *header) { struct iphdr iph; iph.ihl = 5; iph.version = 4; iph.tos = header->ip4.tos; iph.tot_len = header->ip4.tot_len; iph.id = header->ip4.id; iph.frag_off = header->ip4.frag_off; iph.ttl = header->ip4.ttl; iph.protocol = header->ip4.protocol; iph.check = 0; iph.saddr = header->ip4.saddr; iph.daddr = header->ip4.daddr; return ip_fast_csum((u8 *)&iph, iph.ihl); } 
EXPORT_SYMBOL(ib_ud_ip4_csum); /** * ib_ud_header_init - Initialize UD header structure * @payload_bytes:Length of packet payload * @lrh_present: specify if LRH is present * @eth_present: specify if Eth header is present * @vlan_present: packet is tagged vlan * @grh_present: GRH flag (if non-zero, GRH will be included) * @ip_version: if non-zero, IP header, V4 or V6, will be included * @udp_present :if non-zero, UDP header will be included * @immediate_present: specify if immediate data is present * @header:Structure to initialize */ int ib_ud_header_init(int payload_bytes, int lrh_present, int eth_present, int vlan_present, int grh_present, int ip_version, int udp_present, int immediate_present, struct ib_ud_header *header) { size_t udp_bytes = udp_present ? IB_UDP_BYTES : 0; grh_present = grh_present && !ip_version; memset(header, 0, sizeof *header); /* * UDP header without IP header doesn't make sense */ if (udp_present && ip_version != 4 && ip_version != 6) return -EINVAL; if (lrh_present) { u16 packet_length; header->lrh.link_version = 0; header->lrh.link_next_header = grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL; packet_length = (IB_LRH_BYTES + IB_BTH_BYTES + IB_DETH_BYTES + (grh_present ? IB_GRH_BYTES : 0) + payload_bytes + 4 + /* ICRC */ 3) / 4; /* round up */ header->lrh.packet_length = cpu_to_be16(packet_length); } if (vlan_present) header->eth.type = cpu_to_be16(ETH_P_8021Q); if (ip_version == 6 || grh_present) { header->grh.ip_version = 6; header->grh.payload_length = cpu_to_be16((udp_bytes + IB_BTH_BYTES + IB_DETH_BYTES + payload_bytes + 4 + /* ICRC */ 3) & ~3); /* round up */ header->grh.next_header = udp_present ? IPPROTO_UDP : 0x1b; } if (ip_version == 4) { header->ip4.ver = 4; /* version 4 */ header->ip4.hdr_len = 5; /* 5 words */ header->ip4.tot_len = cpu_to_be16(IB_IP4_BYTES + udp_bytes + IB_BTH_BYTES + IB_DETH_BYTES + payload_bytes + 4); /* ICRC */ header->ip4.protocol = IPPROTO_UDP; } if (udp_present && ip_version) header->udp.length = cpu_to_be16(IB_UDP_BYTES + IB_BTH_BYTES + IB_DETH_BYTES + payload_bytes + 4); /* ICRC */ if (immediate_present) header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; else header->bth.opcode = IB_OPCODE_UD_SEND_ONLY; header->bth.pad_count = (4 - payload_bytes) & 3; header->bth.transport_header_version = 0; header->lrh_present = lrh_present; header->eth_present = eth_present; header->vlan_present = vlan_present; header->grh_present = grh_present || (ip_version == 6); header->ipv4_present = ip_version == 4; header->udp_present = udp_present; header->immediate_present = immediate_present; return 0; } EXPORT_SYMBOL(ib_ud_header_init); /** * ib_ud_header_pack - Pack UD header struct into wire format * @header:UD header struct * @buf:Buffer to pack into * * ib_ud_header_pack() packs the UD header structure @header into wire * format in the buffer @buf. 
*/ int ib_ud_header_pack(struct ib_ud_header *header, void *buf) { int len = 0; if (header->lrh_present) { ib_pack(lrh_table, ARRAY_SIZE(lrh_table), &header->lrh, buf + len); len += IB_LRH_BYTES; } if (header->eth_present) { ib_pack(eth_table, ARRAY_SIZE(eth_table), &header->eth, buf + len); len += IB_ETH_BYTES; } if (header->vlan_present) { ib_pack(vlan_table, ARRAY_SIZE(vlan_table), &header->vlan, buf + len); len += IB_VLAN_BYTES; } if (header->grh_present) { ib_pack(grh_table, ARRAY_SIZE(grh_table), &header->grh, buf + len); len += IB_GRH_BYTES; } if (header->ipv4_present) { ib_pack(ip4_table, ARRAY_SIZE(ip4_table), &header->ip4, buf + len); len += IB_IP4_BYTES; } if (header->udp_present) { ib_pack(udp_table, ARRAY_SIZE(udp_table), &header->udp, buf + len); len += IB_UDP_BYTES; } ib_pack(bth_table, ARRAY_SIZE(bth_table), &header->bth, buf + len); len += IB_BTH_BYTES; ib_pack(deth_table, ARRAY_SIZE(deth_table), &header->deth, buf + len); len += IB_DETH_BYTES; if (header->immediate_present) { memcpy(buf + len, &header->immediate_data, sizeof header->immediate_data); len += sizeof header->immediate_data; } return len; } EXPORT_SYMBOL(ib_ud_header_pack); /** * ib_ud_header_unpack - Unpack UD header struct from wire format * @header:UD header struct * @buf:Buffer to unpack from * * ib_ud_header_unpack() unpacks the UD header structure @header from wire * format in the buffer @buf. */ int ib_ud_header_unpack(void *buf, struct ib_ud_header *header) { ib_unpack(lrh_table, ARRAY_SIZE(lrh_table), buf, &header->lrh); buf += IB_LRH_BYTES; if (header->lrh.link_version != 0) { pr_warn("Invalid LRH.link_version %u\n", header->lrh.link_version); return -EINVAL; } switch (header->lrh.link_next_header) { case IB_LNH_IBA_LOCAL: header->grh_present = 0; break; case IB_LNH_IBA_GLOBAL: header->grh_present = 1; ib_unpack(grh_table, ARRAY_SIZE(grh_table), buf, &header->grh); buf += IB_GRH_BYTES; if (header->grh.ip_version != 6) { pr_warn("Invalid GRH.ip_version %u\n", header->grh.ip_version); return -EINVAL; } if (header->grh.next_header != 0x1b) { pr_warn("Invalid GRH.next_header 0x%02x\n", header->grh.next_header); return -EINVAL; } break; default: pr_warn("Invalid LRH.link_next_header %u\n", header->lrh.link_next_header); return -EINVAL; } ib_unpack(bth_table, ARRAY_SIZE(bth_table), buf, &header->bth); buf += IB_BTH_BYTES; switch (header->bth.opcode) { case IB_OPCODE_UD_SEND_ONLY: header->immediate_present = 0; break; case IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE: header->immediate_present = 1; break; default: pr_warn("Invalid BTH.opcode 0x%02x\n", header->bth.opcode); return -EINVAL; } if (header->bth.transport_header_version != 0) { pr_warn("Invalid BTH.transport_header_version %u\n", header->bth.transport_header_version); return -EINVAL; } ib_unpack(deth_table, ARRAY_SIZE(deth_table), buf, &header->deth); buf += IB_DETH_BYTES; if (header->immediate_present) memcpy(&header->immediate_data, buf, sizeof header->immediate_data); return 0; } EXPORT_SYMBOL(ib_ud_header_unpack);
linux-master
drivers/infiniband/core/ud_header.c
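Editor's note: ud_header.c describes each wire field by word offset, bit offset and bit width, and lets the generic ib_pack()/ib_unpack() helpers do the shifting. The sketch below is a simplified illustration of that addressing scheme written for this note; it is not the kernel's ib_pack(), it only handles fields of 32 bits or fewer, and the field positions are taken from the bth_table shown above.

/*
 * Standalone illustration (not the kernel's ib_pack()): shows how a field
 * described by word offset / bit offset / bit width, as in the lrh/bth/deth
 * tables above, lands in a big-endian word buffer.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* htonl, ntohl */

/* Write 'value' into 'buf' at the given word/bit position, MSB first,
 * following the addressing convention of the struct ib_field tables. */
static void pack_field(uint32_t *buf, unsigned int offset_words,
		       unsigned int offset_bits, unsigned int size_bits,
		       uint32_t value)
{
	uint32_t word = ntohl(buf[offset_words]);
	uint32_t mask = (size_bits == 32) ? 0xffffffffu
					  : ((1u << size_bits) - 1);
	unsigned int shift = 32 - offset_bits - size_bits;

	word &= ~(mask << shift);
	word |= (value & mask) << shift;
	buf[offset_words] = htonl(word);
}

int main(void)
{
	uint32_t bth[3] = {0};

	/* BTH.opcode: word 0, bits 0..7 (cf. bth_table above). */
	pack_field(bth, 0, 0, 8, 0x64);     /* UD SEND_ONLY opcode value */
	/* BTH.destination_qpn: word 1, bits 8..31. */
	pack_field(bth, 1, 8, 24, 0x12345);

	printf("%08x %08x %08x\n", ntohl(bth[0]), ntohl(bth[1]), ntohl(bth[2]));
	return 0;
}

Keeping the header layouts as data tables is the design point here: adding or moving a field means editing a table entry rather than hand written shift and mask code.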
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ #include <rdma/uverbs_std_types.h> #include "rdma_core.h" #include "uverbs.h" #include "core_priv.h" static int uverbs_free_qp(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { struct ib_qp *qp = uobject->object; struct ib_uqp_object *uqp = container_of(uobject, struct ib_uqp_object, uevent.uobject); int ret; /* * If this is a user triggered destroy then do not allow destruction * until the user cleans up all the mcast bindings. Unlike in other * places we forcibly clean up the mcast attachments for !DESTROY * because the mcast attaches are not ubojects and will not be * destroyed by anything else during cleanup processing. */ if (why == RDMA_REMOVE_DESTROY) { if (!list_empty(&uqp->mcast_list)) return -EBUSY; } else if (qp == qp->real_qp) { ib_uverbs_detach_umcast(qp, uqp); } ret = ib_destroy_qp_user(qp, &attrs->driver_udata); if (ret) return ret; if (uqp->uxrcd) atomic_dec(&uqp->uxrcd->refcnt); ib_uverbs_release_uevent(&uqp->uevent); return 0; } static int check_creation_flags(enum ib_qp_type qp_type, u32 create_flags) { create_flags &= ~IB_UVERBS_QP_CREATE_SQ_SIG_ALL; if (!create_flags || qp_type == IB_QPT_DRIVER) return 0; if (qp_type != IB_QPT_RAW_PACKET && qp_type != IB_QPT_UD) return -EINVAL; if ((create_flags & IB_UVERBS_QP_CREATE_SCATTER_FCS || create_flags & IB_UVERBS_QP_CREATE_CVLAN_STRIPPING) && qp_type != IB_QPT_RAW_PACKET) return -EINVAL; return 0; } static void set_caps(struct ib_qp_init_attr *attr, struct ib_uverbs_qp_cap *cap, bool req) { if (req) { attr->cap.max_send_wr = cap->max_send_wr; attr->cap.max_recv_wr = cap->max_recv_wr; attr->cap.max_send_sge = cap->max_send_sge; attr->cap.max_recv_sge = cap->max_recv_sge; attr->cap.max_inline_data = cap->max_inline_data; } else { cap->max_send_wr = attr->cap.max_send_wr; cap->max_recv_wr = attr->cap.max_recv_wr; cap->max_send_sge = attr->cap.max_send_sge; cap->max_recv_sge = attr->cap.max_recv_sge; cap->max_inline_data = attr->cap.max_inline_data; } } static int UVERBS_HANDLER(UVERBS_METHOD_QP_CREATE)( struct uverbs_attr_bundle *attrs) { struct ib_uqp_object *obj = container_of( uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_QP_HANDLE), typeof(*obj), uevent.uobject); struct ib_qp_init_attr attr = {}; struct ib_uverbs_qp_cap cap = {}; struct ib_rwq_ind_table *rwq_ind_tbl = NULL; struct ib_qp *qp; struct ib_pd *pd = NULL; struct ib_srq *srq = NULL; struct ib_cq *recv_cq = NULL; struct ib_cq *send_cq = NULL; struct ib_xrcd *xrcd = NULL; struct ib_uobject *xrcd_uobj = NULL; struct ib_device *device; u64 user_handle; int ret; ret = uverbs_copy_from_or_zero(&cap, attrs, UVERBS_ATTR_CREATE_QP_CAP); if (!ret) ret = uverbs_copy_from(&user_handle, attrs, UVERBS_ATTR_CREATE_QP_USER_HANDLE); if (!ret) ret = uverbs_get_const(&attr.qp_type, attrs, UVERBS_ATTR_CREATE_QP_TYPE); if (ret) return ret; switch (attr.qp_type) { case IB_QPT_XRC_TGT: if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE) || uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE) || uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_QP_PD_HANDLE) || uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE)) return -EINVAL; xrcd_uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_QP_XRCD_HANDLE); if (IS_ERR(xrcd_uobj)) return PTR_ERR(xrcd_uobj); xrcd = (struct ib_xrcd *)xrcd_uobj->object; if (!xrcd) return -EINVAL; device = xrcd->device; break; case 
IB_UVERBS_QPT_RAW_PACKET: if (!capable(CAP_NET_RAW)) return -EPERM; fallthrough; case IB_UVERBS_QPT_RC: case IB_UVERBS_QPT_UC: case IB_UVERBS_QPT_UD: case IB_UVERBS_QPT_XRC_INI: case IB_UVERBS_QPT_DRIVER: if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_QP_XRCD_HANDLE) || (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_QP_SRQ_HANDLE) && attr.qp_type == IB_QPT_XRC_INI)) return -EINVAL; pd = uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_QP_PD_HANDLE); if (IS_ERR(pd)) return PTR_ERR(pd); rwq_ind_tbl = uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE); if (!IS_ERR(rwq_ind_tbl)) { if (cap.max_recv_wr || cap.max_recv_sge || uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE) || uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_QP_SRQ_HANDLE)) return -EINVAL; /* send_cq is optional */ if (cap.max_send_wr) { send_cq = uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE); if (IS_ERR(send_cq)) return PTR_ERR(send_cq); } attr.rwq_ind_tbl = rwq_ind_tbl; } else { send_cq = uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE); if (IS_ERR(send_cq)) return PTR_ERR(send_cq); if (attr.qp_type != IB_QPT_XRC_INI) { recv_cq = uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE); if (IS_ERR(recv_cq)) return PTR_ERR(recv_cq); } } device = pd->device; break; default: return -EINVAL; } ret = uverbs_get_flags32(&attr.create_flags, attrs, UVERBS_ATTR_CREATE_QP_FLAGS, IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK | IB_UVERBS_QP_CREATE_SCATTER_FCS | IB_UVERBS_QP_CREATE_CVLAN_STRIPPING | IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING | IB_UVERBS_QP_CREATE_SQ_SIG_ALL); if (ret) return ret; ret = check_creation_flags(attr.qp_type, attr.create_flags); if (ret) return ret; if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_QP_SOURCE_QPN)) { ret = uverbs_copy_from(&attr.source_qpn, attrs, UVERBS_ATTR_CREATE_QP_SOURCE_QPN); if (ret) return ret; attr.create_flags |= IB_QP_CREATE_SOURCE_QPN; } srq = uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_QP_SRQ_HANDLE); if (!IS_ERR(srq)) { if ((srq->srq_type == IB_SRQT_XRC && attr.qp_type != IB_QPT_XRC_TGT) || (srq->srq_type != IB_SRQT_XRC && attr.qp_type == IB_QPT_XRC_TGT)) return -EINVAL; attr.srq = srq; } obj->uevent.event_file = ib_uverbs_get_async_event(attrs, UVERBS_ATTR_CREATE_QP_EVENT_FD); INIT_LIST_HEAD(&obj->uevent.event_list); INIT_LIST_HEAD(&obj->mcast_list); obj->uevent.uobject.user_handle = user_handle; attr.event_handler = ib_uverbs_qp_event_handler; attr.send_cq = send_cq; attr.recv_cq = recv_cq; attr.xrcd = xrcd; if (attr.create_flags & IB_UVERBS_QP_CREATE_SQ_SIG_ALL) { /* This creation bit is uverbs one, need to mask before * calling drivers. It was added to prevent an extra user attr * only for that when using ioctl. 
*/ attr.create_flags &= ~IB_UVERBS_QP_CREATE_SQ_SIG_ALL; attr.sq_sig_type = IB_SIGNAL_ALL_WR; } else { attr.sq_sig_type = IB_SIGNAL_REQ_WR; } set_caps(&attr, &cap, true); mutex_init(&obj->mcast_lock); qp = ib_create_qp_user(device, pd, &attr, &attrs->driver_udata, obj, KBUILD_MODNAME); if (IS_ERR(qp)) { ret = PTR_ERR(qp); goto err_put; } ib_qp_usecnt_inc(qp); if (attr.qp_type == IB_QPT_XRC_TGT) { obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); atomic_inc(&obj->uxrcd->refcnt); } obj->uevent.uobject.object = qp; uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_QP_HANDLE); set_caps(&attr, &cap, false); ret = uverbs_copy_to_struct_or_zero(attrs, UVERBS_ATTR_CREATE_QP_RESP_CAP, &cap, sizeof(cap)); if (ret) return ret; ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_QP_RESP_QP_NUM, &qp->qp_num, sizeof(qp->qp_num)); return ret; err_put: if (obj->uevent.event_file) uverbs_uobject_put(&obj->uevent.event_file->uobj); return ret; }; DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_QP_CREATE, UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_HANDLE, UVERBS_OBJECT_QP, UVERBS_ACCESS_NEW, UA_MANDATORY), UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_XRCD_HANDLE, UVERBS_OBJECT_XRCD, UVERBS_ACCESS_READ, UA_OPTIONAL), UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_PD_HANDLE, UVERBS_OBJECT_PD, UVERBS_ACCESS_READ, UA_OPTIONAL), UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_SRQ_HANDLE, UVERBS_OBJECT_SRQ, UVERBS_ACCESS_READ, UA_OPTIONAL), UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE, UVERBS_OBJECT_CQ, UVERBS_ACCESS_READ, UA_OPTIONAL), UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE, UVERBS_OBJECT_CQ, UVERBS_ACCESS_READ, UA_OPTIONAL), UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE, UVERBS_OBJECT_RWQ_IND_TBL, UVERBS_ACCESS_READ, UA_OPTIONAL), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_USER_HANDLE, UVERBS_ATTR_TYPE(u64), UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_CAP, UVERBS_ATTR_STRUCT(struct ib_uverbs_qp_cap, max_inline_data), UA_MANDATORY), UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_QP_TYPE, enum ib_uverbs_qp_type, UA_MANDATORY), UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_QP_FLAGS, enum ib_uverbs_qp_create_flags, UA_OPTIONAL), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_SOURCE_QPN, UVERBS_ATTR_TYPE(u32), UA_OPTIONAL), UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_QP_EVENT_FD, UVERBS_OBJECT_ASYNC_EVENT, UVERBS_ACCESS_READ, UA_OPTIONAL), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_QP_RESP_CAP, UVERBS_ATTR_STRUCT(struct ib_uverbs_qp_cap, max_inline_data), UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_QP_RESP_QP_NUM, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_UHW()); static int UVERBS_HANDLER(UVERBS_METHOD_QP_DESTROY)( struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_QP_HANDLE); struct ib_uqp_object *obj = container_of(uobj, struct ib_uqp_object, uevent.uobject); struct ib_uverbs_destroy_qp_resp resp = { .events_reported = obj->uevent.events_reported }; return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_QP_RESP, &resp, sizeof(resp)); } DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_QP_DESTROY, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_QP_HANDLE, UVERBS_OBJECT_QP, UVERBS_ACCESS_DESTROY, UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_QP_RESP, UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_qp_resp), UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_QP, UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp), &UVERBS_METHOD(UVERBS_METHOD_QP_CREATE), &UVERBS_METHOD(UVERBS_METHOD_QP_DESTROY)); const struct uapi_definition 
uverbs_def_obj_qp[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_QP, UAPI_DEF_OBJ_NEEDS_FN(destroy_qp)), {} };
linux-master
drivers/infiniband/core/uverbs_std_types_qp.c
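Editor's note: uverbs_std_types_qp.c implements the ioctl based QP create and destroy methods; from user space the request normally goes through rdma-core's libibverbs, which fills in the qp_type, capability and signalling attributes that UVERBS_HANDLER(UVERBS_METHOD_QP_CREATE) parses. The example below is a minimal, hedged usage sketch against the public verbs API. It assumes an RDMA capable device and the rdma-core headers are available, and whether the request reaches the kernel through the legacy write() command or the ioctl method above depends on the rdma-core version in use.

/* Minimal libibverbs example: create and destroy a UD QP.
 * Build with: cc demo.c -o demo -libverbs
 * Error handling is kept short; a real application would also create an
 * address handle, register memory and post work requests.
 */
#include <stdio.h>
#include <stdlib.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **devs = ibv_get_device_list(NULL);
	struct ibv_context *ctx;
	struct ibv_pd *pd;
	struct ibv_cq *cq;
	struct ibv_qp *qp;
	struct ibv_qp_init_attr attr = {
		.qp_type = IBV_QPT_UD,   /* maps to IB_QPT_UD in the kernel */
		.sq_sig_all = 1,         /* signal every send work request  */
		.cap = {
			.max_send_wr = 16, .max_recv_wr = 16,
			.max_send_sge = 1, .max_recv_sge = 1,
		},
	};

	if (!devs || !devs[0]) {
		fprintf(stderr, "no RDMA device found\n");
		return 1;
	}
	ctx = ibv_open_device(devs[0]);
	if (!ctx) {
		perror("ibv_open_device");
		return 1;
	}

	pd = ibv_alloc_pd(ctx);
	cq = ibv_create_cq(ctx, 32, NULL, NULL, 0);
	if (!pd || !cq) {
		fprintf(stderr, "pd/cq allocation failed\n");
		return 1;
	}

	attr.send_cq = cq;
	attr.recv_cq = cq;
	qp = ibv_create_qp(pd, &attr);   /* handled by the kernel uverbs QP create path */
	if (!qp) {
		perror("ibv_create_qp");
		return 1;
	}
	printf("created UD QP number 0x%x\n", qp->qp_num);

	ibv_destroy_qp(qp);
	ibv_destroy_cq(cq);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(devs);
	return 0;
}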
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ #include <rdma/uverbs_std_types.h> #include "rdma_core.h" #include "uverbs.h" static int uverbs_free_wq(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { struct ib_wq *wq = uobject->object; struct ib_uwq_object *uwq = container_of(uobject, struct ib_uwq_object, uevent.uobject); int ret; ret = ib_destroy_wq_user(wq, &attrs->driver_udata); if (ret) return ret; ib_uverbs_release_uevent(&uwq->uevent); return 0; } static int UVERBS_HANDLER(UVERBS_METHOD_WQ_CREATE)( struct uverbs_attr_bundle *attrs) { struct ib_uwq_object *obj = container_of( uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_WQ_HANDLE), typeof(*obj), uevent.uobject); struct ib_pd *pd = uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_WQ_PD_HANDLE); struct ib_cq *cq = uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_WQ_CQ_HANDLE); struct ib_wq_init_attr wq_init_attr = {}; struct ib_wq *wq; u64 user_handle; int ret; ret = uverbs_get_flags32(&wq_init_attr.create_flags, attrs, UVERBS_ATTR_CREATE_WQ_FLAGS, IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING | IB_UVERBS_WQ_FLAGS_SCATTER_FCS | IB_UVERBS_WQ_FLAGS_DELAY_DROP | IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING); if (!ret) ret = uverbs_copy_from(&wq_init_attr.max_sge, attrs, UVERBS_ATTR_CREATE_WQ_MAX_SGE); if (!ret) ret = uverbs_copy_from(&wq_init_attr.max_wr, attrs, UVERBS_ATTR_CREATE_WQ_MAX_WR); if (!ret) ret = uverbs_copy_from(&user_handle, attrs, UVERBS_ATTR_CREATE_WQ_USER_HANDLE); if (!ret) ret = uverbs_get_const(&wq_init_attr.wq_type, attrs, UVERBS_ATTR_CREATE_WQ_TYPE); if (ret) return ret; if (wq_init_attr.wq_type != IB_WQT_RQ) return -EINVAL; obj->uevent.event_file = ib_uverbs_get_async_event(attrs, UVERBS_ATTR_CREATE_WQ_EVENT_FD); obj->uevent.uobject.user_handle = user_handle; INIT_LIST_HEAD(&obj->uevent.event_list); wq_init_attr.event_handler = ib_uverbs_wq_event_handler; wq_init_attr.wq_context = attrs->ufile; wq_init_attr.cq = cq; wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata); if (IS_ERR(wq)) { ret = PTR_ERR(wq); goto err; } obj->uevent.uobject.object = wq; wq->wq_type = wq_init_attr.wq_type; wq->cq = cq; wq->pd = pd; wq->device = pd->device; wq->wq_context = wq_init_attr.wq_context; atomic_set(&wq->usecnt, 0); atomic_inc(&pd->usecnt); atomic_inc(&cq->usecnt); wq->uobject = obj; uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_WQ_HANDLE); ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_MAX_WR, &wq_init_attr.max_wr, sizeof(wq_init_attr.max_wr)); if (ret) return ret; ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_MAX_SGE, &wq_init_attr.max_sge, sizeof(wq_init_attr.max_sge)); if (ret) return ret; ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_WQ_NUM, &wq->wq_num, sizeof(wq->wq_num)); return ret; err: if (obj->uevent.event_file) uverbs_uobject_put(&obj->uevent.event_file->uobj); return ret; }; DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_WQ_CREATE, UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_HANDLE, UVERBS_OBJECT_WQ, UVERBS_ACCESS_NEW, UA_MANDATORY), UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_PD_HANDLE, UVERBS_OBJECT_PD, UVERBS_ACCESS_READ, UA_MANDATORY), UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_WQ_TYPE, enum ib_wq_type, UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_USER_HANDLE, UVERBS_ATTR_TYPE(u64), UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_MAX_WR, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_MAX_SGE, UVERBS_ATTR_TYPE(u32), 
UA_MANDATORY), UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_WQ_FLAGS, enum ib_uverbs_wq_flags, UA_MANDATORY), UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_CQ_HANDLE, UVERBS_OBJECT_CQ, UVERBS_ACCESS_READ, UA_OPTIONAL), UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_WQ_EVENT_FD, UVERBS_OBJECT_ASYNC_EVENT, UVERBS_ACCESS_READ, UA_OPTIONAL), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_MAX_WR, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_MAX_SGE, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_WQ_NUM, UVERBS_ATTR_TYPE(u32), UA_OPTIONAL), UVERBS_ATTR_UHW()); static int UVERBS_HANDLER(UVERBS_METHOD_WQ_DESTROY)( struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_WQ_HANDLE); struct ib_uwq_object *obj = container_of(uobj, struct ib_uwq_object, uevent.uobject); return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_WQ_RESP, &obj->uevent.events_reported, sizeof(obj->uevent.events_reported)); } DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_WQ_DESTROY, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_WQ_HANDLE, UVERBS_OBJECT_WQ, UVERBS_ACCESS_DESTROY, UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_WQ_RESP, UVERBS_ATTR_TYPE(u32), UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_WQ, UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), uverbs_free_wq), &UVERBS_METHOD(UVERBS_METHOD_WQ_CREATE), &UVERBS_METHOD(UVERBS_METHOD_WQ_DESTROY) ); const struct uapi_definition uverbs_def_obj_wq[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_WQ, UAPI_DEF_OBJ_NEEDS_FN(destroy_wq)), {} };
linux-master
drivers/infiniband/core/uverbs_std_types_wq.c
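/*
 * Editor's sketch, not part of the kernel tree: the WQ_CREATE/WQ_DESTROY
 * handlers above illustrate the general uverbs method pattern -- pull
 * objects and scalars out of the attribute bundle, do the work, then copy
 * results into PTR_OUT attributes, with the method's attribute layout
 * declared via DECLARE_UVERBS_NAMED_METHOD().  The sketch applies the same
 * pattern to a minimal read-only method.  UVERBS_METHOD_WQ_QUERY_NUM and
 * the UVERBS_ATTR_QUERY_WQ_* ids are hypothetical and would need matching
 * uapi definitions; the macros and helpers are the ones already used in
 * uverbs_std_types_wq.c above.
 */
static int UVERBS_HANDLER(UVERBS_METHOD_WQ_QUERY_NUM)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj =
		uverbs_attr_get_uobject(attrs, UVERBS_ATTR_QUERY_WQ_HANDLE);
	struct ib_wq *wq = uobj->object;

	/* Hand the HW WQ number back to userspace. */
	return uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_WQ_RESP_WQ_NUM,
			      &wq->wq_num, sizeof(wq->wq_num));
}

DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_WQ_QUERY_NUM,
	UVERBS_ATTR_IDR(UVERBS_ATTR_QUERY_WQ_HANDLE,
			UVERBS_OBJECT_WQ,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_WQ_RESP_WQ_NUM,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));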
/* * Copyright (c) 2017 Mellanox Technologies. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <linux/module.h> #include <linux/pid.h> #include <linux/pid_namespace.h> #include <linux/mutex.h> #include <net/netlink.h> #include <rdma/rdma_cm.h> #include <rdma/rdma_netlink.h> #include "core_priv.h" #include "cma_priv.h" #include "restrack.h" #include "uverbs.h" typedef int (*res_fill_func_t)(struct sk_buff*, bool, struct rdma_restrack_entry*, uint32_t); /* * Sort array elements by the netlink attribute name */ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { [RDMA_NLDEV_ATTR_CHARDEV] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_CHARDEV_ABI] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_CHARDEV_NAME] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_CHARDEV_TYPE] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE }, [RDMA_NLDEV_ATTR_DEV_DIM] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, .len = IB_DEVICE_NAME_MAX }, [RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_DEV_PROTOCOL] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 }, [RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 }, [RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_LINK_TYPE] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ }, 
[RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ }, [RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_CM_ID] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CM_IDN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CQ] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_CQN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CTX] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_CTX_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_DST_ADDR] = { .len = sizeof(struct __kernel_sockaddr_storage) }, [RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_RES_LKEY] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_MR] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_MRLEN] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_RES_MRN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_MR_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_PD] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_PDN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_PD_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_POLL_CTX] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_RAW] = { .type = NLA_BINARY }, [RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_SRC_ADDR] = { .len = sizeof(struct __kernel_sockaddr_storage) }, [RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_RES_SRQ] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_SRQN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_SRQ_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_MIN_RANGE] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_MAX_RANGE] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_MODE] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_RES] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_COUNTER] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_STAT_COUNTER_ID] = { .type = NLA_U32 }, 
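	/* (Editor's note: the policy table continues below; the doit/dumpit
	 * handlers in this file validate incoming attributes against it via
	 * nlmsg_parse() or nlmsg_parse_deprecated().) */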
[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID] = { .type = NLA_U32 }, [RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 }, [RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 }, [RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 }, }; static int put_driver_name_print_type(struct sk_buff *msg, const char *name, enum rdma_nldev_print_type print_type) { if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name)) return -EMSGSIZE; if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC && nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type)) return -EMSGSIZE; return 0; } static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, enum rdma_nldev_print_type print_type, u32 value) { if (put_driver_name_print_type(msg, name, print_type)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value)) return -EMSGSIZE; return 0; } static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, enum rdma_nldev_print_type print_type, u64 value) { if (put_driver_name_print_type(msg, name, print_type)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; return 0; } int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name, const char *str) { if (put_driver_name_print_type(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC)) return -EMSGSIZE; if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str)) return -EMSGSIZE; return 0; } EXPORT_SYMBOL(rdma_nl_put_driver_string); int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value) { return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, value); } EXPORT_SYMBOL(rdma_nl_put_driver_u32); int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name, u32 value) { return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, value); } EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex); int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value) { return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, value); } EXPORT_SYMBOL(rdma_nl_put_driver_u64); int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value) { return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, value); } EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex); static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index)) return -EMSGSIZE; if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, dev_name(&device->dev))) return -EMSGSIZE; return 0; } static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) { char fw[IB_FW_VERSION_NAME_MAX]; int ret = 0; u32 port; if (fill_nldev_handle(msg, device)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device))) return -EMSGSIZE; BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64)); if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, device->attrs.device_cap_flags, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; ib_get_device_fw_str(device, fw); /* Device without FW has strlen(fw) = 0 */ if 
(strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID, be64_to_cpu(device->node_guid), RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID, be64_to_cpu(device->attrs.sys_image_guid), RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim)) return -EMSGSIZE; /* * Link type is determined on first port and mlx4 device * which can potentially have two different link type for the same * IB device is considered as better to be avoided in the future, */ port = rdma_start_port(device); if (rdma_cap_opa_mad(device, port)) ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa"); else if (rdma_protocol_ib(device, port)) ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib"); else if (rdma_protocol_iwarp(device, port)) ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw"); else if (rdma_protocol_roce(device, port)) ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce"); else if (rdma_protocol_usnic(device, port)) ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "usnic"); return ret; } static int fill_port_info(struct sk_buff *msg, struct ib_device *device, u32 port, const struct net *net) { struct net_device *netdev = NULL; struct ib_port_attr attr; int ret; u64 cap_flags = 0; if (fill_nldev_handle(msg, device)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) return -EMSGSIZE; ret = ib_query_port(device, port, &attr); if (ret) return ret; if (rdma_protocol_ib(device, port)) { BUILD_BUG_ON((sizeof(attr.port_cap_flags) + sizeof(attr.port_cap_flags2)) > sizeof(u64)); cap_flags = attr.port_cap_flags | ((u64)attr.port_cap_flags2 << 32); if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, cap_flags, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX, attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc)) return -EMSGSIZE; } if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state)) return -EMSGSIZE; netdev = ib_device_get_netdev(device, port); if (netdev && net_eq(dev_net(netdev), net)) { ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex); if (ret) goto out; ret = nla_put_string(msg, RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name); } out: if (netdev) dev_put(netdev); return ret; } static int fill_res_info_entry(struct sk_buff *msg, const char *name, u64 curr) { struct nlattr *entry_attr; entry_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY); if (!entry_attr) return -EMSGSIZE; if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name)) goto err; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, RDMA_NLDEV_ATTR_PAD)) goto err; nla_nest_end(msg, entry_attr); return 0; err: nla_nest_cancel(msg, entry_attr); return -EMSGSIZE; } static int fill_res_info(struct sk_buff *msg, struct ib_device *device) { static const char * const names[RDMA_RESTRACK_MAX] = { [RDMA_RESTRACK_PD] = "pd", [RDMA_RESTRACK_CQ] = "cq", [RDMA_RESTRACK_QP] = "qp", [RDMA_RESTRACK_CM_ID] = "cm_id", [RDMA_RESTRACK_MR] 
= "mr", [RDMA_RESTRACK_CTX] = "ctx", [RDMA_RESTRACK_SRQ] = "srq", }; struct nlattr *table_attr; int ret, i, curr; if (fill_nldev_handle(msg, device)) return -EMSGSIZE; table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY); if (!table_attr) return -EMSGSIZE; for (i = 0; i < RDMA_RESTRACK_MAX; i++) { if (!names[i]) continue; curr = rdma_restrack_count(device, i); ret = fill_res_info_entry(msg, names[i], curr); if (ret) goto err; } nla_nest_end(msg, table_attr); return 0; err: nla_nest_cancel(msg, table_attr); return ret; } static int fill_res_name_pid(struct sk_buff *msg, struct rdma_restrack_entry *res) { int err = 0; /* * For user resources, user is should read /proc/PID/comm to get the * name of the task file. */ if (rdma_is_kernel_res(res)) { err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME, res->kern_name); } else { pid_t pid; pid = task_pid_vnr(res->task); /* * Task is dead and in zombie state. * There is no need to print PID anymore. */ if (pid) /* * This part is racy, task can be killed and PID will * be zero right here but it is ok, next query won't * return PID. We don't promise real-time reflection * of SW objects. */ err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid); } return err ? -EMSGSIZE : 0; } static int fill_res_qp_entry_query(struct sk_buff *msg, struct rdma_restrack_entry *res, struct ib_device *dev, struct ib_qp *qp) { struct ib_qp_init_attr qp_init_attr; struct ib_qp_attr qp_attr; int ret; ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr); if (ret) return ret; if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN, qp_attr.dest_qp_num)) goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN, qp_attr.rq_psn)) goto err; } if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn)) goto err; if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC || qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) { if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, qp_attr.path_mig_state)) goto err; } if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type)) goto err; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state)) goto err; if (dev->ops.fill_res_qp_entry) return dev->ops.fill_res_qp_entry(msg, qp); return 0; err: return -EMSGSIZE; } static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_qp *qp = container_of(res, struct ib_qp, res); struct ib_device *dev = qp->device; int ret; if (port && port != qp->port) return -EAGAIN; /* In create_qp() port is not set yet */ if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) return -EMSGSIZE; ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); if (ret) return -EMSGSIZE; if (!rdma_is_kernel_res(res) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id)) return -EMSGSIZE; ret = fill_res_name_pid(msg, res); if (ret) return -EMSGSIZE; return fill_res_qp_entry_query(msg, res, dev, qp); } static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_qp *qp = container_of(res, struct ib_qp, res); struct ib_device *dev = qp->device; if (port && port != qp->port) return -EAGAIN; if (!dev->ops.fill_res_qp_entry_raw) return -EINVAL; return dev->ops.fill_res_qp_entry_raw(msg, qp); } static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct rdma_id_private *id_priv = 
container_of(res, struct rdma_id_private, res); struct ib_device *dev = id_priv->id.device; struct rdma_cm_id *cm_id = &id_priv->id; if (port && port != cm_id->port_num) return -EAGAIN; if (cm_id->port_num && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num)) goto err; if (id_priv->qp_num) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num)) goto err; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type)) goto err; } if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps)) goto err; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state)) goto err; if (cm_id->route.addr.src_addr.ss_family && nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR, sizeof(cm_id->route.addr.src_addr), &cm_id->route.addr.src_addr)) goto err; if (cm_id->route.addr.dst_addr.ss_family && nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR, sizeof(cm_id->route.addr.dst_addr), &cm_id->route.addr.dst_addr)) goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id)) goto err; if (fill_res_name_pid(msg, res)) goto err; if (dev->ops.fill_res_cm_id_entry) return dev->ops.fill_res_cm_id_entry(msg, cm_id); return 0; err: return -EMSGSIZE; } static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_cq *cq = container_of(res, struct ib_cq, res); struct ib_device *dev = cq->device; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; /* Poll context is only valid for kernel CQs */ if (rdma_is_kernel_res(res) && nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL))) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id)) return -EMSGSIZE; if (!rdma_is_kernel_res(res) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, cq->uobject->uevent.uobject.context->res.id)) return -EMSGSIZE; if (fill_res_name_pid(msg, res)) return -EMSGSIZE; return (dev->ops.fill_res_cq_entry) ? dev->ops.fill_res_cq_entry(msg, cq) : 0; } static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_cq *cq = container_of(res, struct ib_cq, res); struct ib_device *dev = cq->device; if (!dev->ops.fill_res_cq_entry_raw) return -EINVAL; return dev->ops.fill_res_cq_entry_raw(msg, cq); } static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_mr *mr = container_of(res, struct ib_mr, res); struct ib_device *dev = mr->pd->device; if (has_cap_net_admin) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey)) return -EMSGSIZE; } if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) return -EMSGSIZE; if (!rdma_is_kernel_res(res) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id)) return -EMSGSIZE; if (fill_res_name_pid(msg, res)) return -EMSGSIZE; return (dev->ops.fill_res_mr_entry) ? 
dev->ops.fill_res_mr_entry(msg, mr) : 0; } static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_mr *mr = container_of(res, struct ib_mr, res); struct ib_device *dev = mr->pd->device; if (!dev->ops.fill_res_mr_entry_raw) return -EINVAL; return dev->ops.fill_res_mr_entry_raw(msg, mr); } static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_pd *pd = container_of(res, struct ib_pd, res); if (has_cap_net_admin) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, pd->local_dma_lkey)) goto err; if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, pd->unsafe_global_rkey)) goto err; } if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id)) goto err; if (!rdma_is_kernel_res(res) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, pd->uobject->context->res.id)) goto err; return fill_res_name_pid(msg, res); err: return -EMSGSIZE; } static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_ucontext *ctx = container_of(res, struct ib_ucontext, res); if (rdma_is_kernel_res(res)) return 0; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id)) return -EMSGSIZE; return fill_res_name_pid(msg, res); } static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range, uint32_t max_range) { struct nlattr *entry_attr; if (!min_range) return 0; entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); if (!entry_attr) return -EMSGSIZE; if (min_range == max_range) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range)) goto err; } else { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range)) goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range)) goto err; } nla_nest_end(msg, entry_attr); return 0; err: nla_nest_cancel(msg, entry_attr); return -EMSGSIZE; } static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq) { uint32_t min_range = 0, prev = 0; struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; struct nlattr *table_attr; struct ib_qp *qp = NULL; unsigned long id = 0; table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); if (!table_attr) return -EMSGSIZE; rt = &srq->device->res[RDMA_RESTRACK_QP]; xa_lock(&rt->xa); xa_for_each(&rt->xa, id, res) { if (!rdma_restrack_get(res)) continue; qp = container_of(res, struct ib_qp, res); if (!qp->srq || (qp->srq->res.id != srq->res.id)) { rdma_restrack_put(res); continue; } if (qp->qp_num < prev) /* qp_num should be ascending */ goto err_loop; if (min_range == 0) { min_range = qp->qp_num; } else if (qp->qp_num > (prev + 1)) { if (fill_res_range_qp_entry(msg, min_range, prev)) goto err_loop; min_range = qp->qp_num; } prev = qp->qp_num; rdma_restrack_put(res); } xa_unlock(&rt->xa); if (fill_res_range_qp_entry(msg, min_range, prev)) goto err; nla_nest_end(msg, table_attr); return 0; err_loop: rdma_restrack_put(res); xa_unlock(&rt->xa); err: nla_nest_cancel(msg, table_attr); return -EMSGSIZE; } static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_srq *srq = container_of(res, struct ib_srq, res); if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id)) goto err; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type)) 
goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id)) goto err; if (ib_srq_has_cq(srq->srq_type)) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, srq->ext.cq->res.id)) goto err; } if (fill_res_srq_qps(msg, srq)) goto err; return fill_res_name_pid(msg, res); err: return -EMSGSIZE; } static int fill_stat_counter_mode(struct sk_buff *msg, struct rdma_counter *counter) { struct rdma_counter_mode *m = &counter->mode; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode)) return -EMSGSIZE; if (m->mode == RDMA_COUNTER_MODE_AUTO) { if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) && nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type)) return -EMSGSIZE; if ((m->mask & RDMA_COUNTER_MASK_PID) && fill_res_name_pid(msg, &counter->res)) return -EMSGSIZE; } return 0; } static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn) { struct nlattr *entry_attr; entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); if (!entry_attr) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) goto err; nla_nest_end(msg, entry_attr); return 0; err: nla_nest_cancel(msg, entry_attr); return -EMSGSIZE; } static int fill_stat_counter_qps(struct sk_buff *msg, struct rdma_counter *counter) { struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; struct nlattr *table_attr; struct ib_qp *qp = NULL; unsigned long id = 0; int ret = 0; table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); if (!table_attr) return -EMSGSIZE; rt = &counter->device->res[RDMA_RESTRACK_QP]; xa_lock(&rt->xa); xa_for_each(&rt->xa, id, res) { qp = container_of(res, struct ib_qp, res); if (!qp->counter || (qp->counter->id != counter->id)) continue; ret = fill_stat_counter_qp_entry(msg, qp->qp_num); if (ret) goto err; } xa_unlock(&rt->xa); nla_nest_end(msg, table_attr); return 0; err: xa_unlock(&rt->xa); nla_nest_cancel(msg, table_attr); return ret; } int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name, u64 value) { struct nlattr *entry_attr; entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); if (!entry_attr) return -EMSGSIZE; if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, name)) goto err; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE, value, RDMA_NLDEV_ATTR_PAD)) goto err; nla_nest_end(msg, entry_attr); return 0; err: nla_nest_cancel(msg, entry_attr); return -EMSGSIZE; } EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry); static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_mr *mr = container_of(res, struct ib_mr, res); struct ib_device *dev = mr->pd->device; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) goto err; if (dev->ops.fill_stat_mr_entry) return dev->ops.fill_stat_mr_entry(msg, mr); return 0; err: return -EMSGSIZE; } static int fill_stat_counter_hwcounters(struct sk_buff *msg, struct rdma_counter *counter) { struct rdma_hw_stats *st = counter->stats; struct nlattr *table_attr; int i; table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); if (!table_attr) return -EMSGSIZE; mutex_lock(&st->lock); for (i = 0; i < st->num_counters; i++) { if (test_bit(i, st->is_disabled)) continue; if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name, st->value[i])) goto err; } mutex_unlock(&st->lock); nla_nest_end(msg, table_attr); return 0; err: mutex_unlock(&st->lock); nla_nest_cancel(msg, table_attr); return -EMSGSIZE; } static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin, struct 
rdma_restrack_entry *res, uint32_t port) { struct rdma_counter *counter = container_of(res, struct rdma_counter, res); if (port && port != counter->port) return -EAGAIN; /* Dump it even query failed */ rdma_counter_query_stats(counter); if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) || nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) || fill_stat_counter_mode(msg, counter) || fill_stat_counter_qps(msg, counter) || fill_stat_counter_hwcounters(msg, counter)) return -EMSGSIZE; return 0; } static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; u32 index; int err; err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { err = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, 0); if (!nlh) { err = -EMSGSIZE; goto err_free; } err = fill_dev_info(msg, device); if (err) goto err_free; nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: ib_device_put(device); return err; } static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; u32 index; int err; err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) { char name[IB_DEVICE_NAME_MAX] = {}; nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME], IB_DEVICE_NAME_MAX); if (strlen(name) == 0) { err = -EINVAL; goto done; } err = ib_device_rename(device, name); goto done; } if (tb[RDMA_NLDEV_NET_NS_FD]) { u32 ns_fd; ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]); err = ib_device_set_netns_put(skb, device, ns_fd); goto put_done; } if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) { u8 use_dim; use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]); err = ib_device_set_dim(device, use_dim); goto done; } done: ib_device_put(device); put_done: return err; } static int _nldev_get_dumpit(struct ib_device *device, struct sk_buff *skb, struct netlink_callback *cb, unsigned int idx) { int start = cb->args[0]; struct nlmsghdr *nlh; if (idx < start) return 0; nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, NLM_F_MULTI); if (!nlh || fill_dev_info(skb, device)) { nlmsg_cancel(skb, nlh); goto out; } nlmsg_end(skb, nlh); idx++; out: cb->args[0] = idx; return skb->len; } static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { /* * There is no need to take lock, because * we are relying on ib_core's locking. 
*/ return ib_enum_all_devs(_nldev_get_dumpit, skb, cb); } static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; u32 index; u32 port; int err; err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { err = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { err = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, 0); if (!nlh) { err = -EMSGSIZE; goto err_free; } err = fill_port_info(msg, device, port, sock_net(skb->sk)); if (err) goto err_free; nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: ib_device_put(device); return err; } static int nldev_port_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; int start = cb->args[0]; struct nlmsghdr *nlh; u32 idx = 0; u32 ifindex; int err; unsigned int p; err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NULL); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), ifindex); if (!device) return -EINVAL; rdma_for_each_port (device, p) { /* * The dumpit function returns all information from specific * index. This specific index is taken from the netlink * messages request sent by user and it is available * in cb->args[0]. * * Usually, the user doesn't fill this field and it causes * to return everything. 
* */ if (idx < start) { idx++; continue; } nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_PORT_GET), 0, NLM_F_MULTI); if (!nlh || fill_port_info(skb, device, p, sock_net(skb->sk))) { nlmsg_cancel(skb, nlh); goto out; } idx++; nlmsg_end(skb, nlh); } out: ib_device_put(device); cb->args[0] = idx; return skb->len; } static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; u32 index; int ret; ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 0, 0); if (!nlh) { ret = -EMSGSIZE; goto err_free; } ret = fill_res_info(msg, device); if (ret) goto err_free; nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: ib_device_put(device); return ret; } static int _nldev_res_get_dumpit(struct ib_device *device, struct sk_buff *skb, struct netlink_callback *cb, unsigned int idx) { int start = cb->args[0]; struct nlmsghdr *nlh; if (idx < start) return 0; nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 0, NLM_F_MULTI); if (!nlh || fill_res_info(skb, device)) { nlmsg_cancel(skb, nlh); goto out; } nlmsg_end(skb, nlh); idx++; out: cb->args[0] = idx; return skb->len; } static int nldev_res_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb); } struct nldev_fill_res_entry { enum rdma_nldev_attr nldev_attr; u8 flags; u32 entry; u32 id; }; enum nldev_res_flags { NLDEV_PER_DEV = 1 << 0, }; static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = { [RDMA_RESTRACK_QP] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_QP, .entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY, .id = RDMA_NLDEV_ATTR_RES_LQPN, }, [RDMA_RESTRACK_CM_ID] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID, .entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY, .id = RDMA_NLDEV_ATTR_RES_CM_IDN, }, [RDMA_RESTRACK_CQ] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_CQ, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY, .id = RDMA_NLDEV_ATTR_RES_CQN, }, [RDMA_RESTRACK_MR] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_MR, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY, .id = RDMA_NLDEV_ATTR_RES_MRN, }, [RDMA_RESTRACK_PD] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_PD, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY, .id = RDMA_NLDEV_ATTR_RES_PDN, }, [RDMA_RESTRACK_COUNTER] = { .nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER, .entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY, .id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID, }, [RDMA_RESTRACK_CTX] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_CTX, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_CTX_ENTRY, .id = RDMA_NLDEV_ATTR_RES_CTXN, }, [RDMA_RESTRACK_SRQ] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_SRQ, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_SRQ_ENTRY, .id = RDMA_NLDEV_ATTR_RES_SRQN, }, }; static int res_get_common_doit(struct sk_buff *skb, struct 
nlmsghdr *nlh, struct netlink_ext_ack *extack, enum rdma_restrack_type res_type, res_fill_func_t fill_func) { const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct rdma_restrack_entry *res; struct ib_device *device; u32 index, id, port = 0; bool has_cap_net_admin; struct sk_buff *msg; int ret; ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err; } } if ((port && fe->flags & NLDEV_PER_DEV) || (!port && ~fe->flags & NLDEV_PER_DEV)) { ret = -EINVAL; goto err; } id = nla_get_u32(tb[fe->id]); res = rdma_restrack_get_byid(device, res_type, id); if (IS_ERR(res)) { ret = PTR_ERR(res); goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err_get; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NL_GET_OP(nlh->nlmsg_type)), 0, 0); if (!nlh || fill_nldev_handle(msg, device)) { ret = -EMSGSIZE; goto err_free; } has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN); ret = fill_func(msg, has_cap_net_admin, res, port); if (ret) goto err_free; rdma_restrack_put(res); nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err_get: rdma_restrack_put(res); err: ib_device_put(device); return ret; } static int res_get_common_dumpit(struct sk_buff *skb, struct netlink_callback *cb, enum rdma_restrack_type res_type, res_fill_func_t fill_func) { const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; int err, ret = 0, idx = 0; struct nlattr *table_attr; struct nlattr *entry_attr; struct ib_device *device; int start = cb->args[0]; bool has_cap_net_admin; struct nlmsghdr *nlh; unsigned long id; u32 index, port = 0; bool filled = false; err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NULL); /* * Right now, we are expecting the device index to get res information, * but it is possible to extend this code to return all devices in * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX. * if it doesn't exist, we will iterate over all devices. * * But it is not needed for now. 
*/ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; /* * If no PORT_INDEX is supplied, we will return all QPs from that device */ if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err_index; } } nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NL_GET_OP(cb->nlh->nlmsg_type)), 0, NLM_F_MULTI); if (!nlh || fill_nldev_handle(skb, device)) { ret = -EMSGSIZE; goto err; } table_attr = nla_nest_start_noflag(skb, fe->nldev_attr); if (!table_attr) { ret = -EMSGSIZE; goto err; } has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN); rt = &device->res[res_type]; xa_lock(&rt->xa); /* * FIXME: if the skip ahead is something common this loop should * use xas_for_each & xas_pause to optimize, we can have a lot of * objects. */ xa_for_each(&rt->xa, id, res) { if (idx < start || !rdma_restrack_get(res)) goto next; xa_unlock(&rt->xa); filled = true; entry_attr = nla_nest_start_noflag(skb, fe->entry); if (!entry_attr) { ret = -EMSGSIZE; rdma_restrack_put(res); goto msg_full; } ret = fill_func(skb, has_cap_net_admin, res, port); rdma_restrack_put(res); if (ret) { nla_nest_cancel(skb, entry_attr); if (ret == -EMSGSIZE) goto msg_full; if (ret == -EAGAIN) goto again; goto res_err; } nla_nest_end(skb, entry_attr); again: xa_lock(&rt->xa); next: idx++; } xa_unlock(&rt->xa); msg_full: nla_nest_end(skb, table_attr); nlmsg_end(skb, nlh); cb->args[0] = idx; /* * No more entries to fill, cancel the message and * return 0 to mark end of dumpit. */ if (!filled) goto err; ib_device_put(device); return skb->len; res_err: nla_nest_cancel(skb, table_attr); err: nlmsg_cancel(skb, nlh); err_index: ib_device_put(device); return ret; } #define RES_GET_FUNCS(name, type) \ static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \ struct netlink_callback *cb) \ { \ return res_get_common_dumpit(skb, cb, type, \ fill_res_##name##_entry); \ } \ static int nldev_res_get_##name##_doit(struct sk_buff *skb, \ struct nlmsghdr *nlh, \ struct netlink_ext_ack *extack) \ { \ return res_get_common_doit(skb, nlh, extack, type, \ fill_res_##name##_entry); \ } RES_GET_FUNCS(qp, RDMA_RESTRACK_QP); RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP); RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID); RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ); RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ); RES_GET_FUNCS(pd, RDMA_RESTRACK_PD); RES_GET_FUNCS(mr, RDMA_RESTRACK_MR); RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR); RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER); RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX); RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ); static LIST_HEAD(link_ops); static DECLARE_RWSEM(link_ops_rwsem); static const struct rdma_link_ops *link_ops_get(const char *type) { const struct rdma_link_ops *ops; list_for_each_entry(ops, &link_ops, list) { if (!strcmp(ops->type, type)) goto out; } ops = NULL; out: return ops; } void rdma_link_register(struct rdma_link_ops *ops) { down_write(&link_ops_rwsem); if (WARN_ON_ONCE(link_ops_get(ops->type))) goto out; list_add(&ops->list, &link_ops); out: up_write(&link_ops_rwsem); } EXPORT_SYMBOL(rdma_link_register); void rdma_link_unregister(struct rdma_link_ops *ops) { down_write(&link_ops_rwsem); list_del(&ops->list); up_write(&link_ops_rwsem); } EXPORT_SYMBOL(rdma_link_unregister); static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr 
*nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; char ibdev_name[IB_DEVICE_NAME_MAX]; const struct rdma_link_ops *ops; char ndev_name[IFNAMSIZ]; struct net_device *ndev; char type[IFNAMSIZ]; int err; err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] || !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME]) return -EINVAL; nla_strscpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME], sizeof(ibdev_name)); if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0) return -EINVAL; nla_strscpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type)); nla_strscpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME], sizeof(ndev_name)); ndev = dev_get_by_name(sock_net(skb->sk), ndev_name); if (!ndev) return -ENODEV; down_read(&link_ops_rwsem); ops = link_ops_get(type); #ifdef CONFIG_MODULES if (!ops) { up_read(&link_ops_rwsem); request_module("rdma-link-%s", type); down_read(&link_ops_rwsem); ops = link_ops_get(type); } #endif err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL; up_read(&link_ops_rwsem); dev_put(ndev); return err; } static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; u32 index; int err; err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; if (!(device->attrs.kernel_cap_flags & IBK_ALLOW_USER_UNREG)) { ib_device_put(device); return -EINVAL; } ib_unregister_device_and_put(device); return 0; } static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE]; struct ib_client_nl_info data = {}; struct ib_device *ibdev = NULL; struct sk_buff *msg; u32 index; int err; err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE]) return -EINVAL; nla_strscpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE], sizeof(client_name)); if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) { index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); ibdev = ib_device_get_by_index(sock_net(skb->sk), index); if (!ibdev) return -EINVAL; if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(ibdev, data.port)) { err = -EINVAL; goto out_put; } } else { data.port = -1; } } else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { return -EINVAL; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { err = -ENOMEM; goto out_put; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET_CHARDEV), 0, 0); if (!nlh) { err = -EMSGSIZE; goto out_nlmsg; } data.nl_msg = msg; err = ib_get_client_nl_info(ibdev, client_name, &data); if (err) goto out_nlmsg; err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV, huge_encode_dev(data.cdev->devt), RDMA_NLDEV_ATTR_PAD); if (err) goto out_data; err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi, RDMA_NLDEV_ATTR_PAD); if (err) goto out_data; if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME, dev_name(data.cdev))) { err = -EMSGSIZE; goto out_data; } nlmsg_end(msg, nlh); put_device(data.cdev); if (ibdev) ib_device_put(ibdev); return 
rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); out_data: put_device(data.cdev); out_nlmsg: nlmsg_free(msg); out_put: if (ibdev) ib_device_put(ibdev); return err; } static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct sk_buff *msg; int err; err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err) return err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_SYS_GET), 0, 0); if (!nlh) { nlmsg_free(msg); return -EMSGSIZE; } err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE, (u8)ib_devices_shared_netns); if (err) { nlmsg_free(msg); return err; } /* * Copy-on-fork is supported. * See commits: * 70e806e4e645 ("mm: Do early cow for pinned pages during fork() for ptes") * 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm") * for more details. Don't backport this without them. * * Return value ignored on purpose, assume copy-on-fork is not * supported in case of failure. */ nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1); nlmsg_end(msg, nlh); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); } static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; u8 enable; int err; err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]) return -EINVAL; enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]); /* Only 0 and 1 are supported */ if (enable > 1) return -EINVAL; err = rdma_compatdev_set(enable); return err; } static int nldev_stat_set_mode_doit(struct sk_buff *msg, struct netlink_ext_ack *extack, struct nlattr *tb[], struct ib_device *device, u32 port) { u32 mode, mask = 0, qpn, cntn = 0; int ret; /* Currently only counter for QP is supported */ if (!tb[RDMA_NLDEV_ATTR_STAT_RES] || nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) return -EINVAL; mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]); if (mode == RDMA_COUNTER_MODE_AUTO) { if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]) mask = nla_get_u32( tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]); return rdma_counter_set_auto_mode(device, port, mask, extack); } if (!tb[RDMA_NLDEV_ATTR_RES_LQPN]) return -EINVAL; qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) { cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); ret = rdma_counter_bind_qpn(device, port, qpn, cntn); if (ret) return ret; } else { ret = rdma_counter_bind_qpn_alloc(device, port, qpn, &cntn); if (ret) return ret; } if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { ret = -EMSGSIZE; goto err_fill; } return 0; err_fill: rdma_counter_unbind_qpn(device, port, qpn, cntn); return ret; } static int nldev_stat_set_counter_dynamic_doit(struct nlattr *tb[], struct ib_device *device, u32 port) { struct rdma_hw_stats *stats; struct nlattr *entry_attr; unsigned long *target; int rem, i, ret = 0; u32 index; stats = ib_get_hw_stats_port(device, port); if (!stats) return -EINVAL; target = kcalloc(BITS_TO_LONGS(stats->num_counters), sizeof(*stats->is_disabled), GFP_KERNEL); if (!target) return -ENOMEM; nla_for_each_nested(entry_attr, tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS], rem) { index = nla_get_u32(entry_attr); 
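		/* Editor's note: only counters flagged IB_STAT_FLAG_OPTIONAL may be
		 * toggled dynamically; out-of-range or non-optional indexes are
		 * rejected just below. */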
if ((index >= stats->num_counters) || !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) { ret = -EINVAL; goto out; } set_bit(index, target); } for (i = 0; i < stats->num_counters; i++) { if (!(stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL)) continue; ret = rdma_counter_modify(device, port, i, test_bit(i, target)); if (ret) goto out; } out: kfree(target); return ret; } static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; u32 index, port; int ret; ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err_put_device; } if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] && !tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) { ret = -EINVAL; goto err_put_device; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err_put_device; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_SET), 0, 0); if (!nlh || fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { ret = -EMSGSIZE; goto err_free_msg; } if (tb[RDMA_NLDEV_ATTR_STAT_MODE]) { ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port); if (ret) goto err_free_msg; } if (tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) { ret = nldev_stat_set_counter_dynamic_doit(tb, device, port); if (ret) goto err_free_msg; } nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free_msg: nlmsg_free(msg); err_put_device: ib_device_put(device); return ret; } static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; u32 index, port, qpn, cntn; int ret; ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] || !tb[RDMA_NLDEV_ATTR_RES_LQPN]) return -EINVAL; if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_SET), 0, 0); if (!nlh) { ret = -EMSGSIZE; goto err_fill; } cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); if (fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { ret = -EMSGSIZE; goto err_fill; } ret = rdma_counter_unbind_qpn(device, port, qpn, cntn); if (ret) goto err_fill; nlmsg_end(msg, nlh); 
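	/* Editor's note: the QP was successfully unbound from the counter above;
	 * the reply carrying the counter id and LQPN is delivered below. */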
ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_fill: nlmsg_free(msg); err: ib_device_put(device); return ret; } static int stat_get_doit_default_counter(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack, struct nlattr *tb[]) { struct rdma_hw_stats *stats; struct nlattr *table_attr; struct ib_device *device; int ret, num_cnts, i; struct sk_buff *msg; u32 index, port; u64 v; if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; if (!device->ops.alloc_hw_port_stats || !device->ops.get_hw_stats) { ret = -EINVAL; goto err; } port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); stats = ib_get_hw_stats_port(device, port); if (!stats) { ret = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET), 0, 0); if (!nlh || fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { ret = -EMSGSIZE; goto err_msg; } mutex_lock(&stats->lock); num_cnts = device->ops.get_hw_stats(device, stats, port, 0); if (num_cnts < 0) { ret = -EINVAL; goto err_stats; } table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); if (!table_attr) { ret = -EMSGSIZE; goto err_stats; } for (i = 0; i < num_cnts; i++) { if (test_bit(i, stats->is_disabled)) continue; v = stats->value[i] + rdma_counter_get_hwstat_value(device, port, i); if (rdma_nl_stat_hwcounter_entry(msg, stats->descs[i].name, v)) { ret = -EMSGSIZE; goto err_table; } } nla_nest_end(msg, table_attr); mutex_unlock(&stats->lock); nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_table: nla_nest_cancel(msg, table_attr); err_stats: mutex_unlock(&stats->lock); err_msg: nlmsg_free(msg); err: ib_device_put(device); return ret; } static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack, struct nlattr *tb[]) { static enum rdma_nl_counter_mode mode; static enum rdma_nl_counter_mask mask; struct ib_device *device; struct sk_buff *msg; u32 index, port; int ret; if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) return nldev_res_get_counter_doit(skb, nlh, extack); if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET), 0, 0); if (!nlh) { ret = -EMSGSIZE; goto err_msg; } ret = rdma_counter_get_mode(device, port, &mode, &mask); if (ret) goto err_msg; if (fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) { ret = -EMSGSIZE; goto err_msg; } if ((mode == RDMA_COUNTER_MODE_AUTO) && nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) { ret = -EMSGSIZE; goto err_msg; } nlmsg_end(msg, nlh); ib_device_put(device); 
return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_msg: nlmsg_free(msg); err: ib_device_put(device); return ret; } static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; int ret; ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret) return -EINVAL; if (!tb[RDMA_NLDEV_ATTR_STAT_RES]) return stat_get_doit_default_counter(skb, nlh, extack, tb); switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) { case RDMA_NLDEV_ATTR_RES_QP: ret = stat_get_doit_qp(skb, nlh, extack, tb); break; case RDMA_NLDEV_ATTR_RES_MR: ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR, fill_stat_mr_entry); break; default: ret = -EINVAL; break; } return ret; } static int nldev_stat_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; int ret; ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NULL); if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES]) return -EINVAL; switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) { case RDMA_NLDEV_ATTR_RES_QP: ret = nldev_res_get_counter_dumpit(skb, cb); break; case RDMA_NLDEV_ATTR_RES_MR: ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR, fill_stat_mr_entry); break; default: ret = -EINVAL; break; } return ret; } static int nldev_stat_get_counter_status_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX], *table, *entry; struct rdma_hw_stats *stats; struct ib_device *device; struct sk_buff *msg; u32 devid, port; int ret, i; ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; devid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), devid); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err; } stats = ib_get_hw_stats_port(device, port); if (!stats) { ret = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err; } nlh = nlmsg_put( msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET_STATUS), 0, 0); ret = -EMSGSIZE; if (!nlh || fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) goto err_msg; table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); if (!table) goto err_msg; mutex_lock(&stats->lock); for (i = 0; i < stats->num_counters; i++) { entry = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); if (!entry) goto err_msg_table; if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, stats->descs[i].name) || nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i)) goto err_msg_entry; if ((stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) && (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC, !test_bit(i, stats->is_disabled)))) goto err_msg_entry; nla_nest_end(msg, entry); } mutex_unlock(&stats->lock); nla_nest_end(msg, table); nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_msg_entry: nla_nest_cancel(msg, entry); err_msg_table: mutex_unlock(&stats->lock); nla_nest_cancel(msg, table); err_msg: nlmsg_free(msg); err: ib_device_put(device); return ret; } static const struct rdma_nl_cbs 
nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { [RDMA_NLDEV_CMD_GET] = { .doit = nldev_get_doit, .dump = nldev_get_dumpit, }, [RDMA_NLDEV_CMD_GET_CHARDEV] = { .doit = nldev_get_chardev, }, [RDMA_NLDEV_CMD_SET] = { .doit = nldev_set_doit, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_NEWLINK] = { .doit = nldev_newlink, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_DELLINK] = { .doit = nldev_dellink, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_PORT_GET] = { .doit = nldev_port_get_doit, .dump = nldev_port_get_dumpit, }, [RDMA_NLDEV_CMD_RES_GET] = { .doit = nldev_res_get_doit, .dump = nldev_res_get_dumpit, }, [RDMA_NLDEV_CMD_RES_QP_GET] = { .doit = nldev_res_get_qp_doit, .dump = nldev_res_get_qp_dumpit, }, [RDMA_NLDEV_CMD_RES_CM_ID_GET] = { .doit = nldev_res_get_cm_id_doit, .dump = nldev_res_get_cm_id_dumpit, }, [RDMA_NLDEV_CMD_RES_CQ_GET] = { .doit = nldev_res_get_cq_doit, .dump = nldev_res_get_cq_dumpit, }, [RDMA_NLDEV_CMD_RES_MR_GET] = { .doit = nldev_res_get_mr_doit, .dump = nldev_res_get_mr_dumpit, }, [RDMA_NLDEV_CMD_RES_PD_GET] = { .doit = nldev_res_get_pd_doit, .dump = nldev_res_get_pd_dumpit, }, [RDMA_NLDEV_CMD_RES_CTX_GET] = { .doit = nldev_res_get_ctx_doit, .dump = nldev_res_get_ctx_dumpit, }, [RDMA_NLDEV_CMD_RES_SRQ_GET] = { .doit = nldev_res_get_srq_doit, .dump = nldev_res_get_srq_dumpit, }, [RDMA_NLDEV_CMD_SYS_GET] = { .doit = nldev_sys_get_doit, }, [RDMA_NLDEV_CMD_SYS_SET] = { .doit = nldev_set_sys_set_doit, }, [RDMA_NLDEV_CMD_STAT_SET] = { .doit = nldev_stat_set_doit, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_STAT_GET] = { .doit = nldev_stat_get_doit, .dump = nldev_stat_get_dumpit, }, [RDMA_NLDEV_CMD_STAT_DEL] = { .doit = nldev_stat_del_doit, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_RES_QP_GET_RAW] = { .doit = nldev_res_get_qp_raw_doit, .dump = nldev_res_get_qp_raw_dumpit, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = { .doit = nldev_res_get_cq_raw_doit, .dump = nldev_res_get_cq_raw_dumpit, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_RES_MR_GET_RAW] = { .doit = nldev_res_get_mr_raw_doit, .dump = nldev_res_get_mr_raw_dumpit, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_STAT_GET_STATUS] = { .doit = nldev_stat_get_counter_status_doit, }, }; void __init nldev_init(void) { rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table); } void nldev_exit(void) { rdma_nl_unregister(RDMA_NL_NLDEV); } MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
linux-master
drivers/infiniband/core/nldev.c
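/*
 * Illustrative sketch, not part of nldev.c above: the skeleton shared by the
 * *_doit handlers in that file.  Every handler (1) parses attributes against
 * nldev_policy, (2) resolves the device from RDMA_NLDEV_ATTR_DEV_INDEX and
 * validates the port, (3) builds a reply that echoes the device/port handle,
 * and (4) unicasts it back to the requester, dropping the device reference on
 * every exit path.  "example_doit" is a hypothetical name used only for this
 * sketch; the calls themselves are the ones used throughout the file, and the
 * reply type reuses the existing RDMA_NLDEV_CMD_GET command.
 */
static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index, port;
	int ret;

	/* 1. Parse and check the mandatory attributes. */
	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	/* 2. Take a reference on the device and validate the port. */
	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;
	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	/* 3. Build the reply; here it only echoes the device/port handle. */
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);
	if (!nlh || fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
		ret = -EMSGSIZE;
		goto err_free_msg;
	}
	nlmsg_end(msg, nlh);

	/* 4. Hand the reply to the requester and drop the device ref. */
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free_msg:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}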
/* * Copyright (c) 2005 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/export.h> #include <rdma/ib_marshall.h> #define OPA_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL) static int rdma_ah_conv_opa_to_ib(struct ib_device *dev, struct rdma_ah_attr *ib, struct rdma_ah_attr *opa) { struct ib_port_attr port_attr; int ret = 0; /* Do structure copy and the over-write fields */ *ib = *opa; ib->type = RDMA_AH_ATTR_TYPE_IB; rdma_ah_set_grh(ib, NULL, 0, 0, 1, 0); if (ib_query_port(dev, opa->port_num, &port_attr)) { /* Set to default subnet to indicate error */ rdma_ah_set_subnet_prefix(ib, OPA_DEFAULT_GID_PREFIX); ret = -EINVAL; } else { rdma_ah_set_subnet_prefix(ib, cpu_to_be64(port_attr.subnet_prefix)); } rdma_ah_set_interface_id(ib, OPA_MAKE_ID(rdma_ah_get_dlid(opa))); return ret; } void ib_copy_ah_attr_to_user(struct ib_device *device, struct ib_uverbs_ah_attr *dst, struct rdma_ah_attr *ah_attr) { struct rdma_ah_attr *src = ah_attr; struct rdma_ah_attr conv_ah; memset(&dst->grh, 0, sizeof(dst->grh)); if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) && (rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) && (!rdma_ah_conv_opa_to_ib(device, &conv_ah, ah_attr))) src = &conv_ah; dst->dlid = rdma_ah_get_dlid(src); dst->sl = rdma_ah_get_sl(src); dst->src_path_bits = rdma_ah_get_path_bits(src); dst->static_rate = rdma_ah_get_static_rate(src); dst->is_global = rdma_ah_get_ah_flags(src) & IB_AH_GRH ? 
1 : 0; if (dst->is_global) { const struct ib_global_route *grh = rdma_ah_read_grh(src); memcpy(dst->grh.dgid, grh->dgid.raw, sizeof(grh->dgid)); dst->grh.flow_label = grh->flow_label; dst->grh.sgid_index = grh->sgid_index; dst->grh.hop_limit = grh->hop_limit; dst->grh.traffic_class = grh->traffic_class; } dst->port_num = rdma_ah_get_port_num(src); dst->reserved = 0; } EXPORT_SYMBOL(ib_copy_ah_attr_to_user); void ib_copy_qp_attr_to_user(struct ib_device *device, struct ib_uverbs_qp_attr *dst, struct ib_qp_attr *src) { dst->qp_state = src->qp_state; dst->cur_qp_state = src->cur_qp_state; dst->path_mtu = src->path_mtu; dst->path_mig_state = src->path_mig_state; dst->qkey = src->qkey; dst->rq_psn = src->rq_psn; dst->sq_psn = src->sq_psn; dst->dest_qp_num = src->dest_qp_num; dst->qp_access_flags = src->qp_access_flags; dst->max_send_wr = src->cap.max_send_wr; dst->max_recv_wr = src->cap.max_recv_wr; dst->max_send_sge = src->cap.max_send_sge; dst->max_recv_sge = src->cap.max_recv_sge; dst->max_inline_data = src->cap.max_inline_data; ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr); ib_copy_ah_attr_to_user(device, &dst->alt_ah_attr, &src->alt_ah_attr); dst->pkey_index = src->pkey_index; dst->alt_pkey_index = src->alt_pkey_index; dst->en_sqd_async_notify = src->en_sqd_async_notify; dst->sq_draining = src->sq_draining; dst->max_rd_atomic = src->max_rd_atomic; dst->max_dest_rd_atomic = src->max_dest_rd_atomic; dst->min_rnr_timer = src->min_rnr_timer; dst->port_num = src->port_num; dst->timeout = src->timeout; dst->retry_cnt = src->retry_cnt; dst->rnr_retry = src->rnr_retry; dst->alt_port_num = src->alt_port_num; dst->alt_timeout = src->alt_timeout; memset(dst->reserved, 0, sizeof(dst->reserved)); } EXPORT_SYMBOL(ib_copy_qp_attr_to_user); static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, struct sa_path_rec *src) { memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid)); memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid)); dst->dlid = htons(ntohl(sa_path_get_dlid(src))); dst->slid = htons(ntohl(sa_path_get_slid(src))); dst->raw_traffic = sa_path_get_raw_traffic(src); dst->flow_label = src->flow_label; dst->hop_limit = src->hop_limit; dst->traffic_class = src->traffic_class; dst->reversible = src->reversible; dst->numb_path = src->numb_path; dst->pkey = src->pkey; dst->sl = src->sl; dst->mtu_selector = src->mtu_selector; dst->mtu = src->mtu; dst->rate_selector = src->rate_selector; dst->rate = src->rate; dst->packet_life_time = src->packet_life_time; dst->preference = src->preference; dst->packet_life_time_selector = src->packet_life_time_selector; } void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, struct sa_path_rec *src) { struct sa_path_rec rec; if (src->rec_type == SA_PATH_REC_TYPE_OPA) { sa_convert_path_opa_to_ib(&rec, src); __ib_copy_path_rec_to_user(dst, &rec); return; } __ib_copy_path_rec_to_user(dst, src); } EXPORT_SYMBOL(ib_copy_path_rec_to_user); void ib_copy_path_rec_from_user(struct sa_path_rec *dst, struct ib_user_path_rec *src) { u32 slid, dlid; memset(dst, 0, sizeof(*dst)); if ((ib_is_opa_gid((union ib_gid *)src->sgid)) || (ib_is_opa_gid((union ib_gid *)src->dgid))) { dst->rec_type = SA_PATH_REC_TYPE_OPA; slid = opa_get_lid_from_gid((union ib_gid *)src->sgid); dlid = opa_get_lid_from_gid((union ib_gid *)src->dgid); } else { dst->rec_type = SA_PATH_REC_TYPE_IB; slid = ntohs(src->slid); dlid = ntohs(src->dlid); } memcpy(dst->dgid.raw, src->dgid, sizeof dst->dgid); memcpy(dst->sgid.raw, src->sgid, sizeof dst->sgid); sa_path_set_dlid(dst, dlid); 
sa_path_set_slid(dst, slid); sa_path_set_raw_traffic(dst, src->raw_traffic); dst->flow_label = src->flow_label; dst->hop_limit = src->hop_limit; dst->traffic_class = src->traffic_class; dst->reversible = src->reversible; dst->numb_path = src->numb_path; dst->pkey = src->pkey; dst->sl = src->sl; dst->mtu_selector = src->mtu_selector; dst->mtu = src->mtu; dst->rate_selector = src->rate_selector; dst->rate = src->rate; dst->packet_life_time = src->packet_life_time; dst->preference = src->preference; dst->packet_life_time_selector = src->packet_life_time_selector; /* TODO: No need to set this */ sa_path_set_dmac_zero(dst); } EXPORT_SYMBOL(ib_copy_path_rec_from_user);
linux-master
drivers/infiniband/core/uverbs_marshall.c
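/*
 * Illustrative sketch, not part of uverbs_marshall.c above: how a caller in
 * the uverbs/ucma layer round-trips a path record between the userspace wire
 * format and the kernel representation.  ib_copy_path_rec_from_user() picks
 * SA_PATH_REC_TYPE_OPA or SA_PATH_REC_TYPE_IB by inspecting the GIDs, and
 * ib_copy_path_rec_to_user() converts an OPA record back to the IB wire
 * layout before copying.  "marshal_roundtrip" is a hypothetical helper name
 * used only for this sketch.
 */
static void marshal_roundtrip(struct ib_user_path_rec *user_rec)
{
	struct sa_path_rec krec;
	struct ib_user_path_rec out = {};

	/* user -> kernel: rec_type, DLID and SLID are derived from the GIDs */
	ib_copy_path_rec_from_user(&krec, user_rec);

	/* kernel -> user: an OPA record is converted to IB format first */
	ib_copy_path_rec_to_user(&out, &krec);
}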
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2019 Mellanox Technologies. All rights reserved. */ #include <rdma/ib_verbs.h> #include <rdma/rdma_counter.h> #include "core_priv.h" #include "restrack.h" #define ALL_AUTO_MODE_MASKS (RDMA_COUNTER_MASK_QP_TYPE | RDMA_COUNTER_MASK_PID) static int __counter_set_mode(struct rdma_port_counter *port_counter, enum rdma_nl_counter_mode new_mode, enum rdma_nl_counter_mask new_mask) { if (new_mode == RDMA_COUNTER_MODE_AUTO) { if (new_mask & (~ALL_AUTO_MODE_MASKS)) return -EINVAL; if (port_counter->num_counters) return -EBUSY; } port_counter->mode.mode = new_mode; port_counter->mode.mask = new_mask; return 0; } /* * rdma_counter_set_auto_mode() - Turn on/off per-port auto mode * * @dev: Device to operate * @port: Port to use * @mask: Mask to configure * @extack: Message to the user * * Return 0 on success. If counter mode wasn't changed then it is considered * as success as well. * Return -EBUSY when changing to auto mode while there are bounded counters. * */ int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port, enum rdma_nl_counter_mask mask, struct netlink_ext_ack *extack) { struct rdma_port_counter *port_counter; enum rdma_nl_counter_mode mode; int ret; port_counter = &dev->port_data[port].port_counter; if (!port_counter->hstats) return -EOPNOTSUPP; mutex_lock(&port_counter->lock); if (mask) mode = RDMA_COUNTER_MODE_AUTO; else mode = (port_counter->num_counters) ? RDMA_COUNTER_MODE_MANUAL : RDMA_COUNTER_MODE_NONE; if (port_counter->mode.mode == mode && port_counter->mode.mask == mask) { ret = 0; goto out; } ret = __counter_set_mode(port_counter, mode, mask); out: mutex_unlock(&port_counter->lock); if (ret == -EBUSY) NL_SET_ERR_MSG( extack, "Modifying auto mode is not allowed when there is a bound QP"); return ret; } static void auto_mode_init_counter(struct rdma_counter *counter, const struct ib_qp *qp, enum rdma_nl_counter_mask new_mask) { struct auto_mode_param *param = &counter->mode.param; counter->mode.mode = RDMA_COUNTER_MODE_AUTO; counter->mode.mask = new_mask; if (new_mask & RDMA_COUNTER_MASK_QP_TYPE) param->qp_type = qp->qp_type; } static int __rdma_counter_bind_qp(struct rdma_counter *counter, struct ib_qp *qp) { int ret; if (qp->counter) return -EINVAL; if (!qp->device->ops.counter_bind_qp) return -EOPNOTSUPP; mutex_lock(&counter->lock); ret = qp->device->ops.counter_bind_qp(counter, qp); mutex_unlock(&counter->lock); return ret; } int rdma_counter_modify(struct ib_device *dev, u32 port, unsigned int index, bool enable) { struct rdma_hw_stats *stats; int ret = 0; if (!dev->ops.modify_hw_stat) return -EOPNOTSUPP; stats = ib_get_hw_stats_port(dev, port); if (!stats || index >= stats->num_counters || !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) return -EINVAL; mutex_lock(&stats->lock); if (enable != test_bit(index, stats->is_disabled)) goto out; ret = dev->ops.modify_hw_stat(dev, port, index, enable); if (ret) goto out; if (enable) clear_bit(index, stats->is_disabled); else set_bit(index, stats->is_disabled); out: mutex_unlock(&stats->lock); return ret; } static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port, struct ib_qp *qp, enum rdma_nl_counter_mode mode) { struct rdma_port_counter *port_counter; struct rdma_counter *counter; int ret; if (!dev->ops.counter_dealloc || !dev->ops.counter_alloc_stats) return NULL; counter = kzalloc(sizeof(*counter), GFP_KERNEL); if (!counter) return NULL; counter->device = dev; counter->port = port; rdma_restrack_new(&counter->res, 
RDMA_RESTRACK_COUNTER); counter->stats = dev->ops.counter_alloc_stats(counter); if (!counter->stats) goto err_stats; port_counter = &dev->port_data[port].port_counter; mutex_lock(&port_counter->lock); switch (mode) { case RDMA_COUNTER_MODE_MANUAL: ret = __counter_set_mode(port_counter, RDMA_COUNTER_MODE_MANUAL, 0); if (ret) { mutex_unlock(&port_counter->lock); goto err_mode; } break; case RDMA_COUNTER_MODE_AUTO: auto_mode_init_counter(counter, qp, port_counter->mode.mask); break; default: ret = -EOPNOTSUPP; mutex_unlock(&port_counter->lock); goto err_mode; } port_counter->num_counters++; mutex_unlock(&port_counter->lock); counter->mode.mode = mode; kref_init(&counter->kref); mutex_init(&counter->lock); ret = __rdma_counter_bind_qp(counter, qp); if (ret) goto err_mode; rdma_restrack_parent_name(&counter->res, &qp->res); rdma_restrack_add(&counter->res); return counter; err_mode: rdma_free_hw_stats_struct(counter->stats); err_stats: rdma_restrack_put(&counter->res); kfree(counter); return NULL; } static void rdma_counter_free(struct rdma_counter *counter) { struct rdma_port_counter *port_counter; port_counter = &counter->device->port_data[counter->port].port_counter; mutex_lock(&port_counter->lock); port_counter->num_counters--; if (!port_counter->num_counters && (port_counter->mode.mode == RDMA_COUNTER_MODE_MANUAL)) __counter_set_mode(port_counter, RDMA_COUNTER_MODE_NONE, 0); mutex_unlock(&port_counter->lock); rdma_restrack_del(&counter->res); rdma_free_hw_stats_struct(counter->stats); kfree(counter); } static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter, enum rdma_nl_counter_mask auto_mask) { struct auto_mode_param *param = &counter->mode.param; bool match = true; if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE) match &= (param->qp_type == qp->qp_type); if (auto_mask & RDMA_COUNTER_MASK_PID) match &= (task_pid_nr(counter->res.task) == task_pid_nr(qp->res.task)); return match; } static int __rdma_counter_unbind_qp(struct ib_qp *qp) { struct rdma_counter *counter = qp->counter; int ret; if (!qp->device->ops.counter_unbind_qp) return -EOPNOTSUPP; mutex_lock(&counter->lock); ret = qp->device->ops.counter_unbind_qp(qp); mutex_unlock(&counter->lock); return ret; } static void counter_history_stat_update(struct rdma_counter *counter) { struct ib_device *dev = counter->device; struct rdma_port_counter *port_counter; int i; port_counter = &dev->port_data[counter->port].port_counter; if (!port_counter->hstats) return; rdma_counter_query_stats(counter); for (i = 0; i < counter->stats->num_counters; i++) port_counter->hstats->value[i] += counter->stats->value[i]; } /* * rdma_get_counter_auto_mode - Find the counter that @qp should be bound * with in auto mode * * Return: The counter (with ref-count increased) if found */ static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp, u32 port) { struct rdma_port_counter *port_counter; struct rdma_counter *counter = NULL; struct ib_device *dev = qp->device; struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; unsigned long id = 0; port_counter = &dev->port_data[port].port_counter; rt = &dev->res[RDMA_RESTRACK_COUNTER]; xa_lock(&rt->xa); xa_for_each(&rt->xa, id, res) { counter = container_of(res, struct rdma_counter, res); if ((counter->device != qp->device) || (counter->port != port)) goto next; if (auto_mode_match(qp, counter, port_counter->mode.mask)) break; next: counter = NULL; } if (counter && !kref_get_unless_zero(&counter->kref)) counter = NULL; xa_unlock(&rt->xa); return counter; } static void 
counter_release(struct kref *kref) { struct rdma_counter *counter; counter = container_of(kref, struct rdma_counter, kref); counter_history_stat_update(counter); counter->device->ops.counter_dealloc(counter); rdma_counter_free(counter); } /* * rdma_counter_bind_qp_auto - Check and bind the QP to a counter base on * the auto-mode rule */ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port) { struct rdma_port_counter *port_counter; struct ib_device *dev = qp->device; struct rdma_counter *counter; int ret; if (!rdma_restrack_is_tracked(&qp->res) || rdma_is_kernel_res(&qp->res)) return 0; if (!rdma_is_port_valid(dev, port)) return -EINVAL; port_counter = &dev->port_data[port].port_counter; if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO) return 0; counter = rdma_get_counter_auto_mode(qp, port); if (counter) { ret = __rdma_counter_bind_qp(counter, qp); if (ret) { kref_put(&counter->kref, counter_release); return ret; } } else { counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_AUTO); if (!counter) return -ENOMEM; } return 0; } /* * rdma_counter_unbind_qp - Unbind a qp from a counter * @force: * true - Decrease the counter ref-count anyway (e.g., qp destroy) */ int rdma_counter_unbind_qp(struct ib_qp *qp, bool force) { struct rdma_counter *counter = qp->counter; int ret; if (!counter) return -EINVAL; ret = __rdma_counter_unbind_qp(qp); if (ret && !force) return ret; kref_put(&counter->kref, counter_release); return 0; } int rdma_counter_query_stats(struct rdma_counter *counter) { struct ib_device *dev = counter->device; int ret; if (!dev->ops.counter_update_stats) return -EINVAL; mutex_lock(&counter->lock); ret = dev->ops.counter_update_stats(counter); mutex_unlock(&counter->lock); return ret; } static u64 get_running_counters_hwstat_sum(struct ib_device *dev, u32 port, u32 index) { struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; struct rdma_counter *counter; unsigned long id = 0; u64 sum = 0; rt = &dev->res[RDMA_RESTRACK_COUNTER]; xa_lock(&rt->xa); xa_for_each(&rt->xa, id, res) { if (!rdma_restrack_get(res)) continue; xa_unlock(&rt->xa); counter = container_of(res, struct rdma_counter, res); if ((counter->device != dev) || (counter->port != port) || rdma_counter_query_stats(counter)) goto next; sum += counter->stats->value[index]; next: xa_lock(&rt->xa); rdma_restrack_put(res); } xa_unlock(&rt->xa); return sum; } /* * rdma_counter_get_hwstat_value() - Get the sum value of all counters on a * specific port, including the running ones and history data */ u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index) { struct rdma_port_counter *port_counter; u64 sum; port_counter = &dev->port_data[port].port_counter; if (!port_counter->hstats) return 0; sum = get_running_counters_hwstat_sum(dev, port, index); sum += port_counter->hstats->value[index]; return sum; } static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num) { struct rdma_restrack_entry *res = NULL; struct ib_qp *qp = NULL; res = rdma_restrack_get_byid(dev, RDMA_RESTRACK_QP, qp_num); if (IS_ERR(res)) return NULL; qp = container_of(res, struct ib_qp, res); if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW)) goto err; return qp; err: rdma_restrack_put(res); return NULL; } static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev, u32 counter_id) { struct rdma_restrack_entry *res; struct rdma_counter *counter; res = rdma_restrack_get_byid(dev, RDMA_RESTRACK_COUNTER, counter_id); if (IS_ERR(res)) return NULL; counter = container_of(res, struct 
rdma_counter, res); kref_get(&counter->kref); rdma_restrack_put(res); return counter; } /* * rdma_counter_bind_qpn() - Bind QP @qp_num to counter @counter_id */ int rdma_counter_bind_qpn(struct ib_device *dev, u32 port, u32 qp_num, u32 counter_id) { struct rdma_port_counter *port_counter; struct rdma_counter *counter; struct ib_qp *qp; int ret; port_counter = &dev->port_data[port].port_counter; if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO) return -EINVAL; qp = rdma_counter_get_qp(dev, qp_num); if (!qp) return -ENOENT; counter = rdma_get_counter_by_id(dev, counter_id); if (!counter) { ret = -ENOENT; goto err; } if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) { ret = -EINVAL; goto err_task; } if ((counter->device != qp->device) || (counter->port != qp->port)) { ret = -EINVAL; goto err_task; } ret = __rdma_counter_bind_qp(counter, qp); if (ret) goto err_task; rdma_restrack_put(&qp->res); return 0; err_task: kref_put(&counter->kref, counter_release); err: rdma_restrack_put(&qp->res); return ret; } /* * rdma_counter_bind_qpn_alloc() - Alloc a counter and bind QP @qp_num to it * The id of new counter is returned in @counter_id */ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port, u32 qp_num, u32 *counter_id) { struct rdma_port_counter *port_counter; struct rdma_counter *counter; struct ib_qp *qp; int ret; if (!rdma_is_port_valid(dev, port)) return -EINVAL; port_counter = &dev->port_data[port].port_counter; if (!port_counter->hstats) return -EOPNOTSUPP; if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO) return -EINVAL; qp = rdma_counter_get_qp(dev, qp_num); if (!qp) return -ENOENT; if (rdma_is_port_valid(dev, qp->port) && (qp->port != port)) { ret = -EINVAL; goto err; } counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_MANUAL); if (!counter) { ret = -ENOMEM; goto err; } if (counter_id) *counter_id = counter->id; rdma_restrack_put(&qp->res); return 0; err: rdma_restrack_put(&qp->res); return ret; } /* * rdma_counter_unbind_qpn() - Unbind QP @qp_num from a counter */ int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port, u32 qp_num, u32 counter_id) { struct rdma_port_counter *port_counter; struct ib_qp *qp; int ret; if (!rdma_is_port_valid(dev, port)) return -EINVAL; qp = rdma_counter_get_qp(dev, qp_num); if (!qp) return -ENOENT; if (rdma_is_port_valid(dev, qp->port) && (qp->port != port)) { ret = -EINVAL; goto out; } port_counter = &dev->port_data[port].port_counter; if (!qp->counter || qp->counter->id != counter_id || port_counter->mode.mode != RDMA_COUNTER_MODE_MANUAL) { ret = -EINVAL; goto out; } ret = rdma_counter_unbind_qp(qp, false); out: rdma_restrack_put(&qp->res); return ret; } int rdma_counter_get_mode(struct ib_device *dev, u32 port, enum rdma_nl_counter_mode *mode, enum rdma_nl_counter_mask *mask) { struct rdma_port_counter *port_counter; port_counter = &dev->port_data[port].port_counter; *mode = port_counter->mode.mode; *mask = port_counter->mode.mask; return 0; } void rdma_counter_init(struct ib_device *dev) { struct rdma_port_counter *port_counter; u32 port, i; if (!dev->port_data) return; rdma_for_each_port(dev, port) { port_counter = &dev->port_data[port].port_counter; port_counter->mode.mode = RDMA_COUNTER_MODE_NONE; mutex_init(&port_counter->lock); if (!dev->ops.alloc_hw_port_stats) continue; port_counter->hstats = dev->ops.alloc_hw_port_stats(dev, port); if (!port_counter->hstats) goto fail; } return; fail: for (i = port; i >= rdma_start_port(dev); i--) { port_counter = &dev->port_data[port].port_counter; 
rdma_free_hw_stats_struct(port_counter->hstats); port_counter->hstats = NULL; mutex_destroy(&port_counter->lock); } } void rdma_counter_release(struct ib_device *dev) { struct rdma_port_counter *port_counter; u32 port; rdma_for_each_port(dev, port) { port_counter = &dev->port_data[port].port_counter; rdma_free_hw_stats_struct(port_counter->hstats); mutex_destroy(&port_counter->lock); } }
linux-master
drivers/infiniband/core/counters.c
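/*
 * Illustrative sketch, not part of counters.c above: the manual-mode
 * lifecycle that nldev drives on behalf of userspace.  A counter is
 * allocated and bound to a QP by number, per-index sums (running counters
 * plus history) can be read at any time, and the QP is later unbound, which
 * drops the counter's reference.  "manual_counter_demo" and its parameters
 * are hypothetical; the three exported helpers are the ones defined above.
 */
static int manual_counter_demo(struct ib_device *dev, u32 port, u32 qp_num,
			       u32 stat_index)
{
	u32 counter_id;
	u64 sum;
	int ret;

	/* Allocate a manual-mode counter and bind the QP to it. */
	ret = rdma_counter_bind_qpn_alloc(dev, port, qp_num, &counter_id);
	if (ret)
		return ret;

	/* Sum over all running counters on the port plus the history data. */
	sum = rdma_counter_get_hwstat_value(dev, port, stat_index);
	pr_debug("port %u stat[%u] = %llu\n", port, stat_index,
		 (unsigned long long)sum);

	/* Unbind the QP; the counter is freed once its kref drops to zero. */
	return rdma_counter_unbind_qpn(dev, port, qp_num, counter_id);
}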
/* * Copyright (c) 2005 Voltaire Inc. All rights reserved. * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved. * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved. * Copyright (c) 2005 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/mutex.h> #include <linux/inetdevice.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <net/arp.h> #include <net/neighbour.h> #include <net/route.h> #include <net/netevent.h> #include <net/ipv6_stubs.h> #include <net/ip6_route.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> #include <rdma/ib_sa.h> #include <rdma/ib.h> #include <rdma/rdma_netlink.h> #include <net/netlink.h> #include "core_priv.h" struct addr_req { struct list_head list; struct sockaddr_storage src_addr; struct sockaddr_storage dst_addr; struct rdma_dev_addr *addr; void *context; void (*callback)(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context); unsigned long timeout; struct delayed_work work; bool resolve_by_gid_attr; /* Consider gid attr in resolve phase */ int status; u32 seq; }; static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0); static DEFINE_SPINLOCK(lock); static LIST_HEAD(req_list); static struct workqueue_struct *addr_wq; static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = { [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY, .len = sizeof(struct rdma_nla_ls_gid), .validation_type = NLA_VALIDATE_MIN, .min = sizeof(struct rdma_nla_ls_gid)}, }; static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh) { struct nlattr *tb[LS_NLA_TYPE_MAX] = {}; int ret; if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR) return false; ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), nlmsg_len(nlh), ib_nl_addr_policy, NULL); if (ret) return false; return true; } static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh) { const struct nlattr *head, *curr; union ib_gid gid; struct addr_req *req; int len, rem; int found = 0; head = (const struct nlattr *)nlmsg_data(nlh); len = nlmsg_len(nlh); nla_for_each_attr(curr, head, len, rem) { if (curr->nla_type == LS_NLA_TYPE_DGID) memcpy(&gid, nla_data(curr), nla_len(curr)); } spin_lock_bh(&lock); list_for_each_entry(req, &req_list, list) { if (nlh->nlmsg_seq != req->seq) 
continue; /* We set the DGID part, the rest was set earlier */ rdma_addr_set_dgid(req->addr, &gid); req->status = 0; found = 1; break; } spin_unlock_bh(&lock); if (!found) pr_info("Couldn't find request waiting for DGID: %pI6\n", &gid); } int ib_nl_handle_ip_res_resp(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { if ((nlh->nlmsg_flags & NLM_F_REQUEST) || !(NETLINK_CB(skb).sk)) return -EPERM; if (ib_nl_is_good_ip_resp(nlh)) ib_nl_process_good_ip_rsep(nlh); return 0; } static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr, const void *daddr, u32 seq, u16 family) { struct sk_buff *skb = NULL; struct nlmsghdr *nlh; struct rdma_ls_ip_resolve_header *header; void *data; size_t size; int attrtype; int len; if (family == AF_INET) { size = sizeof(struct in_addr); attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4; } else { size = sizeof(struct in6_addr); attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6; } len = nla_total_size(sizeof(size)); len += NLMSG_ALIGN(sizeof(*header)); skb = nlmsg_new(len, GFP_KERNEL); if (!skb) return -ENOMEM; data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS, RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST); if (!data) { nlmsg_free(skb); return -ENODATA; } /* Construct the family header first */ header = skb_put(skb, NLMSG_ALIGN(sizeof(*header))); header->ifindex = dev_addr->bound_dev_if; nla_put(skb, attrtype, size, daddr); /* Repair the nlmsg header length */ nlmsg_end(skb, nlh); rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, GFP_KERNEL); /* Make the request retry, so when we get the response from userspace * we will have something. */ return -ENODATA; } int rdma_addr_size(const struct sockaddr *addr) { switch (addr->sa_family) { case AF_INET: return sizeof(struct sockaddr_in); case AF_INET6: return sizeof(struct sockaddr_in6); case AF_IB: return sizeof(struct sockaddr_ib); default: return 0; } } EXPORT_SYMBOL(rdma_addr_size); int rdma_addr_size_in6(struct sockaddr_in6 *addr) { int ret = rdma_addr_size((struct sockaddr *) addr); return ret <= sizeof(*addr) ? ret : 0; } EXPORT_SYMBOL(rdma_addr_size_in6); int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr) { int ret = rdma_addr_size((struct sockaddr *) addr); return ret <= sizeof(*addr) ? ret : 0; } EXPORT_SYMBOL(rdma_addr_size_kss); /** * rdma_copy_src_l2_addr - Copy netdevice source addresses * @dev_addr: Destination address pointer where to copy the addresses * @dev: Netdevice whose source addresses to copy * * rdma_copy_src_l2_addr() copies source addresses from the specified netdevice. * This includes unicast address, broadcast address, device type and * interface index. */ void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr, const struct net_device *dev) { dev_addr->dev_type = dev->type; memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN); memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN); dev_addr->bound_dev_if = dev->ifindex; } EXPORT_SYMBOL(rdma_copy_src_l2_addr); static struct net_device * rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in) { struct net_device *dev = NULL; int ret = -EADDRNOTAVAIL; switch (src_in->sa_family) { case AF_INET: dev = __ip_dev_find(net, ((const struct sockaddr_in *)src_in)->sin_addr.s_addr, false); if (dev) ret = 0; break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: for_each_netdev_rcu(net, dev) { if (ipv6_chk_addr(net, &((const struct sockaddr_in6 *)src_in)->sin6_addr, dev, 1)) { ret = 0; break; } } break; #endif } return ret ? 
ERR_PTR(ret) : dev; } int rdma_translate_ip(const struct sockaddr *addr, struct rdma_dev_addr *dev_addr) { struct net_device *dev; if (dev_addr->bound_dev_if) { dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); if (!dev) return -ENODEV; rdma_copy_src_l2_addr(dev_addr, dev); dev_put(dev); return 0; } rcu_read_lock(); dev = rdma_find_ndev_for_src_ip_rcu(dev_addr->net, addr); if (!IS_ERR(dev)) rdma_copy_src_l2_addr(dev_addr, dev); rcu_read_unlock(); return PTR_ERR_OR_ZERO(dev); } EXPORT_SYMBOL(rdma_translate_ip); static void set_timeout(struct addr_req *req, unsigned long time) { unsigned long delay; delay = time - jiffies; if ((long)delay < 0) delay = 0; mod_delayed_work(addr_wq, &req->work, delay); } static void queue_req(struct addr_req *req) { spin_lock_bh(&lock); list_add_tail(&req->list, &req_list); set_timeout(req, req->timeout); spin_unlock_bh(&lock); } static int ib_nl_fetch_ha(struct rdma_dev_addr *dev_addr, const void *daddr, u32 seq, u16 family) { if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) return -EADDRNOTAVAIL; return ib_nl_ip_send_msg(dev_addr, daddr, seq, family); } static int dst_fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr, const void *daddr) { struct neighbour *n; int ret = 0; n = dst_neigh_lookup(dst, daddr); if (!n) return -ENODATA; if (!(n->nud_state & NUD_VALID)) { neigh_event_send(n, NULL); ret = -ENODATA; } else { neigh_ha_snapshot(dev_addr->dst_dev_addr, n, dst->dev); } neigh_release(n); return ret; } static bool has_gateway(const struct dst_entry *dst, sa_family_t family) { struct rtable *rt; struct rt6_info *rt6; if (family == AF_INET) { rt = container_of(dst, struct rtable, dst); return rt->rt_uses_gateway; } rt6 = container_of(dst, struct rt6_info, dst); return rt6->rt6i_flags & RTF_GATEWAY; } static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr, const struct sockaddr *dst_in, u32 seq) { const struct sockaddr_in *dst_in4 = (const struct sockaddr_in *)dst_in; const struct sockaddr_in6 *dst_in6 = (const struct sockaddr_in6 *)dst_in; const void *daddr = (dst_in->sa_family == AF_INET) ? 
(const void *)&dst_in4->sin_addr.s_addr : (const void *)&dst_in6->sin6_addr; sa_family_t family = dst_in->sa_family; might_sleep(); /* If we have a gateway in IB mode then it must be an IB network */ if (has_gateway(dst, family) && dev_addr->network == RDMA_NETWORK_IB) return ib_nl_fetch_ha(dev_addr, daddr, seq, family); else return dst_fetch_ha(dst, dev_addr, daddr); } static int addr4_resolve(struct sockaddr *src_sock, const struct sockaddr *dst_sock, struct rdma_dev_addr *addr, struct rtable **prt) { struct sockaddr_in *src_in = (struct sockaddr_in *)src_sock; const struct sockaddr_in *dst_in = (const struct sockaddr_in *)dst_sock; __be32 src_ip = src_in->sin_addr.s_addr; __be32 dst_ip = dst_in->sin_addr.s_addr; struct rtable *rt; struct flowi4 fl4; int ret; memset(&fl4, 0, sizeof(fl4)); fl4.daddr = dst_ip; fl4.saddr = src_ip; fl4.flowi4_oif = addr->bound_dev_if; rt = ip_route_output_key(addr->net, &fl4); ret = PTR_ERR_OR_ZERO(rt); if (ret) return ret; src_in->sin_addr.s_addr = fl4.saddr; addr->hoplimit = ip4_dst_hoplimit(&rt->dst); *prt = rt; return 0; } #if IS_ENABLED(CONFIG_IPV6) static int addr6_resolve(struct sockaddr *src_sock, const struct sockaddr *dst_sock, struct rdma_dev_addr *addr, struct dst_entry **pdst) { struct sockaddr_in6 *src_in = (struct sockaddr_in6 *)src_sock; const struct sockaddr_in6 *dst_in = (const struct sockaddr_in6 *)dst_sock; struct flowi6 fl6; struct dst_entry *dst; memset(&fl6, 0, sizeof fl6); fl6.daddr = dst_in->sin6_addr; fl6.saddr = src_in->sin6_addr; fl6.flowi6_oif = addr->bound_dev_if; dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL); if (IS_ERR(dst)) return PTR_ERR(dst); if (ipv6_addr_any(&src_in->sin6_addr)) src_in->sin6_addr = fl6.saddr; addr->hoplimit = ip6_dst_hoplimit(dst); *pdst = dst; return 0; } #else static int addr6_resolve(struct sockaddr *src_sock, const struct sockaddr *dst_sock, struct rdma_dev_addr *addr, struct dst_entry **pdst) { return -EADDRNOTAVAIL; } #endif static int addr_resolve_neigh(const struct dst_entry *dst, const struct sockaddr *dst_in, struct rdma_dev_addr *addr, unsigned int ndev_flags, u32 seq) { int ret = 0; if (ndev_flags & IFF_LOOPBACK) { memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN); } else { if (!(ndev_flags & IFF_NOARP)) { /* If the device doesn't do ARP internally */ ret = fetch_ha(dst, addr, dst_in, seq); } } return ret; } static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr, const struct sockaddr *dst_in, const struct dst_entry *dst, const struct net_device *ndev) { int ret = 0; if (dst->dev->flags & IFF_LOOPBACK) ret = rdma_translate_ip(dst_in, dev_addr); else rdma_copy_src_l2_addr(dev_addr, dst->dev); /* * If there's a gateway and type of device not ARPHRD_INFINIBAND, * we're definitely in RoCE v2 (as RoCE v1 isn't routable) set the * network type accordingly. */ if (has_gateway(dst, dst_in->sa_family) && ndev->type != ARPHRD_INFINIBAND) dev_addr->network = dst_in->sa_family == AF_INET ? RDMA_NETWORK_IPV4 : RDMA_NETWORK_IPV6; else dev_addr->network = RDMA_NETWORK_IB; return ret; } static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr, unsigned int *ndev_flags, const struct sockaddr *dst_in, const struct dst_entry *dst) { struct net_device *ndev = READ_ONCE(dst->dev); *ndev_flags = ndev->flags; /* A physical device must be the RDMA device to use */ if (ndev->flags & IFF_LOOPBACK) { /* * RDMA (IB/RoCE, iWarp) doesn't run on lo interface or * loopback IP address. 
So if route is resolved to loopback * interface, translate that to a real ndev based on non * loopback IP address. */ ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in); if (IS_ERR(ndev)) return -ENODEV; } return copy_src_l2_addr(dev_addr, dst_in, dst, ndev); } static int set_addr_netns_by_gid_rcu(struct rdma_dev_addr *addr) { struct net_device *ndev; ndev = rdma_read_gid_attr_ndev_rcu(addr->sgid_attr); if (IS_ERR(ndev)) return PTR_ERR(ndev); /* * Since we are holding the rcu, reading net and ifindex * are safe without any additional reference; because * change_net_namespace() in net/core/dev.c does rcu sync * after it changes the state to IFF_DOWN and before * updating netdev fields {net, ifindex}. */ addr->net = dev_net(ndev); addr->bound_dev_if = ndev->ifindex; return 0; } static void rdma_addr_set_net_defaults(struct rdma_dev_addr *addr) { addr->net = &init_net; addr->bound_dev_if = 0; } static int addr_resolve(struct sockaddr *src_in, const struct sockaddr *dst_in, struct rdma_dev_addr *addr, bool resolve_neigh, bool resolve_by_gid_attr, u32 seq) { struct dst_entry *dst = NULL; unsigned int ndev_flags = 0; struct rtable *rt = NULL; int ret; if (!addr->net) { pr_warn_ratelimited("%s: missing namespace\n", __func__); return -EINVAL; } rcu_read_lock(); if (resolve_by_gid_attr) { if (!addr->sgid_attr) { rcu_read_unlock(); pr_warn_ratelimited("%s: missing gid_attr\n", __func__); return -EINVAL; } /* * If the request is for a specific gid attribute of the * rdma_dev_addr, derive net from the netdevice of the * GID attribute. */ ret = set_addr_netns_by_gid_rcu(addr); if (ret) { rcu_read_unlock(); return ret; } } if (src_in->sa_family == AF_INET) { ret = addr4_resolve(src_in, dst_in, addr, &rt); dst = &rt->dst; } else { ret = addr6_resolve(src_in, dst_in, addr, &dst); } if (ret) { rcu_read_unlock(); goto done; } ret = rdma_set_src_addr_rcu(addr, &ndev_flags, dst_in, dst); rcu_read_unlock(); /* * Resolve neighbor destination address if requested and * only if src addr translation didn't fail. */ if (!ret && resolve_neigh) ret = addr_resolve_neigh(dst, dst_in, addr, ndev_flags, seq); if (src_in->sa_family == AF_INET) ip_rt_put(rt); else dst_release(dst); done: /* * Clear the addr net to go back to its original state, only if it was * derived from GID attribute in this context. */ if (resolve_by_gid_attr) rdma_addr_set_net_defaults(addr); return ret; } static void process_one_req(struct work_struct *_work) { struct addr_req *req; struct sockaddr *src_in, *dst_in; req = container_of(_work, struct addr_req, work.work); if (req->status == -ENODATA) { src_in = (struct sockaddr *)&req->src_addr; dst_in = (struct sockaddr *)&req->dst_addr; req->status = addr_resolve(src_in, dst_in, req->addr, true, req->resolve_by_gid_attr, req->seq); if (req->status && time_after_eq(jiffies, req->timeout)) { req->status = -ETIMEDOUT; } else if (req->status == -ENODATA) { /* requeue the work for retrying again */ spin_lock_bh(&lock); if (!list_empty(&req->list)) set_timeout(req, req->timeout); spin_unlock_bh(&lock); return; } } req->callback(req->status, (struct sockaddr *)&req->src_addr, req->addr, req->context); req->callback = NULL; spin_lock_bh(&lock); /* * Although the work will normally have been canceled by the workqueue, * it can still be requeued as long as it is on the req_list. 
*/ cancel_delayed_work(&req->work); if (!list_empty(&req->list)) { list_del_init(&req->list); kfree(req); } spin_unlock_bh(&lock); } int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr, struct rdma_dev_addr *addr, unsigned long timeout_ms, void (*callback)(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context), bool resolve_by_gid_attr, void *context) { struct sockaddr *src_in, *dst_in; struct addr_req *req; int ret = 0; req = kzalloc(sizeof *req, GFP_KERNEL); if (!req) return -ENOMEM; src_in = (struct sockaddr *) &req->src_addr; dst_in = (struct sockaddr *) &req->dst_addr; if (src_addr) { if (src_addr->sa_family != dst_addr->sa_family) { ret = -EINVAL; goto err; } memcpy(src_in, src_addr, rdma_addr_size(src_addr)); } else { src_in->sa_family = dst_addr->sa_family; } memcpy(dst_in, dst_addr, rdma_addr_size(dst_addr)); req->addr = addr; req->callback = callback; req->context = context; req->resolve_by_gid_attr = resolve_by_gid_attr; INIT_DELAYED_WORK(&req->work, process_one_req); req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq); req->status = addr_resolve(src_in, dst_in, addr, true, req->resolve_by_gid_attr, req->seq); switch (req->status) { case 0: req->timeout = jiffies; queue_req(req); break; case -ENODATA: req->timeout = msecs_to_jiffies(timeout_ms) + jiffies; queue_req(req); break; default: ret = req->status; goto err; } return ret; err: kfree(req); return ret; } EXPORT_SYMBOL(rdma_resolve_ip); int roce_resolve_route_from_path(struct sa_path_rec *rec, const struct ib_gid_attr *attr) { union { struct sockaddr _sockaddr; struct sockaddr_in _sockaddr_in; struct sockaddr_in6 _sockaddr_in6; } sgid, dgid; struct rdma_dev_addr dev_addr = {}; int ret; might_sleep(); if (rec->roce.route_resolved) return 0; rdma_gid2ip((struct sockaddr *)&sgid, &rec->sgid); rdma_gid2ip((struct sockaddr *)&dgid, &rec->dgid); if (sgid._sockaddr.sa_family != dgid._sockaddr.sa_family) return -EINVAL; if (!attr || !attr->ndev) return -EINVAL; dev_addr.net = &init_net; dev_addr.sgid_attr = attr; ret = addr_resolve((struct sockaddr *)&sgid, (struct sockaddr *)&dgid, &dev_addr, false, true, 0); if (ret) return ret; if ((dev_addr.network == RDMA_NETWORK_IPV4 || dev_addr.network == RDMA_NETWORK_IPV6) && rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2) return -EINVAL; rec->roce.route_resolved = true; return 0; } /** * rdma_addr_cancel - Cancel resolve ip request * @addr: Pointer to address structure given previously * during rdma_resolve_ip(). * rdma_addr_cancel() is synchronous function which cancels any pending * request if there is any. */ void rdma_addr_cancel(struct rdma_dev_addr *addr) { struct addr_req *req, *temp_req; struct addr_req *found = NULL; spin_lock_bh(&lock); list_for_each_entry_safe(req, temp_req, &req_list, list) { if (req->addr == addr) { /* * Removing from the list means we take ownership of * the req */ list_del_init(&req->list); found = req; break; } } spin_unlock_bh(&lock); if (!found) return; /* * sync canceling the work after removing it from the req_list * guarentees no work is running and none will be started. 
*/ cancel_delayed_work_sync(&found->work); kfree(found); } EXPORT_SYMBOL(rdma_addr_cancel); struct resolve_cb_context { struct completion comp; int status; }; static void resolve_cb(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context) { ((struct resolve_cb_context *)context)->status = status; complete(&((struct resolve_cb_context *)context)->comp); } int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, const union ib_gid *dgid, u8 *dmac, const struct ib_gid_attr *sgid_attr, int *hoplimit) { struct rdma_dev_addr dev_addr; struct resolve_cb_context ctx; union { struct sockaddr_in _sockaddr_in; struct sockaddr_in6 _sockaddr_in6; } sgid_addr, dgid_addr; int ret; rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid); rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid); memset(&dev_addr, 0, sizeof(dev_addr)); dev_addr.net = &init_net; dev_addr.sgid_attr = sgid_attr; init_completion(&ctx.comp); ret = rdma_resolve_ip((struct sockaddr *)&sgid_addr, (struct sockaddr *)&dgid_addr, &dev_addr, 1000, resolve_cb, true, &ctx); if (ret) return ret; wait_for_completion(&ctx.comp); ret = ctx.status; if (ret) return ret; memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN); *hoplimit = dev_addr.hoplimit; return 0; } static int netevent_callback(struct notifier_block *self, unsigned long event, void *ctx) { struct addr_req *req; if (event == NETEVENT_NEIGH_UPDATE) { struct neighbour *neigh = ctx; if (neigh->nud_state & NUD_VALID) { spin_lock_bh(&lock); list_for_each_entry(req, &req_list, list) set_timeout(req, jiffies); spin_unlock_bh(&lock); } } return 0; } static struct notifier_block nb = { .notifier_call = netevent_callback }; int addr_init(void) { addr_wq = alloc_ordered_workqueue("ib_addr", 0); if (!addr_wq) return -ENOMEM; register_netevent_notifier(&nb); return 0; } void addr_cleanup(void) { unregister_netevent_notifier(&nb); destroy_workqueue(addr_wq); WARN_ON(!list_empty(&req_list)); }
linux-master
drivers/infiniband/core/addr.c
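/*
 * Illustrative sketch, not part of addr.c above: the asynchronous usage
 * pattern for rdma_resolve_ip().  The request is queued on the ib_addr
 * workqueue and retried until it resolves or times out; the caller either
 * waits for its callback or cancels with rdma_addr_cancel(), after which the
 * callback will not run.  The completion-based wrapper below mirrors
 * rdma_addr_find_l2_eth_by_grh() in that file; "demo_ctx", "demo_resolve_cb"
 * and "demo_resolve" are hypothetical names used only for this sketch.
 */
struct demo_ctx {
	struct completion comp;
	int status;
};

static void demo_resolve_cb(int status, struct sockaddr *src_addr,
			    struct rdma_dev_addr *addr, void *context)
{
	struct demo_ctx *ctx = context;

	ctx->status = status;
	complete(&ctx->comp);
}

static int demo_resolve(struct sockaddr *src, const struct sockaddr *dst,
			struct rdma_dev_addr *dev_addr)
{
	struct demo_ctx ctx;
	int ret;

	init_completion(&ctx.comp);
	/* 1000 ms budget; resolve_by_gid_attr=false, so dev_addr->net is used */
	ret = rdma_resolve_ip(src, dst, dev_addr, 1000, demo_resolve_cb,
			      false, &ctx);
	if (ret)
		return ret;

	wait_for_completion(&ctx.comp);
	return ctx.status;
}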
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "rdma_core.h" #include "uverbs.h" #include <rdma/uverbs_std_types.h> static int uverbs_free_counters(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { struct ib_counters *counters = uobject->object; int ret; if (atomic_read(&counters->usecnt)) return -EBUSY; ret = counters->device->ops.destroy_counters(counters); if (ret) return ret; kfree(counters); return 0; } static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)( struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj = uverbs_attr_get_uobject( attrs, UVERBS_ATTR_CREATE_COUNTERS_HANDLE); struct ib_device *ib_dev = attrs->context->device; struct ib_counters *counters; int ret; /* * This check should be removed once the infrastructure * have the ability to remove methods from parse tree once * such condition is met. 
*/ if (!ib_dev->ops.create_counters) return -EOPNOTSUPP; counters = rdma_zalloc_drv_obj(ib_dev, ib_counters); if (!counters) return -ENOMEM; counters->device = ib_dev; counters->uobject = uobj; uobj->object = counters; atomic_set(&counters->usecnt, 0); ret = ib_dev->ops.create_counters(counters, attrs); if (ret) kfree(counters); return ret; } static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)( struct uverbs_attr_bundle *attrs) { struct ib_counters_read_attr read_attr = {}; const struct uverbs_attr *uattr; struct ib_counters *counters = uverbs_attr_get_obj(attrs, UVERBS_ATTR_READ_COUNTERS_HANDLE); int ret; if (!counters->device->ops.read_counters) return -EOPNOTSUPP; if (!atomic_read(&counters->usecnt)) return -EINVAL; ret = uverbs_get_flags32(&read_attr.flags, attrs, UVERBS_ATTR_READ_COUNTERS_FLAGS, IB_UVERBS_READ_COUNTERS_PREFER_CACHED); if (ret) return ret; uattr = uverbs_attr_get(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF); if (IS_ERR(uattr)) return PTR_ERR(uattr); read_attr.ncounters = uattr->ptr_attr.len / sizeof(u64); read_attr.counters_buff = uverbs_zalloc( attrs, array_size(read_attr.ncounters, sizeof(u64))); if (IS_ERR(read_attr.counters_buff)) return PTR_ERR(read_attr.counters_buff); ret = counters->device->ops.read_counters(counters, &read_attr, attrs); if (ret) return ret; return uverbs_copy_to(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF, read_attr.counters_buff, read_attr.ncounters * sizeof(u64)); } DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_COUNTERS_CREATE, UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_COUNTERS_HANDLE, UVERBS_OBJECT_COUNTERS, UVERBS_ACCESS_NEW, UA_MANDATORY)); DECLARE_UVERBS_NAMED_METHOD_DESTROY( UVERBS_METHOD_COUNTERS_DESTROY, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_COUNTERS_HANDLE, UVERBS_OBJECT_COUNTERS, UVERBS_ACCESS_DESTROY, UA_MANDATORY)); DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_COUNTERS_READ, UVERBS_ATTR_IDR(UVERBS_ATTR_READ_COUNTERS_HANDLE, UVERBS_OBJECT_COUNTERS, UVERBS_ACCESS_READ, UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_READ_COUNTERS_BUFF, UVERBS_ATTR_MIN_SIZE(0), UA_MANDATORY), UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_READ_COUNTERS_FLAGS, enum ib_uverbs_read_counters_flags)); DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_COUNTERS, UVERBS_TYPE_ALLOC_IDR(uverbs_free_counters), &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_CREATE), &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_DESTROY), &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_READ)); const struct uapi_definition uverbs_def_obj_counters[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_COUNTERS, UAPI_DEF_OBJ_NEEDS_FN(destroy_counters)), {} };
linux-master
drivers/infiniband/core/uverbs_std_types_counters.c
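/*
 * Illustrative sketch, not part of uverbs_std_types_counters.c above: the
 * driver side of the read path.  UVERBS_METHOD_COUNTERS_READ sizes
 * read_attr.counters_buff from the userspace buffer and then calls the
 * device's ->read_counters() op, which is expected to fill up to
 * read_attr->ncounters u64 values.  "demo_read_counters" and its (empty)
 * hardware access are hypothetical; the parameter list is assumed to match
 * the call made by the handler above.
 */
static int demo_read_counters(struct ib_counters *counters,
			      struct ib_counters_read_attr *read_attr,
			      struct uverbs_attr_bundle *attrs)
{
	u32 i;

	/* A real driver would read its HW counter object here. */
	for (i = 0; i < read_attr->ncounters; i++)
		read_attr->counters_buff[i] = 0;

	return 0;
}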
/* * Copyright (c) 2015, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/configfs.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> #include "core_priv.h" #include "cma_priv.h" struct cma_device; struct cma_dev_group; struct cma_dev_port_group { u32 port_num; struct cma_dev_group *cma_dev_group; struct config_group group; }; struct cma_dev_group { char name[IB_DEVICE_NAME_MAX]; struct config_group device_group; struct config_group ports_group; struct cma_dev_port_group *ports; }; static struct cma_dev_port_group *to_dev_port_group(struct config_item *item) { struct config_group *group; if (!item) return NULL; group = container_of(item, struct config_group, cg_item); return container_of(group, struct cma_dev_port_group, group); } static bool filter_by_name(struct ib_device *ib_dev, void *cookie) { return !strcmp(dev_name(&ib_dev->dev), cookie); } static int cma_configfs_params_get(struct config_item *item, struct cma_device **pcma_dev, struct cma_dev_port_group **pgroup) { struct cma_dev_port_group *group = to_dev_port_group(item); struct cma_device *cma_dev; if (!group) return -ENODEV; cma_dev = cma_enum_devices_by_ibdev(filter_by_name, group->cma_dev_group->name); if (!cma_dev) return -ENODEV; *pcma_dev = cma_dev; *pgroup = group; return 0; } static void cma_configfs_params_put(struct cma_device *cma_dev) { cma_dev_put(cma_dev); } static ssize_t default_roce_mode_show(struct config_item *item, char *buf) { struct cma_device *cma_dev; struct cma_dev_port_group *group; int gid_type; ssize_t ret; ret = cma_configfs_params_get(item, &cma_dev, &group); if (ret) return ret; gid_type = cma_get_default_gid_type(cma_dev, group->port_num); cma_configfs_params_put(cma_dev); if (gid_type < 0) return gid_type; return sysfs_emit(buf, "%s\n", ib_cache_gid_type_str(gid_type)); } static ssize_t default_roce_mode_store(struct config_item *item, const char *buf, size_t count) { struct cma_device *cma_dev; struct cma_dev_port_group *group; int gid_type; ssize_t ret; ret = cma_configfs_params_get(item, &cma_dev, &group); if (ret) return ret; gid_type = ib_cache_gid_parse_type_str(buf); if (gid_type < 0) { cma_configfs_params_put(cma_dev); return -EINVAL; } ret = cma_set_default_gid_type(cma_dev, group->port_num, gid_type); 
cma_configfs_params_put(cma_dev); return !ret ? strnlen(buf, count) : ret; } CONFIGFS_ATTR(, default_roce_mode); static ssize_t default_roce_tos_show(struct config_item *item, char *buf) { struct cma_device *cma_dev; struct cma_dev_port_group *group; ssize_t ret; u8 tos; ret = cma_configfs_params_get(item, &cma_dev, &group); if (ret) return ret; tos = cma_get_default_roce_tos(cma_dev, group->port_num); cma_configfs_params_put(cma_dev); return sysfs_emit(buf, "%u\n", tos); } static ssize_t default_roce_tos_store(struct config_item *item, const char *buf, size_t count) { struct cma_device *cma_dev; struct cma_dev_port_group *group; ssize_t ret; u8 tos; ret = kstrtou8(buf, 0, &tos); if (ret) return ret; ret = cma_configfs_params_get(item, &cma_dev, &group); if (ret) return ret; ret = cma_set_default_roce_tos(cma_dev, group->port_num, tos); cma_configfs_params_put(cma_dev); return ret ? ret : strnlen(buf, count); } CONFIGFS_ATTR(, default_roce_tos); static struct configfs_attribute *cma_configfs_attributes[] = { &attr_default_roce_mode, &attr_default_roce_tos, NULL, }; static const struct config_item_type cma_port_group_type = { .ct_attrs = cma_configfs_attributes, .ct_owner = THIS_MODULE }; static int make_cma_ports(struct cma_dev_group *cma_dev_group, struct cma_device *cma_dev) { struct cma_dev_port_group *ports; struct ib_device *ibdev; u32 ports_num; u32 i; ibdev = cma_get_ib_dev(cma_dev); if (!ibdev) return -ENODEV; ports_num = ibdev->phys_port_cnt; ports = kcalloc(ports_num, sizeof(*cma_dev_group->ports), GFP_KERNEL); if (!ports) return -ENOMEM; for (i = 0; i < ports_num; i++) { char port_str[10]; ports[i].port_num = i + 1; snprintf(port_str, sizeof(port_str), "%u", i + 1); ports[i].cma_dev_group = cma_dev_group; config_group_init_type_name(&ports[i].group, port_str, &cma_port_group_type); configfs_add_default_group(&ports[i].group, &cma_dev_group->ports_group); } cma_dev_group->ports = ports; return 0; } static void release_cma_dev(struct config_item *item) { struct config_group *group = container_of(item, struct config_group, cg_item); struct cma_dev_group *cma_dev_group = container_of(group, struct cma_dev_group, device_group); kfree(cma_dev_group); }; static void release_cma_ports_group(struct config_item *item) { struct config_group *group = container_of(item, struct config_group, cg_item); struct cma_dev_group *cma_dev_group = container_of(group, struct cma_dev_group, ports_group); kfree(cma_dev_group->ports); cma_dev_group->ports = NULL; }; static struct configfs_item_operations cma_ports_item_ops = { .release = release_cma_ports_group }; static const struct config_item_type cma_ports_group_type = { .ct_item_ops = &cma_ports_item_ops, .ct_owner = THIS_MODULE }; static struct configfs_item_operations cma_device_item_ops = { .release = release_cma_dev }; static const struct config_item_type cma_device_group_type = { .ct_item_ops = &cma_device_item_ops, .ct_owner = THIS_MODULE }; static struct config_group *make_cma_dev(struct config_group *group, const char *name) { int err = -ENODEV; struct cma_device *cma_dev = cma_enum_devices_by_ibdev(filter_by_name, (void *)name); struct cma_dev_group *cma_dev_group = NULL; if (!cma_dev) goto fail; cma_dev_group = kzalloc(sizeof(*cma_dev_group), GFP_KERNEL); if (!cma_dev_group) { err = -ENOMEM; goto fail; } strscpy(cma_dev_group->name, name, sizeof(cma_dev_group->name)); config_group_init_type_name(&cma_dev_group->ports_group, "ports", &cma_ports_group_type); err = make_cma_ports(cma_dev_group, cma_dev); if (err) goto fail; 
config_group_init_type_name(&cma_dev_group->device_group, name, &cma_device_group_type); configfs_add_default_group(&cma_dev_group->ports_group, &cma_dev_group->device_group); cma_dev_put(cma_dev); return &cma_dev_group->device_group; fail: if (cma_dev) cma_dev_put(cma_dev); kfree(cma_dev_group); return ERR_PTR(err); } static void drop_cma_dev(struct config_group *cgroup, struct config_item *item) { struct config_group *group = container_of(item, struct config_group, cg_item); struct cma_dev_group *cma_dev_group = container_of(group, struct cma_dev_group, device_group); configfs_remove_default_groups(&cma_dev_group->ports_group); configfs_remove_default_groups(&cma_dev_group->device_group); config_item_put(item); } static struct configfs_group_operations cma_subsys_group_ops = { .make_group = make_cma_dev, .drop_item = drop_cma_dev, }; static const struct config_item_type cma_subsys_type = { .ct_group_ops = &cma_subsys_group_ops, .ct_owner = THIS_MODULE, }; static struct configfs_subsystem cma_subsys = { .su_group = { .cg_item = { .ci_namebuf = "rdma_cm", .ci_type = &cma_subsys_type, }, }, }; int __init cma_configfs_init(void) { int ret; config_group_init(&cma_subsys.su_group); mutex_init(&cma_subsys.su_mutex); ret = configfs_register_subsystem(&cma_subsys); if (ret) mutex_destroy(&cma_subsys.su_mutex); return ret; } void __exit cma_configfs_exit(void) { configfs_unregister_subsystem(&cma_subsys); mutex_destroy(&cma_subsys.su_mutex); }
linux-master
drivers/infiniband/core/cma_configfs.c
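The cma_configfs.c record above leans on one recurring idiom: configfs show/store callbacks receive only a struct config_item, and the driver recovers its per-port state with two container_of() steps (item -> group -> cma_dev_port_group). The sketch below is a minimal user-space illustration of that walk, not kernel code; struct port_group, to_port_group() and the local container_of macro are hypothetical stand-ins assuming nothing beyond the standard offsetof() trick.

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel types; the real struct config_item and
 * struct config_group live in <linux/configfs.h>. */
struct config_item { const char *ci_name; };
struct config_group { struct config_item cg_item; };

/* Same layout idea as the driver's cma_dev_port_group: a config_group
 * embedded in a larger private structure. */
struct port_group {
    unsigned int port_num;
    struct config_group group;
};

/* Portable equivalent of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Mirror of to_dev_port_group(): walk item -> group -> private struct. */
static struct port_group *to_port_group(struct config_item *item)
{
    struct config_group *group;

    if (!item)
        return NULL;
    group = container_of(item, struct config_group, cg_item);
    return container_of(group, struct port_group, group);
}

int main(void)
{
    struct port_group pg = {
        .port_num = 2,
        .group = { .cg_item = { .ci_name = "2" } },
    };

    /* configfs hands the callbacks only the config_item pointer; two
     * container_of() steps recover the per-port state. */
    printf("port %u\n", to_port_group(&pg.group.cg_item)->port_num);
    return 0;
}

The same layout trick is what lets release_cma_dev() and release_cma_ports_group() in the record free the correct containing structure starting from nothing but the config_item passed to their .release hooks.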
/* * Copyright (c) 2005 Intel Inc. All rights reserved. * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved. * Copyright (c) 2014 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/slab.h> #include "mad_priv.h" #include "mad_rmpp.h" enum rmpp_state { RMPP_STATE_ACTIVE, RMPP_STATE_TIMEOUT, RMPP_STATE_COMPLETE }; struct mad_rmpp_recv { struct ib_mad_agent_private *agent; struct list_head list; struct delayed_work timeout_work; struct delayed_work cleanup_work; struct completion comp; enum rmpp_state state; spinlock_t lock; refcount_t refcount; struct ib_ah *ah; struct ib_mad_recv_wc *rmpp_wc; struct ib_mad_recv_buf *cur_seg_buf; int last_ack; int seg_num; int newwin; int repwin; __be64 tid; u32 src_qp; u32 slid; u8 mgmt_class; u8 class_version; u8 method; u8 base_version; }; static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) { if (refcount_dec_and_test(&rmpp_recv->refcount)) complete(&rmpp_recv->comp); } static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) { deref_rmpp_recv(rmpp_recv); wait_for_completion(&rmpp_recv->comp); rdma_destroy_ah(rmpp_recv->ah, RDMA_DESTROY_AH_SLEEPABLE); kfree(rmpp_recv); } void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent) { struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv; unsigned long flags; spin_lock_irqsave(&agent->lock, flags); list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { cancel_delayed_work(&rmpp_recv->timeout_work); cancel_delayed_work(&rmpp_recv->cleanup_work); } spin_unlock_irqrestore(&agent->lock, flags); flush_workqueue(agent->qp_info->port_priv->wq); list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv, &agent->rmpp_list, list) { list_del(&rmpp_recv->list); if (rmpp_recv->state != RMPP_STATE_COMPLETE) ib_free_recv_mad(rmpp_recv->rmpp_wc); destroy_rmpp_recv(rmpp_recv); } } static void format_ack(struct ib_mad_send_buf *msg, struct ib_rmpp_mad *data, struct mad_rmpp_recv *rmpp_recv) { struct ib_rmpp_mad *ack = msg->mad; unsigned long flags; memcpy(ack, &data->mad_hdr, msg->hdr_len); ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP; ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK; ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); spin_lock_irqsave(&rmpp_recv->lock, flags); rmpp_recv->last_ack = rmpp_recv->seg_num; 
ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num); ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin); spin_unlock_irqrestore(&rmpp_recv->lock, flags); } static void ack_recv(struct mad_rmpp_recv *rmpp_recv, struct ib_mad_recv_wc *recv_wc) { struct ib_mad_send_buf *msg; int ret, hdr_len; hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, recv_wc->wc->pkey_index, 1, hdr_len, 0, GFP_KERNEL, IB_MGMT_BASE_VERSION); if (IS_ERR(msg)) return; format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv); msg->ah = rmpp_recv->ah; ret = ib_post_send_mad(msg, NULL); if (ret) ib_free_send_mad(msg); } static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent, struct ib_mad_recv_wc *recv_wc) { struct ib_mad_send_buf *msg; struct ib_ah *ah; int hdr_len; ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc, recv_wc->recv_buf.grh, agent->port_num); if (IS_ERR(ah)) return (void *) ah; hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); msg = ib_create_send_mad(agent, recv_wc->wc->src_qp, recv_wc->wc->pkey_index, 1, hdr_len, 0, GFP_KERNEL, IB_MGMT_BASE_VERSION); if (IS_ERR(msg)) rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE); else { msg->ah = ah; msg->context[0] = ah; } return msg; } static void ack_ds_ack(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *recv_wc) { struct ib_mad_send_buf *msg; struct ib_rmpp_mad *rmpp_mad; int ret; msg = alloc_response_msg(&agent->agent, recv_wc); if (IS_ERR(msg)) return; rmpp_mad = msg->mad; memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len); rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP; ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); rmpp_mad->rmpp_hdr.seg_num = 0; rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1); ret = ib_post_send_mad(msg, NULL); if (ret) { rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE); ib_free_send_mad(msg); } } void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc) { if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah) rdma_destroy_ah(mad_send_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE); ib_free_send_mad(mad_send_wc->send_buf); } static void nack_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *recv_wc, u8 rmpp_status) { struct ib_mad_send_buf *msg; struct ib_rmpp_mad *rmpp_mad; int ret; msg = alloc_response_msg(&agent->agent, recv_wc); if (IS_ERR(msg)) return; rmpp_mad = msg->mad; memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len); rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP; rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION; rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT; ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status; rmpp_mad->rmpp_hdr.seg_num = 0; rmpp_mad->rmpp_hdr.paylen_newwin = 0; ret = ib_post_send_mad(msg, NULL); if (ret) { rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE); ib_free_send_mad(msg); } } static void recv_timeout_handler(struct work_struct *work) { struct mad_rmpp_recv *rmpp_recv = container_of(work, struct mad_rmpp_recv, timeout_work.work); struct ib_mad_recv_wc *rmpp_wc; unsigned long flags; spin_lock_irqsave(&rmpp_recv->agent->lock, flags); if (rmpp_recv->state != RMPP_STATE_ACTIVE) { spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); return; } rmpp_recv->state = RMPP_STATE_TIMEOUT; list_del(&rmpp_recv->list); spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); rmpp_wc = 
rmpp_recv->rmpp_wc; nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L); destroy_rmpp_recv(rmpp_recv); ib_free_recv_mad(rmpp_wc); } static void recv_cleanup_handler(struct work_struct *work) { struct mad_rmpp_recv *rmpp_recv = container_of(work, struct mad_rmpp_recv, cleanup_work.work); unsigned long flags; spin_lock_irqsave(&rmpp_recv->agent->lock, flags); list_del(&rmpp_recv->list); spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); destroy_rmpp_recv(rmpp_recv); } static struct mad_rmpp_recv * create_rmpp_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; struct ib_mad_hdr *mad_hdr; rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL); if (!rmpp_recv) return NULL; rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd, mad_recv_wc->wc, mad_recv_wc->recv_buf.grh, agent->agent.port_num); if (IS_ERR(rmpp_recv->ah)) goto error; rmpp_recv->agent = agent; init_completion(&rmpp_recv->comp); INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler); INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler); spin_lock_init(&rmpp_recv->lock); rmpp_recv->state = RMPP_STATE_ACTIVE; refcount_set(&rmpp_recv->refcount, 1); rmpp_recv->rmpp_wc = mad_recv_wc; rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf; rmpp_recv->newwin = 1; rmpp_recv->seg_num = 1; rmpp_recv->last_ack = 0; rmpp_recv->repwin = 1; mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr; rmpp_recv->tid = mad_hdr->tid; rmpp_recv->src_qp = mad_recv_wc->wc->src_qp; rmpp_recv->slid = mad_recv_wc->wc->slid; rmpp_recv->mgmt_class = mad_hdr->mgmt_class; rmpp_recv->class_version = mad_hdr->class_version; rmpp_recv->method = mad_hdr->method; rmpp_recv->base_version = mad_hdr->base_version; return rmpp_recv; error: kfree(rmpp_recv); return NULL; } static struct mad_rmpp_recv * find_rmpp_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr; list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { if (rmpp_recv->tid == mad_hdr->tid && rmpp_recv->src_qp == mad_recv_wc->wc->src_qp && rmpp_recv->slid == mad_recv_wc->wc->slid && rmpp_recv->mgmt_class == mad_hdr->mgmt_class && rmpp_recv->class_version == mad_hdr->class_version && rmpp_recv->method == mad_hdr->method) return rmpp_recv; } return NULL; } static struct mad_rmpp_recv * acquire_rmpp_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; unsigned long flags; spin_lock_irqsave(&agent->lock, flags); rmpp_recv = find_rmpp_recv(agent, mad_recv_wc); if (rmpp_recv) refcount_inc(&rmpp_recv->refcount); spin_unlock_irqrestore(&agent->lock, flags); return rmpp_recv; } static struct mad_rmpp_recv * insert_rmpp_recv(struct ib_mad_agent_private *agent, struct mad_rmpp_recv *rmpp_recv) { struct mad_rmpp_recv *cur_rmpp_recv; cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc); if (!cur_rmpp_recv) list_add_tail(&rmpp_recv->list, &agent->rmpp_list); return cur_rmpp_recv; } static inline int get_last_flag(struct ib_mad_recv_buf *seg) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *) seg->mad; return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST; } static inline int get_seg_num(struct ib_mad_recv_buf *seg) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *) seg->mad; return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); } static inline struct ib_mad_recv_buf *get_next_seg(struct list_head *rmpp_list, 
struct ib_mad_recv_buf *seg) { if (seg->list.next == rmpp_list) return NULL; return container_of(seg->list.next, struct ib_mad_recv_buf, list); } static inline int window_size(struct ib_mad_agent_private *agent) { return max(agent->qp_info->recv_queue.max_active >> 3, 1); } static struct ib_mad_recv_buf *find_seg_location(struct list_head *rmpp_list, int seg_num) { struct ib_mad_recv_buf *seg_buf; int cur_seg_num; list_for_each_entry_reverse(seg_buf, rmpp_list, list) { cur_seg_num = get_seg_num(seg_buf); if (seg_num > cur_seg_num) return seg_buf; if (seg_num == cur_seg_num) break; } return NULL; } static void update_seg_num(struct mad_rmpp_recv *rmpp_recv, struct ib_mad_recv_buf *new_buf) { struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list; while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) { rmpp_recv->cur_seg_buf = new_buf; rmpp_recv->seg_num++; new_buf = get_next_seg(rmpp_list, new_buf); } } static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv) { struct ib_rmpp_mad *rmpp_mad; int hdr_size, data_size, pad; bool opa = rdma_cap_opa_mad(rmpp_recv->agent->qp_info->port_priv->device, rmpp_recv->agent->qp_info->port_priv->port_num); rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad; hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); if (opa && rmpp_recv->base_version == OPA_MGMT_BASE_VERSION) { data_size = sizeof(struct opa_rmpp_mad) - hdr_size; pad = OPA_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); if (pad > OPA_MGMT_RMPP_DATA || pad < 0) pad = 0; } else { data_size = sizeof(struct ib_rmpp_mad) - hdr_size; pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); if (pad > IB_MGMT_RMPP_DATA || pad < 0) pad = 0; } return hdr_size + rmpp_recv->seg_num * data_size - pad; } static struct ib_mad_recv_wc *complete_rmpp(struct mad_rmpp_recv *rmpp_recv) { struct ib_mad_recv_wc *rmpp_wc; ack_recv(rmpp_recv, rmpp_recv->rmpp_wc); if (rmpp_recv->seg_num > 1) cancel_delayed_work(&rmpp_recv->timeout_work); rmpp_wc = rmpp_recv->rmpp_wc; rmpp_wc->mad_len = get_mad_len(rmpp_recv); /* 10 seconds until we can find the packet lifetime */ queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq, &rmpp_recv->cleanup_work, msecs_to_jiffies(10000)); return rmpp_wc; } static struct ib_mad_recv_wc * continue_rmpp(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; struct ib_mad_recv_buf *prev_buf; struct ib_mad_recv_wc *done_wc; int seg_num; unsigned long flags; rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc); if (!rmpp_recv) goto drop1; seg_num = get_seg_num(&mad_recv_wc->recv_buf); spin_lock_irqsave(&rmpp_recv->lock, flags); if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) || (seg_num > rmpp_recv->newwin)) goto drop3; if ((seg_num <= rmpp_recv->last_ack) || (rmpp_recv->state == RMPP_STATE_COMPLETE)) { spin_unlock_irqrestore(&rmpp_recv->lock, flags); ack_recv(rmpp_recv, mad_recv_wc); goto drop2; } prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num); if (!prev_buf) goto drop3; done_wc = NULL; list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list); if (rmpp_recv->cur_seg_buf == prev_buf) { update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf); if (get_last_flag(rmpp_recv->cur_seg_buf)) { rmpp_recv->state = RMPP_STATE_COMPLETE; spin_unlock_irqrestore(&rmpp_recv->lock, flags); done_wc = complete_rmpp(rmpp_recv); goto out; } else if (rmpp_recv->seg_num == rmpp_recv->newwin) { rmpp_recv->newwin += window_size(agent); spin_unlock_irqrestore(&rmpp_recv->lock, 
flags); ack_recv(rmpp_recv, mad_recv_wc); goto out; } } spin_unlock_irqrestore(&rmpp_recv->lock, flags); out: deref_rmpp_recv(rmpp_recv); return done_wc; drop3: spin_unlock_irqrestore(&rmpp_recv->lock, flags); drop2: deref_rmpp_recv(rmpp_recv); drop1: ib_free_recv_mad(mad_recv_wc); return NULL; } static struct ib_mad_recv_wc * start_rmpp(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; unsigned long flags; rmpp_recv = create_rmpp_recv(agent, mad_recv_wc); if (!rmpp_recv) { ib_free_recv_mad(mad_recv_wc); return NULL; } spin_lock_irqsave(&agent->lock, flags); if (insert_rmpp_recv(agent, rmpp_recv)) { spin_unlock_irqrestore(&agent->lock, flags); /* duplicate first MAD */ destroy_rmpp_recv(rmpp_recv); return continue_rmpp(agent, mad_recv_wc); } refcount_inc(&rmpp_recv->refcount); if (get_last_flag(&mad_recv_wc->recv_buf)) { rmpp_recv->state = RMPP_STATE_COMPLETE; spin_unlock_irqrestore(&agent->lock, flags); complete_rmpp(rmpp_recv); } else { spin_unlock_irqrestore(&agent->lock, flags); /* 40 seconds until we can find the packet lifetimes */ queue_delayed_work(agent->qp_info->port_priv->wq, &rmpp_recv->timeout_work, msecs_to_jiffies(40000)); rmpp_recv->newwin += window_size(agent); ack_recv(rmpp_recv, mad_recv_wc); mad_recv_wc = NULL; } deref_rmpp_recv(rmpp_recv); return mad_recv_wc; } static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_mad *rmpp_mad; int timeout; u32 paylen = 0; rmpp_mad = mad_send_wr->send_buf.mad; ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num); if (mad_send_wr->seg_num == 1) { rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST; paylen = (mad_send_wr->send_buf.seg_count * mad_send_wr->send_buf.seg_rmpp_size) - mad_send_wr->pad; } if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) { rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST; paylen = mad_send_wr->send_buf.seg_rmpp_size - mad_send_wr->pad; } rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen); /* 2 seconds for an ACK until we can find the packet lifetime */ timeout = mad_send_wr->send_buf.timeout_ms; if (!timeout || timeout > 2000) mad_send_wr->timeout = msecs_to_jiffies(2000); return ib_send_mad(mad_send_wr); } static void abort_send(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc wc; unsigned long flags; spin_lock_irqsave(&agent->lock, flags); mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); if (!mad_send_wr) goto out; /* Unmatched send */ if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) || (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) goto out; /* Send is already done */ ib_mark_mad_done(mad_send_wr); spin_unlock_irqrestore(&agent->lock, flags); wc.status = IB_WC_REM_ABORT_ERR; wc.vendor_err = rmpp_status; wc.send_buf = &mad_send_wr->send_buf; ib_mad_complete_send_wr(mad_send_wr, &wc); return; out: spin_unlock_irqrestore(&agent->lock, flags); } static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr, int seg_num) { struct list_head *list; wr->last_ack = seg_num; list = &wr->last_ack_seg->list; list_for_each_entry(wr->last_ack_seg, list, list) if (wr->last_ack_seg->num == seg_num) break; } static void process_ds_ack(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc, int newwin) { struct mad_rmpp_recv *rmpp_recv; rmpp_recv = 
find_rmpp_recv(agent, mad_recv_wc); if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE) rmpp_recv->repwin = newwin; } static void process_rmpp_ack(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_rmpp_mad *rmpp_mad; unsigned long flags; int seg_num, newwin, ret; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (rmpp_mad->rmpp_hdr.rmpp_status) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); return; } seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); if (newwin < seg_num) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); return; } spin_lock_irqsave(&agent->lock, flags); mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); if (!mad_send_wr) { if (!seg_num) process_ds_ack(agent, mad_recv_wc, newwin); goto out; /* Unmatched or DS RMPP ACK */ } if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) && (mad_send_wr->timeout)) { spin_unlock_irqrestore(&agent->lock, flags); ack_ds_ack(agent, mad_recv_wc); return; /* Repeated ACK for DS RMPP transaction */ } if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) || (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) goto out; /* Send is already done */ if (seg_num > mad_send_wr->send_buf.seg_count || seg_num > mad_send_wr->newwin) { spin_unlock_irqrestore(&agent->lock, flags); abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); return; } if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack) goto out; /* Old ACK */ if (seg_num > mad_send_wr->last_ack) { adjust_last_ack(mad_send_wr, seg_num); mad_send_wr->retries_left = mad_send_wr->max_retries; } mad_send_wr->newwin = newwin; if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) { /* If no response is expected, the ACK completes the send */ if (!mad_send_wr->send_buf.timeout_ms) { struct ib_mad_send_wc wc; ib_mark_mad_done(mad_send_wr); spin_unlock_irqrestore(&agent->lock, flags); wc.status = IB_WC_SUCCESS; wc.vendor_err = 0; wc.send_buf = &mad_send_wr->send_buf; ib_mad_complete_send_wr(mad_send_wr, &wc); return; } if (mad_send_wr->refcount == 1) ib_reset_mad_timeout(mad_send_wr, mad_send_wr->send_buf.timeout_ms); spin_unlock_irqrestore(&agent->lock, flags); ack_ds_ack(agent, mad_recv_wc); return; } else if (mad_send_wr->refcount == 1 && mad_send_wr->seg_num < mad_send_wr->newwin && mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) { /* Send failure will just result in a timeout/retry */ ret = send_next_seg(mad_send_wr); if (ret) goto out; mad_send_wr->refcount++; list_move_tail(&mad_send_wr->agent_list, &mad_send_wr->mad_agent_priv->send_list); } out: spin_unlock_irqrestore(&agent->lock, flags); } static struct ib_mad_recv_wc * process_rmpp_data(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_hdr *rmpp_hdr; u8 rmpp_status; rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr; if (rmpp_hdr->rmpp_status) { rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS; goto bad; } if (rmpp_hdr->seg_num == cpu_to_be32(1)) { if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) { rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG; goto bad; } return start_rmpp(agent, mad_recv_wc); } else { if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) { 
rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG; goto bad; } return continue_rmpp(agent, mad_recv_wc); } bad: nack_recv(agent, mad_recv_wc, rmpp_status); ib_free_recv_mad(mad_recv_wc); return NULL; } static void process_rmpp_stop(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); } else abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status); } static void process_rmpp_abort(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN || rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); } else abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status); } struct ib_mad_recv_wc * ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE)) return mad_recv_wc; if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); goto out; } switch (rmpp_mad->rmpp_hdr.rmpp_type) { case IB_MGMT_RMPP_TYPE_DATA: return process_rmpp_data(agent, mad_recv_wc); case IB_MGMT_RMPP_TYPE_ACK: process_rmpp_ack(agent, mad_recv_wc); break; case IB_MGMT_RMPP_TYPE_STOP: process_rmpp_stop(agent, mad_recv_wc); break; case IB_MGMT_RMPP_TYPE_ABORT: process_rmpp_abort(agent, mad_recv_wc); break; default: abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); break; } out: ib_free_recv_mad(mad_recv_wc); return NULL; } static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv; struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad; struct mad_rmpp_recv *rmpp_recv; struct rdma_ah_attr ah_attr; unsigned long flags; int newwin = 1; if (!(mad_hdr->method & IB_MGMT_METHOD_RESP)) goto out; spin_lock_irqsave(&agent->lock, flags); list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { if (rmpp_recv->tid != mad_hdr->tid || rmpp_recv->mgmt_class != mad_hdr->mgmt_class || rmpp_recv->class_version != mad_hdr->class_version || (rmpp_recv->method & IB_MGMT_METHOD_RESP)) continue; if (rdma_query_ah(mad_send_wr->send_buf.ah, &ah_attr)) continue; if (rmpp_recv->slid == rdma_ah_get_dlid(&ah_attr)) { newwin = rmpp_recv->repwin; break; } } spin_unlock_irqrestore(&agent->lock, flags); out: return newwin; } int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_mad *rmpp_mad; int ret; rmpp_mad = mad_send_wr->send_buf.mad; if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) return IB_RMPP_RESULT_UNHANDLED; if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) { mad_send_wr->seg_num = 1; return IB_RMPP_RESULT_INTERNAL; } mad_send_wr->newwin = init_newwin(mad_send_wr); /* We need to wait for the final ACK even if there isn't a response */ 
mad_send_wr->refcount += (mad_send_wr->timeout == 0); ret = send_next_seg(mad_send_wr); if (!ret) return IB_RMPP_RESULT_CONSUMED; return ret; } int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_send_wc *mad_send_wc) { struct ib_rmpp_mad *rmpp_mad; int ret; rmpp_mad = mad_send_wr->send_buf.mad; if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */ if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */ if (mad_send_wc->status != IB_WC_SUCCESS || mad_send_wr->status != IB_WC_SUCCESS) return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */ if (!mad_send_wr->timeout) return IB_RMPP_RESULT_PROCESSED; /* Response received */ if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) { mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); return IB_RMPP_RESULT_PROCESSED; /* Send done */ } if (mad_send_wr->seg_num == mad_send_wr->newwin || mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */ ret = send_next_seg(mad_send_wr); if (ret) { mad_send_wc->status = IB_WC_GENERAL_ERR; return IB_RMPP_RESULT_PROCESSED; } return IB_RMPP_RESULT_CONSUMED; } int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_mad *rmpp_mad; int ret; rmpp_mad = mad_send_wr->send_buf.mad; if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */ if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) return IB_RMPP_RESULT_PROCESSED; mad_send_wr->seg_num = mad_send_wr->last_ack; mad_send_wr->cur_seg = mad_send_wr->last_ack_seg; ret = send_next_seg(mad_send_wr); if (ret) return IB_RMPP_RESULT_PROCESSED; return IB_RMPP_RESULT_CONSUMED; }
linux-master
drivers/infiniband/core/mad_rmpp.c
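One dense spot in the mad_rmpp.c record is get_mad_len(): once the final segment arrives, the reassembled MAD length is the class header plus one full RMPP data area per segment, minus the padding implied by paylen_newwin in that last segment. The user-space sketch below redoes only that arithmetic for the plain IB (non-OPA) branch; the constants MAD_SIZE and RMPP_DATA and the sample numbers in main() are assumptions for illustration, not values taken from the record.

#include <stdio.h>

/* Assumed constants mirroring the IB (non-OPA) path: a MAD is 256 bytes
 * and the RMPP data area is 220 bytes. */
#define MAD_SIZE    256
#define RMPP_DATA   220

/* Reassembled length = header + one data area per segment, minus the
 * unused bytes of the last segment reported via paylen_newwin. */
static int rmpp_total_len(int hdr_size, int seg_num, int last_paylen)
{
    int data_size = MAD_SIZE - hdr_size;
    int pad = RMPP_DATA - last_paylen;

    if (pad > RMPP_DATA || pad < 0)   /* bogus paylen: ignore padding */
        pad = 0;
    return hdr_size + seg_num * data_size - pad;
}

int main(void)
{
    /* Hypothetical transfer: 36-byte class header, 3 segments, and the
     * last segment carrying 100 payload bytes. */
    printf("%d\n", rmpp_total_len(36, 3, 100));
    return 0;
}

With those assumed numbers the sketch reports 576 bytes (36 + 3 * 220 - 120), which is the same hdr_size + seg_num * data_size - pad shape the record computes before handing the completed receive work completion back to the MAD layer.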
/* * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <rdma/rdma_user_ioctl.h> #include <rdma/uverbs_ioctl.h> #include "rdma_core.h" #include "uverbs.h" struct bundle_alloc_head { struct bundle_alloc_head *next; u8 data[]; }; struct bundle_priv { /* Must be first */ struct bundle_alloc_head alloc_head; struct bundle_alloc_head *allocated_mem; size_t internal_avail; size_t internal_used; struct radix_tree_root *radix; const struct uverbs_api_ioctl_method *method_elm; void __rcu **radix_slots; unsigned long radix_slots_len; u32 method_key; struct ib_uverbs_attr __user *user_attrs; struct ib_uverbs_attr *uattrs; DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN); DECLARE_BITMAP(spec_finalize, UVERBS_API_ATTR_BKEY_LEN); DECLARE_BITMAP(uobj_hw_obj_valid, UVERBS_API_ATTR_BKEY_LEN); /* * Must be last. bundle ends in a flex array which overlaps * internal_buffer. */ struct uverbs_attr_bundle bundle; u64 internal_buffer[32]; }; /* * Each method has an absolute minimum amount of memory it needs to allocate, * precompute that amount and determine if the onstack memory can be used or * if allocation is need. */ void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm, unsigned int num_attrs) { struct bundle_priv *pbundle; size_t bundle_size = offsetof(struct bundle_priv, internal_buffer) + sizeof(*pbundle->bundle.attrs) * method_elm->key_bitmap_len + sizeof(*pbundle->uattrs) * num_attrs; method_elm->use_stack = bundle_size <= sizeof(*pbundle); method_elm->bundle_size = ALIGN(bundle_size + 256, sizeof(*pbundle->internal_buffer)); /* Do not want order-2 allocations for this. */ WARN_ON_ONCE(method_elm->bundle_size > PAGE_SIZE); } /** * _uverbs_alloc() - Quickly allocate memory for use with a bundle * @bundle: The bundle * @size: Number of bytes to allocate * @flags: Allocator flags * * The bundle allocator is intended for allocations that are connected with * processing the system call related to the bundle. The allocated memory is * always freed once the system call completes, and cannot be freed any other * way. * * This tries to use a small pool of pre-allocated memory for performance. 
*/ __malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size, gfp_t flags) { struct bundle_priv *pbundle = container_of(bundle, struct bundle_priv, bundle); size_t new_used; void *res; if (check_add_overflow(size, pbundle->internal_used, &new_used)) return ERR_PTR(-EOVERFLOW); if (new_used > pbundle->internal_avail) { struct bundle_alloc_head *buf; buf = kvmalloc(struct_size(buf, data, size), flags); if (!buf) return ERR_PTR(-ENOMEM); buf->next = pbundle->allocated_mem; pbundle->allocated_mem = buf; return buf->data; } res = (void *)pbundle->internal_buffer + pbundle->internal_used; pbundle->internal_used = ALIGN(new_used, sizeof(*pbundle->internal_buffer)); if (want_init_on_alloc(flags)) memset(res, 0, size); return res; } EXPORT_SYMBOL(_uverbs_alloc); static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr, u16 len) { if (uattr->len > sizeof_field(struct ib_uverbs_attr, data)) return ib_is_buffer_cleared(u64_to_user_ptr(uattr->data) + len, uattr->len - len); return !memchr_inv((const void *)&uattr->data + len, 0, uattr->len - len); } static int uverbs_set_output(const struct uverbs_attr_bundle *bundle, const struct uverbs_attr *attr) { struct bundle_priv *pbundle = container_of(bundle, struct bundle_priv, bundle); u16 flags; flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | UVERBS_ATTR_F_VALID_OUTPUT; if (put_user(flags, &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) return -EFAULT; return 0; } static int uverbs_process_idrs_array(struct bundle_priv *pbundle, const struct uverbs_api_attr *attr_uapi, struct uverbs_objs_arr_attr *attr, struct ib_uverbs_attr *uattr, u32 attr_bkey) { const struct uverbs_attr_spec *spec = &attr_uapi->spec; size_t array_len; u32 *idr_vals; int ret = 0; size_t i; if (uattr->attr_data.reserved) return -EINVAL; if (uattr->len % sizeof(u32)) return -EINVAL; array_len = uattr->len / sizeof(u32); if (array_len < spec->u2.objs_arr.min_len || array_len > spec->u2.objs_arr.max_len) return -EINVAL; attr->uobjects = uverbs_alloc(&pbundle->bundle, array_size(array_len, sizeof(*attr->uobjects))); if (IS_ERR(attr->uobjects)) return PTR_ERR(attr->uobjects); /* * Since idr is 4B and *uobjects is >= 4B, we can use attr->uobjects * to store idrs array and avoid additional memory allocation. The * idrs array is offset to the end of the uobjects array so we will be * able to read idr and replace with a pointer. 
*/ idr_vals = (u32 *)(attr->uobjects + array_len) - array_len; if (uattr->len > sizeof(uattr->data)) { ret = copy_from_user(idr_vals, u64_to_user_ptr(uattr->data), uattr->len); if (ret) return -EFAULT; } else { memcpy(idr_vals, &uattr->data, uattr->len); } for (i = 0; i != array_len; i++) { attr->uobjects[i] = uverbs_get_uobject_from_file( spec->u2.objs_arr.obj_type, spec->u2.objs_arr.access, idr_vals[i], &pbundle->bundle); if (IS_ERR(attr->uobjects[i])) { ret = PTR_ERR(attr->uobjects[i]); break; } } attr->len = i; __set_bit(attr_bkey, pbundle->spec_finalize); return ret; } static void uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi, struct uverbs_objs_arr_attr *attr, bool commit, struct uverbs_attr_bundle *attrs) { const struct uverbs_attr_spec *spec = &attr_uapi->spec; size_t i; for (i = 0; i != attr->len; i++) uverbs_finalize_object(attr->uobjects[i], spec->u2.objs_arr.access, false, commit, attrs); } static int uverbs_process_attr(struct bundle_priv *pbundle, const struct uverbs_api_attr *attr_uapi, struct ib_uverbs_attr *uattr, u32 attr_bkey) { const struct uverbs_attr_spec *spec = &attr_uapi->spec; struct uverbs_attr *e = &pbundle->bundle.attrs[attr_bkey]; const struct uverbs_attr_spec *val_spec = spec; struct uverbs_obj_attr *o_attr; switch (spec->type) { case UVERBS_ATTR_TYPE_ENUM_IN: if (uattr->attr_data.enum_data.elem_id >= spec->u.enum_def.num_elems) return -EOPNOTSUPP; if (uattr->attr_data.enum_data.reserved) return -EINVAL; val_spec = &spec->u2.enum_def.ids[uattr->attr_data.enum_data.elem_id]; /* Currently we only support PTR_IN based enums */ if (val_spec->type != UVERBS_ATTR_TYPE_PTR_IN) return -EOPNOTSUPP; e->ptr_attr.enum_id = uattr->attr_data.enum_data.elem_id; fallthrough; case UVERBS_ATTR_TYPE_PTR_IN: /* Ensure that any data provided by userspace beyond the known * struct is zero. Userspace that knows how to use some future * longer struct will fail here if used with an old kernel and * non-zero content, making ABI compat/discovery simpler. */ if (uattr->len > val_spec->u.ptr.len && val_spec->zero_trailing && !uverbs_is_attr_cleared(uattr, val_spec->u.ptr.len)) return -EOPNOTSUPP; fallthrough; case UVERBS_ATTR_TYPE_PTR_OUT: if (uattr->len < val_spec->u.ptr.min_len || (!val_spec->zero_trailing && uattr->len > val_spec->u.ptr.len)) return -EINVAL; if (spec->type != UVERBS_ATTR_TYPE_ENUM_IN && uattr->attr_data.reserved) return -EINVAL; e->ptr_attr.uattr_idx = uattr - pbundle->uattrs; e->ptr_attr.len = uattr->len; if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(e)) { void *p; p = uverbs_alloc(&pbundle->bundle, uattr->len); if (IS_ERR(p)) return PTR_ERR(p); e->ptr_attr.ptr = p; if (copy_from_user(p, u64_to_user_ptr(uattr->data), uattr->len)) return -EFAULT; } else { e->ptr_attr.data = uattr->data; } break; case UVERBS_ATTR_TYPE_IDR: case UVERBS_ATTR_TYPE_FD: if (uattr->attr_data.reserved) return -EINVAL; if (uattr->len != 0) return -EINVAL; o_attr = &e->obj_attr; o_attr->attr_elm = attr_uapi; /* * The type of uattr->data is u64 for UVERBS_ATTR_TYPE_IDR and * s64 for UVERBS_ATTR_TYPE_FD. 
We can cast the u64 to s64 * here without caring about truncation as we know that the * IDR implementation today rejects negative IDs */ o_attr->uobject = uverbs_get_uobject_from_file( spec->u.obj.obj_type, spec->u.obj.access, uattr->data_s64, &pbundle->bundle); if (IS_ERR(o_attr->uobject)) return PTR_ERR(o_attr->uobject); __set_bit(attr_bkey, pbundle->uobj_finalize); if (spec->u.obj.access == UVERBS_ACCESS_NEW) { unsigned int uattr_idx = uattr - pbundle->uattrs; s64 id = o_attr->uobject->id; /* Copy the allocated id to the user-space */ if (put_user(id, &pbundle->user_attrs[uattr_idx].data)) return -EFAULT; } break; case UVERBS_ATTR_TYPE_RAW_FD: if (uattr->attr_data.reserved || uattr->len != 0 || uattr->data_s64 < INT_MIN || uattr->data_s64 > INT_MAX) return -EINVAL; /* _uverbs_get_const_signed() is the accessor */ e->ptr_attr.data = uattr->data_s64; break; case UVERBS_ATTR_TYPE_IDRS_ARRAY: return uverbs_process_idrs_array(pbundle, attr_uapi, &e->objs_arr_attr, uattr, attr_bkey); default: return -EOPNOTSUPP; } return 0; } /* * We search the radix tree with the method prefix and now we want to fast * search the suffix bits to get a particular attribute pointer. It is not * totally clear to me if this breaks the radix tree encasulation or not, but * it uses the iter data to determine if the method iter points at the same * chunk that will store the attribute, if so it just derefs it directly. By * construction in most kernel configs the method and attrs will all fit in a * single radix chunk, so in most cases this will have no search. Other cases * this falls back to a full search. */ static void __rcu **uapi_get_attr_for_method(struct bundle_priv *pbundle, u32 attr_key) { void __rcu **slot; if (likely(attr_key < pbundle->radix_slots_len)) { void *entry; slot = pbundle->radix_slots + attr_key; entry = rcu_dereference_raw(*slot); if (likely(!radix_tree_is_internal_node(entry) && entry)) return slot; } return radix_tree_lookup_slot(pbundle->radix, pbundle->method_key | attr_key); } static int uverbs_set_attr(struct bundle_priv *pbundle, struct ib_uverbs_attr *uattr) { u32 attr_key = uapi_key_attr(uattr->attr_id); u32 attr_bkey = uapi_bkey_attr(attr_key); const struct uverbs_api_attr *attr; void __rcu **slot; int ret; slot = uapi_get_attr_for_method(pbundle, attr_key); if (!slot) { /* * Kernel does not support the attribute but user-space says it * is mandatory */ if (uattr->flags & UVERBS_ATTR_F_MANDATORY) return -EPROTONOSUPPORT; return 0; } attr = rcu_dereference_protected(*slot, true); /* Reject duplicate attributes from user-space */ if (test_bit(attr_bkey, pbundle->bundle.attr_present)) return -EINVAL; ret = uverbs_process_attr(pbundle, attr, uattr, attr_bkey); if (ret) return ret; __set_bit(attr_bkey, pbundle->bundle.attr_present); return 0; } static int ib_uverbs_run_method(struct bundle_priv *pbundle, unsigned int num_attrs) { int (*handler)(struct uverbs_attr_bundle *attrs); size_t uattrs_size = array_size(sizeof(*pbundle->uattrs), num_attrs); unsigned int destroy_bkey = pbundle->method_elm->destroy_bkey; unsigned int i; int ret; /* See uverbs_disassociate_api() */ handler = srcu_dereference( pbundle->method_elm->handler, &pbundle->bundle.ufile->device->disassociate_srcu); if (!handler) return -EIO; pbundle->uattrs = uverbs_alloc(&pbundle->bundle, uattrs_size); if (IS_ERR(pbundle->uattrs)) return PTR_ERR(pbundle->uattrs); if (copy_from_user(pbundle->uattrs, pbundle->user_attrs, uattrs_size)) return -EFAULT; for (i = 0; i != num_attrs; i++) { ret = uverbs_set_attr(pbundle, 
&pbundle->uattrs[i]); if (unlikely(ret)) return ret; } /* User space did not provide all the mandatory attributes */ if (unlikely(!bitmap_subset(pbundle->method_elm->attr_mandatory, pbundle->bundle.attr_present, pbundle->method_elm->key_bitmap_len))) return -EINVAL; if (pbundle->method_elm->has_udata) uverbs_fill_udata(&pbundle->bundle, &pbundle->bundle.driver_udata, UVERBS_ATTR_UHW_IN, UVERBS_ATTR_UHW_OUT); else pbundle->bundle.driver_udata = (struct ib_udata){}; if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) { struct uverbs_obj_attr *destroy_attr = &pbundle->bundle.attrs[destroy_bkey].obj_attr; ret = uobj_destroy(destroy_attr->uobject, &pbundle->bundle); if (ret) return ret; __clear_bit(destroy_bkey, pbundle->uobj_finalize); ret = handler(&pbundle->bundle); uobj_put_destroy(destroy_attr->uobject); } else { ret = handler(&pbundle->bundle); } /* * Until the drivers are revised to use the bundle directly we have to * assume that the driver wrote to its UHW_OUT and flag userspace * appropriately. */ if (!ret && pbundle->method_elm->has_udata) { const struct uverbs_attr *attr = uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT); if (!IS_ERR(attr)) ret = uverbs_set_output(&pbundle->bundle, attr); } /* * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can * not invoke the method because the request is not supported. No * other cases should return this code. */ if (WARN_ON_ONCE(ret == -EPROTONOSUPPORT)) return -EINVAL; return ret; } static void bundle_destroy(struct bundle_priv *pbundle, bool commit) { unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len; struct bundle_alloc_head *memblock; unsigned int i; /* fast path for simple uobjects */ i = -1; while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len, i + 1)) < key_bitmap_len) { struct uverbs_attr *attr = &pbundle->bundle.attrs[i]; uverbs_finalize_object( attr->obj_attr.uobject, attr->obj_attr.attr_elm->spec.u.obj.access, test_bit(i, pbundle->uobj_hw_obj_valid), commit, &pbundle->bundle); } i = -1; while ((i = find_next_bit(pbundle->spec_finalize, key_bitmap_len, i + 1)) < key_bitmap_len) { struct uverbs_attr *attr = &pbundle->bundle.attrs[i]; const struct uverbs_api_attr *attr_uapi; void __rcu **slot; slot = uapi_get_attr_for_method( pbundle, pbundle->method_key | uapi_bkey_to_key_attr(i)); if (WARN_ON(!slot)) continue; attr_uapi = rcu_dereference_protected(*slot, true); if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) { uverbs_free_idrs_array(attr_uapi, &attr->objs_arr_attr, commit, &pbundle->bundle); } } for (memblock = pbundle->allocated_mem; memblock;) { struct bundle_alloc_head *tmp = memblock; memblock = memblock->next; kvfree(tmp); } } static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile, struct ib_uverbs_ioctl_hdr *hdr, struct ib_uverbs_attr __user *user_attrs) { const struct uverbs_api_ioctl_method *method_elm; struct uverbs_api *uapi = ufile->device->uapi; struct radix_tree_iter attrs_iter; struct bundle_priv *pbundle; struct bundle_priv onstack; void __rcu **slot; int ret; if (unlikely(hdr->driver_id != uapi->driver_id)) return -EINVAL; slot = radix_tree_iter_lookup( &uapi->radix, &attrs_iter, uapi_key_obj(hdr->object_id) | uapi_key_ioctl_method(hdr->method_id)); if (unlikely(!slot)) return -EPROTONOSUPPORT; method_elm = rcu_dereference_protected(*slot, true); if (!method_elm->use_stack) { pbundle = kmalloc(method_elm->bundle_size, GFP_KERNEL); if (!pbundle) return -ENOMEM; pbundle->internal_avail = method_elm->bundle_size - offsetof(struct bundle_priv, internal_buffer); 
pbundle->alloc_head.next = NULL; pbundle->allocated_mem = &pbundle->alloc_head; } else { pbundle = &onstack; pbundle->internal_avail = sizeof(pbundle->internal_buffer); pbundle->allocated_mem = NULL; } /* Space for the pbundle->bundle.attrs flex array */ pbundle->method_elm = method_elm; pbundle->method_key = attrs_iter.index; pbundle->bundle.ufile = ufile; pbundle->bundle.context = NULL; /* only valid if bundle has uobject */ pbundle->radix = &uapi->radix; pbundle->radix_slots = slot; pbundle->radix_slots_len = radix_tree_chunk_size(&attrs_iter); pbundle->user_attrs = user_attrs; pbundle->internal_used = ALIGN(pbundle->method_elm->key_bitmap_len * sizeof(*pbundle->bundle.attrs), sizeof(*pbundle->internal_buffer)); memset(pbundle->bundle.attr_present, 0, sizeof(pbundle->bundle.attr_present)); memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize)); memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize)); memset(pbundle->uobj_hw_obj_valid, 0, sizeof(pbundle->uobj_hw_obj_valid)); ret = ib_uverbs_run_method(pbundle, hdr->num_attrs); bundle_destroy(pbundle, ret == 0); return ret; } long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct ib_uverbs_file *file = filp->private_data; struct ib_uverbs_ioctl_hdr __user *user_hdr = (struct ib_uverbs_ioctl_hdr __user *)arg; struct ib_uverbs_ioctl_hdr hdr; int srcu_key; int err; if (unlikely(cmd != RDMA_VERBS_IOCTL)) return -ENOIOCTLCMD; err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); if (err) return -EFAULT; if (hdr.length > PAGE_SIZE || hdr.length != struct_size(&hdr, attrs, hdr.num_attrs)) return -EINVAL; if (hdr.reserved1 || hdr.reserved2) return -EPROTONOSUPPORT; srcu_key = srcu_read_lock(&file->device->disassociate_srcu); err = ib_uverbs_cmd_verbs(file, &hdr, user_hdr->attrs); srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); return err; } int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, u64 allowed_bits) { const struct uverbs_attr *attr; u64 flags; attr = uverbs_attr_get(attrs_bundle, idx); /* Missing attribute means 0 flags */ if (IS_ERR(attr)) { *to = 0; return 0; } /* * New userspace code should use 8 bytes to pass flags, but we * transparently support old userspaces that were using 4 bytes as * well. */ if (attr->ptr_attr.len == 8) flags = attr->ptr_attr.data; else if (attr->ptr_attr.len == 4) flags = *(u32 *)&attr->ptr_attr.data; else return -EINVAL; if (flags & ~allowed_bits) return -EINVAL; *to = flags; return 0; } EXPORT_SYMBOL(uverbs_get_flags64); int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, u64 allowed_bits) { u64 flags; int ret; ret = uverbs_get_flags64(&flags, attrs_bundle, idx, allowed_bits); if (ret) return ret; if (flags > U32_MAX) return -EINVAL; *to = flags; return 0; } EXPORT_SYMBOL(uverbs_get_flags32); /* * Fill a ib_udata struct (core or uhw) using the given attribute IDs. * This is primarily used to convert the UVERBS_ATTR_UHW() into the * ib_udata format used by the drivers. 
*/ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle, struct ib_udata *udata, unsigned int attr_in, unsigned int attr_out) { struct bundle_priv *pbundle = container_of(bundle, struct bundle_priv, bundle); const struct uverbs_attr *in = uverbs_attr_get(&pbundle->bundle, attr_in); const struct uverbs_attr *out = uverbs_attr_get(&pbundle->bundle, attr_out); if (!IS_ERR(in)) { udata->inlen = in->ptr_attr.len; if (uverbs_attr_ptr_is_inline(in)) udata->inbuf = &pbundle->user_attrs[in->ptr_attr.uattr_idx] .data; else udata->inbuf = u64_to_user_ptr(in->ptr_attr.data); } else { udata->inbuf = NULL; udata->inlen = 0; } if (!IS_ERR(out)) { udata->outbuf = u64_to_user_ptr(out->ptr_attr.data); udata->outlen = out->ptr_attr.len; } else { udata->outbuf = NULL; udata->outlen = 0; } } int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, const void *from, size_t size) { const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); size_t min_size; if (IS_ERR(attr)) return PTR_ERR(attr); min_size = min_t(size_t, attr->ptr_attr.len, size); if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) return -EFAULT; return uverbs_set_output(bundle, attr); } EXPORT_SYMBOL(uverbs_copy_to); /* * This is only used if the caller has directly used copy_to_use to write the * data. It signals to user space that the buffer is filled in. */ int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx) { const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); if (IS_ERR(attr)) return PTR_ERR(attr); return uverbs_set_output(bundle, attr); } int _uverbs_get_const_signed(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, s64 lower_bound, u64 upper_bound, s64 *def_val) { const struct uverbs_attr *attr; attr = uverbs_attr_get(attrs_bundle, idx); if (IS_ERR(attr)) { if ((PTR_ERR(attr) != -ENOENT) || !def_val) return PTR_ERR(attr); *to = *def_val; } else { *to = attr->ptr_attr.data; } if (*to < lower_bound || (*to > 0 && (u64)*to > upper_bound)) return -EINVAL; return 0; } EXPORT_SYMBOL(_uverbs_get_const_signed); int _uverbs_get_const_unsigned(u64 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, u64 upper_bound, u64 *def_val) { const struct uverbs_attr *attr; attr = uverbs_attr_get(attrs_bundle, idx); if (IS_ERR(attr)) { if ((PTR_ERR(attr) != -ENOENT) || !def_val) return PTR_ERR(attr); *to = *def_val; } else { *to = attr->ptr_attr.data; } if (*to > upper_bound) return -EINVAL; return 0; } EXPORT_SYMBOL(_uverbs_get_const_unsigned); int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, size_t idx, const void *from, size_t size) { const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); if (IS_ERR(attr)) return PTR_ERR(attr); if (size < attr->ptr_attr.len) { if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size, attr->ptr_attr.len - size)) return -EFAULT; } return uverbs_copy_to(bundle, idx, from, size); } EXPORT_SYMBOL(uverbs_copy_to_struct_or_zero); /* Once called an abort will call through to the type's destroy_hw() */ void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *bundle, u16 idx) { struct bundle_priv *pbundle = container_of(bundle, struct bundle_priv, bundle); __set_bit(uapi_bkey_attr(uapi_key_attr(idx)), pbundle->uobj_hw_obj_valid); } EXPORT_SYMBOL(uverbs_finalize_uobj_create);
linux-master
drivers/infiniband/core/uverbs_ioctl.c
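The uverbs_ioctl.c record builds every ioctl around a "bundle" whose _uverbs_alloc() carves small allocations out of a preallocated buffer and only falls back to the heap for oversized requests, chaining those chunks so they can all be freed in one pass when the bundle is destroyed. The fragment below is a stripped-down user-space sketch of that pattern, assuming a fixed 256-byte pool and plain malloc()/free(); struct bundle, bundle_alloc() and bundle_destroy() are hypothetical names, and the kernel's overflow checks, kvmalloc and want_init_on_alloc handling are deliberately left out.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Chain head for fallback allocations, shaped like bundle_alloc_head. */
struct alloc_head {
    struct alloc_head *next;
    char data[];
};

/* Tiny bump allocator: carve from a fixed internal buffer first, fall back
 * to malloc() when it runs out, and remember every fallback chunk. */
struct bundle {
    struct alloc_head *allocated;   /* fallback chunks to free later */
    size_t used;                    /* bytes consumed from internal[] */
    unsigned char internal[256];    /* stand-in for internal_buffer[] */
};

static void *bundle_alloc(struct bundle *b, size_t size)
{
    if (b->used + size > sizeof(b->internal)) {
        struct alloc_head *buf = malloc(sizeof(*buf) + size);

        if (!buf)
            return NULL;
        buf->next = b->allocated;
        b->allocated = buf;
        return buf->data;
    }
    void *res = b->internal + b->used;
    /* keep later carves 8-byte aligned, like ALIGN(new_used, sizeof(u64)) */
    b->used = (b->used + size + 7) & ~(size_t)7;
    return res;
}

static void bundle_destroy(struct bundle *b)
{
    while (b->allocated) {
        struct alloc_head *tmp = b->allocated;

        b->allocated = tmp->next;
        free(tmp);
    }
}

int main(void)
{
    struct bundle b = { 0 };
    char *small = bundle_alloc(&b, 64);    /* served from internal[] */
    char *large = bundle_alloc(&b, 4096);  /* falls back to malloc() */

    strcpy(small, "internal pool");
    strcpy(large, "heap fallback");
    printf("%s / %s\n", small, large);
    bundle_destroy(&b);    /* one pass frees every fallback chunk */
    return 0;
}

Keeping the common case inside the preallocated buffer is what lets the real code serve per-attribute allocations on the ioctl fast path without touching the allocator, while bundle_destroy() still reclaims everything, however it was obtained.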
/* * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <rdma/uverbs_std_types.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_verbs.h> #include <linux/bug.h> #include <linux/file.h> #include <rdma/restrack.h> #include "rdma_core.h" #include "uverbs.h" static int uverbs_free_ah(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { return rdma_destroy_ah_user((struct ib_ah *)uobject->object, RDMA_DESTROY_AH_SLEEPABLE, &attrs->driver_udata); } static int uverbs_free_flow(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { struct ib_flow *flow = (struct ib_flow *)uobject->object; struct ib_uflow_object *uflow = container_of(uobject, struct ib_uflow_object, uobject); struct ib_qp *qp = flow->qp; int ret; ret = flow->device->ops.destroy_flow(flow); if (!ret) { if (qp) atomic_dec(&qp->usecnt); ib_uverbs_flow_resources_free(uflow->resources); } return ret; } static int uverbs_free_mw(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { return uverbs_dealloc_mw((struct ib_mw *)uobject->object); } static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object; struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl; u32 table_size = (1 << rwq_ind_tbl->log_ind_tbl_size); int ret, i; if (atomic_read(&rwq_ind_tbl->usecnt)) return -EBUSY; ret = rwq_ind_tbl->device->ops.destroy_rwq_ind_table(rwq_ind_tbl); if (ret) return ret; for (i = 0; i < table_size; i++) atomic_dec(&ind_tbl[i]->usecnt); kfree(rwq_ind_tbl); kfree(ind_tbl); return 0; } static int uverbs_free_xrcd(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { struct ib_xrcd *xrcd = uobject->object; struct ib_uxrcd_object *uxrcd = container_of(uobject, struct ib_uxrcd_object, uobject); int ret; if (atomic_read(&uxrcd->refcnt)) return -EBUSY; mutex_lock(&attrs->ufile->device->xrcd_tree_mutex); ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why, attrs); mutex_unlock(&attrs->ufile->device->xrcd_tree_mutex); return ret; } static int uverbs_free_pd(struct ib_uobject *uobject, enum rdma_remove_reason why, 
struct uverbs_attr_bundle *attrs) { struct ib_pd *pd = uobject->object; if (atomic_read(&pd->usecnt)) return -EBUSY; return ib_dealloc_pd_user(pd, &attrs->driver_udata); } void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue) { struct ib_uverbs_event *entry, *tmp; spin_lock_irq(&event_queue->lock); /* * The user must ensure that no new items are added to the event_list * once is_closed is set. */ event_queue->is_closed = 1; spin_unlock_irq(&event_queue->lock); wake_up_interruptible(&event_queue->poll_wait); kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN); spin_lock_irq(&event_queue->lock); list_for_each_entry_safe(entry, tmp, &event_queue->event_list, list) { if (entry->counter) list_del(&entry->obj_list); list_del(&entry->list); kfree(entry); } spin_unlock_irq(&event_queue->lock); } static void uverbs_completion_event_file_destroy_uobj(struct ib_uobject *uobj, enum rdma_remove_reason why) { struct ib_uverbs_completion_event_file *file = container_of(uobj, struct ib_uverbs_completion_event_file, uobj); ib_uverbs_free_event_queue(&file->ev_queue); } int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs) { return 0; } EXPORT_SYMBOL(uverbs_destroy_def_handler); DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_COMP_CHANNEL, UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_completion_event_file), uverbs_completion_event_file_destroy_uobj, &uverbs_event_fops, "[infinibandevent]", O_RDONLY)); DECLARE_UVERBS_NAMED_METHOD_DESTROY( UVERBS_METHOD_MW_DESTROY, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MW_HANDLE, UVERBS_OBJECT_MW, UVERBS_ACCESS_DESTROY, UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MW, UVERBS_TYPE_ALLOC_IDR(uverbs_free_mw), &UVERBS_METHOD(UVERBS_METHOD_MW_DESTROY)); DECLARE_UVERBS_NAMED_METHOD_DESTROY( UVERBS_METHOD_AH_DESTROY, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_AH_HANDLE, UVERBS_OBJECT_AH, UVERBS_ACCESS_DESTROY, UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_AH, UVERBS_TYPE_ALLOC_IDR(uverbs_free_ah), &UVERBS_METHOD(UVERBS_METHOD_AH_DESTROY)); DECLARE_UVERBS_NAMED_METHOD_DESTROY( UVERBS_METHOD_FLOW_DESTROY, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_HANDLE, UVERBS_OBJECT_FLOW, UVERBS_ACCESS_DESTROY, UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_FLOW, UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uflow_object), uverbs_free_flow), &UVERBS_METHOD(UVERBS_METHOD_FLOW_DESTROY)); DECLARE_UVERBS_NAMED_METHOD_DESTROY( UVERBS_METHOD_RWQ_IND_TBL_DESTROY, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_RWQ_IND_TBL_HANDLE, UVERBS_OBJECT_RWQ_IND_TBL, UVERBS_ACCESS_DESTROY, UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL, UVERBS_TYPE_ALLOC_IDR(uverbs_free_rwq_ind_tbl), &UVERBS_METHOD(UVERBS_METHOD_RWQ_IND_TBL_DESTROY)); DECLARE_UVERBS_NAMED_METHOD_DESTROY( UVERBS_METHOD_XRCD_DESTROY, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_XRCD_HANDLE, UVERBS_OBJECT_XRCD, UVERBS_ACCESS_DESTROY, UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_XRCD, UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object), uverbs_free_xrcd), &UVERBS_METHOD(UVERBS_METHOD_XRCD_DESTROY)); DECLARE_UVERBS_NAMED_METHOD_DESTROY( UVERBS_METHOD_PD_DESTROY, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_PD_HANDLE, UVERBS_OBJECT_PD, UVERBS_ACCESS_DESTROY, UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_PD, UVERBS_TYPE_ALLOC_IDR(uverbs_free_pd), &UVERBS_METHOD(UVERBS_METHOD_PD_DESTROY)); const struct uapi_definition uverbs_def_obj_intf[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_PD, UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)), UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_COMP_CHANNEL, 
				      UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_AH,
				      UAPI_DEF_OBJ_NEEDS_FN(destroy_ah)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MW,
				      UAPI_DEF_OBJ_NEEDS_FN(dealloc_mw)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_FLOW,
				      UAPI_DEF_OBJ_NEEDS_FN(destroy_flow)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		UVERBS_OBJECT_RWQ_IND_TBL,
		UAPI_DEF_OBJ_NEEDS_FN(destroy_rwq_ind_table)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_XRCD,
				      UAPI_DEF_OBJ_NEEDS_FN(dealloc_xrcd)),
	{}
};
linux-master
drivers/infiniband/core/uverbs_std_types.c
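Note on the row above (uverbs_std_types.c): it declares destroy methods for the AH, MW, flow, RWQ indirection table, XRCD and PD uobjects and ends with the uverbs_def_obj_intf table, where each object tree is chained only if the driver supplies the required callback (UAPI_DEF_OBJ_NEEDS_FN). The fragment below is a small userspace model of that gating idea only; struct fake_driver_ops, struct uapi_entry and the dummy callbacks are invented for illustration and are not kernel API.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins for driver callbacks; not the kernel's types. */
typedef int (*op_fn)(void *obj);

struct fake_driver_ops {
    op_fn dealloc_pd;
    op_fn destroy_ah;
    op_fn dealloc_mw;
};

/* One table entry: object name plus the offset of the driver op it
 * requires, mirroring the spirit of UAPI_DEF_OBJ_NEEDS_FN(). */
struct uapi_entry {
    const char *object;
    size_t required_op;
};

#define NEEDS_FN(member) offsetof(struct fake_driver_ops, member)

static const struct uapi_entry table[] = {
    { "PD", NEEDS_FN(dealloc_pd) },
    { "AH", NEEDS_FN(destroy_ah) },
    { "MW", NEEDS_FN(dealloc_mw) },
};

static int dummy_dealloc_pd(void *pd) { (void)pd; return 0; }
static int dummy_destroy_ah(void *ah) { (void)ah; return 0; }

int main(void)
{
    /* This "driver" implements PD and AH teardown but not MW. */
    struct fake_driver_ops ops = {
        .dealloc_pd = dummy_dealloc_pd,
        .destroy_ah = dummy_destroy_ah,
    };
    size_t i;

    for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        /* Fetch the callback stored at the recorded offset. */
        op_fn fn = *(op_fn *)((char *)&ops + table[i].required_op);

        printf("%-2s: %s\n", table[i].object,
               fn ? "exposed" : "hidden (driver op missing)");
    }
    return 0;
}

The design point this models is that the uapi surface is data-driven: an object's methods are only registered when the device can actually tear the object down, instead of every handler checking for a missing op at call time.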
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2020 Mellanox Technologies. All rights reserved. */ #include <rdma/ib_verbs.h> #include <rdma/ib_cache.h> #include <rdma/lag.h> static struct sk_buff *rdma_build_skb(struct net_device *netdev, struct rdma_ah_attr *ah_attr, gfp_t flags) { struct ipv6hdr *ip6h; struct sk_buff *skb; struct ethhdr *eth; struct iphdr *iph; struct udphdr *uh; u8 smac[ETH_ALEN]; bool is_ipv4; int hdr_len; is_ipv4 = ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw); hdr_len = ETH_HLEN + sizeof(struct udphdr) + LL_RESERVED_SPACE(netdev); hdr_len += is_ipv4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr); skb = alloc_skb(hdr_len, flags); if (!skb) return NULL; skb->dev = netdev; skb_reserve(skb, hdr_len); skb_push(skb, sizeof(struct udphdr)); skb_reset_transport_header(skb); uh = udp_hdr(skb); uh->source = htons(rdma_flow_label_to_udp_sport(ah_attr->grh.flow_label)); uh->dest = htons(ROCE_V2_UDP_DPORT); uh->len = htons(sizeof(struct udphdr)); if (is_ipv4) { skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); iph = ip_hdr(skb); iph->frag_off = 0; iph->version = 4; iph->protocol = IPPROTO_UDP; iph->ihl = 0x5; iph->tot_len = htons(sizeof(struct udphdr) + sizeof(struct iphdr)); memcpy(&iph->saddr, ah_attr->grh.sgid_attr->gid.raw + 12, sizeof(struct in_addr)); memcpy(&iph->daddr, ah_attr->grh.dgid.raw + 12, sizeof(struct in_addr)); } else { skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); ip6h = ipv6_hdr(skb); ip6h->version = 6; ip6h->nexthdr = IPPROTO_UDP; memcpy(&ip6h->flow_lbl, &ah_attr->grh.flow_label, sizeof(*ip6h->flow_lbl)); memcpy(&ip6h->saddr, ah_attr->grh.sgid_attr->gid.raw, sizeof(struct in6_addr)); memcpy(&ip6h->daddr, ah_attr->grh.dgid.raw, sizeof(struct in6_addr)); } skb_push(skb, sizeof(struct ethhdr)); skb_reset_mac_header(skb); eth = eth_hdr(skb); skb->protocol = eth->h_proto = htons(is_ipv4 ? ETH_P_IP : ETH_P_IPV6); rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr, NULL, smac); memcpy(eth->h_source, smac, ETH_ALEN); memcpy(eth->h_dest, ah_attr->roce.dmac, ETH_ALEN); return skb; } static struct net_device *rdma_get_xmit_slave_udp(struct ib_device *device, struct net_device *master, struct rdma_ah_attr *ah_attr, gfp_t flags) { struct net_device *slave; struct sk_buff *skb; skb = rdma_build_skb(master, ah_attr, flags); if (!skb) return ERR_PTR(-ENOMEM); rcu_read_lock(); slave = netdev_get_xmit_slave(master, skb, !!(device->lag_flags & RDMA_LAG_FLAGS_HASH_ALL_SLAVES)); if (slave) dev_hold(slave); rcu_read_unlock(); kfree_skb(skb); return slave; } void rdma_lag_put_ah_roce_slave(struct net_device *xmit_slave) { if (xmit_slave) dev_put(xmit_slave); } struct net_device *rdma_lag_get_ah_roce_slave(struct ib_device *device, struct rdma_ah_attr *ah_attr, gfp_t flags) { struct net_device *slave = NULL; struct net_device *master; if (!(ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE && ah_attr->grh.sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP && ah_attr->grh.flow_label)) return NULL; rcu_read_lock(); master = rdma_read_gid_attr_ndev_rcu(ah_attr->grh.sgid_attr); if (IS_ERR(master)) { rcu_read_unlock(); return master; } dev_hold(master); rcu_read_unlock(); if (!netif_is_bond_master(master)) goto put; slave = rdma_get_xmit_slave_udp(device, master, ah_attr, flags); put: dev_put(master); return slave; }
linux-master
drivers/infiniband/core/lag.c
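Note on the lag.c row above: rdma_lag_get_ah_roce_slave() builds a throwaway Ethernet/IP/UDP skb purely so netdev_get_xmit_slave() can hash a RoCEv2 flow onto a stable bond slave, with the UDP source port derived from the GRH flow label via rdma_flow_label_to_udp_sport(). The standalone sketch below recreates that flow-label-to-source-port mapping in userspace; the xor-fold of the top 6 bits into the low 14 bits and the 0xC000 base are stated here as an assumption about that helper, so treat the constants as illustrative.

#include <stdint.h>
#include <stdio.h>

/*
 * Userspace sketch: map a 20-bit IPv6 flow label to a RoCEv2 UDP source
 * port. Assumption: the low 14 bits are xor-folded with the top 6 bits
 * and the result is placed in the 0xC000-0xFFFF dynamic port range,
 * which is how rdma_flow_label_to_udp_sport() is commonly described.
 */
static uint16_t flow_label_to_udp_sport(uint32_t flow_label)
{
    uint32_t fl_low  = flow_label & 0x03fff;
    uint32_t fl_high = flow_label & 0xfc000;

    fl_low ^= fl_high >> 14;

    return (uint16_t)(fl_low | 0xC000);
}

int main(void)
{
    uint32_t labels[] = { 0x00000, 0x12345, 0xfffff };
    int i;

    for (i = 0; i < 3; i++)
        printf("flow label 0x%05x -> udp sport 0x%04x\n",
               labels[i], flow_label_to_udp_sport(labels[i]));
    return 0;
}

Because the bonding driver hashes on the real header fields of the dummy skb, keeping the source port a pure function of the flow label keeps one RoCE flow pinned to one slave across packets.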
/* * Copyright (c) 2015, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "core_priv.h" #include <linux/in.h> #include <linux/in6.h> /* For in6_dev_get/in6_dev_put */ #include <net/addrconf.h> #include <net/bonding.h> #include <rdma/ib_cache.h> #include <rdma/ib_addr.h> static struct workqueue_struct *gid_cache_wq; enum gid_op_type { GID_DEL = 0, GID_ADD }; struct update_gid_event_work { struct work_struct work; union ib_gid gid; struct ib_gid_attr gid_attr; enum gid_op_type gid_op; }; #define ROCE_NETDEV_CALLBACK_SZ 3 struct netdev_event_work_cmd { roce_netdev_callback cb; roce_netdev_filter filter; struct net_device *ndev; struct net_device *filter_ndev; }; struct netdev_event_work { struct work_struct work; struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ]; }; static const struct { bool (*is_supported)(const struct ib_device *device, u32 port_num); enum ib_gid_type gid_type; } PORT_CAP_TO_GID_TYPE[] = { {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE}, {rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP}, }; #define CAP_TO_GID_TABLE_SIZE ARRAY_SIZE(PORT_CAP_TO_GID_TYPE) unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port) { int i; unsigned int ret_flags = 0; if (!rdma_protocol_roce(ib_dev, port)) return 1UL << IB_GID_TYPE_IB; for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++) if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port)) ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type; return ret_flags; } EXPORT_SYMBOL(roce_gid_type_mask_support); static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *gid_attr) { int i; unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port); for (i = 0; i < IB_GID_TYPE_SIZE; i++) { if ((1UL << i) & gid_type_mask) { gid_attr->gid_type = i; switch (gid_op) { case GID_ADD: ib_cache_gid_add(ib_dev, port, gid, gid_attr); break; case GID_DEL: ib_cache_gid_del(ib_dev, port, gid, gid_attr); break; } } } } enum bonding_slave_state { BONDING_SLAVE_STATE_ACTIVE = 1UL << 0, BONDING_SLAVE_STATE_INACTIVE = 1UL << 1, /* No primary slave or the device isn't a slave in bonding */ BONDING_SLAVE_STATE_NA = 1UL << 2, }; static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct 
net_device *dev, struct net_device *upper) { if (upper && netif_is_bond_master(upper)) { struct net_device *pdev = bond_option_active_slave_get_rcu(netdev_priv(upper)); if (pdev) return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE : BONDING_SLAVE_STATE_INACTIVE; } return BONDING_SLAVE_STATE_NA; } #define REQUIRED_BOND_STATES (BONDING_SLAVE_STATE_ACTIVE | \ BONDING_SLAVE_STATE_NA) static bool is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *real_dev; bool res; if (!rdma_ndev) return false; rcu_read_lock(); real_dev = rdma_vlan_dev_real_dev(cookie); if (!real_dev) real_dev = cookie; res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) && (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) & REQUIRED_BOND_STATES)) || real_dev == rdma_ndev); rcu_read_unlock(); return res; } static bool is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *master_dev; bool res; if (!rdma_ndev) return false; rcu_read_lock(); master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev); res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) == BONDING_SLAVE_STATE_INACTIVE; rcu_read_unlock(); return res; } /** * is_ndev_for_default_gid_filter - Check if a given netdevice * can be considered for default GIDs or not. * @ib_dev: IB device to check * @port: Port to consider for adding default GID * @rdma_ndev: rdma netdevice pointer * @cookie: Netdevice to consider to form a default GID * * is_ndev_for_default_gid_filter() returns true if a given netdevice can be * considered for deriving default RoCE GID, returns false otherwise. */ static bool is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *cookie_ndev = cookie; bool res; if (!rdma_ndev) return false; rcu_read_lock(); /* * When rdma netdevice is used in bonding, bonding master netdevice * should be considered for default GIDs. Therefore, ignore slave rdma * netdevices when bonding is considered. * Additionally when event(cookie) netdevice is bond master device, * make sure that it the upper netdevice of rdma netdevice. */ res = ((cookie_ndev == rdma_ndev && !netif_is_bond_slave(rdma_ndev)) || (netif_is_bond_master(cookie_ndev) && rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))); rcu_read_unlock(); return res; } static bool pass_all_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { return true; } static bool upper_device_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { bool res; if (!rdma_ndev) return false; if (rdma_ndev == cookie) return true; rcu_read_lock(); res = rdma_is_upper_dev_rcu(rdma_ndev, cookie); rcu_read_unlock(); return res; } /** * is_upper_ndev_bond_master_filter - Check if a given netdevice * is bond master device of netdevice of the RDMA device of port. * @ib_dev: IB device to check * @port: Port to consider for adding default GID * @rdma_ndev: Pointer to rdma netdevice * @cookie: Netdevice to consider to form a default GID * * is_upper_ndev_bond_master_filter() returns true if a cookie_netdev * is bond master device and rdma_ndev is its lower netdevice. It might * not have been established as slave device yet. 
*/ static bool is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *cookie_ndev = cookie; bool match = false; if (!rdma_ndev) return false; rcu_read_lock(); if (netif_is_bond_master(cookie_ndev) && rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)) match = true; rcu_read_unlock(); return match; } static void update_gid_ip(enum gid_op_type gid_op, struct ib_device *ib_dev, u32 port, struct net_device *ndev, struct sockaddr *addr) { union ib_gid gid; struct ib_gid_attr gid_attr; rdma_ip2gid(addr, &gid); memset(&gid_attr, 0, sizeof(gid_attr)); gid_attr.ndev = ndev; update_gid(gid_op, ib_dev, port, &gid, &gid_attr); } static void bond_delete_netdev_default_gids(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, struct net_device *event_ndev) { struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev); unsigned long gid_type_mask; if (!rdma_ndev) return; if (!real_dev) real_dev = event_ndev; rcu_read_lock(); if (((rdma_ndev != event_ndev && !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) || is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) == BONDING_SLAVE_STATE_INACTIVE)) { rcu_read_unlock(); return; } rcu_read_unlock(); gid_type_mask = roce_gid_type_mask_support(ib_dev, port); ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, gid_type_mask, IB_CACHE_GID_DEFAULT_MODE_DELETE); } static void enum_netdev_ipv4_ips(struct ib_device *ib_dev, u32 port, struct net_device *ndev) { const struct in_ifaddr *ifa; struct in_device *in_dev; struct sin_list { struct list_head list; struct sockaddr_in ip; }; struct sin_list *sin_iter; struct sin_list *sin_temp; LIST_HEAD(sin_list); if (ndev->reg_state >= NETREG_UNREGISTERING) return; rcu_read_lock(); in_dev = __in_dev_get_rcu(ndev); if (!in_dev) { rcu_read_unlock(); return; } in_dev_for_each_ifa_rcu(ifa, in_dev) { struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) continue; entry->ip.sin_family = AF_INET; entry->ip.sin_addr.s_addr = ifa->ifa_address; list_add_tail(&entry->list, &sin_list); } rcu_read_unlock(); list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) { update_gid_ip(GID_ADD, ib_dev, port, ndev, (struct sockaddr *)&sin_iter->ip); list_del(&sin_iter->list); kfree(sin_iter); } } static void enum_netdev_ipv6_ips(struct ib_device *ib_dev, u32 port, struct net_device *ndev) { struct inet6_ifaddr *ifp; struct inet6_dev *in6_dev; struct sin6_list { struct list_head list; struct sockaddr_in6 sin6; }; struct sin6_list *sin6_iter; struct sin6_list *sin6_temp; struct ib_gid_attr gid_attr = {.ndev = ndev}; LIST_HEAD(sin6_list); if (ndev->reg_state >= NETREG_UNREGISTERING) return; in6_dev = in6_dev_get(ndev); if (!in6_dev) return; read_lock_bh(&in6_dev->lock); list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) continue; entry->sin6.sin6_family = AF_INET6; entry->sin6.sin6_addr = ifp->addr; list_add_tail(&entry->list, &sin6_list); } read_unlock_bh(&in6_dev->lock); in6_dev_put(in6_dev); list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) { union ib_gid gid; rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid); update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr); list_del(&sin6_iter->list); kfree(sin6_iter); } } static void _add_netdev_ips(struct ib_device *ib_dev, u32 port, struct net_device *ndev) { enum_netdev_ipv4_ips(ib_dev, port, ndev); if (IS_ENABLED(CONFIG_IPV6)) enum_netdev_ipv6_ips(ib_dev, port, ndev); } static void 
add_netdev_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { _add_netdev_ips(ib_dev, port, cookie); } static void del_netdev_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie); } /** * del_default_gids - Delete default GIDs of the event/cookie netdevice * @ib_dev: RDMA device pointer * @port: Port of the RDMA device whose GID table to consider * @rdma_ndev: Unused rdma netdevice * @cookie: Pointer to event netdevice * * del_default_gids() deletes the default GIDs of the event/cookie netdevice. */ static void del_default_gids(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *cookie_ndev = cookie; unsigned long gid_type_mask; gid_type_mask = roce_gid_type_mask_support(ib_dev, port); ib_cache_gid_set_default_gid(ib_dev, port, cookie_ndev, gid_type_mask, IB_CACHE_GID_DEFAULT_MODE_DELETE); } static void add_default_gids(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *event_ndev = cookie; unsigned long gid_type_mask; gid_type_mask = roce_gid_type_mask_support(ib_dev, port); ib_cache_gid_set_default_gid(ib_dev, port, event_ndev, gid_type_mask, IB_CACHE_GID_DEFAULT_MODE_SET); } static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net *net; struct net_device *ndev; /* Lock the rtnl to make sure the netdevs does not move under * our feet */ rtnl_lock(); down_read(&net_rwsem); for_each_net(net) for_each_netdev(net, ndev) { /* * Filter and add default GIDs of the primary netdevice * when not in bonding mode, or add default GIDs * of bond master device, when in bonding mode. */ if (is_ndev_for_default_gid_filter(ib_dev, port, rdma_ndev, ndev)) add_default_gids(ib_dev, port, rdma_ndev, ndev); if (is_eth_port_of_netdev_filter(ib_dev, port, rdma_ndev, ndev)) _add_netdev_ips(ib_dev, port, ndev); } up_read(&net_rwsem); rtnl_unlock(); } /** * rdma_roce_rescan_device - Rescan all of the network devices in the system * and add their gids, as needed, to the relevant RoCE devices. 
* * @ib_dev: the rdma device */ void rdma_roce_rescan_device(struct ib_device *ib_dev) { ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL, enum_all_gids_of_dev_cb, NULL); } EXPORT_SYMBOL(rdma_roce_rescan_device); static void callback_for_addr_gid_device_scan(struct ib_device *device, u32 port, struct net_device *rdma_ndev, void *cookie) { struct update_gid_event_work *parsed = cookie; return update_gid(parsed->gid_op, device, port, &parsed->gid, &parsed->gid_attr); } struct upper_list { struct list_head list; struct net_device *upper; }; static int netdev_upper_walk(struct net_device *upper, struct netdev_nested_priv *priv) { struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC); struct list_head *upper_list = (struct list_head *)priv->data; if (!entry) return 0; list_add_tail(&entry->list, upper_list); dev_hold(upper); entry->upper = upper; return 0; } static void handle_netdev_upper(struct ib_device *ib_dev, u32 port, void *cookie, void (*handle_netdev)(struct ib_device *ib_dev, u32 port, struct net_device *ndev)) { struct net_device *ndev = cookie; struct netdev_nested_priv priv; struct upper_list *upper_iter; struct upper_list *upper_temp; LIST_HEAD(upper_list); priv.data = &upper_list; rcu_read_lock(); netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &priv); rcu_read_unlock(); handle_netdev(ib_dev, port, ndev); list_for_each_entry_safe(upper_iter, upper_temp, &upper_list, list) { handle_netdev(ib_dev, port, upper_iter->upper); dev_put(upper_iter->upper); list_del(&upper_iter->list); kfree(upper_iter); } } static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u32 port, struct net_device *event_ndev) { ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev); } static void del_netdev_upper_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids); } static void add_netdev_upper_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips); } static void del_netdev_default_ips_join(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *master_ndev; rcu_read_lock(); master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev); if (master_ndev) dev_hold(master_ndev); rcu_read_unlock(); if (master_ndev) { bond_delete_netdev_default_gids(ib_dev, port, rdma_ndev, master_ndev); dev_put(master_ndev); } } /* The following functions operate on all IB devices. netdevice_event and * addr_event execute ib_enum_all_roce_netdevs through a work. * ib_enum_all_roce_netdevs iterates through all IB devices. 
*/ static void netdevice_event_work_handler(struct work_struct *_work) { struct netdev_event_work *work = container_of(_work, struct netdev_event_work, work); unsigned int i; for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) { ib_enum_all_roce_netdevs(work->cmds[i].filter, work->cmds[i].filter_ndev, work->cmds[i].cb, work->cmds[i].ndev); dev_put(work->cmds[i].ndev); dev_put(work->cmds[i].filter_ndev); } kfree(work); } static int netdevice_queue_work(struct netdev_event_work_cmd *cmds, struct net_device *ndev) { unsigned int i; struct netdev_event_work *ndev_work = kmalloc(sizeof(*ndev_work), GFP_KERNEL); if (!ndev_work) return NOTIFY_DONE; memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds)); for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) { if (!ndev_work->cmds[i].ndev) ndev_work->cmds[i].ndev = ndev; if (!ndev_work->cmds[i].filter_ndev) ndev_work->cmds[i].filter_ndev = ndev; dev_hold(ndev_work->cmds[i].ndev); dev_hold(ndev_work->cmds[i].filter_ndev); } INIT_WORK(&ndev_work->work, netdevice_event_work_handler); queue_work(gid_cache_wq, &ndev_work->work); return NOTIFY_DONE; } static const struct netdev_event_work_cmd add_cmd = { .cb = add_netdev_ips, .filter = is_eth_port_of_netdev_filter }; static const struct netdev_event_work_cmd add_cmd_upper_ips = { .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev_filter }; static void ndev_event_unlink(struct netdev_notifier_changeupper_info *changeupper_info, struct netdev_event_work_cmd *cmds) { static const struct netdev_event_work_cmd upper_ips_del_cmd = { .cb = del_netdev_upper_ips, .filter = upper_device_filter }; cmds[0] = upper_ips_del_cmd; cmds[0].ndev = changeupper_info->upper_dev; cmds[1] = add_cmd; } static const struct netdev_event_work_cmd bonding_default_add_cmd = { .cb = add_default_gids, .filter = is_upper_ndev_bond_master_filter }; static void ndev_event_link(struct net_device *event_ndev, struct netdev_notifier_changeupper_info *changeupper_info, struct netdev_event_work_cmd *cmds) { static const struct netdev_event_work_cmd bonding_default_del_cmd = { .cb = del_default_gids, .filter = is_upper_ndev_bond_master_filter }; /* * When a lower netdev is linked to its upper bonding * netdev, delete lower slave netdev's default GIDs. 
*/ cmds[0] = bonding_default_del_cmd; cmds[0].ndev = event_ndev; cmds[0].filter_ndev = changeupper_info->upper_dev; /* Now add bonding upper device default GIDs */ cmds[1] = bonding_default_add_cmd; cmds[1].ndev = changeupper_info->upper_dev; cmds[1].filter_ndev = changeupper_info->upper_dev; /* Now add bonding upper device IP based GIDs */ cmds[2] = add_cmd_upper_ips; cmds[2].ndev = changeupper_info->upper_dev; cmds[2].filter_ndev = changeupper_info->upper_dev; } static void netdevice_event_changeupper(struct net_device *event_ndev, struct netdev_notifier_changeupper_info *changeupper_info, struct netdev_event_work_cmd *cmds) { if (changeupper_info->linking) ndev_event_link(event_ndev, changeupper_info, cmds); else ndev_event_unlink(changeupper_info, cmds); } static const struct netdev_event_work_cmd add_default_gid_cmd = { .cb = add_default_gids, .filter = is_ndev_for_default_gid_filter, }; static int netdevice_event(struct notifier_block *this, unsigned long event, void *ptr) { static const struct netdev_event_work_cmd del_cmd = { .cb = del_netdev_ips, .filter = pass_all_filter}; static const struct netdev_event_work_cmd bonding_default_del_cmd_join = { .cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave_filter }; static const struct netdev_event_work_cmd netdev_del_cmd = { .cb = del_netdev_ips, .filter = is_eth_port_of_netdev_filter }; static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = { .cb = del_netdev_upper_ips, .filter = upper_device_filter}; struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} }; if (ndev->type != ARPHRD_ETHER) return NOTIFY_DONE; switch (event) { case NETDEV_REGISTER: case NETDEV_UP: cmds[0] = bonding_default_del_cmd_join; cmds[1] = add_default_gid_cmd; cmds[2] = add_cmd; break; case NETDEV_UNREGISTER: if (ndev->reg_state < NETREG_UNREGISTERED) cmds[0] = del_cmd; else return NOTIFY_DONE; break; case NETDEV_CHANGEADDR: cmds[0] = netdev_del_cmd; if (ndev->reg_state == NETREG_REGISTERED) { cmds[1] = add_default_gid_cmd; cmds[2] = add_cmd; } break; case NETDEV_CHANGEUPPER: netdevice_event_changeupper(ndev, container_of(ptr, struct netdev_notifier_changeupper_info, info), cmds); break; case NETDEV_BONDING_FAILOVER: cmds[0] = bonding_event_ips_del_cmd; /* Add default GIDs of the bond device */ cmds[1] = bonding_default_add_cmd; /* Add IP based GIDs of the bond device */ cmds[2] = add_cmd_upper_ips; break; default: return NOTIFY_DONE; } return netdevice_queue_work(cmds, ndev); } static void update_gid_event_work_handler(struct work_struct *_work) { struct update_gid_event_work *work = container_of(_work, struct update_gid_event_work, work); ib_enum_all_roce_netdevs(is_eth_port_of_netdev_filter, work->gid_attr.ndev, callback_for_addr_gid_device_scan, work); dev_put(work->gid_attr.ndev); kfree(work); } static int addr_event(struct notifier_block *this, unsigned long event, struct sockaddr *sa, struct net_device *ndev) { struct update_gid_event_work *work; enum gid_op_type gid_op; if (ndev->type != ARPHRD_ETHER) return NOTIFY_DONE; switch (event) { case NETDEV_UP: gid_op = GID_ADD; break; case NETDEV_DOWN: gid_op = GID_DEL; break; default: return NOTIFY_DONE; } work = kmalloc(sizeof(*work), GFP_ATOMIC); if (!work) return NOTIFY_DONE; INIT_WORK(&work->work, update_gid_event_work_handler); rdma_ip2gid(sa, &work->gid); work->gid_op = gid_op; memset(&work->gid_attr, 0, sizeof(work->gid_attr)); dev_hold(ndev); work->gid_attr.ndev = ndev; queue_work(gid_cache_wq, 
&work->work); return NOTIFY_DONE; } static int inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct sockaddr_in in; struct net_device *ndev; struct in_ifaddr *ifa = ptr; in.sin_family = AF_INET; in.sin_addr.s_addr = ifa->ifa_address; ndev = ifa->ifa_dev->dev; return addr_event(this, event, (struct sockaddr *)&in, ndev); } static int inet6addr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct sockaddr_in6 in6; struct net_device *ndev; struct inet6_ifaddr *ifa6 = ptr; in6.sin6_family = AF_INET6; in6.sin6_addr = ifa6->addr; ndev = ifa6->idev->dev; return addr_event(this, event, (struct sockaddr *)&in6, ndev); } static struct notifier_block nb_netdevice = { .notifier_call = netdevice_event }; static struct notifier_block nb_inetaddr = { .notifier_call = inetaddr_event }; static struct notifier_block nb_inet6addr = { .notifier_call = inet6addr_event }; int __init roce_gid_mgmt_init(void) { gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0); if (!gid_cache_wq) return -ENOMEM; register_inetaddr_notifier(&nb_inetaddr); if (IS_ENABLED(CONFIG_IPV6)) register_inet6addr_notifier(&nb_inet6addr); /* We relay on the netdevice notifier to enumerate all * existing devices in the system. Register to this notifier * last to make sure we will not miss any IP add/del * callbacks. */ register_netdevice_notifier(&nb_netdevice); return 0; } void __exit roce_gid_mgmt_cleanup(void) { if (IS_ENABLED(CONFIG_IPV6)) unregister_inet6addr_notifier(&nb_inet6addr); unregister_inetaddr_notifier(&nb_inetaddr); unregister_netdevice_notifier(&nb_netdevice); /* Ensure all gid deletion tasks complete before we go down, * to avoid any reference to free'd memory. By the time * ib-core is removed, all physical devices have been removed, * so no issue with remaining hardware contexts. */ destroy_workqueue(gid_cache_wq); }
linux-master
drivers/infiniband/core/roce_gid_mgmt.c
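Note on the roce_gid_mgmt.c row above: enum_netdev_ipv4_ips() and update_gid_ip() convert each interface address into a GID with rdma_ip2gid() before programming the port's GID table, and the addr_event() notifier drives the matching GID_ADD/GID_DEL updates. For IPv4 the resulting GID is the IPv4-mapped IPv6 form ::ffff:a.b.c.d; the small self-contained program below builds that 16-byte GID so the layout is visible (ipv4_to_gid() is an illustrative helper name, not the kernel's).

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/*
 * Build a RoCE GID from an IPv4 address: 80 zero bits, then 0xffff,
 * then the 4 address bytes (the IPv4-mapped IPv6 format produced for
 * AF_INET addresses).
 */
static void ipv4_to_gid(struct in_addr addr, uint8_t gid[16])
{
    memset(gid, 0, 16);
    gid[10] = 0xff;
    gid[11] = 0xff;
    memcpy(&gid[12], &addr.s_addr, 4);
}

int main(void)
{
    struct in_addr addr;
    uint8_t gid[16];
    int i;

    inet_pton(AF_INET, "192.0.2.17", &addr);
    ipv4_to_gid(addr, gid);

    for (i = 0; i < 16; i++)
        printf("%02x%s", gid[i], (i & 1) && i != 15 ? ":" : "");
    printf("\n"); /* prints 0000:0000:0000:0000:0000:ffff:c000:0211 */
    return 0;
}

Because each IP address maps to exactly one such GID, a NETDEV_DOWN address event can be translated into a single GID_DEL of the corresponding table entry rather than a full rescan.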
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. */ #include <rdma/rdma_cm.h> #include <rdma/ib_verbs.h> #include <rdma/restrack.h> #include <rdma/rdma_counter.h> #include <linux/mutex.h> #include <linux/sched/task.h> #include <linux/pid_namespace.h> #include "cma_priv.h" #include "restrack.h" /** * rdma_restrack_init() - initialize and allocate resource tracking * @dev: IB device * * Return: 0 on success */ int rdma_restrack_init(struct ib_device *dev) { struct rdma_restrack_root *rt; int i; dev->res = kcalloc(RDMA_RESTRACK_MAX, sizeof(*rt), GFP_KERNEL); if (!dev->res) return -ENOMEM; rt = dev->res; for (i = 0; i < RDMA_RESTRACK_MAX; i++) xa_init_flags(&rt[i].xa, XA_FLAGS_ALLOC); return 0; } static const char *type2str(enum rdma_restrack_type type) { static const char * const names[RDMA_RESTRACK_MAX] = { [RDMA_RESTRACK_PD] = "PD", [RDMA_RESTRACK_CQ] = "CQ", [RDMA_RESTRACK_QP] = "QP", [RDMA_RESTRACK_CM_ID] = "CM_ID", [RDMA_RESTRACK_MR] = "MR", [RDMA_RESTRACK_CTX] = "CTX", [RDMA_RESTRACK_COUNTER] = "COUNTER", [RDMA_RESTRACK_SRQ] = "SRQ", }; return names[type]; }; /** * rdma_restrack_clean() - clean resource tracking * @dev: IB device */ void rdma_restrack_clean(struct ib_device *dev) { struct rdma_restrack_root *rt = dev->res; struct rdma_restrack_entry *e; char buf[TASK_COMM_LEN]; bool found = false; const char *owner; int i; for (i = 0 ; i < RDMA_RESTRACK_MAX; i++) { struct xarray *xa = &dev->res[i].xa; if (!xa_empty(xa)) { unsigned long index; if (!found) { pr_err("restrack: %s", CUT_HERE); dev_err(&dev->dev, "BUG: RESTRACK detected leak of resources\n"); } xa_for_each(xa, index, e) { if (rdma_is_kernel_res(e)) { owner = e->kern_name; } else { /* * There is no need to call get_task_struct here, * because we can be here only if there are more * get_task_struct() call than put_task_struct(). */ get_task_comm(buf, e->task); owner = buf; } pr_err("restrack: %s %s object allocated by %s is not freed\n", rdma_is_kernel_res(e) ? 
"Kernel" : "User", type2str(e->type), owner); } found = true; } xa_destroy(xa); } if (found) pr_err("restrack: %s", CUT_HERE); kfree(rt); } /** * rdma_restrack_count() - the current usage of specific object * @dev: IB device * @type: actual type of object to operate */ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type) { struct rdma_restrack_root *rt = &dev->res[type]; struct rdma_restrack_entry *e; XA_STATE(xas, &rt->xa, 0); u32 cnt = 0; xa_lock(&rt->xa); xas_for_each(&xas, e, U32_MAX) cnt++; xa_unlock(&rt->xa); return cnt; } EXPORT_SYMBOL(rdma_restrack_count); static struct ib_device *res_to_dev(struct rdma_restrack_entry *res) { switch (res->type) { case RDMA_RESTRACK_PD: return container_of(res, struct ib_pd, res)->device; case RDMA_RESTRACK_CQ: return container_of(res, struct ib_cq, res)->device; case RDMA_RESTRACK_QP: return container_of(res, struct ib_qp, res)->device; case RDMA_RESTRACK_CM_ID: return container_of(res, struct rdma_id_private, res)->id.device; case RDMA_RESTRACK_MR: return container_of(res, struct ib_mr, res)->device; case RDMA_RESTRACK_CTX: return container_of(res, struct ib_ucontext, res)->device; case RDMA_RESTRACK_COUNTER: return container_of(res, struct rdma_counter, res)->device; case RDMA_RESTRACK_SRQ: return container_of(res, struct ib_srq, res)->device; default: WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type); return NULL; } } /** * rdma_restrack_attach_task() - attach the task onto this resource, * valid for user space restrack entries. * @res: resource entry * @task: the task to attach */ static void rdma_restrack_attach_task(struct rdma_restrack_entry *res, struct task_struct *task) { if (WARN_ON_ONCE(!task)) return; if (res->task) put_task_struct(res->task); get_task_struct(task); res->task = task; res->user = true; } /** * rdma_restrack_set_name() - set the task for this resource * @res: resource entry * @caller: kernel name, the current task will be used if the caller is NULL. */ void rdma_restrack_set_name(struct rdma_restrack_entry *res, const char *caller) { if (caller) { res->kern_name = caller; return; } rdma_restrack_attach_task(res, current); } EXPORT_SYMBOL(rdma_restrack_set_name); /** * rdma_restrack_parent_name() - set the restrack name properties based * on parent restrack * @dst: destination resource entry * @parent: parent resource entry */ void rdma_restrack_parent_name(struct rdma_restrack_entry *dst, const struct rdma_restrack_entry *parent) { if (rdma_is_kernel_res(parent)) dst->kern_name = parent->kern_name; else rdma_restrack_attach_task(dst, parent->task); } EXPORT_SYMBOL(rdma_restrack_parent_name); /** * rdma_restrack_new() - Initializes new restrack entry to allow _put() interface * to release memory in fully automatic way. 
* @res: Entry to initialize * @type: REstrack type */ void rdma_restrack_new(struct rdma_restrack_entry *res, enum rdma_restrack_type type) { kref_init(&res->kref); init_completion(&res->comp); res->type = type; } EXPORT_SYMBOL(rdma_restrack_new); /** * rdma_restrack_add() - add object to the reource tracking database * @res: resource entry */ void rdma_restrack_add(struct rdma_restrack_entry *res) { struct ib_device *dev = res_to_dev(res); struct rdma_restrack_root *rt; int ret = 0; if (!dev) return; if (res->no_track) goto out; rt = &dev->res[res->type]; if (res->type == RDMA_RESTRACK_QP) { /* Special case to ensure that LQPN points to right QP */ struct ib_qp *qp = container_of(res, struct ib_qp, res); WARN_ONCE(qp->qp_num >> 24 || qp->port >> 8, "QP number 0x%0X and port 0x%0X", qp->qp_num, qp->port); res->id = qp->qp_num; if (qp->qp_type == IB_QPT_SMI || qp->qp_type == IB_QPT_GSI) res->id |= qp->port << 24; ret = xa_insert(&rt->xa, res->id, res, GFP_KERNEL); if (ret) res->id = 0; } else if (res->type == RDMA_RESTRACK_COUNTER) { /* Special case to ensure that cntn points to right counter */ struct rdma_counter *counter; counter = container_of(res, struct rdma_counter, res); ret = xa_insert(&rt->xa, counter->id, res, GFP_KERNEL); res->id = ret ? 0 : counter->id; } else { ret = xa_alloc_cyclic(&rt->xa, &res->id, res, xa_limit_32b, &rt->next_id, GFP_KERNEL); ret = (ret < 0) ? ret : 0; } out: if (!ret) res->valid = true; } EXPORT_SYMBOL(rdma_restrack_add); int __must_check rdma_restrack_get(struct rdma_restrack_entry *res) { return kref_get_unless_zero(&res->kref); } EXPORT_SYMBOL(rdma_restrack_get); /** * rdma_restrack_get_byid() - translate from ID to restrack object * @dev: IB device * @type: resource track type * @id: ID to take a look * * Return: Pointer to restrack entry or -ENOENT in case of error. */ struct rdma_restrack_entry * rdma_restrack_get_byid(struct ib_device *dev, enum rdma_restrack_type type, u32 id) { struct rdma_restrack_root *rt = &dev->res[type]; struct rdma_restrack_entry *res; xa_lock(&rt->xa); res = xa_load(&rt->xa, id); if (!res || !rdma_restrack_get(res)) res = ERR_PTR(-ENOENT); xa_unlock(&rt->xa); return res; } EXPORT_SYMBOL(rdma_restrack_get_byid); static void restrack_release(struct kref *kref) { struct rdma_restrack_entry *res; res = container_of(kref, struct rdma_restrack_entry, kref); if (res->task) { put_task_struct(res->task); res->task = NULL; } complete(&res->comp); } int rdma_restrack_put(struct rdma_restrack_entry *res) { return kref_put(&res->kref, restrack_release); } EXPORT_SYMBOL(rdma_restrack_put); /** * rdma_restrack_del() - delete object from the reource tracking database * @res: resource entry */ void rdma_restrack_del(struct rdma_restrack_entry *res) { struct rdma_restrack_entry *old; struct rdma_restrack_root *rt; struct ib_device *dev; if (!res->valid) { if (res->task) { put_task_struct(res->task); res->task = NULL; } return; } if (res->no_track) goto out; dev = res_to_dev(res); if (WARN_ON(!dev)) return; rt = &dev->res[res->type]; old = xa_erase(&rt->xa, res->id); WARN_ON(old != res); out: res->valid = false; rdma_restrack_put(res); wait_for_completion(&res->comp); } EXPORT_SYMBOL(rdma_restrack_del);
linux-master
drivers/infiniband/core/restrack.c
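Note on the restrack.c row above: rdma_restrack_add() uses two ID schemes. Most resource types get a cyclically allocated XArray ID, while a QP reuses its QP number as the restrack ID and, only for SMI/GSI QPs, ORs the port number into the bits above 24 so the per-port QP0/QP1 entries cannot collide. The snippet below is a minimal userspace model of that QP packing only; qp_restrack_id() is an invented name.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the QP case in rdma_restrack_add(): a QP number fits in
 * 24 bits, so the port number can live above it for the special
 * SMI/GSI QPs that exist once per port. */
static uint32_t qp_restrack_id(uint32_t qp_num, uint32_t port, int is_smi_or_gsi)
{
    uint32_t id = qp_num & 0x00ffffff;

    if (is_smi_or_gsi)
        id |= port << 24;
    return id;
}

int main(void)
{
    /* QP0 (SMI) on ports 1 and 2 get distinct IDs ... */
    printf("port 1 QP0 -> 0x%08x\n", qp_restrack_id(0, 1, 1));
    printf("port 2 QP0 -> 0x%08x\n", qp_restrack_id(0, 2, 1));
    /* ... while an ordinary QP keeps its QP number as its ID. */
    printf("QP 0x1234  -> 0x%08x\n", qp_restrack_id(0x1234, 1, 0));
    return 0;
}

Keeping the QP number as the ID is what lets rdma_restrack_get_byid() translate a user-visible LQPN directly into the tracked entry, which is the "LQPN points to right QP" special case called out in the code.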
/* * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/completion.h> #include <linux/file.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/idr.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/module.h> #include <linux/nsproxy.h> #include <linux/nospec.h> #include <rdma/rdma_user_cm.h> #include <rdma/ib_marshall.h> #include <rdma/rdma_cm.h> #include <rdma/rdma_cm_ib.h> #include <rdma/ib_addr.h> #include <rdma/ib.h> #include <rdma/ib_cm.h> #include <rdma/rdma_netlink.h> #include "core_priv.h" MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access"); MODULE_LICENSE("Dual BSD/GPL"); static unsigned int max_backlog = 1024; static struct ctl_table_header *ucma_ctl_table_hdr; static struct ctl_table ucma_ctl_table[] = { { .procname = "max_backlog", .data = &max_backlog, .maxlen = sizeof max_backlog, .mode = 0644, .proc_handler = proc_dointvec, }, { } }; struct ucma_file { struct mutex mut; struct file *filp; struct list_head ctx_list; struct list_head event_list; wait_queue_head_t poll_wait; }; struct ucma_context { u32 id; struct completion comp; refcount_t ref; int events_reported; atomic_t backlog; struct ucma_file *file; struct rdma_cm_id *cm_id; struct mutex mutex; u64 uid; struct list_head list; struct list_head mc_list; struct work_struct close_work; }; struct ucma_multicast { struct ucma_context *ctx; u32 id; int events_reported; u64 uid; u8 join_state; struct list_head list; struct sockaddr_storage addr; }; struct ucma_event { struct ucma_context *ctx; struct ucma_context *conn_req_ctx; struct ucma_multicast *mc; struct list_head list; struct rdma_ucm_event_resp resp; }; static DEFINE_XARRAY_ALLOC(ctx_table); static DEFINE_XARRAY_ALLOC(multicast_table); static const struct file_operations ucma_fops; static int ucma_destroy_private_ctx(struct ucma_context *ctx); static inline struct ucma_context *_ucma_find_context(int id, struct ucma_file *file) { struct ucma_context *ctx; ctx = xa_load(&ctx_table, id); if (!ctx) ctx = ERR_PTR(-ENOENT); else if (ctx->file != file) ctx = ERR_PTR(-EINVAL); return ctx; } static struct ucma_context 
*ucma_get_ctx(struct ucma_file *file, int id) { struct ucma_context *ctx; xa_lock(&ctx_table); ctx = _ucma_find_context(id, file); if (!IS_ERR(ctx)) if (!refcount_inc_not_zero(&ctx->ref)) ctx = ERR_PTR(-ENXIO); xa_unlock(&ctx_table); return ctx; } static void ucma_put_ctx(struct ucma_context *ctx) { if (refcount_dec_and_test(&ctx->ref)) complete(&ctx->comp); } /* * Same as ucm_get_ctx but requires that ->cm_id->device is valid, eg that the * CM_ID is bound. */ static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id) { struct ucma_context *ctx = ucma_get_ctx(file, id); if (IS_ERR(ctx)) return ctx; if (!ctx->cm_id->device) { ucma_put_ctx(ctx); return ERR_PTR(-EINVAL); } return ctx; } static void ucma_close_id(struct work_struct *work) { struct ucma_context *ctx = container_of(work, struct ucma_context, close_work); /* once all inflight tasks are finished, we close all underlying * resources. The context is still alive till its explicit destryoing * by its creator. This puts back the xarray's reference. */ ucma_put_ctx(ctx); wait_for_completion(&ctx->comp); /* No new events will be generated after destroying the id. */ rdma_destroy_id(ctx->cm_id); /* Reading the cm_id without holding a positive ref is not allowed */ ctx->cm_id = NULL; } static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) { struct ucma_context *ctx; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return NULL; INIT_WORK(&ctx->close_work, ucma_close_id); init_completion(&ctx->comp); INIT_LIST_HEAD(&ctx->mc_list); /* So list_del() will work if we don't do ucma_finish_ctx() */ INIT_LIST_HEAD(&ctx->list); ctx->file = file; mutex_init(&ctx->mutex); if (xa_alloc(&ctx_table, &ctx->id, NULL, xa_limit_32b, GFP_KERNEL)) { kfree(ctx); return NULL; } return ctx; } static void ucma_set_ctx_cm_id(struct ucma_context *ctx, struct rdma_cm_id *cm_id) { refcount_set(&ctx->ref, 1); ctx->cm_id = cm_id; } static void ucma_finish_ctx(struct ucma_context *ctx) { lockdep_assert_held(&ctx->file->mut); list_add_tail(&ctx->list, &ctx->file->ctx_list); xa_store(&ctx_table, ctx->id, ctx, GFP_KERNEL); } static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst, struct rdma_conn_param *src) { if (src->private_data_len) memcpy(dst->private_data, src->private_data, src->private_data_len); dst->private_data_len = src->private_data_len; dst->responder_resources = src->responder_resources; dst->initiator_depth = src->initiator_depth; dst->flow_control = src->flow_control; dst->retry_count = src->retry_count; dst->rnr_retry_count = src->rnr_retry_count; dst->srq = src->srq; dst->qp_num = src->qp_num; } static void ucma_copy_ud_event(struct ib_device *device, struct rdma_ucm_ud_param *dst, struct rdma_ud_param *src) { if (src->private_data_len) memcpy(dst->private_data, src->private_data, src->private_data_len); dst->private_data_len = src->private_data_len; ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr); dst->qp_num = src->qp_num; dst->qkey = src->qkey; } static struct ucma_event *ucma_create_uevent(struct ucma_context *ctx, struct rdma_cm_event *event) { struct ucma_event *uevent; uevent = kzalloc(sizeof(*uevent), GFP_KERNEL); if (!uevent) return NULL; uevent->ctx = ctx; switch (event->event) { case RDMA_CM_EVENT_MULTICAST_JOIN: case RDMA_CM_EVENT_MULTICAST_ERROR: uevent->mc = (struct ucma_multicast *) event->param.ud.private_data; uevent->resp.uid = uevent->mc->uid; uevent->resp.id = uevent->mc->id; break; default: uevent->resp.uid = ctx->uid; uevent->resp.id = ctx->id; break; } uevent->resp.event = 
event->event; uevent->resp.status = event->status; if (ctx->cm_id->qp_type == IB_QPT_UD) ucma_copy_ud_event(ctx->cm_id->device, &uevent->resp.param.ud, &event->param.ud); else ucma_copy_conn_event(&uevent->resp.param.conn, &event->param.conn); uevent->resp.ece.vendor_id = event->ece.vendor_id; uevent->resp.ece.attr_mod = event->ece.attr_mod; return uevent; } static int ucma_connect_event_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { struct ucma_context *listen_ctx = cm_id->context; struct ucma_context *ctx; struct ucma_event *uevent; if (!atomic_add_unless(&listen_ctx->backlog, -1, 0)) return -ENOMEM; ctx = ucma_alloc_ctx(listen_ctx->file); if (!ctx) goto err_backlog; ucma_set_ctx_cm_id(ctx, cm_id); uevent = ucma_create_uevent(listen_ctx, event); if (!uevent) goto err_alloc; uevent->conn_req_ctx = ctx; uevent->resp.id = ctx->id; ctx->cm_id->context = ctx; mutex_lock(&ctx->file->mut); ucma_finish_ctx(ctx); list_add_tail(&uevent->list, &ctx->file->event_list); mutex_unlock(&ctx->file->mut); wake_up_interruptible(&ctx->file->poll_wait); return 0; err_alloc: ucma_destroy_private_ctx(ctx); err_backlog: atomic_inc(&listen_ctx->backlog); /* Returning error causes the new ID to be destroyed */ return -ENOMEM; } static int ucma_event_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { struct ucma_event *uevent; struct ucma_context *ctx = cm_id->context; if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) return ucma_connect_event_handler(cm_id, event); /* * We ignore events for new connections until userspace has set their * context. This can only happen if an error occurs on a new connection * before the user accepts it. This is okay, since the accept will just * fail later. However, we do need to release the underlying HW * resources in case of a device removal event. */ if (ctx->uid) { uevent = ucma_create_uevent(ctx, event); if (!uevent) return 0; mutex_lock(&ctx->file->mut); list_add_tail(&uevent->list, &ctx->file->event_list); mutex_unlock(&ctx->file->mut); wake_up_interruptible(&ctx->file->poll_wait); } if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) { xa_lock(&ctx_table); if (xa_load(&ctx_table, ctx->id) == ctx) queue_work(system_unbound_wq, &ctx->close_work); xa_unlock(&ctx_table); } return 0; } static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_get_event cmd; struct ucma_event *uevent; /* * Old 32 bit user space does not send the 4 byte padding in the * reserved field. We don't care, allow it to keep working. 
*/ if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved) - sizeof(uevent->resp.ece)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; mutex_lock(&file->mut); while (list_empty(&file->event_list)) { mutex_unlock(&file->mut); if (file->filp->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible(file->poll_wait, !list_empty(&file->event_list))) return -ERESTARTSYS; mutex_lock(&file->mut); } uevent = list_first_entry(&file->event_list, struct ucma_event, list); if (copy_to_user(u64_to_user_ptr(cmd.response), &uevent->resp, min_t(size_t, out_len, sizeof(uevent->resp)))) { mutex_unlock(&file->mut); return -EFAULT; } list_del(&uevent->list); uevent->ctx->events_reported++; if (uevent->mc) uevent->mc->events_reported++; if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) atomic_inc(&uevent->ctx->backlog); mutex_unlock(&file->mut); kfree(uevent); return 0; } static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type) { switch (cmd->ps) { case RDMA_PS_TCP: *qp_type = IB_QPT_RC; return 0; case RDMA_PS_UDP: case RDMA_PS_IPOIB: *qp_type = IB_QPT_UD; return 0; case RDMA_PS_IB: *qp_type = cmd->qp_type; return 0; default: return -EINVAL; } } static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_create_id cmd; struct rdma_ucm_create_id_resp resp; struct ucma_context *ctx; struct rdma_cm_id *cm_id; enum ib_qp_type qp_type; int ret; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ret = ucma_get_qp_type(&cmd, &qp_type); if (ret) return ret; ctx = ucma_alloc_ctx(file); if (!ctx) return -ENOMEM; ctx->uid = cmd.uid; cm_id = rdma_create_user_id(ucma_event_handler, ctx, cmd.ps, qp_type); if (IS_ERR(cm_id)) { ret = PTR_ERR(cm_id); goto err1; } ucma_set_ctx_cm_id(ctx, cm_id); resp.id = ctx->id; if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) { ret = -EFAULT; goto err1; } mutex_lock(&file->mut); ucma_finish_ctx(ctx); mutex_unlock(&file->mut); return 0; err1: ucma_destroy_private_ctx(ctx); return ret; } static void ucma_cleanup_multicast(struct ucma_context *ctx) { struct ucma_multicast *mc, *tmp; xa_lock(&multicast_table); list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { list_del(&mc->list); /* * At this point mc->ctx->ref is 0 so the mc cannot leave the * lock on the reader and this is enough serialization */ __xa_erase(&multicast_table, mc->id); kfree(mc); } xa_unlock(&multicast_table); } static void ucma_cleanup_mc_events(struct ucma_multicast *mc) { struct ucma_event *uevent, *tmp; rdma_lock_handler(mc->ctx->cm_id); mutex_lock(&mc->ctx->file->mut); list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) { if (uevent->mc != mc) continue; list_del(&uevent->list); kfree(uevent); } mutex_unlock(&mc->ctx->file->mut); rdma_unlock_handler(mc->ctx->cm_id); } static int ucma_cleanup_ctx_events(struct ucma_context *ctx) { int events_reported; struct ucma_event *uevent, *tmp; LIST_HEAD(list); /* Cleanup events not yet reported to the user.*/ mutex_lock(&ctx->file->mut); list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { if (uevent->ctx != ctx) continue; if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST && xa_cmpxchg(&ctx_table, uevent->conn_req_ctx->id, uevent->conn_req_ctx, XA_ZERO_ENTRY, GFP_KERNEL) == uevent->conn_req_ctx) { list_move_tail(&uevent->list, &list); continue; } list_del(&uevent->list); kfree(uevent); } list_del(&ctx->list); 
events_reported = ctx->events_reported; mutex_unlock(&ctx->file->mut); /* * If this was a listening ID then any connections spawned from it that * have not been delivered to userspace are cleaned up too. Must be done * outside any locks. */ list_for_each_entry_safe(uevent, tmp, &list, list) { ucma_destroy_private_ctx(uevent->conn_req_ctx); kfree(uevent); } return events_reported; } /* * When this is called the xarray must have a XA_ZERO_ENTRY in the ctx->id (ie * the ctx is not public to the user). This either because: * - ucma_finish_ctx() hasn't been called * - xa_cmpxchg() succeed to remove the entry (only one thread can succeed) */ static int ucma_destroy_private_ctx(struct ucma_context *ctx) { int events_reported; /* * Destroy the underlying cm_id. New work queuing is prevented now by * the removal from the xarray. Once the work is cancled ref will either * be 0 because the work ran to completion and consumed the ref from the * xarray, or it will be positive because we still have the ref from the * xarray. This can also be 0 in cases where cm_id was never set */ cancel_work_sync(&ctx->close_work); if (refcount_read(&ctx->ref)) ucma_close_id(&ctx->close_work); events_reported = ucma_cleanup_ctx_events(ctx); ucma_cleanup_multicast(ctx); WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, XA_ZERO_ENTRY, NULL, GFP_KERNEL) != NULL); mutex_destroy(&ctx->mutex); kfree(ctx); return events_reported; } static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_destroy_id cmd; struct rdma_ucm_destroy_id_resp resp; struct ucma_context *ctx; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; xa_lock(&ctx_table); ctx = _ucma_find_context(cmd.id, file); if (!IS_ERR(ctx)) { if (__xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY, GFP_KERNEL) != ctx) ctx = ERR_PTR(-ENOENT); } xa_unlock(&ctx_table); if (IS_ERR(ctx)) return PTR_ERR(ctx); resp.events_reported = ucma_destroy_private_ctx(ctx); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; return ret; } static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_bind_ip cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (!rdma_addr_size_in6(&cmd.addr)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_bind cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (cmd.reserved || !cmd.addr_size || cmd.addr_size != rdma_addr_size_kss(&cmd.addr)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_resolve_ip(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_ip cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) || !rdma_addr_size_in6(&cmd.dst_addr)) return -EINVAL; ctx = 
ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_resolve_addr(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_addr cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) || !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr))) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_resolve_route(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_route cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { struct rdma_dev_addr *dev_addr; resp->num_paths = route->num_pri_alt_paths; switch (route->num_pri_alt_paths) { case 0: dev_addr = &route->addr.dev_addr; rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid); rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid); resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); break; case 2: ib_copy_path_rec_to_user(&resp->ib_route[1], &route->path_rec[1]); fallthrough; case 1: ib_copy_path_rec_to_user(&resp->ib_route[0], &route->path_rec[0]); break; default: break; } } static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { resp->num_paths = route->num_pri_alt_paths; switch (route->num_pri_alt_paths) { case 0: rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr, (union ib_gid *)&resp->ib_route[0].dgid); rdma_ip2gid((struct sockaddr *)&route->addr.src_addr, (union ib_gid *)&resp->ib_route[0].sgid); resp->ib_route[0].pkey = cpu_to_be16(0xffff); break; case 2: ib_copy_path_rec_to_user(&resp->ib_route[1], &route->path_rec[1]); fallthrough; case 1: ib_copy_path_rec_to_user(&resp->ib_route[0], &route->path_rec[0]); break; default: break; } } static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { struct rdma_dev_addr *dev_addr; dev_addr = &route->addr.dev_addr; rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid); rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid); } static ssize_t ucma_query_route(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_query cmd; struct rdma_ucm_query_route_resp resp; struct ucma_context *ctx; struct sockaddr *addr; int ret = 0; if (out_len < offsetof(struct rdma_ucm_query_route_resp, ibdev_index)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); memset(&resp, 0, sizeof resp); addr = 
(struct sockaddr *) &ctx->cm_id->route.addr.src_addr; memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)); addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)); if (!ctx->cm_id->device) goto out; resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; resp.ibdev_index = ctx->cm_id->device->index; resp.port_num = ctx->cm_id->port_num; if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_ib_route(&resp, &ctx->cm_id->route); else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iboe_route(&resp, &ctx->cm_id->route); else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iw_route(&resp, &ctx->cm_id->route); out: mutex_unlock(&ctx->mutex); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, min_t(size_t, out_len, sizeof(resp)))) ret = -EFAULT; ucma_put_ctx(ctx); return ret; } static void ucma_query_device_addr(struct rdma_cm_id *cm_id, struct rdma_ucm_query_addr_resp *resp) { if (!cm_id->device) return; resp->node_guid = (__force __u64) cm_id->device->node_guid; resp->ibdev_index = cm_id->device->index; resp->port_num = cm_id->port_num; resp->pkey = (__force __u16) cpu_to_be16( ib_addr_get_pkey(&cm_id->route.addr.dev_addr)); } static ssize_t ucma_query_addr(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_addr_resp resp; struct sockaddr *addr; int ret = 0; if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index)) return -ENOSPC; memset(&resp, 0, sizeof resp); addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; resp.src_size = rdma_addr_size(addr); memcpy(&resp.src_addr, addr, resp.src_size); addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; resp.dst_size = rdma_addr_size(addr); memcpy(&resp.dst_addr, addr, resp.dst_size); ucma_query_device_addr(ctx->cm_id, &resp); if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp)))) ret = -EFAULT; return ret; } static ssize_t ucma_query_path(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_path_resp *resp; int i, ret = 0; if (out_len < sizeof(*resp)) return -ENOSPC; resp = kzalloc(out_len, GFP_KERNEL); if (!resp) return -ENOMEM; resp->num_paths = ctx->cm_id->route.num_pri_alt_paths; for (i = 0, out_len -= sizeof(*resp); i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data); i++, out_len -= sizeof(struct ib_path_rec_data)) { struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i]; resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY | IB_PATH_BIDIRECTIONAL; if (rec->rec_type == SA_PATH_REC_TYPE_OPA) { struct sa_path_rec ib; sa_convert_path_opa_to_ib(&ib, rec); ib_sa_pack_path(&ib, &resp->path_data[i].path_rec); } else { ib_sa_pack_path(rec, &resp->path_data[i].path_rec); } } if (copy_to_user(response, resp, struct_size(resp, path_data, i))) ret = -EFAULT; kfree(resp); return ret; } static ssize_t ucma_query_gid(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_addr_resp resp; struct sockaddr_ib *addr; int ret = 0; if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index)) return -ENOSPC; memset(&resp, 0, sizeof resp); ucma_query_device_addr(ctx->cm_id, &resp); addr = (struct sockaddr_ib *) &resp.src_addr; resp.src_size = sizeof(*addr); if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) { memcpy(addr, 
&ctx->cm_id->route.addr.src_addr, resp.src_size); } else { addr->sib_family = AF_IB; addr->sib_pkey = (__force __be16) resp.pkey; rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr, NULL); addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) &ctx->cm_id->route.addr.src_addr); } addr = (struct sockaddr_ib *) &resp.dst_addr; resp.dst_size = sizeof(*addr); if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) { memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size); } else { addr->sib_family = AF_IB; addr->sib_pkey = (__force __be16) resp.pkey; rdma_read_gids(ctx->cm_id, NULL, (union ib_gid *)&addr->sib_addr); addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr); } if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp)))) ret = -EFAULT; return ret; } static ssize_t ucma_query(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_query cmd; struct ucma_context *ctx; void __user *response; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; response = u64_to_user_ptr(cmd.response); ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); switch (cmd.option) { case RDMA_USER_CM_QUERY_ADDR: ret = ucma_query_addr(ctx, response, out_len); break; case RDMA_USER_CM_QUERY_PATH: ret = ucma_query_path(ctx, response, out_len); break; case RDMA_USER_CM_QUERY_GID: ret = ucma_query_gid(ctx, response, out_len); break; default: ret = -ENOSYS; break; } mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static void ucma_copy_conn_param(struct rdma_cm_id *id, struct rdma_conn_param *dst, struct rdma_ucm_conn_param *src) { dst->private_data = src->private_data; dst->private_data_len = src->private_data_len; dst->responder_resources = src->responder_resources; dst->initiator_depth = src->initiator_depth; dst->flow_control = src->flow_control; dst->retry_count = src->retry_count; dst->rnr_retry_count = src->rnr_retry_count; dst->srq = src->srq; dst->qp_num = src->qp_num & 0xFFFFFF; dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? 
src->qkey : 0; } static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_conn_param conn_param; struct rdma_ucm_ece ece = {}; struct rdma_ucm_connect cmd; struct ucma_context *ctx; size_t in_size; int ret; if (in_len < offsetofend(typeof(cmd), reserved)) return -EINVAL; in_size = min_t(size_t, in_len, sizeof(cmd)); if (copy_from_user(&cmd, inbuf, in_size)) return -EFAULT; if (!cmd.conn_param.valid) return -EINVAL; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); if (offsetofend(typeof(cmd), ece) <= in_size) { ece.vendor_id = cmd.ece.vendor_id; ece.attr_mod = cmd.ece.attr_mod; } mutex_lock(&ctx->mutex); ret = rdma_connect_ece(ctx->cm_id, &conn_param, &ece); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_listen cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); if (cmd.backlog <= 0 || cmd.backlog > max_backlog) cmd.backlog = max_backlog; atomic_set(&ctx->backlog, cmd.backlog); mutex_lock(&ctx->mutex); ret = rdma_listen(ctx->cm_id, cmd.backlog); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_accept cmd; struct rdma_conn_param conn_param; struct rdma_ucm_ece ece = {}; struct ucma_context *ctx; size_t in_size; int ret; if (in_len < offsetofend(typeof(cmd), reserved)) return -EINVAL; in_size = min_t(size_t, in_len, sizeof(cmd)); if (copy_from_user(&cmd, inbuf, in_size)) return -EFAULT; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); if (offsetofend(typeof(cmd), ece) <= in_size) { ece.vendor_id = cmd.ece.vendor_id; ece.attr_mod = cmd.ece.attr_mod; } if (cmd.conn_param.valid) { ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); mutex_lock(&ctx->mutex); rdma_lock_handler(ctx->cm_id); ret = rdma_accept_ece(ctx->cm_id, &conn_param, &ece); if (!ret) { /* The uid must be set atomically with the handler */ ctx->uid = cmd.uid; } rdma_unlock_handler(ctx->cm_id); mutex_unlock(&ctx->mutex); } else { mutex_lock(&ctx->mutex); rdma_lock_handler(ctx->cm_id); ret = rdma_accept_ece(ctx->cm_id, NULL, &ece); rdma_unlock_handler(ctx->cm_id); mutex_unlock(&ctx->mutex); } ucma_put_ctx(ctx); return ret; } static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_reject cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (!cmd.reason) cmd.reason = IB_CM_REJ_CONSUMER_DEFINED; switch (cmd.reason) { case IB_CM_REJ_CONSUMER_DEFINED: case IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED: break; default: return -EINVAL; } ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len, cmd.reason); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_disconnect cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) 
return PTR_ERR(ctx); mutex_lock(&ctx->mutex); ret = rdma_disconnect(ctx->cm_id); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_init_qp_attr(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_init_qp_attr cmd; struct ib_uverbs_qp_attr resp; struct ucma_context *ctx; struct ib_qp_attr qp_attr; int ret; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (cmd.qp_state > IB_QPS_ERR) return -EINVAL; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; mutex_lock(&ctx->mutex); ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); mutex_unlock(&ctx->mutex); if (ret) goto out; ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; out: ucma_put_ctx(ctx); return ret; } static int ucma_set_option_id(struct ucma_context *ctx, int optname, void *optval, size_t optlen) { int ret = 0; switch (optname) { case RDMA_OPTION_ID_TOS: if (optlen != sizeof(u8)) { ret = -EINVAL; break; } rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); break; case RDMA_OPTION_ID_REUSEADDR: if (optlen != sizeof(int)) { ret = -EINVAL; break; } ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); break; case RDMA_OPTION_ID_AFONLY: if (optlen != sizeof(int)) { ret = -EINVAL; break; } ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0); break; case RDMA_OPTION_ID_ACK_TIMEOUT: if (optlen != sizeof(u8)) { ret = -EINVAL; break; } ret = rdma_set_ack_timeout(ctx->cm_id, *((u8 *)optval)); break; default: ret = -ENOSYS; } return ret; } static int ucma_set_ib_path(struct ucma_context *ctx, struct ib_path_rec_data *path_data, size_t optlen) { struct sa_path_rec sa_path; struct rdma_cm_event event; int ret; if (optlen % sizeof(*path_data)) return -EINVAL; for (; optlen; optlen -= sizeof(*path_data), path_data++) { if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY | IB_PATH_BIDIRECTIONAL)) break; } if (!optlen) return -EINVAL; if (!ctx->cm_id->device) return -EINVAL; memset(&sa_path, 0, sizeof(sa_path)); sa_path.rec_type = SA_PATH_REC_TYPE_IB; ib_sa_unpack_path(path_data->path_rec, &sa_path); if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) { struct sa_path_rec opa; sa_convert_path_ib_to_opa(&opa, &sa_path); mutex_lock(&ctx->mutex); ret = rdma_set_ib_path(ctx->cm_id, &opa); mutex_unlock(&ctx->mutex); } else { mutex_lock(&ctx->mutex); ret = rdma_set_ib_path(ctx->cm_id, &sa_path); mutex_unlock(&ctx->mutex); } if (ret) return ret; memset(&event, 0, sizeof event); event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; return ucma_event_handler(ctx->cm_id, &event); } static int ucma_set_option_ib(struct ucma_context *ctx, int optname, void *optval, size_t optlen) { int ret; switch (optname) { case RDMA_OPTION_IB_PATH: ret = ucma_set_ib_path(ctx, optval, optlen); break; default: ret = -ENOSYS; } return ret; } static int ucma_set_option_level(struct ucma_context *ctx, int level, int optname, void *optval, size_t optlen) { int ret; switch (level) { case RDMA_OPTION_ID: mutex_lock(&ctx->mutex); ret = ucma_set_option_id(ctx, optname, optval, optlen); mutex_unlock(&ctx->mutex); break; case RDMA_OPTION_IB: ret = ucma_set_option_ib(ctx, optname, optval, optlen); break; default: ret = -ENOSYS; } return ret; } static ssize_t ucma_set_option(struct ucma_file *file, const char 
__user *inbuf, int in_len, int out_len) { struct rdma_ucm_set_option cmd; struct ucma_context *ctx; void *optval; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); optval = memdup_user(u64_to_user_ptr(cmd.optval), cmd.optlen); if (IS_ERR(optval)) { ret = PTR_ERR(optval); goto out; } ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval, cmd.optlen); kfree(optval); out: ucma_put_ctx(ctx); return ret; } static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_notify cmd; struct ucma_context *ctx; int ret = -EINVAL; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); if (ctx->cm_id->device) ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_process_join(struct ucma_file *file, struct rdma_ucm_join_mcast *cmd, int out_len) { struct rdma_ucm_create_id_resp resp; struct ucma_context *ctx; struct ucma_multicast *mc; struct sockaddr *addr; int ret; u8 join_state; if (out_len < sizeof(resp)) return -ENOSPC; addr = (struct sockaddr *) &cmd->addr; if (cmd->addr_size != rdma_addr_size(addr)) return -EINVAL; if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER) join_state = BIT(FULLMEMBER_JOIN); else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER) join_state = BIT(SENDONLY_FULLMEMBER_JOIN); else return -EINVAL; ctx = ucma_get_ctx_dev(file, cmd->id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mc = kzalloc(sizeof(*mc), GFP_KERNEL); if (!mc) { ret = -ENOMEM; goto err_put_ctx; } mc->ctx = ctx; mc->join_state = join_state; mc->uid = cmd->uid; memcpy(&mc->addr, addr, cmd->addr_size); xa_lock(&multicast_table); if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, GFP_KERNEL)) { ret = -ENOMEM; goto err_free_mc; } list_add_tail(&mc->list, &ctx->mc_list); xa_unlock(&multicast_table); mutex_lock(&ctx->mutex); ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, join_state, mc); mutex_unlock(&ctx->mutex); if (ret) goto err_xa_erase; resp.id = mc->id; if (copy_to_user(u64_to_user_ptr(cmd->response), &resp, sizeof(resp))) { ret = -EFAULT; goto err_leave_multicast; } xa_store(&multicast_table, mc->id, mc, 0); ucma_put_ctx(ctx); return 0; err_leave_multicast: mutex_lock(&ctx->mutex); rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr); mutex_unlock(&ctx->mutex); ucma_cleanup_mc_events(mc); err_xa_erase: xa_lock(&multicast_table); list_del(&mc->list); __xa_erase(&multicast_table, mc->id); err_free_mc: xa_unlock(&multicast_table); kfree(mc); err_put_ctx: ucma_put_ctx(ctx); return ret; } static ssize_t ucma_join_ip_multicast(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_join_ip_mcast cmd; struct rdma_ucm_join_mcast join_cmd; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; join_cmd.response = cmd.response; join_cmd.uid = cmd.uid; join_cmd.id = cmd.id; join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr); if (!join_cmd.addr_size) return -EINVAL; join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER; memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); return ucma_process_join(file, &join_cmd, out_len); } static ssize_t ucma_join_multicast(struct ucma_file *file, const char __user *inbuf, int in_len, int 
out_len) { struct rdma_ucm_join_mcast cmd; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (!rdma_addr_size_kss(&cmd.addr)) return -EINVAL; return ucma_process_join(file, &cmd, out_len); } static ssize_t ucma_leave_multicast(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_destroy_id cmd; struct rdma_ucm_destroy_id_resp resp; struct ucma_multicast *mc; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; xa_lock(&multicast_table); mc = xa_load(&multicast_table, cmd.id); if (!mc) mc = ERR_PTR(-ENOENT); else if (READ_ONCE(mc->ctx->file) != file) mc = ERR_PTR(-EINVAL); else if (!refcount_inc_not_zero(&mc->ctx->ref)) mc = ERR_PTR(-ENXIO); if (IS_ERR(mc)) { xa_unlock(&multicast_table); ret = PTR_ERR(mc); goto out; } list_del(&mc->list); __xa_erase(&multicast_table, mc->id); xa_unlock(&multicast_table); mutex_lock(&mc->ctx->mutex); rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); mutex_unlock(&mc->ctx->mutex); ucma_cleanup_mc_events(mc); ucma_put_ctx(mc->ctx); resp.events_reported = mc->events_reported; kfree(mc); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; out: return ret; } static ssize_t ucma_migrate_id(struct ucma_file *new_file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_migrate_id cmd; struct rdma_ucm_migrate_resp resp; struct ucma_event *uevent, *tmp; struct ucma_context *ctx; LIST_HEAD(event_list); struct fd f; struct ucma_file *cur_file; int ret = 0; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; /* Get current fd to protect against it being closed */ f = fdget(cmd.fd); if (!f.file) return -ENOENT; if (f.file->f_op != &ucma_fops) { ret = -EINVAL; goto file_put; } cur_file = f.file->private_data; /* Validate current fd and prevent destruction of id. */ ctx = ucma_get_ctx(cur_file, cmd.id); if (IS_ERR(ctx)) { ret = PTR_ERR(ctx); goto file_put; } rdma_lock_handler(ctx->cm_id); /* * ctx->file can only be changed under the handler & xa_lock. xa_load() * must be checked again to ensure the ctx hasn't begun destruction * since the ucma_get_ctx(). */ xa_lock(&ctx_table); if (_ucma_find_context(cmd.id, cur_file) != ctx) { xa_unlock(&ctx_table); ret = -ENOENT; goto err_unlock; } ctx->file = new_file; xa_unlock(&ctx_table); mutex_lock(&cur_file->mut); list_del(&ctx->list); /* * At this point lock_handler() prevents addition of new uevents for * this ctx. 
*/ list_for_each_entry_safe(uevent, tmp, &cur_file->event_list, list) if (uevent->ctx == ctx) list_move_tail(&uevent->list, &event_list); resp.events_reported = ctx->events_reported; mutex_unlock(&cur_file->mut); mutex_lock(&new_file->mut); list_add_tail(&ctx->list, &new_file->ctx_list); list_splice_tail(&event_list, &new_file->event_list); mutex_unlock(&new_file->mut); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; err_unlock: rdma_unlock_handler(ctx->cm_id); ucma_put_ctx(ctx); file_put: fdput(f); return ret; } static ssize_t (*ucma_cmd_table[])(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) = { [RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id, [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id, [RDMA_USER_CM_CMD_BIND_IP] = ucma_bind_ip, [RDMA_USER_CM_CMD_RESOLVE_IP] = ucma_resolve_ip, [RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route, [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route, [RDMA_USER_CM_CMD_CONNECT] = ucma_connect, [RDMA_USER_CM_CMD_LISTEN] = ucma_listen, [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept, [RDMA_USER_CM_CMD_REJECT] = ucma_reject, [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect, [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr, [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event, [RDMA_USER_CM_CMD_GET_OPTION] = NULL, [RDMA_USER_CM_CMD_SET_OPTION] = ucma_set_option, [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify, [RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast, [RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast, [RDMA_USER_CM_CMD_MIGRATE_ID] = ucma_migrate_id, [RDMA_USER_CM_CMD_QUERY] = ucma_query, [RDMA_USER_CM_CMD_BIND] = ucma_bind, [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr, [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast }; static ssize_t ucma_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos) { struct ucma_file *file = filp->private_data; struct rdma_ucm_cmd_hdr hdr; ssize_t ret; if (!ib_safe_file_access(filp)) { pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", __func__, task_tgid_vnr(current), current->comm); return -EACCES; } if (len < sizeof(hdr)) return -EINVAL; if (copy_from_user(&hdr, buf, sizeof(hdr))) return -EFAULT; if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table)) return -EINVAL; hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table)); if (hdr.in + sizeof(hdr) > len) return -EINVAL; if (!ucma_cmd_table[hdr.cmd]) return -ENOSYS; ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out); if (!ret) ret = len; return ret; } static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait) { struct ucma_file *file = filp->private_data; __poll_t mask = 0; poll_wait(filp, &file->poll_wait, wait); if (!list_empty(&file->event_list)) mask = EPOLLIN | EPOLLRDNORM; return mask; } /* * ucma_open() does not need the BKL: * * - no global state is referred to; * - there is no ioctl method to race against; * - no further module initialization is required for open to work * after the device is registered. 
*/ static int ucma_open(struct inode *inode, struct file *filp) { struct ucma_file *file; file = kmalloc(sizeof *file, GFP_KERNEL); if (!file) return -ENOMEM; INIT_LIST_HEAD(&file->event_list); INIT_LIST_HEAD(&file->ctx_list); init_waitqueue_head(&file->poll_wait); mutex_init(&file->mut); filp->private_data = file; file->filp = filp; return stream_open(inode, filp); } static int ucma_close(struct inode *inode, struct file *filp) { struct ucma_file *file = filp->private_data; /* * All paths that touch ctx_list or ctx_list starting from write() are * prevented by this being a FD release function. The list_add_tail() in * ucma_connect_event_handler() can run concurrently, however it only * adds to the list *after* a listening ID. By only reading the first of * the list, and relying on ucma_destroy_private_ctx() to block * ucma_connect_event_handler(), no additional locking is needed. */ while (!list_empty(&file->ctx_list)) { struct ucma_context *ctx = list_first_entry( &file->ctx_list, struct ucma_context, list); WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY, GFP_KERNEL) != ctx); ucma_destroy_private_ctx(ctx); } kfree(file); return 0; } static const struct file_operations ucma_fops = { .owner = THIS_MODULE, .open = ucma_open, .release = ucma_close, .write = ucma_write, .poll = ucma_poll, .llseek = no_llseek, }; static struct miscdevice ucma_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "rdma_cm", .nodename = "infiniband/rdma_cm", .mode = 0666, .fops = &ucma_fops, }; static int ucma_get_global_nl_info(struct ib_client_nl_info *res) { res->abi = RDMA_USER_CM_ABI_VERSION; res->cdev = ucma_misc.this_device; return 0; } static struct ib_client rdma_cma_client = { .name = "rdma_cm", .get_global_nl_info = ucma_get_global_nl_info, }; MODULE_ALIAS_RDMA_CLIENT("rdma_cm"); static ssize_t abi_version_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", RDMA_USER_CM_ABI_VERSION); } static DEVICE_ATTR_RO(abi_version); static int __init ucma_init(void) { int ret; ret = misc_register(&ucma_misc); if (ret) return ret; ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version); if (ret) { pr_err("rdma_ucm: couldn't create abi_version attr\n"); goto err1; } ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table); if (!ucma_ctl_table_hdr) { pr_err("rdma_ucm: couldn't register sysctl paths\n"); ret = -ENOMEM; goto err2; } ret = ib_register_client(&rdma_cma_client); if (ret) goto err3; return 0; err3: unregister_net_sysctl_table(ucma_ctl_table_hdr); err2: device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); err1: misc_deregister(&ucma_misc); return ret; } static void __exit ucma_cleanup(void) { ib_unregister_client(&rdma_cma_client); unregister_net_sysctl_table(ucma_ctl_table_hdr); device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); misc_deregister(&ucma_misc); } module_init(ucma_init); module_exit(ucma_cleanup);
linux-master
drivers/infiniband/core/ucma.c
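The ucma.c entry above is the kernel side of the /dev/infiniband/rdma_cm misc device that librdmacm drives from userspace. The sketch below is illustrative only, not part of the kernel sources: a minimal listener built on the public librdmacm calls (rdma_create_event_channel, rdma_create_id, rdma_bind_addr, rdma_listen, rdma_get_cm_event), each of which the library marshals into one of the RDMA_USER_CM_CMD_* writes dispatched by ucma_write(). The port number is a placeholder and error handling is abbreviated.

/*
 * Minimal librdmacm listener sketch (illustrative only; link with -lrdmacm).
 * Each library call below becomes one write() of a rdma_ucm_cmd_hdr plus
 * payload, dispatched by ucma_write() through ucma_cmd_table[].
 */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <rdma/rdma_cma.h>

int main(void)
{
	struct rdma_event_channel *ch;
	struct rdma_cm_id *listen_id;
	struct rdma_cm_event *event;
	struct sockaddr_in addr;

	ch = rdma_create_event_channel();	/* opens /dev/infiniband/rdma_cm */
	if (!ch)
		return 1;
	if (rdma_create_id(ch, &listen_id, NULL, RDMA_PS_TCP))
		return 1;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(7471);		/* placeholder port */

	if (rdma_bind_addr(listen_id, (struct sockaddr *)&addr) ||
	    rdma_listen(listen_id, 8))
		return 1;

	/* Connection requests come back as events read from the same fd. */
	if (!rdma_get_cm_event(ch, &event)) {
		printf("got cm event %d\n", event->event);
		rdma_ack_cm_event(event);
	}

	rdma_destroy_id(listen_id);
	rdma_destroy_event_channel(ch);
	return 0;
}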
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ #include <rdma/uverbs_std_types.h> #include "rdma_core.h" #include "uverbs.h" static int uverbs_free_srq(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { struct ib_srq *srq = uobject->object; struct ib_uevent_object *uevent = container_of(uobject, struct ib_uevent_object, uobject); enum ib_srq_type srq_type = srq->srq_type; int ret; ret = ib_destroy_srq_user(srq, &attrs->driver_udata); if (ret) return ret; if (srq_type == IB_SRQT_XRC) { struct ib_usrq_object *us = container_of(uobject, struct ib_usrq_object, uevent.uobject); atomic_dec(&us->uxrcd->refcnt); } ib_uverbs_release_uevent(uevent); return 0; } static int UVERBS_HANDLER(UVERBS_METHOD_SRQ_CREATE)( struct uverbs_attr_bundle *attrs) { struct ib_usrq_object *obj = container_of( uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_SRQ_HANDLE), typeof(*obj), uevent.uobject); struct ib_pd *pd = uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_SRQ_PD_HANDLE); struct ib_srq_init_attr attr = {}; struct ib_uobject *xrcd_uobj; struct ib_srq *srq; u64 user_handle; int ret; ret = uverbs_copy_from(&attr.attr.max_sge, attrs, UVERBS_ATTR_CREATE_SRQ_MAX_SGE); if (!ret) ret = uverbs_copy_from(&attr.attr.max_wr, attrs, UVERBS_ATTR_CREATE_SRQ_MAX_WR); if (!ret) ret = uverbs_copy_from(&attr.attr.srq_limit, attrs, UVERBS_ATTR_CREATE_SRQ_LIMIT); if (!ret) ret = uverbs_copy_from(&user_handle, attrs, UVERBS_ATTR_CREATE_SRQ_USER_HANDLE); if (!ret) ret = uverbs_get_const(&attr.srq_type, attrs, UVERBS_ATTR_CREATE_SRQ_TYPE); if (ret) return ret; if (ib_srq_has_cq(attr.srq_type)) { attr.ext.cq = uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_SRQ_CQ_HANDLE); if (IS_ERR(attr.ext.cq)) return PTR_ERR(attr.ext.cq); } switch (attr.srq_type) { case IB_UVERBS_SRQT_XRC: xrcd_uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_SRQ_XRCD_HANDLE); if (IS_ERR(xrcd_uobj)) return PTR_ERR(xrcd_uobj); attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object; if (!attr.ext.xrc.xrcd) return -EINVAL; obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); atomic_inc(&obj->uxrcd->refcnt); break; case IB_UVERBS_SRQT_TM: ret = uverbs_copy_from(&attr.ext.tag_matching.max_num_tags, attrs, UVERBS_ATTR_CREATE_SRQ_MAX_NUM_TAGS); if (ret) return ret; break; case IB_UVERBS_SRQT_BASIC: break; default: return -EINVAL; } obj->uevent.event_file = ib_uverbs_get_async_event(attrs, UVERBS_ATTR_CREATE_SRQ_EVENT_FD); INIT_LIST_HEAD(&obj->uevent.event_list); attr.event_handler = ib_uverbs_srq_event_handler; obj->uevent.uobject.user_handle = user_handle; srq = ib_create_srq_user(pd, &attr, obj, &attrs->driver_udata); if (IS_ERR(srq)) { ret = PTR_ERR(srq); goto err; } obj->uevent.uobject.object = srq; uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_SRQ_HANDLE); ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_SRQ_RESP_MAX_WR, &attr.attr.max_wr, sizeof(attr.attr.max_wr)); if (ret) return ret; ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_SRQ_RESP_MAX_SGE, &attr.attr.max_sge, sizeof(attr.attr.max_sge)); if (ret) return ret; if (attr.srq_type == IB_SRQT_XRC) { ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_SRQ_RESP_SRQ_NUM, &srq->ext.xrc.srq_num, sizeof(srq->ext.xrc.srq_num)); if (ret) return ret; } return 0; err: if (obj->uevent.event_file) uverbs_uobject_put(&obj->uevent.event_file->uobj); if (attr.srq_type == IB_SRQT_XRC) atomic_dec(&obj->uxrcd->refcnt); return ret; }; DECLARE_UVERBS_NAMED_METHOD( 
UVERBS_METHOD_SRQ_CREATE, UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_HANDLE, UVERBS_OBJECT_SRQ, UVERBS_ACCESS_NEW, UA_MANDATORY), UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_PD_HANDLE, UVERBS_OBJECT_PD, UVERBS_ACCESS_READ, UA_MANDATORY), UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_SRQ_TYPE, enum ib_uverbs_srq_type, UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_USER_HANDLE, UVERBS_ATTR_TYPE(u64), UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_WR, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_SGE, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_LIMIT, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_XRCD_HANDLE, UVERBS_OBJECT_XRCD, UVERBS_ACCESS_READ, UA_OPTIONAL), UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_CQ_HANDLE, UVERBS_OBJECT_CQ, UVERBS_ACCESS_READ, UA_OPTIONAL), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_NUM_TAGS, UVERBS_ATTR_TYPE(u32), UA_OPTIONAL), UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_SRQ_EVENT_FD, UVERBS_OBJECT_ASYNC_EVENT, UVERBS_ACCESS_READ, UA_OPTIONAL), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_MAX_WR, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_MAX_SGE, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_SRQ_NUM, UVERBS_ATTR_TYPE(u32), UA_OPTIONAL), UVERBS_ATTR_UHW()); static int UVERBS_HANDLER(UVERBS_METHOD_SRQ_DESTROY)( struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_SRQ_HANDLE); struct ib_usrq_object *obj = container_of(uobj, struct ib_usrq_object, uevent.uobject); struct ib_uverbs_destroy_srq_resp resp = { .events_reported = obj->uevent.events_reported }; return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_SRQ_RESP, &resp, sizeof(resp)); } DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_SRQ_DESTROY, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_SRQ_HANDLE, UVERBS_OBJECT_SRQ, UVERBS_ACCESS_DESTROY, UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_SRQ_RESP, UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_srq_resp), UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_SRQ, UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object), uverbs_free_srq), &UVERBS_METHOD(UVERBS_METHOD_SRQ_CREATE), &UVERBS_METHOD(UVERBS_METHOD_SRQ_DESTROY) ); const struct uapi_definition uverbs_def_obj_srq[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_SRQ, UAPI_DEF_OBJ_NEEDS_FN(destroy_srq)), {} };
linux-master
drivers/infiniband/core/uverbs_std_types_srq.c
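The uverbs_std_types_srq.c entry above wires up the ioctl-path create/destroy methods for shared receive queues. As a rough userspace counterpart, here is a hedged libibverbs sketch (not from the kernel tree) that allocates and tears down an SRQ on the first available device; whether ibv_create_srq() reaches this ioctl method or the legacy write() ABI depends on the provider library, and the queue sizes are arbitrary placeholders.

/*
 * Illustrative SRQ create/destroy using libibverbs (link with -libverbs).
 * Assumes at least one RDMA device with SRQ support is present.
 */
#include <stdio.h>
#include <string.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **dev_list = ibv_get_device_list(NULL);
	struct ibv_context *ctx;
	struct ibv_pd *pd;
	struct ibv_srq *srq;
	struct ibv_srq_init_attr init_attr;

	if (!dev_list || !dev_list[0])
		return 1;
	ctx = ibv_open_device(dev_list[0]);
	if (!ctx)
		return 1;
	pd = ibv_alloc_pd(ctx);
	if (!pd)
		return 1;

	memset(&init_attr, 0, sizeof(init_attr));
	init_attr.attr.max_wr = 128;	/* placeholder queue depth */
	init_attr.attr.max_sge = 1;
	init_attr.attr.srq_limit = 0;

	srq = ibv_create_srq(pd, &init_attr);
	if (!srq) {
		fprintf(stderr, "ibv_create_srq failed\n");
		return 1;
	}
	/* The provider may have adjusted max_wr/max_sge to hardware limits. */
	printf("SRQ created: max_wr=%u max_sge=%u\n",
	       init_attr.attr.max_wr, init_attr.attr.max_sge);

	ibv_destroy_srq(srq);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(dev_list);
	return 0;
}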
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * Copyright (c) 2006 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/init.h> #include <linux/err.h> #include <linux/random.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/kref.h> #include <linux/xarray.h> #include <linux/workqueue.h> #include <uapi/linux/if_ether.h> #include <rdma/ib_pack.h> #include <rdma/ib_cache.h> #include <rdma/rdma_netlink.h> #include <net/netlink.h> #include <uapi/rdma/ib_user_sa.h> #include <rdma/ib_marshall.h> #include <rdma/ib_addr.h> #include <rdma/opa_addr.h> #include <rdma/rdma_cm.h> #include "sa.h" #include "core_priv.h" #define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100 #define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000 #define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000 #define IB_SA_CPI_MAX_RETRY_CNT 3 #define IB_SA_CPI_RETRY_WAIT 1000 /*msecs */ static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT; struct ib_sa_sm_ah { struct ib_ah *ah; struct kref ref; u16 pkey_index; u8 src_path_mask; }; enum rdma_class_port_info_type { RDMA_CLASS_PORT_INFO_IB, RDMA_CLASS_PORT_INFO_OPA }; struct rdma_class_port_info { enum rdma_class_port_info_type type; union { struct ib_class_port_info ib; struct opa_class_port_info opa; }; }; struct ib_sa_classport_cache { bool valid; int retry_cnt; struct rdma_class_port_info data; }; struct ib_sa_port { struct ib_mad_agent *agent; struct ib_sa_sm_ah *sm_ah; struct work_struct update_task; struct ib_sa_classport_cache classport_info; struct delayed_work ib_cpi_work; spinlock_t classport_lock; /* protects class port info set */ spinlock_t ah_lock; u32 port_num; }; struct ib_sa_device { int start_port, end_port; struct ib_event_handler event_handler; struct ib_sa_port port[]; }; struct ib_sa_query { void (*callback)(struct ib_sa_query *sa_query, int status, struct ib_sa_mad *mad); void (*release)(struct ib_sa_query *); struct ib_sa_client *client; struct ib_sa_port *port; struct ib_mad_send_buf *mad_buf; struct ib_sa_sm_ah *sm_ah; int id; u32 flags; struct list_head list; /* Local svc request list */ u32 seq; /* Local svc request sequence number */ unsigned long timeout; /* Local svc timeout */ u8 path_use; 
/* How will the pathrecord be used */ }; #define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001 #define IB_SA_CANCEL 0x00000002 #define IB_SA_QUERY_OPA 0x00000004 struct ib_sa_path_query { void (*callback)(int status, struct sa_path_rec *rec, unsigned int num_paths, void *context); void *context; struct ib_sa_query sa_query; struct sa_path_rec *conv_pr; }; struct ib_sa_guidinfo_query { void (*callback)(int, struct ib_sa_guidinfo_rec *, void *); void *context; struct ib_sa_query sa_query; }; struct ib_sa_classport_info_query { void (*callback)(void *); void *context; struct ib_sa_query sa_query; }; struct ib_sa_mcmember_query { void (*callback)(int, struct ib_sa_mcmember_rec *, void *); void *context; struct ib_sa_query sa_query; }; static LIST_HEAD(ib_nl_request_list); static DEFINE_SPINLOCK(ib_nl_request_lock); static atomic_t ib_nl_sa_request_seq; static struct workqueue_struct *ib_nl_wq; static struct delayed_work ib_nl_timed_work; static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = { [LS_NLA_TYPE_PATH_RECORD] = {.type = NLA_BINARY, .len = sizeof(struct ib_path_rec_data)}, [LS_NLA_TYPE_TIMEOUT] = {.type = NLA_U32}, [LS_NLA_TYPE_SERVICE_ID] = {.type = NLA_U64}, [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY, .len = sizeof(struct rdma_nla_ls_gid)}, [LS_NLA_TYPE_SGID] = {.type = NLA_BINARY, .len = sizeof(struct rdma_nla_ls_gid)}, [LS_NLA_TYPE_TCLASS] = {.type = NLA_U8}, [LS_NLA_TYPE_PKEY] = {.type = NLA_U16}, [LS_NLA_TYPE_QOS_CLASS] = {.type = NLA_U16}, }; static int ib_sa_add_one(struct ib_device *device); static void ib_sa_remove_one(struct ib_device *device, void *client_data); static struct ib_client sa_client = { .name = "sa", .add = ib_sa_add_one, .remove = ib_sa_remove_one }; static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ); static DEFINE_SPINLOCK(tid_lock); static u32 tid; #define PATH_REC_FIELD(field) \ .struct_offset_bytes = offsetof(struct sa_path_rec, field), \ .struct_size_bytes = sizeof_field(struct sa_path_rec, field), \ .field_name = "sa_path_rec:" #field static const struct ib_field path_rec_table[] = { { PATH_REC_FIELD(service_id), .offset_words = 0, .offset_bits = 0, .size_bits = 64 }, { PATH_REC_FIELD(dgid), .offset_words = 2, .offset_bits = 0, .size_bits = 128 }, { PATH_REC_FIELD(sgid), .offset_words = 6, .offset_bits = 0, .size_bits = 128 }, { PATH_REC_FIELD(ib.dlid), .offset_words = 10, .offset_bits = 0, .size_bits = 16 }, { PATH_REC_FIELD(ib.slid), .offset_words = 10, .offset_bits = 16, .size_bits = 16 }, { PATH_REC_FIELD(ib.raw_traffic), .offset_words = 11, .offset_bits = 0, .size_bits = 1 }, { RESERVED, .offset_words = 11, .offset_bits = 1, .size_bits = 3 }, { PATH_REC_FIELD(flow_label), .offset_words = 11, .offset_bits = 4, .size_bits = 20 }, { PATH_REC_FIELD(hop_limit), .offset_words = 11, .offset_bits = 24, .size_bits = 8 }, { PATH_REC_FIELD(traffic_class), .offset_words = 12, .offset_bits = 0, .size_bits = 8 }, { PATH_REC_FIELD(reversible), .offset_words = 12, .offset_bits = 8, .size_bits = 1 }, { PATH_REC_FIELD(numb_path), .offset_words = 12, .offset_bits = 9, .size_bits = 7 }, { PATH_REC_FIELD(pkey), .offset_words = 12, .offset_bits = 16, .size_bits = 16 }, { PATH_REC_FIELD(qos_class), .offset_words = 13, .offset_bits = 0, .size_bits = 12 }, { PATH_REC_FIELD(sl), .offset_words = 13, .offset_bits = 12, .size_bits = 4 }, { PATH_REC_FIELD(mtu_selector), .offset_words = 13, .offset_bits = 16, .size_bits = 2 }, { PATH_REC_FIELD(mtu), .offset_words = 13, .offset_bits = 18, .size_bits = 6 }, { PATH_REC_FIELD(rate_selector), .offset_words = 13, 
.offset_bits = 24, .size_bits = 2 }, { PATH_REC_FIELD(rate), .offset_words = 13, .offset_bits = 26, .size_bits = 6 }, { PATH_REC_FIELD(packet_life_time_selector), .offset_words = 14, .offset_bits = 0, .size_bits = 2 }, { PATH_REC_FIELD(packet_life_time), .offset_words = 14, .offset_bits = 2, .size_bits = 6 }, { PATH_REC_FIELD(preference), .offset_words = 14, .offset_bits = 8, .size_bits = 8 }, { RESERVED, .offset_words = 14, .offset_bits = 16, .size_bits = 48 }, }; #define OPA_PATH_REC_FIELD(field) \ .struct_offset_bytes = \ offsetof(struct sa_path_rec, field), \ .struct_size_bytes = \ sizeof_field(struct sa_path_rec, field), \ .field_name = "sa_path_rec:" #field static const struct ib_field opa_path_rec_table[] = { { OPA_PATH_REC_FIELD(service_id), .offset_words = 0, .offset_bits = 0, .size_bits = 64 }, { OPA_PATH_REC_FIELD(dgid), .offset_words = 2, .offset_bits = 0, .size_bits = 128 }, { OPA_PATH_REC_FIELD(sgid), .offset_words = 6, .offset_bits = 0, .size_bits = 128 }, { OPA_PATH_REC_FIELD(opa.dlid), .offset_words = 10, .offset_bits = 0, .size_bits = 32 }, { OPA_PATH_REC_FIELD(opa.slid), .offset_words = 11, .offset_bits = 0, .size_bits = 32 }, { OPA_PATH_REC_FIELD(opa.raw_traffic), .offset_words = 12, .offset_bits = 0, .size_bits = 1 }, { RESERVED, .offset_words = 12, .offset_bits = 1, .size_bits = 3 }, { OPA_PATH_REC_FIELD(flow_label), .offset_words = 12, .offset_bits = 4, .size_bits = 20 }, { OPA_PATH_REC_FIELD(hop_limit), .offset_words = 12, .offset_bits = 24, .size_bits = 8 }, { OPA_PATH_REC_FIELD(traffic_class), .offset_words = 13, .offset_bits = 0, .size_bits = 8 }, { OPA_PATH_REC_FIELD(reversible), .offset_words = 13, .offset_bits = 8, .size_bits = 1 }, { OPA_PATH_REC_FIELD(numb_path), .offset_words = 13, .offset_bits = 9, .size_bits = 7 }, { OPA_PATH_REC_FIELD(pkey), .offset_words = 13, .offset_bits = 16, .size_bits = 16 }, { OPA_PATH_REC_FIELD(opa.l2_8B), .offset_words = 14, .offset_bits = 0, .size_bits = 1 }, { OPA_PATH_REC_FIELD(opa.l2_10B), .offset_words = 14, .offset_bits = 1, .size_bits = 1 }, { OPA_PATH_REC_FIELD(opa.l2_9B), .offset_words = 14, .offset_bits = 2, .size_bits = 1 }, { OPA_PATH_REC_FIELD(opa.l2_16B), .offset_words = 14, .offset_bits = 3, .size_bits = 1 }, { RESERVED, .offset_words = 14, .offset_bits = 4, .size_bits = 2 }, { OPA_PATH_REC_FIELD(opa.qos_type), .offset_words = 14, .offset_bits = 6, .size_bits = 2 }, { OPA_PATH_REC_FIELD(opa.qos_priority), .offset_words = 14, .offset_bits = 8, .size_bits = 8 }, { RESERVED, .offset_words = 14, .offset_bits = 16, .size_bits = 3 }, { OPA_PATH_REC_FIELD(sl), .offset_words = 14, .offset_bits = 19, .size_bits = 5 }, { RESERVED, .offset_words = 14, .offset_bits = 24, .size_bits = 8 }, { OPA_PATH_REC_FIELD(mtu_selector), .offset_words = 15, .offset_bits = 0, .size_bits = 2 }, { OPA_PATH_REC_FIELD(mtu), .offset_words = 15, .offset_bits = 2, .size_bits = 6 }, { OPA_PATH_REC_FIELD(rate_selector), .offset_words = 15, .offset_bits = 8, .size_bits = 2 }, { OPA_PATH_REC_FIELD(rate), .offset_words = 15, .offset_bits = 10, .size_bits = 6 }, { OPA_PATH_REC_FIELD(packet_life_time_selector), .offset_words = 15, .offset_bits = 16, .size_bits = 2 }, { OPA_PATH_REC_FIELD(packet_life_time), .offset_words = 15, .offset_bits = 18, .size_bits = 6 }, { OPA_PATH_REC_FIELD(preference), .offset_words = 15, .offset_bits = 24, .size_bits = 8 }, }; #define MCMEMBER_REC_FIELD(field) \ .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \ .struct_size_bytes = sizeof_field(struct ib_sa_mcmember_rec, field), \ .field_name = 
"sa_mcmember_rec:" #field static const struct ib_field mcmember_rec_table[] = { { MCMEMBER_REC_FIELD(mgid), .offset_words = 0, .offset_bits = 0, .size_bits = 128 }, { MCMEMBER_REC_FIELD(port_gid), .offset_words = 4, .offset_bits = 0, .size_bits = 128 }, { MCMEMBER_REC_FIELD(qkey), .offset_words = 8, .offset_bits = 0, .size_bits = 32 }, { MCMEMBER_REC_FIELD(mlid), .offset_words = 9, .offset_bits = 0, .size_bits = 16 }, { MCMEMBER_REC_FIELD(mtu_selector), .offset_words = 9, .offset_bits = 16, .size_bits = 2 }, { MCMEMBER_REC_FIELD(mtu), .offset_words = 9, .offset_bits = 18, .size_bits = 6 }, { MCMEMBER_REC_FIELD(traffic_class), .offset_words = 9, .offset_bits = 24, .size_bits = 8 }, { MCMEMBER_REC_FIELD(pkey), .offset_words = 10, .offset_bits = 0, .size_bits = 16 }, { MCMEMBER_REC_FIELD(rate_selector), .offset_words = 10, .offset_bits = 16, .size_bits = 2 }, { MCMEMBER_REC_FIELD(rate), .offset_words = 10, .offset_bits = 18, .size_bits = 6 }, { MCMEMBER_REC_FIELD(packet_life_time_selector), .offset_words = 10, .offset_bits = 24, .size_bits = 2 }, { MCMEMBER_REC_FIELD(packet_life_time), .offset_words = 10, .offset_bits = 26, .size_bits = 6 }, { MCMEMBER_REC_FIELD(sl), .offset_words = 11, .offset_bits = 0, .size_bits = 4 }, { MCMEMBER_REC_FIELD(flow_label), .offset_words = 11, .offset_bits = 4, .size_bits = 20 }, { MCMEMBER_REC_FIELD(hop_limit), .offset_words = 11, .offset_bits = 24, .size_bits = 8 }, { MCMEMBER_REC_FIELD(scope), .offset_words = 12, .offset_bits = 0, .size_bits = 4 }, { MCMEMBER_REC_FIELD(join_state), .offset_words = 12, .offset_bits = 4, .size_bits = 4 }, { MCMEMBER_REC_FIELD(proxy_join), .offset_words = 12, .offset_bits = 8, .size_bits = 1 }, { RESERVED, .offset_words = 12, .offset_bits = 9, .size_bits = 23 }, }; #define CLASSPORTINFO_REC_FIELD(field) \ .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \ .struct_size_bytes = sizeof_field(struct ib_class_port_info, field), \ .field_name = "ib_class_port_info:" #field static const struct ib_field ib_classport_info_rec_table[] = { { CLASSPORTINFO_REC_FIELD(base_version), .offset_words = 0, .offset_bits = 0, .size_bits = 8 }, { CLASSPORTINFO_REC_FIELD(class_version), .offset_words = 0, .offset_bits = 8, .size_bits = 8 }, { CLASSPORTINFO_REC_FIELD(capability_mask), .offset_words = 0, .offset_bits = 16, .size_bits = 16 }, { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time), .offset_words = 1, .offset_bits = 0, .size_bits = 32 }, { CLASSPORTINFO_REC_FIELD(redirect_gid), .offset_words = 2, .offset_bits = 0, .size_bits = 128 }, { CLASSPORTINFO_REC_FIELD(redirect_tcslfl), .offset_words = 6, .offset_bits = 0, .size_bits = 32 }, { CLASSPORTINFO_REC_FIELD(redirect_lid), .offset_words = 7, .offset_bits = 0, .size_bits = 16 }, { CLASSPORTINFO_REC_FIELD(redirect_pkey), .offset_words = 7, .offset_bits = 16, .size_bits = 16 }, { CLASSPORTINFO_REC_FIELD(redirect_qp), .offset_words = 8, .offset_bits = 0, .size_bits = 32 }, { CLASSPORTINFO_REC_FIELD(redirect_qkey), .offset_words = 9, .offset_bits = 0, .size_bits = 32 }, { CLASSPORTINFO_REC_FIELD(trap_gid), .offset_words = 10, .offset_bits = 0, .size_bits = 128 }, { CLASSPORTINFO_REC_FIELD(trap_tcslfl), .offset_words = 14, .offset_bits = 0, .size_bits = 32 }, { CLASSPORTINFO_REC_FIELD(trap_lid), .offset_words = 15, .offset_bits = 0, .size_bits = 16 }, { CLASSPORTINFO_REC_FIELD(trap_pkey), .offset_words = 15, .offset_bits = 16, .size_bits = 16 }, { CLASSPORTINFO_REC_FIELD(trap_hlqp), .offset_words = 16, .offset_bits = 0, .size_bits = 32 }, { CLASSPORTINFO_REC_FIELD(trap_qkey), 
.offset_words = 17, .offset_bits = 0, .size_bits = 32 }, }; #define OPA_CLASSPORTINFO_REC_FIELD(field) \ .struct_offset_bytes =\ offsetof(struct opa_class_port_info, field), \ .struct_size_bytes = \ sizeof_field(struct opa_class_port_info, field), \ .field_name = "opa_class_port_info:" #field static const struct ib_field opa_classport_info_rec_table[] = { { OPA_CLASSPORTINFO_REC_FIELD(base_version), .offset_words = 0, .offset_bits = 0, .size_bits = 8 }, { OPA_CLASSPORTINFO_REC_FIELD(class_version), .offset_words = 0, .offset_bits = 8, .size_bits = 8 }, { OPA_CLASSPORTINFO_REC_FIELD(cap_mask), .offset_words = 0, .offset_bits = 16, .size_bits = 16 }, { OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time), .offset_words = 1, .offset_bits = 0, .size_bits = 32 }, { OPA_CLASSPORTINFO_REC_FIELD(redirect_gid), .offset_words = 2, .offset_bits = 0, .size_bits = 128 }, { OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl), .offset_words = 6, .offset_bits = 0, .size_bits = 32 }, { OPA_CLASSPORTINFO_REC_FIELD(redirect_lid), .offset_words = 7, .offset_bits = 0, .size_bits = 32 }, { OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp), .offset_words = 8, .offset_bits = 0, .size_bits = 32 }, { OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey), .offset_words = 9, .offset_bits = 0, .size_bits = 32 }, { OPA_CLASSPORTINFO_REC_FIELD(trap_gid), .offset_words = 10, .offset_bits = 0, .size_bits = 128 }, { OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl), .offset_words = 14, .offset_bits = 0, .size_bits = 32 }, { OPA_CLASSPORTINFO_REC_FIELD(trap_lid), .offset_words = 15, .offset_bits = 0, .size_bits = 32 }, { OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp), .offset_words = 16, .offset_bits = 0, .size_bits = 32 }, { OPA_CLASSPORTINFO_REC_FIELD(trap_qkey), .offset_words = 17, .offset_bits = 0, .size_bits = 32 }, { OPA_CLASSPORTINFO_REC_FIELD(trap_pkey), .offset_words = 18, .offset_bits = 0, .size_bits = 16 }, { OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey), .offset_words = 18, .offset_bits = 16, .size_bits = 16 }, { OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd), .offset_words = 19, .offset_bits = 0, .size_bits = 8 }, { RESERVED, .offset_words = 19, .offset_bits = 8, .size_bits = 24 }, }; #define GUIDINFO_REC_FIELD(field) \ .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \ .struct_size_bytes = sizeof_field(struct ib_sa_guidinfo_rec, field), \ .field_name = "sa_guidinfo_rec:" #field static const struct ib_field guidinfo_rec_table[] = { { GUIDINFO_REC_FIELD(lid), .offset_words = 0, .offset_bits = 0, .size_bits = 16 }, { GUIDINFO_REC_FIELD(block_num), .offset_words = 0, .offset_bits = 16, .size_bits = 8 }, { GUIDINFO_REC_FIELD(res1), .offset_words = 0, .offset_bits = 24, .size_bits = 8 }, { GUIDINFO_REC_FIELD(res2), .offset_words = 1, .offset_bits = 0, .size_bits = 32 }, { GUIDINFO_REC_FIELD(guid_info_list), .offset_words = 2, .offset_bits = 0, .size_bits = 512 }, }; #define RDMA_PRIMARY_PATH_MAX_REC_NUM 3 static inline void ib_sa_disable_local_svc(struct ib_sa_query *query) { query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE; } static inline int ib_sa_query_cancelled(struct ib_sa_query *query) { return (query->flags & IB_SA_CANCEL); } static void ib_nl_set_path_rec_attrs(struct sk_buff *skb, struct ib_sa_query *query) { struct sa_path_rec *sa_rec = query->mad_buf->context[1]; struct ib_sa_mad *mad = query->mad_buf->mad; ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask; u16 val16; u64 val64; struct rdma_ls_resolve_header *header; query->mad_buf->context[1] = NULL; /* Construct the family header first */ header = skb_put(skb, NLMSG_ALIGN(sizeof(*header))); 
strscpy_pad(header->device_name, dev_name(&query->port->agent->device->dev), LS_DEVICE_NAME_MAX); header->port_num = query->port->port_num; if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) && sa_rec->reversible != 0) query->path_use = LS_RESOLVE_PATH_USE_ALL; else query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL; header->path_use = query->path_use; /* Now build the attributes */ if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) { val64 = be64_to_cpu(sa_rec->service_id); nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID, sizeof(val64), &val64); } if (comp_mask & IB_SA_PATH_REC_DGID) nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID, sizeof(sa_rec->dgid), &sa_rec->dgid); if (comp_mask & IB_SA_PATH_REC_SGID) nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID, sizeof(sa_rec->sgid), &sa_rec->sgid); if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS) nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS, sizeof(sa_rec->traffic_class), &sa_rec->traffic_class); if (comp_mask & IB_SA_PATH_REC_PKEY) { val16 = be16_to_cpu(sa_rec->pkey); nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY, sizeof(val16), &val16); } if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) { val16 = be16_to_cpu(sa_rec->qos_class); nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS, sizeof(val16), &val16); } } static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask) { int len = 0; if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) len += nla_total_size(sizeof(u64)); if (comp_mask & IB_SA_PATH_REC_DGID) len += nla_total_size(sizeof(struct rdma_nla_ls_gid)); if (comp_mask & IB_SA_PATH_REC_SGID) len += nla_total_size(sizeof(struct rdma_nla_ls_gid)); if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS) len += nla_total_size(sizeof(u8)); if (comp_mask & IB_SA_PATH_REC_PKEY) len += nla_total_size(sizeof(u16)); if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) len += nla_total_size(sizeof(u16)); /* * Make sure that at least some of the required comp_mask bits are * set. */ if (WARN_ON(len == 0)) return len; /* Add the family header */ len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header)); return len; } static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) { struct sk_buff *skb = NULL; struct nlmsghdr *nlh; void *data; struct ib_sa_mad *mad; int len; unsigned long flags; unsigned long delay; gfp_t gfp_flag; int ret; INIT_LIST_HEAD(&query->list); query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); mad = query->mad_buf->mad; len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask); if (len <= 0) return -EMSGSIZE; skb = nlmsg_new(len, gfp_mask); if (!skb) return -ENOMEM; /* Put nlmsg header only for now */ data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS, RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST); if (!data) { nlmsg_free(skb); return -EMSGSIZE; } /* Add attributes */ ib_nl_set_path_rec_attrs(skb, query); /* Repair the nlmsg header length */ nlmsg_end(skb, nlh); gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? 
GFP_ATOMIC : GFP_NOWAIT; spin_lock_irqsave(&ib_nl_request_lock, flags); ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag); if (ret) goto out; /* Put the request on the list.*/ delay = msecs_to_jiffies(sa_local_svc_timeout_ms); query->timeout = delay + jiffies; list_add_tail(&query->list, &ib_nl_request_list); /* Start the timeout if this is the only request */ if (ib_nl_request_list.next == &query->list) queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); out: spin_unlock_irqrestore(&ib_nl_request_lock, flags); return ret; } static int ib_nl_cancel_request(struct ib_sa_query *query) { unsigned long flags; struct ib_sa_query *wait_query; int found = 0; spin_lock_irqsave(&ib_nl_request_lock, flags); list_for_each_entry(wait_query, &ib_nl_request_list, list) { /* Let the timeout to take care of the callback */ if (query == wait_query) { query->flags |= IB_SA_CANCEL; query->timeout = jiffies; list_move(&query->list, &ib_nl_request_list); found = 1; mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1); break; } } spin_unlock_irqrestore(&ib_nl_request_lock, flags); return found; } static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *mad_send_wc); static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query, const struct nlmsghdr *nlh) { struct sa_path_rec recs[RDMA_PRIMARY_PATH_MAX_REC_NUM]; struct ib_sa_path_query *path_query; struct ib_path_rec_data *rec_data; struct ib_mad_send_wc mad_send_wc; const struct nlattr *head, *curr; struct ib_sa_mad *mad = NULL; int len, rem, status = -EIO; unsigned int num_prs = 0; u32 mask = 0; if (!query->callback) goto out; path_query = container_of(query, struct ib_sa_path_query, sa_query); mad = query->mad_buf->mad; head = (const struct nlattr *) nlmsg_data(nlh); len = nlmsg_len(nlh); switch (query->path_use) { case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL: mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND; break; case LS_RESOLVE_PATH_USE_ALL: mask = IB_PATH_PRIMARY; break; case LS_RESOLVE_PATH_USE_GMP: default: mask = IB_PATH_PRIMARY | IB_PATH_GMP | IB_PATH_BIDIRECTIONAL; break; } nla_for_each_attr(curr, head, len, rem) { if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD) continue; rec_data = nla_data(curr); if ((rec_data->flags & mask) != mask) continue; if ((query->flags & IB_SA_QUERY_OPA) || path_query->conv_pr) { mad->mad_hdr.method |= IB_MGMT_METHOD_RESP; memcpy(mad->data, rec_data->path_rec, sizeof(rec_data->path_rec)); query->callback(query, 0, mad); goto out; } status = 0; ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), rec_data->path_rec, &recs[num_prs]); recs[num_prs].flags = rec_data->flags; recs[num_prs].rec_type = SA_PATH_REC_TYPE_IB; sa_path_set_dmac_zero(&recs[num_prs]); num_prs++; if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM) break; } if (!status) { mad->mad_hdr.method |= IB_MGMT_METHOD_RESP; path_query->callback(status, recs, num_prs, path_query->context); } else query->callback(query, status, mad); out: mad_send_wc.send_buf = query->mad_buf; mad_send_wc.status = IB_WC_SUCCESS; send_handler(query->mad_buf->mad_agent, &mad_send_wc); } static void ib_nl_request_timeout(struct work_struct *work) { unsigned long flags; struct ib_sa_query *query; unsigned long delay; struct ib_mad_send_wc mad_send_wc; int ret; spin_lock_irqsave(&ib_nl_request_lock, flags); while (!list_empty(&ib_nl_request_list)) { query = list_entry(ib_nl_request_list.next, struct ib_sa_query, list); if (time_after(query->timeout, jiffies)) { delay = query->timeout - jiffies; if ((long)delay <= 0) delay = 1; queue_delayed_work(ib_nl_wq, 
&ib_nl_timed_work, delay); break; } list_del(&query->list); ib_sa_disable_local_svc(query); /* Hold the lock to protect against query cancellation */ if (ib_sa_query_cancelled(query)) ret = -1; else ret = ib_post_send_mad(query->mad_buf, NULL); if (ret) { mad_send_wc.send_buf = query->mad_buf; mad_send_wc.status = IB_WC_WR_FLUSH_ERR; spin_unlock_irqrestore(&ib_nl_request_lock, flags); send_handler(query->port->agent, &mad_send_wc); spin_lock_irqsave(&ib_nl_request_lock, flags); } } spin_unlock_irqrestore(&ib_nl_request_lock, flags); } int ib_nl_handle_set_timeout(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { int timeout, delta, abs_delta; const struct nlattr *attr; unsigned long flags; struct ib_sa_query *query; long delay = 0; struct nlattr *tb[LS_NLA_TYPE_MAX]; int ret; if (!(nlh->nlmsg_flags & NLM_F_REQUEST) || !(NETLINK_CB(skb).sk)) return -EPERM; ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), nlmsg_len(nlh), ib_nl_policy, NULL); attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT]; if (ret || !attr) goto settimeout_out; timeout = *(int *) nla_data(attr); if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN) timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN; if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX) timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX; delta = timeout - sa_local_svc_timeout_ms; if (delta < 0) abs_delta = -delta; else abs_delta = delta; if (delta != 0) { spin_lock_irqsave(&ib_nl_request_lock, flags); sa_local_svc_timeout_ms = timeout; list_for_each_entry(query, &ib_nl_request_list, list) { if (delta < 0 && abs_delta > query->timeout) query->timeout = 0; else query->timeout += delta; /* Get the new delay from the first entry */ if (!delay) { delay = query->timeout - jiffies; if (delay <= 0) delay = 1; } } if (delay) mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, (unsigned long)delay); spin_unlock_irqrestore(&ib_nl_request_lock, flags); } settimeout_out: return 0; } static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh) { struct nlattr *tb[LS_NLA_TYPE_MAX]; int ret; if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR) return 0; ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), nlmsg_len(nlh), ib_nl_policy, NULL); if (ret) return 0; return 1; } int ib_nl_handle_resolve_resp(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { unsigned long flags; struct ib_sa_query *query = NULL, *iter; struct ib_mad_send_buf *send_buf; struct ib_mad_send_wc mad_send_wc; int ret; if ((nlh->nlmsg_flags & NLM_F_REQUEST) || !(NETLINK_CB(skb).sk)) return -EPERM; spin_lock_irqsave(&ib_nl_request_lock, flags); list_for_each_entry(iter, &ib_nl_request_list, list) { /* * If the query is cancelled, let the timeout routine * take care of it. 
*/ if (nlh->nlmsg_seq == iter->seq) { if (!ib_sa_query_cancelled(iter)) { list_del(&iter->list); query = iter; } break; } } if (!query) { spin_unlock_irqrestore(&ib_nl_request_lock, flags); goto resp_out; } send_buf = query->mad_buf; if (!ib_nl_is_good_resolve_resp(nlh)) { /* if the result is a failure, send out the packet via IB */ ib_sa_disable_local_svc(query); ret = ib_post_send_mad(query->mad_buf, NULL); spin_unlock_irqrestore(&ib_nl_request_lock, flags); if (ret) { mad_send_wc.send_buf = send_buf; mad_send_wc.status = IB_WC_GENERAL_ERR; send_handler(query->port->agent, &mad_send_wc); } } else { spin_unlock_irqrestore(&ib_nl_request_lock, flags); ib_nl_process_good_resolve_rsp(query, nlh); } resp_out: return 0; } static void free_sm_ah(struct kref *kref) { struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); rdma_destroy_ah(sm_ah->ah, 0); kfree(sm_ah); } void ib_sa_register_client(struct ib_sa_client *client) { atomic_set(&client->users, 1); init_completion(&client->comp); } EXPORT_SYMBOL(ib_sa_register_client); void ib_sa_unregister_client(struct ib_sa_client *client) { ib_sa_client_put(client); wait_for_completion(&client->comp); } EXPORT_SYMBOL(ib_sa_unregister_client); /** * ib_sa_cancel_query - try to cancel an SA query * @id:ID of query to cancel * @query:query pointer to cancel * * Try to cancel an SA query. If the id and query don't match up or * the query has already completed, nothing is done. Otherwise the * query is canceled and will complete with a status of -EINTR. */ void ib_sa_cancel_query(int id, struct ib_sa_query *query) { unsigned long flags; struct ib_mad_send_buf *mad_buf; xa_lock_irqsave(&queries, flags); if (xa_load(&queries, id) != query) { xa_unlock_irqrestore(&queries, flags); return; } mad_buf = query->mad_buf; xa_unlock_irqrestore(&queries, flags); /* * If the query is still on the netlink request list, schedule * it to be cancelled by the timeout routine. Otherwise, it has been * sent to the MAD layer and has to be cancelled from there. */ if (!ib_nl_cancel_request(query)) ib_cancel_mad(mad_buf); } EXPORT_SYMBOL(ib_sa_cancel_query); static u8 get_src_path_mask(struct ib_device *device, u32 port_num) { struct ib_sa_device *sa_dev; struct ib_sa_port *port; unsigned long flags; u8 src_path_mask; sa_dev = ib_get_client_data(device, &sa_client); if (!sa_dev) return 0x7f; port = &sa_dev->port[port_num - sa_dev->start_port]; spin_lock_irqsave(&port->ah_lock, flags); src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f; spin_unlock_irqrestore(&port->ah_lock, flags); return src_path_mask; } static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num, struct sa_path_rec *rec, struct rdma_ah_attr *ah_attr, const struct ib_gid_attr *gid_attr) { enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec); if (!gid_attr) { gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type, port_num, NULL); if (IS_ERR(gid_attr)) return PTR_ERR(gid_attr); } else rdma_hold_gid_attr(gid_attr); rdma_move_grh_sgid_attr(ah_attr, &rec->dgid, be32_to_cpu(rec->flow_label), rec->hop_limit, rec->traffic_class, gid_attr); return 0; } /** * ib_init_ah_attr_from_path - Initialize address handle attributes based on * an SA path record. * @device: Device associated ah attributes initialization. * @port_num: Port on the specified device. * @rec: path record entry to use for ah attributes initialization. * @ah_attr: address handle attributes to initialization from path record. * @gid_attr: SGID attribute to consider during initialization. 
* * When ib_init_ah_attr_from_path() returns success, * (a) for IB link layer it optionally contains a reference to SGID attribute * when GRH is present for IB link layer. * (b) for RoCE link layer it contains a reference to SGID attribute. * User must invoke rdma_destroy_ah_attr() to release reference to SGID * attributes which are initialized using ib_init_ah_attr_from_path(). */ int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num, struct sa_path_rec *rec, struct rdma_ah_attr *ah_attr, const struct ib_gid_attr *gid_attr) { int ret = 0; memset(ah_attr, 0, sizeof(*ah_attr)); ah_attr->type = rdma_ah_find_type(device, port_num); rdma_ah_set_sl(ah_attr, rec->sl); rdma_ah_set_port_num(ah_attr, port_num); rdma_ah_set_static_rate(ah_attr, rec->rate); if (sa_path_is_roce(rec)) { ret = roce_resolve_route_from_path(rec, gid_attr); if (ret) return ret; memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN); } else { rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec))); if (sa_path_is_opa(rec) && rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE)) rdma_ah_set_make_grd(ah_attr, true); rdma_ah_set_path_bits(ah_attr, be32_to_cpu(sa_path_get_slid(rec)) & get_src_path_mask(device, port_num)); } if (rec->hop_limit > 0 || sa_path_is_roce(rec)) ret = init_ah_attr_grh_fields(device, port_num, rec, ah_attr, gid_attr); return ret; } EXPORT_SYMBOL(ib_init_ah_attr_from_path); static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask) { struct rdma_ah_attr ah_attr; unsigned long flags; spin_lock_irqsave(&query->port->ah_lock, flags); if (!query->port->sm_ah) { spin_unlock_irqrestore(&query->port->ah_lock, flags); return -EAGAIN; } kref_get(&query->port->sm_ah->ref); query->sm_ah = query->port->sm_ah; spin_unlock_irqrestore(&query->port->ah_lock, flags); /* * Always check if sm_ah has valid dlid assigned, * before querying for class port info */ if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) || !rdma_is_valid_unicast_lid(&ah_attr)) { kref_put(&query->sm_ah->ref, free_sm_ah); return -EAGAIN; } query->mad_buf = ib_create_send_mad(query->port->agent, 1, query->sm_ah->pkey_index, 0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA, gfp_mask, ((query->flags & IB_SA_QUERY_OPA) ? 
OPA_MGMT_BASE_VERSION : IB_MGMT_BASE_VERSION)); if (IS_ERR(query->mad_buf)) { kref_put(&query->sm_ah->ref, free_sm_ah); return -ENOMEM; } query->mad_buf->ah = query->sm_ah->ah; return 0; } static void free_mad(struct ib_sa_query *query) { ib_free_send_mad(query->mad_buf); kref_put(&query->sm_ah->ref, free_sm_ah); } static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent) { struct ib_sa_mad *mad = query->mad_buf->mad; unsigned long flags; memset(mad, 0, sizeof *mad); if (query->flags & IB_SA_QUERY_OPA) { mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION; mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION; } else { mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION; mad->mad_hdr.class_version = IB_SA_CLASS_VERSION; } mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM; spin_lock_irqsave(&tid_lock, flags); mad->mad_hdr.tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++); spin_unlock_irqrestore(&tid_lock, flags); } static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms, gfp_t gfp_mask) { unsigned long flags; int ret, id; const int nmbr_sa_query_retries = 10; xa_lock_irqsave(&queries, flags); ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask); xa_unlock_irqrestore(&queries, flags); if (ret < 0) return ret; query->mad_buf->timeout_ms = timeout_ms / nmbr_sa_query_retries; query->mad_buf->retries = nmbr_sa_query_retries; if (!query->mad_buf->timeout_ms) { /* Special case, very small timeout_ms */ query->mad_buf->timeout_ms = 1; query->mad_buf->retries = timeout_ms; } query->mad_buf->context[0] = query; query->id = id; if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) && (!(query->flags & IB_SA_QUERY_OPA))) { if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) { if (!ib_nl_make_request(query, gfp_mask)) return id; } ib_sa_disable_local_svc(query); } ret = ib_post_send_mad(query->mad_buf, NULL); if (ret) { xa_lock_irqsave(&queries, flags); __xa_erase(&queries, id); xa_unlock_irqrestore(&queries, flags); } /* * It's not safe to dereference query any more, because the * send may already have completed and freed the query in * another context. */ return ret ? ret : id; } void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec) { ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec); } EXPORT_SYMBOL(ib_sa_unpack_path); void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute) { ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute); } EXPORT_SYMBOL(ib_sa_pack_path); static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client, struct ib_sa_device *sa_dev, u32 port_num) { struct ib_sa_port *port; unsigned long flags; bool ret = false; port = &sa_dev->port[port_num - sa_dev->start_port]; spin_lock_irqsave(&port->classport_lock, flags); if (!port->classport_info.valid) goto ret; if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA) ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) & OPA_CLASS_PORT_INFO_PR_SUPPORT; ret: spin_unlock_irqrestore(&port->classport_lock, flags); return ret; } enum opa_pr_supported { PR_NOT_SUPPORTED, PR_OPA_SUPPORTED, PR_IB_SUPPORTED }; /* * opa_pr_query_possible - Check if current PR query can be an OPA query. * * Retuns PR_NOT_SUPPORTED if a path record query is not * possible, PR_OPA_SUPPORTED if an OPA path record query * is possible and PR_IB_SUPPORTED if an IB path record * query is possible. 
*/ static int opa_pr_query_possible(struct ib_sa_client *client, struct ib_sa_device *sa_dev, struct ib_device *device, u32 port_num) { struct ib_port_attr port_attr; if (ib_query_port(device, port_num, &port_attr)) return PR_NOT_SUPPORTED; if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num)) return PR_OPA_SUPPORTED; if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) return PR_NOT_SUPPORTED; else return PR_IB_SUPPORTED; } static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query, int status, struct ib_sa_mad *mad) { struct ib_sa_path_query *query = container_of(sa_query, struct ib_sa_path_query, sa_query); struct sa_path_rec rec = {}; if (!mad) { query->callback(status, NULL, 0, query->context); return; } if (sa_query->flags & IB_SA_QUERY_OPA) { ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table), mad->data, &rec); rec.rec_type = SA_PATH_REC_TYPE_OPA; query->callback(status, &rec, 1, query->context); return; } ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), mad->data, &rec); rec.rec_type = SA_PATH_REC_TYPE_IB; sa_path_set_dmac_zero(&rec); if (query->conv_pr) { struct sa_path_rec opa; memset(&opa, 0, sizeof(struct sa_path_rec)); sa_convert_path_ib_to_opa(&opa, &rec); query->callback(status, &opa, 1, query->context); } else { query->callback(status, &rec, 1, query->context); } } static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) { struct ib_sa_path_query *query = container_of(sa_query, struct ib_sa_path_query, sa_query); kfree(query->conv_pr); kfree(query); } /** * ib_sa_path_rec_get - Start a Path get query * @client:SA client * @device:device to send query on * @port_num: port number to send query on * @rec:Path Record to send in query * @comp_mask:component mask to send in query * @timeout_ms:time to wait for response * @gfp_mask:GFP mask to use for internal allocations * @callback:function called when query completes, times out or is * canceled * @context:opaque user context passed to callback * @sa_query:query context, used to cancel query * * Send a Path Record Get query to the SA to look up a path. The * callback function will be called when the query completes (or * fails); status is 0 for a successful response, -EINTR if the query * is canceled, -ETIMEDOUT is the query timed out, or -EIO if an error * occurred sending the query. The resp parameter of the callback is * only valid if status is 0. * * If the return value of ib_sa_path_rec_get() is negative, it is an * error code. Otherwise it is a query ID that can be used to cancel * the query. 
*/ int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device, u32 port_num, struct sa_path_rec *rec, ib_sa_comp_mask comp_mask, unsigned long timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct sa_path_rec *resp, unsigned int num_paths, void *context), void *context, struct ib_sa_query **sa_query) { struct ib_sa_path_query *query; struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); struct ib_sa_port *port; struct ib_mad_agent *agent; struct ib_sa_mad *mad; enum opa_pr_supported status; int ret; if (!sa_dev) return -ENODEV; if ((rec->rec_type != SA_PATH_REC_TYPE_IB) && (rec->rec_type != SA_PATH_REC_TYPE_OPA)) return -EINVAL; port = &sa_dev->port[port_num - sa_dev->start_port]; agent = port->agent; query = kzalloc(sizeof(*query), gfp_mask); if (!query) return -ENOMEM; query->sa_query.port = port; if (rec->rec_type == SA_PATH_REC_TYPE_OPA) { status = opa_pr_query_possible(client, sa_dev, device, port_num); if (status == PR_NOT_SUPPORTED) { ret = -EINVAL; goto err1; } else if (status == PR_OPA_SUPPORTED) { query->sa_query.flags |= IB_SA_QUERY_OPA; } else { query->conv_pr = kmalloc(sizeof(*query->conv_pr), gfp_mask); if (!query->conv_pr) { ret = -ENOMEM; goto err1; } } } ret = alloc_mad(&query->sa_query, gfp_mask); if (ret) goto err2; ib_sa_client_get(client); query->sa_query.client = client; query->callback = callback; query->context = context; mad = query->sa_query.mad_buf->mad; init_mad(&query->sa_query, agent); query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL; query->sa_query.release = ib_sa_path_rec_release; mad->mad_hdr.method = IB_MGMT_METHOD_GET; mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC); mad->sa_hdr.comp_mask = comp_mask; if (query->sa_query.flags & IB_SA_QUERY_OPA) { ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table), rec, mad->data); } else if (query->conv_pr) { sa_convert_path_opa_to_ib(query->conv_pr, rec); ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), query->conv_pr, mad->data); } else { ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data); } *sa_query = &query->sa_query; query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE; query->sa_query.mad_buf->context[1] = (query->conv_pr) ? 
query->conv_pr : rec; ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); if (ret < 0) goto err3; return ret; err3: *sa_query = NULL; ib_sa_client_put(query->sa_query.client); free_mad(&query->sa_query); err2: kfree(query->conv_pr); err1: kfree(query); return ret; } EXPORT_SYMBOL(ib_sa_path_rec_get); static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query, int status, struct ib_sa_mad *mad) { struct ib_sa_mcmember_query *query = container_of(sa_query, struct ib_sa_mcmember_query, sa_query); if (mad) { struct ib_sa_mcmember_rec rec; ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table), mad->data, &rec); query->callback(status, &rec, query->context); } else query->callback(status, NULL, query->context); } static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query) { kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query)); } int ib_sa_mcmember_rec_query(struct ib_sa_client *client, struct ib_device *device, u32 port_num, u8 method, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, unsigned long timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_mcmember_rec *resp, void *context), void *context, struct ib_sa_query **sa_query) { struct ib_sa_mcmember_query *query; struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); struct ib_sa_port *port; struct ib_mad_agent *agent; struct ib_sa_mad *mad; int ret; if (!sa_dev) return -ENODEV; port = &sa_dev->port[port_num - sa_dev->start_port]; agent = port->agent; query = kzalloc(sizeof(*query), gfp_mask); if (!query) return -ENOMEM; query->sa_query.port = port; ret = alloc_mad(&query->sa_query, gfp_mask); if (ret) goto err1; ib_sa_client_get(client); query->sa_query.client = client; query->callback = callback; query->context = context; mad = query->sa_query.mad_buf->mad; init_mad(&query->sa_query, agent); query->sa_query.callback = callback ? 
ib_sa_mcmember_rec_callback : NULL; query->sa_query.release = ib_sa_mcmember_rec_release; mad->mad_hdr.method = method; mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC); mad->sa_hdr.comp_mask = comp_mask; ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table), rec, mad->data); *sa_query = &query->sa_query; ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); if (ret < 0) goto err2; return ret; err2: *sa_query = NULL; ib_sa_client_put(query->sa_query.client); free_mad(&query->sa_query); err1: kfree(query); return ret; } /* Support GuidInfoRecord */ static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query, int status, struct ib_sa_mad *mad) { struct ib_sa_guidinfo_query *query = container_of(sa_query, struct ib_sa_guidinfo_query, sa_query); if (mad) { struct ib_sa_guidinfo_rec rec; ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), mad->data, &rec); query->callback(status, &rec, query->context); } else query->callback(status, NULL, query->context); } static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query) { kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query)); } int ib_sa_guid_info_rec_query(struct ib_sa_client *client, struct ib_device *device, u32 port_num, struct ib_sa_guidinfo_rec *rec, ib_sa_comp_mask comp_mask, u8 method, unsigned long timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_guidinfo_rec *resp, void *context), void *context, struct ib_sa_query **sa_query) { struct ib_sa_guidinfo_query *query; struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); struct ib_sa_port *port; struct ib_mad_agent *agent; struct ib_sa_mad *mad; int ret; if (!sa_dev) return -ENODEV; if (method != IB_MGMT_METHOD_GET && method != IB_MGMT_METHOD_SET && method != IB_SA_METHOD_DELETE) { return -EINVAL; } port = &sa_dev->port[port_num - sa_dev->start_port]; agent = port->agent; query = kzalloc(sizeof(*query), gfp_mask); if (!query) return -ENOMEM; query->sa_query.port = port; ret = alloc_mad(&query->sa_query, gfp_mask); if (ret) goto err1; ib_sa_client_get(client); query->sa_query.client = client; query->callback = callback; query->context = context; mad = query->sa_query.mad_buf->mad; init_mad(&query->sa_query, agent); query->sa_query.callback = callback ? 
ib_sa_guidinfo_rec_callback : NULL; query->sa_query.release = ib_sa_guidinfo_rec_release; mad->mad_hdr.method = method; mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC); mad->sa_hdr.comp_mask = comp_mask; ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec, mad->data); *sa_query = &query->sa_query; ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); if (ret < 0) goto err2; return ret; err2: *sa_query = NULL; ib_sa_client_put(query->sa_query.client); free_mad(&query->sa_query); err1: kfree(query); return ret; } EXPORT_SYMBOL(ib_sa_guid_info_rec_query); struct ib_classport_info_context { struct completion done; struct ib_sa_query *sa_query; }; static void ib_classportinfo_cb(void *context) { struct ib_classport_info_context *cb_ctx = context; complete(&cb_ctx->done); } static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query, int status, struct ib_sa_mad *mad) { unsigned long flags; struct ib_sa_classport_info_query *query = container_of(sa_query, struct ib_sa_classport_info_query, sa_query); struct ib_sa_classport_cache *info = &sa_query->port->classport_info; if (mad) { if (sa_query->flags & IB_SA_QUERY_OPA) { struct opa_class_port_info rec; ib_unpack(opa_classport_info_rec_table, ARRAY_SIZE(opa_classport_info_rec_table), mad->data, &rec); spin_lock_irqsave(&sa_query->port->classport_lock, flags); if (!status && !info->valid) { memcpy(&info->data.opa, &rec, sizeof(info->data.opa)); info->valid = true; info->data.type = RDMA_CLASS_PORT_INFO_OPA; } spin_unlock_irqrestore(&sa_query->port->classport_lock, flags); } else { struct ib_class_port_info rec; ib_unpack(ib_classport_info_rec_table, ARRAY_SIZE(ib_classport_info_rec_table), mad->data, &rec); spin_lock_irqsave(&sa_query->port->classport_lock, flags); if (!status && !info->valid) { memcpy(&info->data.ib, &rec, sizeof(info->data.ib)); info->valid = true; info->data.type = RDMA_CLASS_PORT_INFO_IB; } spin_unlock_irqrestore(&sa_query->port->classport_lock, flags); } } query->callback(query->context); } static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query) { kfree(container_of(sa_query, struct ib_sa_classport_info_query, sa_query)); } static int ib_sa_classport_info_rec_query(struct ib_sa_port *port, unsigned long timeout_ms, void (*callback)(void *context), void *context, struct ib_sa_query **sa_query) { struct ib_mad_agent *agent; struct ib_sa_classport_info_query *query; struct ib_sa_mad *mad; gfp_t gfp_mask = GFP_KERNEL; int ret; agent = port->agent; query = kzalloc(sizeof(*query), gfp_mask); if (!query) return -ENOMEM; query->sa_query.port = port; query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device, port->port_num) ? 
IB_SA_QUERY_OPA : 0; ret = alloc_mad(&query->sa_query, gfp_mask); if (ret) goto err_free; query->callback = callback; query->context = context; mad = query->sa_query.mad_buf->mad; init_mad(&query->sa_query, agent); query->sa_query.callback = ib_sa_classport_info_rec_callback; query->sa_query.release = ib_sa_classport_info_rec_release; mad->mad_hdr.method = IB_MGMT_METHOD_GET; mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO); mad->sa_hdr.comp_mask = 0; *sa_query = &query->sa_query; ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); if (ret < 0) goto err_free_mad; return ret; err_free_mad: *sa_query = NULL; free_mad(&query->sa_query); err_free: kfree(query); return ret; } static void update_ib_cpi(struct work_struct *work) { struct ib_sa_port *port = container_of(work, struct ib_sa_port, ib_cpi_work.work); struct ib_classport_info_context *cb_context; unsigned long flags; int ret; /* If the classport info is valid, nothing * to do here. */ spin_lock_irqsave(&port->classport_lock, flags); if (port->classport_info.valid) { spin_unlock_irqrestore(&port->classport_lock, flags); return; } spin_unlock_irqrestore(&port->classport_lock, flags); cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL); if (!cb_context) goto err_nomem; init_completion(&cb_context->done); ret = ib_sa_classport_info_rec_query(port, 3000, ib_classportinfo_cb, cb_context, &cb_context->sa_query); if (ret < 0) goto free_cb_err; wait_for_completion(&cb_context->done); free_cb_err: kfree(cb_context); spin_lock_irqsave(&port->classport_lock, flags); /* If the classport info is still not valid, the query should have * failed for some reason. Retry issuing the query */ if (!port->classport_info.valid) { port->classport_info.retry_cnt++; if (port->classport_info.retry_cnt <= IB_SA_CPI_MAX_RETRY_CNT) { unsigned long delay = msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT); queue_delayed_work(ib_wq, &port->ib_cpi_work, delay); } } spin_unlock_irqrestore(&port->classport_lock, flags); err_nomem: return; } static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *mad_send_wc) { struct ib_sa_query *query = mad_send_wc->send_buf->context[0]; unsigned long flags; if (query->callback) switch (mad_send_wc->status) { case IB_WC_SUCCESS: /* No callback -- already got recv */ break; case IB_WC_RESP_TIMEOUT_ERR: query->callback(query, -ETIMEDOUT, NULL); break; case IB_WC_WR_FLUSH_ERR: query->callback(query, -EINTR, NULL); break; default: query->callback(query, -EIO, NULL); break; } xa_lock_irqsave(&queries, flags); __xa_erase(&queries, query->id); xa_unlock_irqrestore(&queries, flags); free_mad(query); if (query->client) ib_sa_client_put(query->client); query->release(query); } static void recv_handler(struct ib_mad_agent *mad_agent, struct ib_mad_send_buf *send_buf, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_sa_query *query; if (!send_buf) return; query = send_buf->context[0]; if (query->callback) { if (mad_recv_wc->wc->status == IB_WC_SUCCESS) query->callback(query, mad_recv_wc->recv_buf.mad->mad_hdr.status ? 
-EINVAL : 0, (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad); else query->callback(query, -EIO, NULL); } ib_free_recv_mad(mad_recv_wc); } static void update_sm_ah(struct work_struct *work) { struct ib_sa_port *port = container_of(work, struct ib_sa_port, update_task); struct ib_sa_sm_ah *new_ah; struct ib_port_attr port_attr; struct rdma_ah_attr ah_attr; bool grh_required; if (ib_query_port(port->agent->device, port->port_num, &port_attr)) { pr_warn("Couldn't query port\n"); return; } new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL); if (!new_ah) return; kref_init(&new_ah->ref); new_ah->src_path_mask = (1 << port_attr.lmc) - 1; new_ah->pkey_index = 0; if (ib_find_pkey(port->agent->device, port->port_num, IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index)) pr_err("Couldn't find index for default PKey\n"); memset(&ah_attr, 0, sizeof(ah_attr)); ah_attr.type = rdma_ah_find_type(port->agent->device, port->port_num); rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid); rdma_ah_set_sl(&ah_attr, port_attr.sm_sl); rdma_ah_set_port_num(&ah_attr, port->port_num); grh_required = rdma_is_grh_required(port->agent->device, port->port_num); /* * The OPA sm_lid of 0xFFFF needs special handling so that it can be * differentiated from a permissive LID of 0xFFFF. We set the * grh_required flag here so the SA can program the DGID in the * address handle appropriately */ if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA && (grh_required || port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE))) rdma_ah_set_make_grd(&ah_attr, true); if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) { rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH); rdma_ah_set_subnet_prefix(&ah_attr, cpu_to_be64(port_attr.subnet_prefix)); rdma_ah_set_interface_id(&ah_attr, cpu_to_be64(IB_SA_WELL_KNOWN_GUID)); } new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE); if (IS_ERR(new_ah->ah)) { pr_warn("Couldn't create new SM AH\n"); kfree(new_ah); return; } spin_lock_irq(&port->ah_lock); if (port->sm_ah) kref_put(&port->sm_ah->ref, free_sm_ah); port->sm_ah = new_ah; spin_unlock_irq(&port->ah_lock); } static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event) { if (event->event == IB_EVENT_PORT_ERR || event->event == IB_EVENT_PORT_ACTIVE || event->event == IB_EVENT_LID_CHANGE || event->event == IB_EVENT_PKEY_CHANGE || event->event == IB_EVENT_SM_CHANGE || event->event == IB_EVENT_CLIENT_REREGISTER) { unsigned long flags; struct ib_sa_device *sa_dev = container_of(handler, typeof(*sa_dev), event_handler); u32 port_num = event->element.port_num - sa_dev->start_port; struct ib_sa_port *port = &sa_dev->port[port_num]; if (!rdma_cap_ib_sa(handler->device, port->port_num)) return; spin_lock_irqsave(&port->ah_lock, flags); if (port->sm_ah) kref_put(&port->sm_ah->ref, free_sm_ah); port->sm_ah = NULL; spin_unlock_irqrestore(&port->ah_lock, flags); if (event->event == IB_EVENT_SM_CHANGE || event->event == IB_EVENT_CLIENT_REREGISTER || event->event == IB_EVENT_LID_CHANGE || event->event == IB_EVENT_PORT_ACTIVE) { unsigned long delay = msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT); spin_lock_irqsave(&port->classport_lock, flags); port->classport_info.valid = false; port->classport_info.retry_cnt = 0; spin_unlock_irqrestore(&port->classport_lock, flags); queue_delayed_work(ib_wq, &port->ib_cpi_work, delay); } queue_work(ib_wq, &sa_dev->port[port_num].update_task); } } static int ib_sa_add_one(struct ib_device *device) { struct ib_sa_device *sa_dev; int s, e, i; int count = 0; int ret; s = rdma_start_port(device); e = rdma_end_port(device); 
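/*
 * ib_sa_add_one() sizes the sa_dev->port[] array below for every port in
 * [rdma_start_port(), rdma_end_port()]; ports that lack SA capability keep
 * their (unused) slot and are simply skipped when the MAD agent and the
 * update/classport-info work items are set up.
 */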
sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL); if (!sa_dev) return -ENOMEM; sa_dev->start_port = s; sa_dev->end_port = e; for (i = 0; i <= e - s; ++i) { spin_lock_init(&sa_dev->port[i].ah_lock); if (!rdma_cap_ib_sa(device, i + 1)) continue; sa_dev->port[i].sm_ah = NULL; sa_dev->port[i].port_num = i + s; spin_lock_init(&sa_dev->port[i].classport_lock); sa_dev->port[i].classport_info.valid = false; sa_dev->port[i].agent = ib_register_mad_agent(device, i + s, IB_QPT_GSI, NULL, 0, send_handler, recv_handler, sa_dev, 0); if (IS_ERR(sa_dev->port[i].agent)) { ret = PTR_ERR(sa_dev->port[i].agent); goto err; } INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah); INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work, update_ib_cpi); count++; } if (!count) { ret = -EOPNOTSUPP; goto free; } ib_set_client_data(device, &sa_client, sa_dev); /* * We register our event handler after everything is set up, * and then update our cached info after the event handler is * registered to avoid any problems if a port changes state * during our initialization. */ INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event); ib_register_event_handler(&sa_dev->event_handler); for (i = 0; i <= e - s; ++i) { if (rdma_cap_ib_sa(device, i + 1)) update_sm_ah(&sa_dev->port[i].update_task); } return 0; err: while (--i >= 0) { if (rdma_cap_ib_sa(device, i + 1)) ib_unregister_mad_agent(sa_dev->port[i].agent); } free: kfree(sa_dev); return ret; } static void ib_sa_remove_one(struct ib_device *device, void *client_data) { struct ib_sa_device *sa_dev = client_data; int i; ib_unregister_event_handler(&sa_dev->event_handler); flush_workqueue(ib_wq); for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) { if (rdma_cap_ib_sa(device, i + 1)) { cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work); ib_unregister_mad_agent(sa_dev->port[i].agent); if (sa_dev->port[i].sm_ah) kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah); } } kfree(sa_dev); } int ib_sa_init(void) { int ret; get_random_bytes(&tid, sizeof tid); atomic_set(&ib_nl_sa_request_seq, 0); ret = ib_register_client(&sa_client); if (ret) { pr_err("Couldn't register ib_sa client\n"); goto err1; } ret = mcast_init(); if (ret) { pr_err("Couldn't initialize multicast handling\n"); goto err2; } ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM); if (!ib_nl_wq) { ret = -ENOMEM; goto err3; } INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout); return 0; err3: mcast_cleanup(); err2: ib_unregister_client(&sa_client); err1: return ret; } void ib_sa_cleanup(void) { cancel_delayed_work(&ib_nl_timed_work); destroy_workqueue(ib_nl_wq); mcast_cleanup(); ib_unregister_client(&sa_client); WARN_ON(!xa_empty(&queries)); }
linux-master
drivers/infiniband/core/sa_query.c
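The sa_query.c file above exports a small client API (ib_sa_register_client(), ib_sa_path_rec_get(), ib_sa_cancel_query(), ib_sa_unregister_client()). The sketch below is a minimal illustration of how an in-kernel consumer might drive a path-record lookup with it; it is not code from the kernel tree. The client, callback and function names are hypothetical, the component mask and timeout are illustrative, and error handling is abbreviated.

#include <linux/completion.h>
#include <linux/printk.h>
#include <rdma/ib_sa.h>

static struct ib_sa_client my_sa_client;	/* hypothetical consumer */

/* Completion callback: resp/num_paths are only valid when status == 0;
 * status is -EINTR if the query was cancelled and -ETIMEDOUT on timeout.
 */
static void my_path_done(int status, struct sa_path_rec *resp,
			 unsigned int num_paths, void *context)
{
	if (!status)
		pr_info("resolved %u path record(s)\n", num_paths);
	complete(context);
}

static int my_resolve_path(struct ib_device *device, u32 port_num,
			   const union ib_gid *sgid, const union ib_gid *dgid)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct sa_path_rec rec = {};
	struct ib_sa_query *query;
	int id;

	ib_sa_register_client(&my_sa_client);

	rec.rec_type = SA_PATH_REC_TYPE_IB;
	rec.sgid = *sgid;
	rec.dgid = *dgid;

	/* A negative return is an errno; otherwise it is a query id that
	 * could be passed to ib_sa_cancel_query(id, query) to give up early.
	 */
	id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
				3000, GFP_KERNEL,
				my_path_done, &done, &query);
	if (id < 0) {
		ib_sa_unregister_client(&my_sa_client);
		return id;
	}

	wait_for_completion(&done);
	ib_sa_unregister_client(&my_sa_client);
	return 0;
}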
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2008 Cisco. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #define pr_fmt(fmt) "user_mad: " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/dma-mapping.h> #include <linux/poll.h> #include <linux/mutex.h> #include <linux/kref.h> #include <linux/compat.h> #include <linux/sched.h> #include <linux/semaphore.h> #include <linux/slab.h> #include <linux/nospec.h> #include <linux/uaccess.h> #include <rdma/ib_mad.h> #include <rdma/ib_user_mad.h> #include <rdma/rdma_netlink.h> #include "core_priv.h" MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand userspace MAD packet access"); MODULE_LICENSE("Dual BSD/GPL"); enum { IB_UMAD_MAX_PORTS = RDMA_MAX_PORTS, IB_UMAD_MAX_AGENTS = 32, IB_UMAD_MAJOR = 231, IB_UMAD_MINOR_BASE = 0, IB_UMAD_NUM_FIXED_MINOR = 64, IB_UMAD_NUM_DYNAMIC_MINOR = IB_UMAD_MAX_PORTS - IB_UMAD_NUM_FIXED_MINOR, IB_ISSM_MINOR_BASE = IB_UMAD_NUM_FIXED_MINOR, }; /* * Our lifetime rules for these structs are the following: * device special file is opened, we take a reference on the * ib_umad_port's struct ib_umad_device. We drop these * references in the corresponding close(). * * In addition to references coming from open character devices, there * is one more reference to each ib_umad_device representing the * module's reference taken when allocating the ib_umad_device in * ib_umad_add_one(). * * When destroying an ib_umad_device, we drop the module's reference. 
*/ struct ib_umad_port { struct cdev cdev; struct device dev; struct cdev sm_cdev; struct device sm_dev; struct semaphore sm_sem; struct mutex file_mutex; struct list_head file_list; struct ib_device *ib_dev; struct ib_umad_device *umad_dev; int dev_num; u32 port_num; }; struct ib_umad_device { struct kref kref; struct ib_umad_port ports[]; }; struct ib_umad_file { struct mutex mutex; struct ib_umad_port *port; struct list_head recv_list; struct list_head send_list; struct list_head port_list; spinlock_t send_lock; wait_queue_head_t recv_wait; struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS]; int agents_dead; u8 use_pkey_index; u8 already_used; }; struct ib_umad_packet { struct ib_mad_send_buf *msg; struct ib_mad_recv_wc *recv_wc; struct list_head list; int length; struct ib_user_mad mad; }; struct ib_rmpp_mad_hdr { struct ib_mad_hdr mad_hdr; struct ib_rmpp_hdr rmpp_hdr; } __packed; #define CREATE_TRACE_POINTS #include <trace/events/ib_umad.h> static const dev_t base_umad_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE); static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) + IB_UMAD_NUM_FIXED_MINOR; static dev_t dynamic_umad_dev; static dev_t dynamic_issm_dev; static DEFINE_IDA(umad_ida); static int ib_umad_add_one(struct ib_device *device); static void ib_umad_remove_one(struct ib_device *device, void *client_data); static void ib_umad_dev_free(struct kref *kref) { struct ib_umad_device *dev = container_of(kref, struct ib_umad_device, kref); kfree(dev); } static void ib_umad_dev_get(struct ib_umad_device *dev) { kref_get(&dev->kref); } static void ib_umad_dev_put(struct ib_umad_device *dev) { kref_put(&dev->kref, ib_umad_dev_free); } static int hdr_size(struct ib_umad_file *file) { return file->use_pkey_index ? sizeof(struct ib_user_mad_hdr) : sizeof(struct ib_user_mad_hdr_old); } /* caller must hold file->mutex */ static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id) { return file->agents_dead ? 
NULL : file->agent[id]; } static int queue_packet(struct ib_umad_file *file, struct ib_mad_agent *agent, struct ib_umad_packet *packet) { int ret = 1; mutex_lock(&file->mutex); for (packet->mad.hdr.id = 0; packet->mad.hdr.id < IB_UMAD_MAX_AGENTS; packet->mad.hdr.id++) if (agent == __get_agent(file, packet->mad.hdr.id)) { list_add_tail(&packet->list, &file->recv_list); wake_up_interruptible(&file->recv_wait); ret = 0; break; } mutex_unlock(&file->mutex); return ret; } static void dequeue_send(struct ib_umad_file *file, struct ib_umad_packet *packet) { spin_lock_irq(&file->send_lock); list_del(&packet->list); spin_unlock_irq(&file->send_lock); } static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *send_wc) { struct ib_umad_file *file = agent->context; struct ib_umad_packet *packet = send_wc->send_buf->context[0]; dequeue_send(file, packet); rdma_destroy_ah(packet->msg->ah, RDMA_DESTROY_AH_SLEEPABLE); ib_free_send_mad(packet->msg); if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) { packet->length = IB_MGMT_MAD_HDR; packet->mad.hdr.status = ETIMEDOUT; if (!queue_packet(file, agent, packet)) return; } kfree(packet); } static void recv_handler(struct ib_mad_agent *agent, struct ib_mad_send_buf *send_buf, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_umad_file *file = agent->context; struct ib_umad_packet *packet; if (mad_recv_wc->wc->status != IB_WC_SUCCESS) goto err1; packet = kzalloc(sizeof *packet, GFP_KERNEL); if (!packet) goto err1; packet->length = mad_recv_wc->mad_len; packet->recv_wc = mad_recv_wc; packet->mad.hdr.status = 0; packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len; packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); /* * On OPA devices it is okay to lose the upper 16 bits of LID as this * information is obtained elsewhere. Mask off the upper 16 bits. */ if (rdma_cap_opa_mad(agent->device, agent->port_num)) packet->mad.hdr.lid = ib_lid_be16(0xFFFF & mad_recv_wc->wc->slid); else packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid); packet->mad.hdr.sl = mad_recv_wc->wc->sl; packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index; packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH); if (packet->mad.hdr.grh_present) { struct rdma_ah_attr ah_attr; const struct ib_global_route *grh; int ret; ret = ib_init_ah_attr_from_wc(agent->device, agent->port_num, mad_recv_wc->wc, mad_recv_wc->recv_buf.grh, &ah_attr); if (ret) goto err2; grh = rdma_ah_read_grh(&ah_attr); packet->mad.hdr.gid_index = grh->sgid_index; packet->mad.hdr.hop_limit = grh->hop_limit; packet->mad.hdr.traffic_class = grh->traffic_class; memcpy(packet->mad.hdr.gid, &grh->dgid, 16); packet->mad.hdr.flow_label = cpu_to_be32(grh->flow_label); rdma_destroy_ah_attr(&ah_attr); } if (queue_packet(file, agent, packet)) goto err2; return; err2: kfree(packet); err1: ib_free_recv_mad(mad_recv_wc); } static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf, struct ib_umad_packet *packet, size_t count) { struct ib_mad_recv_buf *recv_buf; int left, seg_payload, offset, max_seg_payload; size_t seg_size; recv_buf = &packet->recv_wc->recv_buf; seg_size = packet->recv_wc->mad_seg_size; /* We need enough room to copy the first (or only) MAD segment. 
*/ if ((packet->length <= seg_size && count < hdr_size(file) + packet->length) || (packet->length > seg_size && count < hdr_size(file) + seg_size)) return -EINVAL; if (copy_to_user(buf, &packet->mad, hdr_size(file))) return -EFAULT; buf += hdr_size(file); seg_payload = min_t(int, packet->length, seg_size); if (copy_to_user(buf, recv_buf->mad, seg_payload)) return -EFAULT; if (seg_payload < packet->length) { /* * Multipacket RMPP MAD message. Copy remainder of message. * Note that last segment may have a shorter payload. */ if (count < hdr_size(file) + packet->length) { /* * The buffer is too small, return the first RMPP segment, * which includes the RMPP message length. */ return -ENOSPC; } offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class); max_seg_payload = seg_size - offset; for (left = packet->length - seg_payload, buf += seg_payload; left; left -= seg_payload, buf += seg_payload) { recv_buf = container_of(recv_buf->list.next, struct ib_mad_recv_buf, list); seg_payload = min(left, max_seg_payload); if (copy_to_user(buf, ((void *) recv_buf->mad) + offset, seg_payload)) return -EFAULT; } } trace_ib_umad_read_recv(file, &packet->mad.hdr, &recv_buf->mad->mad_hdr); return hdr_size(file) + packet->length; } static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf, struct ib_umad_packet *packet, size_t count) { ssize_t size = hdr_size(file) + packet->length; if (count < size) return -EINVAL; if (copy_to_user(buf, &packet->mad, hdr_size(file))) return -EFAULT; buf += hdr_size(file); if (copy_to_user(buf, packet->mad.data, packet->length)) return -EFAULT; trace_ib_umad_read_send(file, &packet->mad.hdr, (struct ib_mad_hdr *)&packet->mad.data); return size; } static ssize_t ib_umad_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct ib_umad_file *file = filp->private_data; struct ib_umad_packet *packet; ssize_t ret; if (count < hdr_size(file)) return -EINVAL; mutex_lock(&file->mutex); if (file->agents_dead) { mutex_unlock(&file->mutex); return -EIO; } while (list_empty(&file->recv_list)) { mutex_unlock(&file->mutex); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible(file->recv_wait, !list_empty(&file->recv_list))) return -ERESTARTSYS; mutex_lock(&file->mutex); } if (file->agents_dead) { mutex_unlock(&file->mutex); return -EIO; } packet = list_entry(file->recv_list.next, struct ib_umad_packet, list); list_del(&packet->list); mutex_unlock(&file->mutex); if (packet->recv_wc) ret = copy_recv_mad(file, buf, packet, count); else ret = copy_send_mad(file, buf, packet, count); if (ret < 0) { /* Requeue packet */ mutex_lock(&file->mutex); list_add(&packet->list, &file->recv_list); mutex_unlock(&file->mutex); } else { if (packet->recv_wc) ib_free_recv_mad(packet->recv_wc); kfree(packet); } return ret; } static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf) { int left, seg; /* Copy class specific header */ if ((msg->hdr_len > IB_MGMT_RMPP_HDR) && copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR, msg->hdr_len - IB_MGMT_RMPP_HDR)) return -EFAULT; /* All headers are in place. Copy data segments. 
*/ for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0; seg++, left -= msg->seg_size, buf += msg->seg_size) { if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf, min(left, msg->seg_size))) return -EFAULT; } return 0; } static int same_destination(struct ib_user_mad_hdr *hdr1, struct ib_user_mad_hdr *hdr2) { if (!hdr1->grh_present && !hdr2->grh_present) return (hdr1->lid == hdr2->lid); if (hdr1->grh_present && hdr2->grh_present) return !memcmp(hdr1->gid, hdr2->gid, 16); return 0; } static int is_duplicate(struct ib_umad_file *file, struct ib_umad_packet *packet) { struct ib_umad_packet *sent_packet; struct ib_mad_hdr *sent_hdr, *hdr; hdr = (struct ib_mad_hdr *) packet->mad.data; list_for_each_entry(sent_packet, &file->send_list, list) { sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data; if ((hdr->tid != sent_hdr->tid) || (hdr->mgmt_class != sent_hdr->mgmt_class)) continue; /* * No need to be overly clever here. If two new operations have * the same TID, reject the second as a duplicate. This is more * restrictive than required by the spec. */ if (!ib_response_mad(hdr)) { if (!ib_response_mad(sent_hdr)) return 1; continue; } else if (!ib_response_mad(sent_hdr)) continue; if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr)) return 1; } return 0; } static ssize_t ib_umad_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct ib_umad_file *file = filp->private_data; struct ib_rmpp_mad_hdr *rmpp_mad_hdr; struct ib_umad_packet *packet; struct ib_mad_agent *agent; struct rdma_ah_attr ah_attr; struct ib_ah *ah; __be64 *tid; int ret, data_len, hdr_len, copy_offset, rmpp_active; u8 base_version; if (count < hdr_size(file) + IB_MGMT_RMPP_HDR) return -EINVAL; packet = kzalloc(sizeof(*packet) + IB_MGMT_RMPP_HDR, GFP_KERNEL); if (!packet) return -ENOMEM; if (copy_from_user(&packet->mad, buf, hdr_size(file))) { ret = -EFAULT; goto err; } if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) { ret = -EINVAL; goto err; } buf += hdr_size(file); if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) { ret = -EFAULT; goto err; } mutex_lock(&file->mutex); trace_ib_umad_write(file, &packet->mad.hdr, (struct ib_mad_hdr *)&packet->mad.data); agent = __get_agent(file, packet->mad.hdr.id); if (!agent) { ret = -EIO; goto err_up; } memset(&ah_attr, 0, sizeof ah_attr); ah_attr.type = rdma_ah_find_type(agent->device, file->port->port_num); rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid)); rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl); rdma_ah_set_path_bits(&ah_attr, packet->mad.hdr.path_bits); rdma_ah_set_port_num(&ah_attr, file->port->port_num); if (packet->mad.hdr.grh_present) { rdma_ah_set_grh(&ah_attr, NULL, be32_to_cpu(packet->mad.hdr.flow_label), packet->mad.hdr.gid_index, packet->mad.hdr.hop_limit, packet->mad.hdr.traffic_class); rdma_ah_set_dgid_raw(&ah_attr, packet->mad.hdr.gid); } ah = rdma_create_user_ah(agent->qp->pd, &ah_attr, NULL); if (IS_ERR(ah)) { ret = PTR_ERR(ah); goto err_up; } rmpp_mad_hdr = (struct ib_rmpp_mad_hdr *)packet->mad.data; hdr_len = ib_get_mad_data_offset(rmpp_mad_hdr->mad_hdr.mgmt_class); if (ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class) && ib_mad_kernel_rmpp_agent(agent)) { copy_offset = IB_MGMT_RMPP_HDR; rmpp_active = ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE; } else { copy_offset = IB_MGMT_MAD_HDR; rmpp_active = 0; } base_version = ((struct ib_mad_hdr *)&packet->mad.data)->base_version; data_len = count - hdr_size(file) - hdr_len; packet->msg = ib_create_send_mad(agent, 
be32_to_cpu(packet->mad.hdr.qpn), packet->mad.hdr.pkey_index, rmpp_active, hdr_len, data_len, GFP_KERNEL, base_version); if (IS_ERR(packet->msg)) { ret = PTR_ERR(packet->msg); goto err_ah; } packet->msg->ah = ah; packet->msg->timeout_ms = packet->mad.hdr.timeout_ms; packet->msg->retries = packet->mad.hdr.retries; packet->msg->context[0] = packet; /* Copy MAD header. Any RMPP header is already in place. */ memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR); if (!rmpp_active) { if (copy_from_user(packet->msg->mad + copy_offset, buf + copy_offset, hdr_len + data_len - copy_offset)) { ret = -EFAULT; goto err_msg; } } else { ret = copy_rmpp_mad(packet->msg, buf); if (ret) goto err_msg; } /* * Set the high-order part of the transaction ID to make MADs from * different agents unique, and allow routing responses back to the * original requestor. */ if (!ib_response_mad(packet->msg->mad)) { tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid; *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | (be64_to_cpup(tid) & 0xffffffff)); rmpp_mad_hdr->mad_hdr.tid = *tid; } if (!ib_mad_kernel_rmpp_agent(agent) && ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class) && (ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) { spin_lock_irq(&file->send_lock); list_add_tail(&packet->list, &file->send_list); spin_unlock_irq(&file->send_lock); } else { spin_lock_irq(&file->send_lock); ret = is_duplicate(file, packet); if (!ret) list_add_tail(&packet->list, &file->send_list); spin_unlock_irq(&file->send_lock); if (ret) { ret = -EINVAL; goto err_msg; } } ret = ib_post_send_mad(packet->msg, NULL); if (ret) goto err_send; mutex_unlock(&file->mutex); return count; err_send: dequeue_send(file, packet); err_msg: ib_free_send_mad(packet->msg); err_ah: rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE); err_up: mutex_unlock(&file->mutex); err: kfree(packet); return ret; } static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait) { struct ib_umad_file *file = filp->private_data; /* we will always be able to post a MAD send */ __poll_t mask = EPOLLOUT | EPOLLWRNORM; mutex_lock(&file->mutex); poll_wait(filp, &file->recv_wait, wait); if (!list_empty(&file->recv_list)) mask |= EPOLLIN | EPOLLRDNORM; if (file->agents_dead) mask = EPOLLERR; mutex_unlock(&file->mutex); return mask; } static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, int compat_method_mask) { struct ib_user_mad_reg_req ureq; struct ib_mad_reg_req req; struct ib_mad_agent *agent = NULL; int agent_id; int ret; mutex_lock(&file->port->file_mutex); mutex_lock(&file->mutex); if (!file->port->ib_dev) { dev_notice(&file->port->dev, "%s: invalid device\n", __func__); ret = -EPIPE; goto out; } if (copy_from_user(&ureq, arg, sizeof ureq)) { ret = -EFAULT; goto out; } if (ureq.qpn != 0 && ureq.qpn != 1) { dev_notice(&file->port->dev, "%s: invalid QPN %u specified\n", __func__, ureq.qpn); ret = -EINVAL; goto out; } for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id) if (!__get_agent(file, agent_id)) goto found; dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n", __func__, IB_UMAD_MAX_AGENTS); ret = -ENOMEM; goto out; found: if (ureq.mgmt_class) { memset(&req, 0, sizeof(req)); req.mgmt_class = ureq.mgmt_class; req.mgmt_class_version = ureq.mgmt_class_version; memcpy(req.oui, ureq.oui, sizeof req.oui); if (compat_method_mask) { u32 *umm = (u32 *) ureq.method_mask; int i; for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i) req.method_mask[i] = umm[i * 2] | ((u64) umm[i * 2 + 1] << 32); } 
else memcpy(req.method_mask, ureq.method_mask, sizeof req.method_mask); } agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num, ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, ureq.mgmt_class ? &req : NULL, ureq.rmpp_version, send_handler, recv_handler, file, 0); if (IS_ERR(agent)) { ret = PTR_ERR(agent); agent = NULL; goto out; } if (put_user(agent_id, (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) { ret = -EFAULT; goto out; } if (!file->already_used) { file->already_used = 1; if (!file->use_pkey_index) { dev_warn(&file->port->dev, "process %s did not enable P_Key index support.\n", current->comm); dev_warn(&file->port->dev, " Documentation/infiniband/user_mad.rst has info on the new ABI.\n"); } } file->agent[agent_id] = agent; ret = 0; out: mutex_unlock(&file->mutex); if (ret && agent) ib_unregister_mad_agent(agent); mutex_unlock(&file->port->file_mutex); return ret; } static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg) { struct ib_user_mad_reg_req2 ureq; struct ib_mad_reg_req req; struct ib_mad_agent *agent = NULL; int agent_id; int ret; mutex_lock(&file->port->file_mutex); mutex_lock(&file->mutex); if (!file->port->ib_dev) { dev_notice(&file->port->dev, "%s: invalid device\n", __func__); ret = -EPIPE; goto out; } if (copy_from_user(&ureq, arg, sizeof(ureq))) { ret = -EFAULT; goto out; } if (ureq.qpn != 0 && ureq.qpn != 1) { dev_notice(&file->port->dev, "%s: invalid QPN %u specified\n", __func__, ureq.qpn); ret = -EINVAL; goto out; } if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) { dev_notice(&file->port->dev, "%s failed: invalid registration flags specified 0x%x; supported 0x%x\n", __func__, ureq.flags, IB_USER_MAD_REG_FLAGS_CAP); ret = -EINVAL; if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP, (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req2, flags)))) ret = -EFAULT; goto out; } for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id) if (!__get_agent(file, agent_id)) goto found; dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n", __func__, IB_UMAD_MAX_AGENTS); ret = -ENOMEM; goto out; found: if (ureq.mgmt_class) { memset(&req, 0, sizeof(req)); req.mgmt_class = ureq.mgmt_class; req.mgmt_class_version = ureq.mgmt_class_version; if (ureq.oui & 0xff000000) { dev_notice(&file->port->dev, "%s failed: oui invalid 0x%08x\n", __func__, ureq.oui); ret = -EINVAL; goto out; } req.oui[2] = ureq.oui & 0x0000ff; req.oui[1] = (ureq.oui & 0x00ff00) >> 8; req.oui[0] = (ureq.oui & 0xff0000) >> 16; memcpy(req.method_mask, ureq.method_mask, sizeof(req.method_mask)); } agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num, ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, ureq.mgmt_class ? 
&req : NULL, ureq.rmpp_version, send_handler, recv_handler, file, ureq.flags); if (IS_ERR(agent)) { ret = PTR_ERR(agent); agent = NULL; goto out; } if (put_user(agent_id, (u32 __user *)(arg + offsetof(struct ib_user_mad_reg_req2, id)))) { ret = -EFAULT; goto out; } if (!file->already_used) { file->already_used = 1; file->use_pkey_index = 1; } file->agent[agent_id] = agent; ret = 0; out: mutex_unlock(&file->mutex); if (ret && agent) ib_unregister_mad_agent(agent); mutex_unlock(&file->port->file_mutex); return ret; } static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg) { struct ib_mad_agent *agent = NULL; u32 id; int ret = 0; if (get_user(id, arg)) return -EFAULT; if (id >= IB_UMAD_MAX_AGENTS) return -EINVAL; mutex_lock(&file->port->file_mutex); mutex_lock(&file->mutex); id = array_index_nospec(id, IB_UMAD_MAX_AGENTS); if (!__get_agent(file, id)) { ret = -EINVAL; goto out; } agent = file->agent[id]; file->agent[id] = NULL; out: mutex_unlock(&file->mutex); if (agent) ib_unregister_mad_agent(agent); mutex_unlock(&file->port->file_mutex); return ret; } static long ib_umad_enable_pkey(struct ib_umad_file *file) { int ret = 0; mutex_lock(&file->mutex); if (file->already_used) ret = -EINVAL; else file->use_pkey_index = 1; mutex_unlock(&file->mutex); return ret; } static long ib_umad_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { switch (cmd) { case IB_USER_MAD_REGISTER_AGENT: return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0); case IB_USER_MAD_UNREGISTER_AGENT: return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg); case IB_USER_MAD_ENABLE_PKEY: return ib_umad_enable_pkey(filp->private_data); case IB_USER_MAD_REGISTER_AGENT2: return ib_umad_reg_agent2(filp->private_data, (void __user *) arg); default: return -ENOIOCTLCMD; } } #ifdef CONFIG_COMPAT static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { switch (cmd) { case IB_USER_MAD_REGISTER_AGENT: return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1); case IB_USER_MAD_UNREGISTER_AGENT: return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg)); case IB_USER_MAD_ENABLE_PKEY: return ib_umad_enable_pkey(filp->private_data); case IB_USER_MAD_REGISTER_AGENT2: return ib_umad_reg_agent2(filp->private_data, compat_ptr(arg)); default: return -ENOIOCTLCMD; } } #endif /* * ib_umad_open() does not need the BKL: * * - the ib_umad_port structures are properly reference counted, and * everything else is purely local to the file being created, so * races against other open calls are not a problem; * - the ioctl method does not affect any global state outside of the * file structure being operated on; */ static int ib_umad_open(struct inode *inode, struct file *filp) { struct ib_umad_port *port; struct ib_umad_file *file; int ret = 0; port = container_of(inode->i_cdev, struct ib_umad_port, cdev); mutex_lock(&port->file_mutex); if (!port->ib_dev) { ret = -ENXIO; goto out; } if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) { ret = -EPERM; goto out; } file = kzalloc(sizeof(*file), GFP_KERNEL); if (!file) { ret = -ENOMEM; goto out; } mutex_init(&file->mutex); spin_lock_init(&file->send_lock); INIT_LIST_HEAD(&file->recv_list); INIT_LIST_HEAD(&file->send_list); init_waitqueue_head(&file->recv_wait); file->port = port; filp->private_data = file; list_add_tail(&file->port_list, &port->file_list); stream_open(inode, filp); out: mutex_unlock(&port->file_mutex); return ret; } static int ib_umad_close(struct inode *inode, struct file 
*filp) { struct ib_umad_file *file = filp->private_data; struct ib_umad_packet *packet, *tmp; int already_dead; int i; mutex_lock(&file->port->file_mutex); mutex_lock(&file->mutex); already_dead = file->agents_dead; file->agents_dead = 1; list_for_each_entry_safe(packet, tmp, &file->recv_list, list) { if (packet->recv_wc) ib_free_recv_mad(packet->recv_wc); kfree(packet); } list_del(&file->port_list); mutex_unlock(&file->mutex); if (!already_dead) for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i) if (file->agent[i]) ib_unregister_mad_agent(file->agent[i]); mutex_unlock(&file->port->file_mutex); mutex_destroy(&file->mutex); kfree(file); return 0; } static const struct file_operations umad_fops = { .owner = THIS_MODULE, .read = ib_umad_read, .write = ib_umad_write, .poll = ib_umad_poll, .unlocked_ioctl = ib_umad_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ib_umad_compat_ioctl, #endif .open = ib_umad_open, .release = ib_umad_close, .llseek = no_llseek, }; static int ib_umad_sm_open(struct inode *inode, struct file *filp) { struct ib_umad_port *port; struct ib_port_modify props = { .set_port_cap_mask = IB_PORT_SM }; int ret; port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev); if (filp->f_flags & O_NONBLOCK) { if (down_trylock(&port->sm_sem)) { ret = -EAGAIN; goto fail; } } else { if (down_interruptible(&port->sm_sem)) { ret = -ERESTARTSYS; goto fail; } } if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) { ret = -EPERM; goto err_up_sem; } ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props); if (ret) goto err_up_sem; filp->private_data = port; nonseekable_open(inode, filp); return 0; err_up_sem: up(&port->sm_sem); fail: return ret; } static int ib_umad_sm_close(struct inode *inode, struct file *filp) { struct ib_umad_port *port = filp->private_data; struct ib_port_modify props = { .clr_port_cap_mask = IB_PORT_SM }; int ret = 0; mutex_lock(&port->file_mutex); if (port->ib_dev) ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props); mutex_unlock(&port->file_mutex); up(&port->sm_sem); return ret; } static const struct file_operations umad_sm_fops = { .owner = THIS_MODULE, .open = ib_umad_sm_open, .release = ib_umad_sm_close, .llseek = no_llseek, }; static struct ib_umad_port *get_port(struct ib_device *ibdev, struct ib_umad_device *umad_dev, u32 port) { if (!umad_dev) return ERR_PTR(-EOPNOTSUPP); if (!rdma_is_port_valid(ibdev, port)) return ERR_PTR(-EINVAL); if (!rdma_cap_ib_mad(ibdev, port)) return ERR_PTR(-EOPNOTSUPP); return &umad_dev->ports[port - rdma_start_port(ibdev)]; } static int ib_umad_get_nl_info(struct ib_device *ibdev, void *client_data, struct ib_client_nl_info *res) { struct ib_umad_port *port = get_port(ibdev, client_data, res->port); if (IS_ERR(port)) return PTR_ERR(port); res->abi = IB_USER_MAD_ABI_VERSION; res->cdev = &port->dev; return 0; } static struct ib_client umad_client = { .name = "umad", .add = ib_umad_add_one, .remove = ib_umad_remove_one, .get_nl_info = ib_umad_get_nl_info, }; MODULE_ALIAS_RDMA_CLIENT("umad"); static int ib_issm_get_nl_info(struct ib_device *ibdev, void *client_data, struct ib_client_nl_info *res) { struct ib_umad_port *port = get_port(ibdev, client_data, res->port); if (IS_ERR(port)) return PTR_ERR(port); res->abi = IB_USER_MAD_ABI_VERSION; res->cdev = &port->sm_dev; return 0; } static struct ib_client issm_client = { .name = "issm", .get_nl_info = ib_issm_get_nl_info, }; MODULE_ALIAS_RDMA_CLIENT("issm"); static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr, char *buf) { struct 
ib_umad_port *port = dev_get_drvdata(dev); if (!port) return -ENODEV; return sysfs_emit(buf, "%s\n", dev_name(&port->ib_dev->dev)); } static DEVICE_ATTR_RO(ibdev); static ssize_t port_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ib_umad_port *port = dev_get_drvdata(dev); if (!port) return -ENODEV; return sysfs_emit(buf, "%d\n", port->port_num); } static DEVICE_ATTR_RO(port); static struct attribute *umad_class_dev_attrs[] = { &dev_attr_ibdev.attr, &dev_attr_port.attr, NULL, }; ATTRIBUTE_GROUPS(umad_class_dev); static char *umad_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); } static ssize_t abi_version_show(const struct class *class, const struct class_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", IB_USER_MAD_ABI_VERSION); } static CLASS_ATTR_RO(abi_version); static struct attribute *umad_class_attrs[] = { &class_attr_abi_version.attr, NULL, }; ATTRIBUTE_GROUPS(umad_class); static struct class umad_class = { .name = "infiniband_mad", .devnode = umad_devnode, .class_groups = umad_class_groups, .dev_groups = umad_class_dev_groups, }; static void ib_umad_release_port(struct device *device) { struct ib_umad_port *port = dev_get_drvdata(device); struct ib_umad_device *umad_dev = port->umad_dev; ib_umad_dev_put(umad_dev); } static void ib_umad_init_port_dev(struct device *dev, struct ib_umad_port *port, const struct ib_device *device) { device_initialize(dev); ib_umad_dev_get(port->umad_dev); dev->class = &umad_class; dev->parent = device->dev.parent; dev_set_drvdata(dev, port); dev->release = ib_umad_release_port; } static int ib_umad_init_port(struct ib_device *device, int port_num, struct ib_umad_device *umad_dev, struct ib_umad_port *port) { int devnum; dev_t base_umad; dev_t base_issm; int ret; devnum = ida_alloc_max(&umad_ida, IB_UMAD_MAX_PORTS - 1, GFP_KERNEL); if (devnum < 0) return -1; port->dev_num = devnum; if (devnum >= IB_UMAD_NUM_FIXED_MINOR) { base_umad = dynamic_umad_dev + devnum - IB_UMAD_NUM_FIXED_MINOR; base_issm = dynamic_issm_dev + devnum - IB_UMAD_NUM_FIXED_MINOR; } else { base_umad = devnum + base_umad_dev; base_issm = devnum + base_issm_dev; } port->ib_dev = device; port->umad_dev = umad_dev; port->port_num = port_num; sema_init(&port->sm_sem, 1); mutex_init(&port->file_mutex); INIT_LIST_HEAD(&port->file_list); ib_umad_init_port_dev(&port->dev, port, device); port->dev.devt = base_umad; dev_set_name(&port->dev, "umad%d", port->dev_num); cdev_init(&port->cdev, &umad_fops); port->cdev.owner = THIS_MODULE; ret = cdev_device_add(&port->cdev, &port->dev); if (ret) goto err_cdev; ib_umad_init_port_dev(&port->sm_dev, port, device); port->sm_dev.devt = base_issm; dev_set_name(&port->sm_dev, "issm%d", port->dev_num); cdev_init(&port->sm_cdev, &umad_sm_fops); port->sm_cdev.owner = THIS_MODULE; ret = cdev_device_add(&port->sm_cdev, &port->sm_dev); if (ret) goto err_dev; return 0; err_dev: put_device(&port->sm_dev); cdev_device_del(&port->cdev, &port->dev); err_cdev: put_device(&port->dev); ida_free(&umad_ida, devnum); return ret; } static void ib_umad_kill_port(struct ib_umad_port *port) { struct ib_umad_file *file; int id; cdev_device_del(&port->sm_cdev, &port->sm_dev); cdev_device_del(&port->cdev, &port->dev); mutex_lock(&port->file_mutex); /* Mark ib_dev NULL and block ioctl or other file ops to progress * further. 
*/ port->ib_dev = NULL; list_for_each_entry(file, &port->file_list, port_list) { mutex_lock(&file->mutex); file->agents_dead = 1; wake_up_interruptible(&file->recv_wait); mutex_unlock(&file->mutex); for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id) if (file->agent[id]) ib_unregister_mad_agent(file->agent[id]); } mutex_unlock(&port->file_mutex); ida_free(&umad_ida, port->dev_num); /* balances device_initialize() */ put_device(&port->sm_dev); put_device(&port->dev); } static int ib_umad_add_one(struct ib_device *device) { struct ib_umad_device *umad_dev; int s, e, i; int count = 0; int ret; s = rdma_start_port(device); e = rdma_end_port(device); umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL); if (!umad_dev) return -ENOMEM; kref_init(&umad_dev->kref); for (i = s; i <= e; ++i) { if (!rdma_cap_ib_mad(device, i)) continue; ret = ib_umad_init_port(device, i, umad_dev, &umad_dev->ports[i - s]); if (ret) goto err; count++; } if (!count) { ret = -EOPNOTSUPP; goto free; } ib_set_client_data(device, &umad_client, umad_dev); return 0; err: while (--i >= s) { if (!rdma_cap_ib_mad(device, i)) continue; ib_umad_kill_port(&umad_dev->ports[i - s]); } free: /* balances kref_init */ ib_umad_dev_put(umad_dev); return ret; } static void ib_umad_remove_one(struct ib_device *device, void *client_data) { struct ib_umad_device *umad_dev = client_data; unsigned int i; rdma_for_each_port (device, i) { if (rdma_cap_ib_mad(device, i)) ib_umad_kill_port( &umad_dev->ports[i - rdma_start_port(device)]); } /* balances kref_init() */ ib_umad_dev_put(umad_dev); } static int __init ib_umad_init(void) { int ret; ret = register_chrdev_region(base_umad_dev, IB_UMAD_NUM_FIXED_MINOR * 2, umad_class.name); if (ret) { pr_err("couldn't register device number\n"); goto out; } ret = alloc_chrdev_region(&dynamic_umad_dev, 0, IB_UMAD_NUM_DYNAMIC_MINOR * 2, umad_class.name); if (ret) { pr_err("couldn't register dynamic device number\n"); goto out_alloc; } dynamic_issm_dev = dynamic_umad_dev + IB_UMAD_NUM_DYNAMIC_MINOR; ret = class_register(&umad_class); if (ret) { pr_err("couldn't create class infiniband_mad\n"); goto out_chrdev; } ret = ib_register_client(&umad_client); if (ret) goto out_class; ret = ib_register_client(&issm_client); if (ret) goto out_client; return 0; out_client: ib_unregister_client(&umad_client); out_class: class_unregister(&umad_class); out_chrdev: unregister_chrdev_region(dynamic_umad_dev, IB_UMAD_NUM_DYNAMIC_MINOR * 2); out_alloc: unregister_chrdev_region(base_umad_dev, IB_UMAD_NUM_FIXED_MINOR * 2); out: return ret; } static void __exit ib_umad_cleanup(void) { ib_unregister_client(&issm_client); ib_unregister_client(&umad_client); class_unregister(&umad_class); unregister_chrdev_region(base_umad_dev, IB_UMAD_NUM_FIXED_MINOR * 2); unregister_chrdev_region(dynamic_umad_dev, IB_UMAD_NUM_DYNAMIC_MINOR * 2); } module_init(ib_umad_init); module_exit(ib_umad_cleanup);
linux-master
drivers/infiniband/core/user_mad.c
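For orientation, a minimal userspace sketch of how the ioctl interface implemented above is typically driven: open the umad character device, register a MAD agent, then unregister it. The device path, management class, and class version below are illustrative assumptions; the struct and ioctl names are taken from the uapi header <rdma/ib_user_mad.h>.

/* Hypothetical userspace sketch: register a MAD agent on /dev/infiniband/umad0.
 * Assumes the kernel uapi header <rdma/ib_user_mad.h> is installed.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <rdma/ib_user_mad.h>

int main(void)
{
	struct ib_user_mad_reg_req req;
	int fd;

	fd = open("/dev/infiniband/umad0", O_RDWR);	/* node created by umad_fops above */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.qpn = 1;			/* GSI QP */
	req.mgmt_class = 0x03;		/* illustrative: subnet administration class */
	req.mgmt_class_version = 2;

	/* On success the kernel writes the agent slot back into req.id. */
	if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req) < 0) {
		perror("IB_USER_MAD_REGISTER_AGENT");
		close(fd);
		return 1;
	}
	printf("registered agent id %u\n", req.id);

	ioctl(fd, IB_USER_MAD_UNREGISTER_AGENT, &req.id);
	close(fd);
	return 0;
}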
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 */
#include <rdma/ib_verbs.h>
#include <rdma/mr_pool.h>

struct ib_mr *ib_mr_pool_get(struct ib_qp *qp, struct list_head *list)
{
	struct ib_mr *mr;
	unsigned long flags;

	spin_lock_irqsave(&qp->mr_lock, flags);
	mr = list_first_entry_or_null(list, struct ib_mr, qp_entry);
	if (mr) {
		list_del(&mr->qp_entry);
		qp->mrs_used++;
	}
	spin_unlock_irqrestore(&qp->mr_lock, flags);

	return mr;
}
EXPORT_SYMBOL(ib_mr_pool_get);

void ib_mr_pool_put(struct ib_qp *qp, struct list_head *list, struct ib_mr *mr)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->mr_lock, flags);
	list_add(&mr->qp_entry, list);
	qp->mrs_used--;
	spin_unlock_irqrestore(&qp->mr_lock, flags);
}
EXPORT_SYMBOL(ib_mr_pool_put);

int ib_mr_pool_init(struct ib_qp *qp, struct list_head *list, int nr,
		enum ib_mr_type type, u32 max_num_sg, u32 max_num_meta_sg)
{
	struct ib_mr *mr;
	unsigned long flags;
	int ret, i;

	for (i = 0; i < nr; i++) {
		if (type == IB_MR_TYPE_INTEGRITY)
			mr = ib_alloc_mr_integrity(qp->pd, max_num_sg,
						   max_num_meta_sg);
		else
			mr = ib_alloc_mr(qp->pd, type, max_num_sg);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto out;
		}

		spin_lock_irqsave(&qp->mr_lock, flags);
		list_add_tail(&mr->qp_entry, list);
		spin_unlock_irqrestore(&qp->mr_lock, flags);
	}

	return 0;
out:
	ib_mr_pool_destroy(qp, list);
	return ret;
}
EXPORT_SYMBOL(ib_mr_pool_init);

void ib_mr_pool_destroy(struct ib_qp *qp, struct list_head *list)
{
	struct ib_mr *mr;
	unsigned long flags;

	spin_lock_irqsave(&qp->mr_lock, flags);
	while (!list_empty(list)) {
		mr = list_first_entry(list, struct ib_mr, qp_entry);
		list_del(&mr->qp_entry);

		spin_unlock_irqrestore(&qp->mr_lock, flags);
		ib_dereg_mr(mr);
		spin_lock_irqsave(&qp->mr_lock, flags);
	}
	spin_unlock_irqrestore(&qp->mr_lock, flags);
}
EXPORT_SYMBOL(ib_mr_pool_destroy);
linux-master
drivers/infiniband/core/mr_pool.c
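For context, a short kernel-side sketch of how a ULP might consume the pool API above: pre-allocate MRs at queue setup, take one per I/O, and return it on completion. The function names, pool depth, and SG limit are illustrative assumptions, not part of the file.

/* Hypothetical ULP usage of the MR pool helpers above (sketch only). */
#include <linux/list.h>
#include <rdma/ib_verbs.h>
#include <rdma/mr_pool.h>

#define EXAMPLE_NR_MRS	16	/* illustrative pool depth */
#define EXAMPLE_MAX_SG	32	/* illustrative per-MR SG limit */

static LIST_HEAD(example_free_mrs);	/* real ULPs keep one list per queue/QP */

static int example_setup(struct ib_qp *qp)
{
	/* Pre-allocate fast-registration MRs against qp->pd. */
	return ib_mr_pool_init(qp, &example_free_mrs, EXAMPLE_NR_MRS,
			       IB_MR_TYPE_MEM_REG, EXAMPLE_MAX_SG, 0);
}

static struct ib_mr *example_start_io(struct ib_qp *qp)
{
	/* May return NULL if every MR is in flight; the caller must back off. */
	return ib_mr_pool_get(qp, &example_free_mrs);
}

static void example_end_io(struct ib_qp *qp, struct ib_mr *mr)
{
	ib_mr_pool_put(qp, &example_free_mrs, mr);
}

static void example_teardown(struct ib_qp *qp)
{
	ib_mr_pool_destroy(qp, &example_free_mrs);
}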
/* * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "rdma_core.h" #include "uverbs.h" #include <rdma/uverbs_std_types.h> static int uverbs_free_dm(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { struct ib_dm *dm = uobject->object; if (atomic_read(&dm->usecnt)) return -EBUSY; return dm->device->ops.dealloc_dm(dm, attrs); } static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)( struct uverbs_attr_bundle *attrs) { struct ib_dm_alloc_attr attr = {}; struct ib_uobject *uobj = uverbs_attr_get(attrs, UVERBS_ATTR_ALLOC_DM_HANDLE) ->obj_attr.uobject; struct ib_device *ib_dev = attrs->context->device; struct ib_dm *dm; int ret; if (!ib_dev->ops.alloc_dm) return -EOPNOTSUPP; ret = uverbs_copy_from(&attr.length, attrs, UVERBS_ATTR_ALLOC_DM_LENGTH); if (ret) return ret; ret = uverbs_copy_from(&attr.alignment, attrs, UVERBS_ATTR_ALLOC_DM_ALIGNMENT); if (ret) return ret; dm = ib_dev->ops.alloc_dm(ib_dev, attrs->context, &attr, attrs); if (IS_ERR(dm)) return PTR_ERR(dm); dm->device = ib_dev; dm->length = attr.length; dm->uobject = uobj; atomic_set(&dm->usecnt, 0); uobj->object = dm; return 0; } DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_DM_ALLOC, UVERBS_ATTR_IDR(UVERBS_ATTR_ALLOC_DM_HANDLE, UVERBS_OBJECT_DM, UVERBS_ACCESS_NEW, UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DM_LENGTH, UVERBS_ATTR_TYPE(u64), UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DM_ALIGNMENT, UVERBS_ATTR_TYPE(u32), UA_MANDATORY)); DECLARE_UVERBS_NAMED_METHOD_DESTROY( UVERBS_METHOD_DM_FREE, UVERBS_ATTR_IDR(UVERBS_ATTR_FREE_DM_HANDLE, UVERBS_OBJECT_DM, UVERBS_ACCESS_DESTROY, UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_DM, UVERBS_TYPE_ALLOC_IDR(uverbs_free_dm), &UVERBS_METHOD(UVERBS_METHOD_DM_ALLOC), &UVERBS_METHOD(UVERBS_METHOD_DM_FREE)); const struct uapi_definition uverbs_def_obj_dm[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM, UAPI_DEF_OBJ_NEEDS_FN(dealloc_dm)), {} };
linux-master
drivers/infiniband/core/uverbs_std_types_dm.c
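For context, a hedged sketch of the driver-side alloc_dm/dealloc_dm ops that this uverbs object dispatches to. The signatures mirror the call sites in the DM_ALLOC handler and uverbs_free_dm above; the backing allocation and the example_dm struct are assumptions.

/* Hypothetical driver ops backing the DM object above (sketch only). */
#include <linux/slab.h>
#include <linux/err.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

struct example_dm {
	struct ib_dm	ibdm;	/* embedded ib_dm handed back to the core */
	void		*buf;	/* illustrative backing storage */
};

static struct ib_dm *example_alloc_dm(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_dm_alloc_attr *attr,
				      struct uverbs_attr_bundle *attrs)
{
	struct example_dm *dm;

	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return ERR_PTR(-ENOMEM);

	/* attr->length and attr->alignment were copied in from userspace above. */
	dm->buf = kzalloc(attr->length, GFP_KERNEL);
	if (!dm->buf) {
		kfree(dm);
		return ERR_PTR(-ENOMEM);
	}
	return &dm->ibdm;	/* the core then fills device, length, uobject, usecnt */
}

static int example_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
{
	struct example_dm *dm = container_of(ibdm, struct example_dm, ibdm);

	kfree(dm->buf);
	kfree(dm);
	return 0;
}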
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved. */ #include <rdma/uverbs_std_types.h> #include <rdma/uverbs_ioctl.h> #include "rdma_core.h" #include "uverbs.h" static int UVERBS_HANDLER(UVERBS_METHOD_ASYNC_EVENT_ALLOC)( struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, UVERBS_METHOD_ASYNC_EVENT_ALLOC); ib_uverbs_init_async_event_file( container_of(uobj, struct ib_uverbs_async_event_file, uobj)); return 0; } static void uverbs_async_event_destroy_uobj(struct ib_uobject *uobj, enum rdma_remove_reason why) { struct ib_uverbs_async_event_file *event_file = container_of(uobj, struct ib_uverbs_async_event_file, uobj); ib_unregister_event_handler(&event_file->event_handler); if (why == RDMA_REMOVE_DRIVER_REMOVE) ib_uverbs_async_handler(event_file, 0, IB_EVENT_DEVICE_FATAL, NULL, NULL); } int uverbs_async_event_release(struct inode *inode, struct file *filp) { struct ib_uverbs_async_event_file *event_file; struct ib_uobject *uobj = filp->private_data; int ret; if (!uobj) return uverbs_uobject_fd_release(inode, filp); event_file = container_of(uobj, struct ib_uverbs_async_event_file, uobj); /* * The async event FD has to deliver IB_EVENT_DEVICE_FATAL even after * disassociation, so cleaning the event list must only happen after * release. The user knows it has reached the end of the event stream * when it sees IB_EVENT_DEVICE_FATAL. */ uverbs_uobject_get(uobj); ret = uverbs_uobject_fd_release(inode, filp); ib_uverbs_free_event_queue(&event_file->ev_queue); uverbs_uobject_put(uobj); return ret; } DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_ASYNC_EVENT_ALLOC, UVERBS_ATTR_FD(UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE, UVERBS_OBJECT_ASYNC_EVENT, UVERBS_ACCESS_NEW, UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_ASYNC_EVENT, UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_async_event_file), uverbs_async_event_destroy_uobj, &uverbs_async_event_fops, "[infinibandevent]", O_RDONLY), &UVERBS_METHOD(UVERBS_METHOD_ASYNC_EVENT_ALLOC)); const struct uapi_definition uverbs_def_obj_async_fd[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_ASYNC_EVENT), {} };
linux-master
drivers/infiniband/core/uverbs_std_types_async_fd.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Trace points for the RDMA Connection Manager.
 *
 * Author: Chuck Lever <chuck.lever@oracle.com>
 *
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 */

#define CREATE_TRACE_POINTS

#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include "cma_priv.h"
#include "cma_trace.h"
linux-master
drivers/infiniband/core/cma_trace.c
/* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2020 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/sched/signal.h> #include <linux/sched/mm.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/count_zeros.h> #include <rdma/ib_umem_odp.h> #include "uverbs.h" static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) { bool make_dirty = umem->writable && dirty; struct scatterlist *sg; unsigned int i; if (dirty) ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt, DMA_BIDIRECTIONAL, 0); for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) unpin_user_page_range_dirty_lock(sg_page(sg), DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty); sg_free_append_table(&umem->sgt_append); } /** * ib_umem_find_best_pgsz - Find best HW page size to use for this MR * * @umem: umem struct * @pgsz_bitmap: bitmap of HW supported page sizes * @virt: IOVA * * This helper is intended for HW that support multiple page * sizes but can do only a single page size in an MR. * * Returns 0 if the umem requires page sizes not supported by * the driver to be mapped. Drivers always supporting PAGE_SIZE * or smaller will never see a 0 result. */ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, unsigned long pgsz_bitmap, unsigned long virt) { struct scatterlist *sg; unsigned long va, pgoff; dma_addr_t mask; int i; umem->iova = va = virt; if (umem->is_odp) { unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift); /* ODP must always be self consistent. */ if (!(pgsz_bitmap & page_size)) return 0; return page_size; } /* rdma_for_each_block() has a bug if the page size is smaller than the * page size used to build the umem. For now prevent smaller page sizes * from being returned. */ pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT); /* The best result is the smallest page size that results in the minimum * number of required pages. Compute the largest page size that could * work based on VA address bits that don't change. 
*/ mask = pgsz_bitmap & GENMASK(BITS_PER_LONG - 1, bits_per((umem->length - 1 + virt) ^ virt)); /* offset into first SGL */ pgoff = umem->address & ~PAGE_MASK; for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) { /* Walk SGL and reduce max page size if VA/PA bits differ * for any address. */ mask |= (sg_dma_address(sg) + pgoff) ^ va; va += sg_dma_len(sg) - pgoff; /* Except for the last entry, the ending iova alignment sets * the maximum possible page size as the low bits of the iova * must be zero when starting the next chunk. */ if (i != (umem->sgt_append.sgt.nents - 1)) mask |= va; pgoff = 0; } /* The mask accumulates 1's in each position where the VA and physical * address differ, thus the length of trailing 0 is the largest page * size that can pass the VA through to the physical. */ if (mask) pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0); return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0; } EXPORT_SYMBOL(ib_umem_find_best_pgsz); /** * ib_umem_get - Pin and DMA map userspace memory. * * @device: IB device to connect UMEM * @addr: userspace virtual address to start at * @size: length of region to pin * @access: IB_ACCESS_xxx flags for memory being pinned */ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, size_t size, int access) { struct ib_umem *umem; struct page **page_list; unsigned long lock_limit; unsigned long new_pinned; unsigned long cur_base; unsigned long dma_attr = 0; struct mm_struct *mm; unsigned long npages; int pinned, ret; unsigned int gup_flags = FOLL_LONGTERM; /* * If the combination of the addr and size requested for this memory * region causes an integer overflow, return error. */ if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size)) return ERR_PTR(-EINVAL); if (!can_do_mlock()) return ERR_PTR(-EPERM); if (access & IB_ACCESS_ON_DEMAND) return ERR_PTR(-EOPNOTSUPP); umem = kzalloc(sizeof(*umem), GFP_KERNEL); if (!umem) return ERR_PTR(-ENOMEM); umem->ibdev = device; umem->length = size; umem->address = addr; /* * Drivers should call ib_umem_find_best_pgsz() to set the iova * correctly. 
*/ umem->iova = addr; umem->writable = ib_access_writable(access); umem->owning_mm = mm = current->mm; mmgrab(mm); page_list = (struct page **) __get_free_page(GFP_KERNEL); if (!page_list) { ret = -ENOMEM; goto umem_kfree; } npages = ib_umem_num_pages(umem); if (npages == 0 || npages > UINT_MAX) { ret = -EINVAL; goto out; } lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; new_pinned = atomic64_add_return(npages, &mm->pinned_vm); if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) { atomic64_sub(npages, &mm->pinned_vm); ret = -ENOMEM; goto out; } cur_base = addr & PAGE_MASK; if (umem->writable) gup_flags |= FOLL_WRITE; while (npages) { cond_resched(); pinned = pin_user_pages_fast(cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof(struct page *)), gup_flags, page_list); if (pinned < 0) { ret = pinned; goto umem_release; } cur_base += pinned * PAGE_SIZE; npages -= pinned; ret = sg_alloc_append_table_from_pages( &umem->sgt_append, page_list, pinned, 0, pinned << PAGE_SHIFT, ib_dma_max_seg_size(device), npages, GFP_KERNEL); if (ret) { unpin_user_pages_dirty_lock(page_list, pinned, 0); goto umem_release; } } if (access & IB_ACCESS_RELAXED_ORDERING) dma_attr |= DMA_ATTR_WEAK_ORDERING; ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt, DMA_BIDIRECTIONAL, dma_attr); if (ret) goto umem_release; goto out; umem_release: __ib_umem_release(device, umem, 0); atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm); out: free_page((unsigned long) page_list); umem_kfree: if (ret) { mmdrop(umem->owning_mm); kfree(umem); } return ret ? ERR_PTR(ret) : umem; } EXPORT_SYMBOL(ib_umem_get); /** * ib_umem_release - release memory pinned with ib_umem_get * @umem: umem struct to release */ void ib_umem_release(struct ib_umem *umem) { if (!umem) return; if (umem->is_dmabuf) return ib_umem_dmabuf_release(to_ib_umem_dmabuf(umem)); if (umem->is_odp) return ib_umem_odp_release(to_ib_umem_odp(umem)); __ib_umem_release(umem->ibdev, umem, 1); atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm); mmdrop(umem->owning_mm); kfree(umem); } EXPORT_SYMBOL(ib_umem_release); /* * Copy from the given ib_umem's pages to the given buffer. * * umem - the umem to copy from * offset - offset to start copying from * dst - destination buffer * length - buffer length * * Returns 0 on success, or an error code. */ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, size_t length) { size_t end = offset + length; int ret; if (offset > umem->length || length > umem->length - offset) { pr_err("%s not in range. offset: %zd umem length: %zd end: %zd\n", __func__, offset, umem->length, end); return -EINVAL; } ret = sg_pcopy_to_buffer(umem->sgt_append.sgt.sgl, umem->sgt_append.sgt.orig_nents, dst, length, offset + ib_umem_offset(umem)); if (ret < 0) return ret; else if (ret != length) return -EINVAL; else return 0; } EXPORT_SYMBOL(ib_umem_copy_from);
linux-master
drivers/infiniband/core/umem.c
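A condensed, hypothetical sketch of the usual driver flow against these helpers: pin the user range with ib_umem_get(), pick a hardware page size with ib_umem_find_best_pgsz(), and release the pin on teardown. The supported page-size bitmap and the surrounding function are assumptions.

/* Hypothetical driver-side use of the umem helpers above (sketch only). */
#include <linux/sizes.h>
#include <linux/err.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

static int example_reg_user_mr(struct ib_device *ibdev, u64 start, u64 length,
			       u64 virt_addr, int access_flags)
{
	struct ib_umem *umem;
	unsigned long pgsz;

	umem = ib_umem_get(ibdev, start, length, access_flags);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	/* Assume the HW supports 4K, 64K and 2M pages (illustrative bitmap). */
	pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_64K | SZ_2M, virt_addr);
	if (!pgsz) {
		ib_umem_release(umem);
		return -EINVAL;
	}

	/* ... program the HW MR from umem->sgt_append.sgt using pgsz ... */

	return 0;
}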
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved. * Copyright 2019 Marvell. All rights reserved. */ #include <linux/xarray.h> #include "uverbs.h" #include "core_priv.h" /** * rdma_umap_priv_init() - Initialize the private data of a vma * * @priv: The already allocated private data * @vma: The vm area struct that needs private data * @entry: entry into the mmap_xa that needs to be linked with * this vma * * Each time we map IO memory into user space this keeps track of the * mapping. When the device is hot-unplugged we 'zap' the mmaps in user space * to point to the zero page and allow the hot unplug to proceed. * * This is necessary for cases like PCI physical hot unplug as the actual BAR * memory may vanish after this and access to it from userspace could MCE. * * RDMA drivers supporting disassociation must have their user space designed * to cope in some way with their IO pages going to the zero page. * */ void rdma_umap_priv_init(struct rdma_umap_priv *priv, struct vm_area_struct *vma, struct rdma_user_mmap_entry *entry) { struct ib_uverbs_file *ufile = vma->vm_file->private_data; priv->vma = vma; if (entry) { kref_get(&entry->ref); priv->entry = entry; } vma->vm_private_data = priv; /* vm_ops is setup in ib_uverbs_mmap() to avoid module dependencies */ mutex_lock(&ufile->umap_lock); list_add(&priv->list, &ufile->umaps); mutex_unlock(&ufile->umap_lock); } EXPORT_SYMBOL(rdma_umap_priv_init); /** * rdma_user_mmap_io() - Map IO memory into a process * * @ucontext: associated user context * @vma: the vma related to the current mmap call * @pfn: pfn to map * @size: size to map * @prot: pgprot to use in remap call * @entry: mmap_entry retrieved from rdma_user_mmap_entry_get(), or NULL * if mmap_entry is not used by the driver * * This is to be called by drivers as part of their mmap() functions if they * wish to send something like PCI-E BAR memory to userspace. * * Return -EINVAL on wrong flags or size, -EAGAIN on failure to map. 0 on * success. */ int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, unsigned long pfn, unsigned long size, pgprot_t prot, struct rdma_user_mmap_entry *entry) { struct ib_uverbs_file *ufile = ucontext->ufile; struct rdma_umap_priv *priv; if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; if (vma->vm_end - vma->vm_start != size) return -EINVAL; /* Driver is using this wrong, must be called by ib_uverbs_mmap */ if (WARN_ON(!vma->vm_file || vma->vm_file->private_data != ufile)) return -EINVAL; lockdep_assert_held(&ufile->device->disassociate_srcu); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; vma->vm_page_prot = prot; if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) { kfree(priv); return -EAGAIN; } rdma_umap_priv_init(priv, vma, entry); return 0; } EXPORT_SYMBOL(rdma_user_mmap_io); /** * rdma_user_mmap_entry_get_pgoff() - Get an entry from the mmap_xa * * @ucontext: associated user context * @pgoff: The mmap offset >> PAGE_SHIFT * * This function is called when a user tries to mmap with an offset (returned * by rdma_user_mmap_get_offset()) it initially received from the driver. The * rdma_user_mmap_entry was created by the function * rdma_user_mmap_entry_insert(). This function increases the refcnt of the * entry so that it won't be deleted from the xarray in the meantime. * * Return an reference to an entry if exists or NULL if there is no * match. 
rdma_user_mmap_entry_put() must be called to put the reference. */ struct rdma_user_mmap_entry * rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext, unsigned long pgoff) { struct rdma_user_mmap_entry *entry; if (pgoff > U32_MAX) return NULL; xa_lock(&ucontext->mmap_xa); entry = xa_load(&ucontext->mmap_xa, pgoff); /* * If refcount is zero, entry is already being deleted, driver_removed * indicates that the no further mmaps are possible and we waiting for * the active VMAs to be closed. */ if (!entry || entry->start_pgoff != pgoff || entry->driver_removed || !kref_get_unless_zero(&entry->ref)) goto err; xa_unlock(&ucontext->mmap_xa); ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] returned\n", pgoff, entry->npages); return entry; err: xa_unlock(&ucontext->mmap_xa); return NULL; } EXPORT_SYMBOL(rdma_user_mmap_entry_get_pgoff); /** * rdma_user_mmap_entry_get() - Get an entry from the mmap_xa * * @ucontext: associated user context * @vma: the vma being mmap'd into * * This function is like rdma_user_mmap_entry_get_pgoff() except that it also * checks that the VMA is correct. */ struct rdma_user_mmap_entry * rdma_user_mmap_entry_get(struct ib_ucontext *ucontext, struct vm_area_struct *vma) { struct rdma_user_mmap_entry *entry; if (!(vma->vm_flags & VM_SHARED)) return NULL; entry = rdma_user_mmap_entry_get_pgoff(ucontext, vma->vm_pgoff); if (!entry) return NULL; if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) { rdma_user_mmap_entry_put(entry); return NULL; } return entry; } EXPORT_SYMBOL(rdma_user_mmap_entry_get); static void rdma_user_mmap_entry_free(struct kref *kref) { struct rdma_user_mmap_entry *entry = container_of(kref, struct rdma_user_mmap_entry, ref); struct ib_ucontext *ucontext = entry->ucontext; unsigned long i; /* * Erase all entries occupied by this single entry, this is deferred * until all VMA are closed so that the mmap offsets remain unique. */ xa_lock(&ucontext->mmap_xa); for (i = 0; i < entry->npages; i++) __xa_erase(&ucontext->mmap_xa, entry->start_pgoff + i); xa_unlock(&ucontext->mmap_xa); ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] removed\n", entry->start_pgoff, entry->npages); if (ucontext->device->ops.mmap_free) ucontext->device->ops.mmap_free(entry); } /** * rdma_user_mmap_entry_put() - Drop reference to the mmap entry * * @entry: an entry in the mmap_xa * * This function is called when the mapping is closed if it was * an io mapping or when the driver is done with the entry for * some other reason. * Should be called after rdma_user_mmap_entry_get was called * and entry is no longer needed. This function will erase the * entry and free it if its refcnt reaches zero. */ void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry) { kref_put(&entry->ref, rdma_user_mmap_entry_free); } EXPORT_SYMBOL(rdma_user_mmap_entry_put); /** * rdma_user_mmap_entry_remove() - Drop reference to entry and * mark it as unmmapable * * @entry: the entry to insert into the mmap_xa * * Drivers can call this to prevent userspace from creating more mappings for * entry, however existing mmaps continue to exist and ops->mmap_free() will * not be called until all user mmaps are destroyed. 
*/ void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry) { if (!entry) return; xa_lock(&entry->ucontext->mmap_xa); entry->driver_removed = true; xa_unlock(&entry->ucontext->mmap_xa); kref_put(&entry->ref, rdma_user_mmap_entry_free); } EXPORT_SYMBOL(rdma_user_mmap_entry_remove); /** * rdma_user_mmap_entry_insert_range() - Insert an entry to the mmap_xa * in a given range. * * @ucontext: associated user context. * @entry: the entry to insert into the mmap_xa * @length: length of the address that will be mmapped * @min_pgoff: minimum pgoff to be returned * @max_pgoff: maximum pgoff to be returned * * This function should be called by drivers that use the rdma_user_mmap * interface for implementing their mmap syscall A database of mmap offsets is * handled in the core and helper functions are provided to insert entries * into the database and extract entries when the user calls mmap with the * given offset. The function allocates a unique page offset in a given range * that should be provided to user, the user will use the offset to retrieve * information such as address to be mapped and how. * * Return: 0 on success and -ENOMEM on failure */ int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext, struct rdma_user_mmap_entry *entry, size_t length, u32 min_pgoff, u32 max_pgoff) { struct ib_uverbs_file *ufile = ucontext->ufile; XA_STATE(xas, &ucontext->mmap_xa, min_pgoff); u32 xa_first, xa_last, npages; int err; u32 i; if (!entry) return -EINVAL; kref_init(&entry->ref); entry->ucontext = ucontext; /* * We want the whole allocation to be done without interruption from a * different thread. The allocation requires finding a free range and * storing. During the xa_insert the lock could be released, possibly * allowing another thread to choose the same range. */ mutex_lock(&ufile->umap_lock); xa_lock(&ucontext->mmap_xa); /* We want to find an empty range */ npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE); entry->npages = npages; while (true) { /* First find an empty index */ xas_find_marked(&xas, max_pgoff, XA_FREE_MARK); if (xas.xa_node == XAS_RESTART) goto err_unlock; xa_first = xas.xa_index; /* Is there enough room to have the range? */ if (check_add_overflow(xa_first, npages, &xa_last)) goto err_unlock; /* * Now look for the next present entry. If an entry doesn't * exist, we found an empty range and can proceed. */ xas_next_entry(&xas, xa_last - 1); if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last) break; } for (i = xa_first; i < xa_last; i++) { err = __xa_insert(&ucontext->mmap_xa, i, entry, GFP_KERNEL); if (err) goto err_undo; } /* * Internally the kernel uses a page offset, in libc this is a byte * offset. Drivers should not return pgoff to userspace. */ entry->start_pgoff = xa_first; xa_unlock(&ucontext->mmap_xa); mutex_unlock(&ufile->umap_lock); ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#x] inserted\n", entry->start_pgoff, npages); return 0; err_undo: for (; i > xa_first; i--) __xa_erase(&ucontext->mmap_xa, i - 1); err_unlock: xa_unlock(&ucontext->mmap_xa); mutex_unlock(&ufile->umap_lock); return -ENOMEM; } EXPORT_SYMBOL(rdma_user_mmap_entry_insert_range); /** * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa. * * @ucontext: associated user context. * @entry: the entry to insert into the mmap_xa * @length: length of the address that will be mmapped * * This function should be called by drivers that use the rdma_user_mmap * interface for handling user mmapped addresses. 
The database is handled in * the core and helper functions are provided to insert entries into the * database and extract entries when the user calls mmap with the given offset. * The function allocates a unique page offset that should be provided to user, * the user will use the offset to retrieve information such as address to * be mapped and how. * * Return: 0 on success and -ENOMEM on failure */ int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext, struct rdma_user_mmap_entry *entry, size_t length) { return rdma_user_mmap_entry_insert_range(ucontext, entry, length, 0, U32_MAX); } EXPORT_SYMBOL(rdma_user_mmap_entry_insert);
linux-master
drivers/infiniband/core/ib_core_uverbs.c
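A minimal sketch of how a driver typically pairs these helpers: insert an entry when handing an mmap offset to userspace, then resolve and map it inside the driver's mmap op. The function names and the pfn source are assumptions; only the rdma_user_mmap_* calls come from the file above.

/* Hypothetical driver pairing of the mmap-entry helpers above (sketch only). */
#include <linux/mm.h>
#include <linux/pgtable.h>
#include <rdma/ib_verbs.h>

/* Step 1: allocate an offset for a doorbell/BAR page; the driver would return
 * the byte offset for this entry to userspace in its create response.
 */
static int example_expose_page(struct ib_ucontext *uctx,
			       struct rdma_user_mmap_entry *entry)
{
	return rdma_user_mmap_entry_insert(uctx, entry, PAGE_SIZE);
}

/* Step 2: the driver's ->mmap() op resolves the offset and maps the page. */
static int example_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *entry;
	unsigned long pfn = 0;	/* illustrative; normally derived from the entry */
	int ret;

	entry = rdma_user_mmap_entry_get(uctx, vma);
	if (!entry)
		return -EINVAL;

	ret = rdma_user_mmap_io(uctx, vma, pfn, PAGE_SIZE,
				pgprot_noncached(vma->vm_page_prot), entry);

	rdma_user_mmap_entry_put(entry);
	return ret;
}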
/* * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "rdma_core.h" #include "uverbs.h" #include <rdma/uverbs_std_types.h> static int uverbs_free_flow_action(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { struct ib_flow_action *action = uobject->object; if (atomic_read(&action->usecnt)) return -EBUSY; return action->device->ops.destroy_flow_action(action); } DECLARE_UVERBS_NAMED_METHOD_DESTROY( UVERBS_METHOD_FLOW_ACTION_DESTROY, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE, UVERBS_OBJECT_FLOW_ACTION, UVERBS_ACCESS_DESTROY, UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_FLOW_ACTION, UVERBS_TYPE_ALLOC_IDR(uverbs_free_flow_action), &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY)); const struct uapi_definition uverbs_def_obj_flow_action[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED( UVERBS_OBJECT_FLOW_ACTION, UAPI_DEF_OBJ_NEEDS_FN(destroy_flow_action)), {} };
linux-master
drivers/infiniband/core/uverbs_std_types_flow_action.c
/* * Copyright (c) 2014 Intel Corporation. All rights reserved. * Copyright (c) 2014 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "iwpm_util.h" static const char iwpm_ulib_name[IWPM_ULIBNAME_SIZE] = "iWarpPortMapperUser"; u16 iwpm_ulib_version = IWPM_UABI_VERSION_MIN; static int iwpm_user_pid = IWPM_PID_UNDEFINED; static atomic_t echo_nlmsg_seq; /** * iwpm_valid_pid - Check if the userspace iwarp port mapper pid is valid * * Returns true if the pid is greater than zero, otherwise returns false */ int iwpm_valid_pid(void) { return iwpm_user_pid > 0; } /** * iwpm_register_pid - Send a netlink query to userspace * to get the iwarp port mapper pid * @pm_msg: Contains driver info to send to the userspace port mapper * @nl_client: The index of the netlink client * * nlmsg attributes: * [IWPM_NLA_REG_PID_SEQ] * [IWPM_NLA_REG_IF_NAME] * [IWPM_NLA_REG_IBDEV_NAME] * [IWPM_NLA_REG_ULIB_NAME] */ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client) { struct sk_buff *skb = NULL; struct iwpm_nlmsg_request *nlmsg_request = NULL; struct nlmsghdr *nlh; u32 msg_seq; const char *err_str = ""; int ret = -EINVAL; if (iwpm_check_registration(nl_client, IWPM_REG_VALID) || iwpm_user_pid == IWPM_PID_UNAVAILABLE) return 0; skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client); if (!skb) { err_str = "Unable to create a nlmsg"; goto pid_query_error; } nlh->nlmsg_seq = iwpm_get_nlmsg_seq(); nlmsg_request = iwpm_get_nlmsg_request(nlh->nlmsg_seq, nl_client, GFP_KERNEL); if (!nlmsg_request) { err_str = "Unable to allocate netlink request"; goto pid_query_error; } msg_seq = atomic_read(&echo_nlmsg_seq); /* fill in the pid request message */ err_str = "Unable to put attribute of the nlmsg"; ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_REG_PID_SEQ); if (ret) goto pid_query_error; ret = ibnl_put_attr(skb, nlh, IFNAMSIZ, pm_msg->if_name, IWPM_NLA_REG_IF_NAME); if (ret) goto pid_query_error; ret = ibnl_put_attr(skb, nlh, IWPM_DEVNAME_SIZE, pm_msg->dev_name, IWPM_NLA_REG_IBDEV_NAME); if (ret) goto pid_query_error; ret = ibnl_put_attr(skb, nlh, IWPM_ULIBNAME_SIZE, (char *)iwpm_ulib_name, IWPM_NLA_REG_ULIB_NAME); if (ret) goto pid_query_error; nlmsg_end(skb, nlh); pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n", 
__func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name); ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL); if (ret) { skb = NULL; /* skb is freed in the netlink send-op handling */ iwpm_user_pid = IWPM_PID_UNAVAILABLE; err_str = "Unable to send a nlmsg"; goto pid_query_error; } nlmsg_request->req_buffer = pm_msg; ret = iwpm_wait_complete_req(nlmsg_request); return ret; pid_query_error: pr_info("%s: %s (client = %u)\n", __func__, err_str, nl_client); dev_kfree_skb(skb); if (nlmsg_request) iwpm_free_nlmsg_request(&nlmsg_request->kref); return ret; } /** * iwpm_add_mapping - Send a netlink add mapping request to * the userspace port mapper * @pm_msg: Contains the local ip/tcp address info to send * @nl_client: The index of the netlink client * * nlmsg attributes: * [IWPM_NLA_MANAGE_MAPPING_SEQ] * [IWPM_NLA_MANAGE_ADDR] * [IWPM_NLA_MANAGE_FLAGS] * * If the request is successful, the pm_msg stores * the port mapper response (mapped address info) */ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) { struct sk_buff *skb = NULL; struct iwpm_nlmsg_request *nlmsg_request = NULL; struct nlmsghdr *nlh; u32 msg_seq; const char *err_str = ""; int ret = -EINVAL; if (!iwpm_valid_pid()) return 0; if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) { err_str = "Unregistered port mapper client"; goto add_mapping_error; } skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client); if (!skb) { err_str = "Unable to create a nlmsg"; goto add_mapping_error; } nlh->nlmsg_seq = iwpm_get_nlmsg_seq(); nlmsg_request = iwpm_get_nlmsg_request(nlh->nlmsg_seq, nl_client, GFP_KERNEL); if (!nlmsg_request) { err_str = "Unable to allocate netlink request"; goto add_mapping_error; } msg_seq = atomic_read(&echo_nlmsg_seq); /* fill in the add mapping message */ err_str = "Unable to put attribute of the nlmsg"; ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_MANAGE_MAPPING_SEQ); if (ret) goto add_mapping_error; ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage), &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR); if (ret) goto add_mapping_error; /* If flags are required and we're not V4, then return a quiet error */ if (pm_msg->flags && iwpm_ulib_version == IWPM_UABI_VERSION_MIN) { ret = -EINVAL; goto add_mapping_error_nowarn; } if (iwpm_ulib_version > IWPM_UABI_VERSION_MIN) { ret = ibnl_put_attr(skb, nlh, sizeof(u32), &pm_msg->flags, IWPM_NLA_MANAGE_FLAGS); if (ret) goto add_mapping_error; } nlmsg_end(skb, nlh); nlmsg_request->req_buffer = pm_msg; ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid); if (ret) { skb = NULL; /* skb is freed in the netlink send-op handling */ iwpm_user_pid = IWPM_PID_UNDEFINED; err_str = "Unable to send a nlmsg"; goto add_mapping_error; } ret = iwpm_wait_complete_req(nlmsg_request); return ret; add_mapping_error: pr_info("%s: %s (client = %u)\n", __func__, err_str, nl_client); add_mapping_error_nowarn: dev_kfree_skb(skb); if (nlmsg_request) iwpm_free_nlmsg_request(&nlmsg_request->kref); return ret; } /** * iwpm_add_and_query_mapping - Process the port mapper response to * iwpm_add_and_query_mapping request * @pm_msg: Contains the local ip/tcp address info to send * @nl_client: The index of the netlink client * * nlmsg attributes: * [IWPM_NLA_QUERY_MAPPING_SEQ] * [IWPM_NLA_QUERY_LOCAL_ADDR] * [IWPM_NLA_QUERY_REMOTE_ADDR] * [IWPM_NLA_QUERY_FLAGS] */ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) { struct sk_buff *skb = NULL; struct iwpm_nlmsg_request *nlmsg_request = NULL; struct nlmsghdr *nlh; u32 
msg_seq; const char *err_str = ""; int ret = -EINVAL; if (!iwpm_valid_pid()) return 0; if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) { err_str = "Unregistered port mapper client"; goto query_mapping_error; } ret = -ENOMEM; skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client); if (!skb) { err_str = "Unable to create a nlmsg"; goto query_mapping_error; } nlh->nlmsg_seq = iwpm_get_nlmsg_seq(); nlmsg_request = iwpm_get_nlmsg_request(nlh->nlmsg_seq, nl_client, GFP_KERNEL); if (!nlmsg_request) { err_str = "Unable to allocate netlink request"; goto query_mapping_error; } msg_seq = atomic_read(&echo_nlmsg_seq); /* fill in the query message */ err_str = "Unable to put attribute of the nlmsg"; ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_QUERY_MAPPING_SEQ); if (ret) goto query_mapping_error; ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage), &pm_msg->loc_addr, IWPM_NLA_QUERY_LOCAL_ADDR); if (ret) goto query_mapping_error; ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage), &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR); if (ret) goto query_mapping_error; /* If flags are required and we're not V4, then return a quite error */ if (pm_msg->flags && iwpm_ulib_version == IWPM_UABI_VERSION_MIN) { ret = -EINVAL; goto query_mapping_error_nowarn; } if (iwpm_ulib_version > IWPM_UABI_VERSION_MIN) { ret = ibnl_put_attr(skb, nlh, sizeof(u32), &pm_msg->flags, IWPM_NLA_QUERY_FLAGS); if (ret) goto query_mapping_error; } nlmsg_end(skb, nlh); nlmsg_request->req_buffer = pm_msg; ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid); if (ret) { skb = NULL; /* skb is freed in the netlink send-op handling */ err_str = "Unable to send a nlmsg"; goto query_mapping_error; } ret = iwpm_wait_complete_req(nlmsg_request); return ret; query_mapping_error: pr_info("%s: %s (client = %u)\n", __func__, err_str, nl_client); query_mapping_error_nowarn: dev_kfree_skb(skb); if (nlmsg_request) iwpm_free_nlmsg_request(&nlmsg_request->kref); return ret; } /** * iwpm_remove_mapping - Send a netlink remove mapping request * to the userspace port mapper * * @local_addr: Local ip/tcp address to remove * @nl_client: The index of the netlink client * * nlmsg attributes: * [IWPM_NLA_MANAGE_MAPPING_SEQ] * [IWPM_NLA_MANAGE_ADDR] */ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client) { struct sk_buff *skb = NULL; struct nlmsghdr *nlh; u32 msg_seq; const char *err_str = ""; int ret = -EINVAL; if (!iwpm_valid_pid()) return 0; if (iwpm_check_registration(nl_client, IWPM_REG_UNDEF)) { err_str = "Unregistered port mapper client"; goto remove_mapping_error; } skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client); if (!skb) { ret = -ENOMEM; err_str = "Unable to create a nlmsg"; goto remove_mapping_error; } msg_seq = atomic_read(&echo_nlmsg_seq); nlh->nlmsg_seq = iwpm_get_nlmsg_seq(); err_str = "Unable to put attribute of the nlmsg"; ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_MANAGE_MAPPING_SEQ); if (ret) goto remove_mapping_error; ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage), local_addr, IWPM_NLA_MANAGE_ADDR); if (ret) goto remove_mapping_error; nlmsg_end(skb, nlh); ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid); if (ret) { skb = NULL; /* skb is freed in the netlink send-op handling */ iwpm_user_pid = IWPM_PID_UNDEFINED; err_str = "Unable to send a nlmsg"; goto remove_mapping_error; } iwpm_print_sockaddr(local_addr, "remove_mapping: Local sockaddr:"); return 0; remove_mapping_error: pr_info("%s: %s 
(client = %u)\n", __func__, err_str, nl_client); if (skb) dev_kfree_skb_any(skb); return ret; } /* netlink attribute policy for the received response to register pid request */ static const struct nla_policy resp_reg_policy[IWPM_NLA_RREG_PID_MAX] = { [IWPM_NLA_RREG_PID_SEQ] = { .type = NLA_U32 }, [IWPM_NLA_RREG_IBDEV_NAME] = { .type = NLA_STRING, .len = IWPM_DEVNAME_SIZE - 1 }, [IWPM_NLA_RREG_ULIB_NAME] = { .type = NLA_STRING, .len = IWPM_ULIBNAME_SIZE - 1 }, [IWPM_NLA_RREG_ULIB_VER] = { .type = NLA_U16 }, [IWPM_NLA_RREG_PID_ERR] = { .type = NLA_U16 } }; /** * iwpm_register_pid_cb - Process the port mapper response to * iwpm_register_pid query * @skb: The socket buffer * @cb: Contains the received message (payload and netlink header) * * If successful, the function receives the userspace port mapper pid * which is used in future communication with the port mapper */ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb) { struct iwpm_nlmsg_request *nlmsg_request = NULL; struct nlattr *nltb[IWPM_NLA_RREG_PID_MAX]; struct iwpm_dev_data *pm_msg; char *dev_name, *iwpm_name; u32 msg_seq; u8 nl_client; u16 iwpm_version; const char *msg_type = "Register Pid response"; if (iwpm_parse_nlmsg(cb, IWPM_NLA_RREG_PID_MAX, resp_reg_policy, nltb, msg_type)) return -EINVAL; msg_seq = nla_get_u32(nltb[IWPM_NLA_RREG_PID_SEQ]); nlmsg_request = iwpm_find_nlmsg_request(msg_seq); if (!nlmsg_request) { pr_info("%s: Could not find a matching request (seq = %u)\n", __func__, msg_seq); return -EINVAL; } pm_msg = nlmsg_request->req_buffer; nl_client = nlmsg_request->nl_client; dev_name = (char *)nla_data(nltb[IWPM_NLA_RREG_IBDEV_NAME]); iwpm_name = (char *)nla_data(nltb[IWPM_NLA_RREG_ULIB_NAME]); iwpm_version = nla_get_u16(nltb[IWPM_NLA_RREG_ULIB_VER]); /* check device name, ulib name and version */ if (strcmp(pm_msg->dev_name, dev_name) || strcmp(iwpm_ulib_name, iwpm_name) || iwpm_version < IWPM_UABI_VERSION_MIN) { pr_info("%s: Incorrect info (dev = %s name = %s version = %u)\n", __func__, dev_name, iwpm_name, iwpm_version); nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR; goto register_pid_response_exit; } iwpm_user_pid = cb->nlh->nlmsg_pid; iwpm_ulib_version = iwpm_version; if (iwpm_ulib_version < IWPM_UABI_VERSION) pr_warn_once("%s: Down level iwpmd/pid %d. 
Continuing...", __func__, iwpm_user_pid); atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n", __func__, iwpm_user_pid); iwpm_set_registration(nl_client, IWPM_REG_VALID); register_pid_response_exit: nlmsg_request->request_done = 1; /* always for found nlmsg_request */ kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request); barrier(); up(&nlmsg_request->sem); return 0; } /* netlink attribute policy for the received response to add mapping request */ static const struct nla_policy resp_add_policy[IWPM_NLA_RMANAGE_MAPPING_MAX] = { [IWPM_NLA_RMANAGE_MAPPING_SEQ] = { .type = NLA_U32 }, [IWPM_NLA_RMANAGE_ADDR] = { .len = sizeof(struct sockaddr_storage) }, [IWPM_NLA_RMANAGE_MAPPED_LOC_ADDR] = { .len = sizeof(struct sockaddr_storage) }, [IWPM_NLA_RMANAGE_MAPPING_ERR] = { .type = NLA_U16 } }; /** * iwpm_add_mapping_cb - Process the port mapper response to * iwpm_add_mapping request * @skb: The socket buffer * @cb: Contains the received message (payload and netlink header) */ int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb) { struct iwpm_sa_data *pm_msg; struct iwpm_nlmsg_request *nlmsg_request = NULL; struct nlattr *nltb[IWPM_NLA_RMANAGE_MAPPING_MAX]; struct sockaddr_storage *local_sockaddr; struct sockaddr_storage *mapped_sockaddr; const char *msg_type; u32 msg_seq; msg_type = "Add Mapping response"; if (iwpm_parse_nlmsg(cb, IWPM_NLA_RMANAGE_MAPPING_MAX, resp_add_policy, nltb, msg_type)) return -EINVAL; atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); msg_seq = nla_get_u32(nltb[IWPM_NLA_RMANAGE_MAPPING_SEQ]); nlmsg_request = iwpm_find_nlmsg_request(msg_seq); if (!nlmsg_request) { pr_info("%s: Could not find a matching request (seq = %u)\n", __func__, msg_seq); return -EINVAL; } pm_msg = nlmsg_request->req_buffer; local_sockaddr = (struct sockaddr_storage *) nla_data(nltb[IWPM_NLA_RMANAGE_ADDR]); mapped_sockaddr = (struct sockaddr_storage *) nla_data(nltb[IWPM_NLA_RMANAGE_MAPPED_LOC_ADDR]); if (iwpm_compare_sockaddr(local_sockaddr, &pm_msg->loc_addr)) { nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR; goto add_mapping_response_exit; } if (mapped_sockaddr->ss_family != local_sockaddr->ss_family) { pr_info("%s: Sockaddr family doesn't match the requested one\n", __func__); nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR; goto add_mapping_response_exit; } memcpy(&pm_msg->mapped_loc_addr, mapped_sockaddr, sizeof(*mapped_sockaddr)); iwpm_print_sockaddr(&pm_msg->loc_addr, "add_mapping: Local sockaddr:"); iwpm_print_sockaddr(&pm_msg->mapped_loc_addr, "add_mapping: Mapped local sockaddr:"); add_mapping_response_exit: nlmsg_request->request_done = 1; /* always for found request */ kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request); barrier(); up(&nlmsg_request->sem); return 0; } /* netlink attribute policy for the response to add and query mapping request * and response with remote address info */ static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = { [IWPM_NLA_RQUERY_MAPPING_SEQ] = { .type = NLA_U32 }, [IWPM_NLA_RQUERY_LOCAL_ADDR] = { .len = sizeof(struct sockaddr_storage) }, [IWPM_NLA_RQUERY_REMOTE_ADDR] = { .len = sizeof(struct sockaddr_storage) }, [IWPM_NLA_RQUERY_MAPPED_LOC_ADDR] = { .len = sizeof(struct sockaddr_storage) }, [IWPM_NLA_RQUERY_MAPPED_REM_ADDR] = { .len = sizeof(struct sockaddr_storage) }, [IWPM_NLA_RQUERY_MAPPING_ERR] = { .type = NLA_U16 } }; /** * iwpm_add_and_query_mapping_cb - Process the port mapper response to * iwpm_add_and_query_mapping request * @skb: The socket 
buffer * @cb: Contains the received message (payload and netlink header) */ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb) { struct iwpm_sa_data *pm_msg; struct iwpm_nlmsg_request *nlmsg_request = NULL; struct nlattr *nltb[IWPM_NLA_RQUERY_MAPPING_MAX]; struct sockaddr_storage *local_sockaddr, *remote_sockaddr; struct sockaddr_storage *mapped_loc_sockaddr, *mapped_rem_sockaddr; const char *msg_type; u32 msg_seq; u16 err_code; msg_type = "Query Mapping response"; if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX, resp_query_policy, nltb, msg_type)) return -EINVAL; atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); msg_seq = nla_get_u32(nltb[IWPM_NLA_RQUERY_MAPPING_SEQ]); nlmsg_request = iwpm_find_nlmsg_request(msg_seq); if (!nlmsg_request) { pr_info("%s: Could not find a matching request (seq = %u)\n", __func__, msg_seq); return -EINVAL; } pm_msg = nlmsg_request->req_buffer; local_sockaddr = (struct sockaddr_storage *) nla_data(nltb[IWPM_NLA_RQUERY_LOCAL_ADDR]); remote_sockaddr = (struct sockaddr_storage *) nla_data(nltb[IWPM_NLA_RQUERY_REMOTE_ADDR]); mapped_loc_sockaddr = (struct sockaddr_storage *) nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]); mapped_rem_sockaddr = (struct sockaddr_storage *) nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_REM_ADDR]); err_code = nla_get_u16(nltb[IWPM_NLA_RQUERY_MAPPING_ERR]); if (err_code == IWPM_REMOTE_QUERY_REJECT) { pr_info("%s: Received a Reject (pid = %u, echo seq = %u)\n", __func__, cb->nlh->nlmsg_pid, msg_seq); nlmsg_request->err_code = IWPM_REMOTE_QUERY_REJECT; } if (iwpm_compare_sockaddr(local_sockaddr, &pm_msg->loc_addr) || iwpm_compare_sockaddr(remote_sockaddr, &pm_msg->rem_addr)) { pr_info("%s: Incorrect local sockaddr\n", __func__); nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR; goto query_mapping_response_exit; } if (mapped_loc_sockaddr->ss_family != local_sockaddr->ss_family || mapped_rem_sockaddr->ss_family != remote_sockaddr->ss_family) { pr_info("%s: Sockaddr family doesn't match the requested one\n", __func__); nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR; goto query_mapping_response_exit; } memcpy(&pm_msg->mapped_loc_addr, mapped_loc_sockaddr, sizeof(*mapped_loc_sockaddr)); memcpy(&pm_msg->mapped_rem_addr, mapped_rem_sockaddr, sizeof(*mapped_rem_sockaddr)); iwpm_print_sockaddr(&pm_msg->loc_addr, "query_mapping: Local sockaddr:"); iwpm_print_sockaddr(&pm_msg->mapped_loc_addr, "query_mapping: Mapped local sockaddr:"); iwpm_print_sockaddr(&pm_msg->rem_addr, "query_mapping: Remote sockaddr:"); iwpm_print_sockaddr(&pm_msg->mapped_rem_addr, "query_mapping: Mapped remote sockaddr:"); query_mapping_response_exit: nlmsg_request->request_done = 1; /* always for found request */ kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request); barrier(); up(&nlmsg_request->sem); return 0; } /** * iwpm_remote_info_cb - Process remote connecting peer address info, which * the port mapper has received from the connecting peer * @skb: The socket buffer * @cb: Contains the received message (payload and netlink header) * * Stores the IPv4/IPv6 address info in a hash table */ int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *nltb[IWPM_NLA_RQUERY_MAPPING_MAX]; struct sockaddr_storage *local_sockaddr, *remote_sockaddr; struct sockaddr_storage *mapped_loc_sockaddr, *mapped_rem_sockaddr; struct iwpm_remote_info *rem_info; const char *msg_type; u8 nl_client; int ret = -EINVAL; msg_type = "Remote Mapping info"; if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX, resp_query_policy, nltb, 
msg_type)) return ret; nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type); atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); local_sockaddr = (struct sockaddr_storage *) nla_data(nltb[IWPM_NLA_RQUERY_LOCAL_ADDR]); remote_sockaddr = (struct sockaddr_storage *) nla_data(nltb[IWPM_NLA_RQUERY_REMOTE_ADDR]); mapped_loc_sockaddr = (struct sockaddr_storage *) nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]); mapped_rem_sockaddr = (struct sockaddr_storage *) nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_REM_ADDR]); if (mapped_loc_sockaddr->ss_family != local_sockaddr->ss_family || mapped_rem_sockaddr->ss_family != remote_sockaddr->ss_family) { pr_info("%s: Sockaddr family doesn't match the requested one\n", __func__); return ret; } rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC); if (!rem_info) { ret = -ENOMEM; return ret; } memcpy(&rem_info->mapped_loc_sockaddr, mapped_loc_sockaddr, sizeof(struct sockaddr_storage)); memcpy(&rem_info->remote_sockaddr, remote_sockaddr, sizeof(struct sockaddr_storage)); memcpy(&rem_info->mapped_rem_sockaddr, mapped_rem_sockaddr, sizeof(struct sockaddr_storage)); rem_info->nl_client = nl_client; iwpm_add_remote_info(rem_info); iwpm_print_sockaddr(local_sockaddr, "remote_info: Local sockaddr:"); iwpm_print_sockaddr(mapped_loc_sockaddr, "remote_info: Mapped local sockaddr:"); iwpm_print_sockaddr(remote_sockaddr, "remote_info: Remote sockaddr:"); iwpm_print_sockaddr(mapped_rem_sockaddr, "remote_info: Mapped remote sockaddr:"); return ret; } /* netlink attribute policy for the received request for mapping info */ static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = { [IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING, .len = IWPM_ULIBNAME_SIZE - 1 }, [IWPM_NLA_MAPINFO_ULIB_VER] = { .type = NLA_U16 } }; /** * iwpm_mapping_info_cb - Process a notification that the userspace * port mapper daemon is started * @skb: The socket buffer * @cb: Contains the received message (payload and netlink header) * * Using the received port mapper pid, send all the local mapping * info records to the userspace port mapper */ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX]; const char *msg_type = "Mapping Info response"; u8 nl_client; char *iwpm_name; u16 iwpm_version; int ret = -EINVAL; if (iwpm_parse_nlmsg(cb, IWPM_NLA_MAPINFO_REQ_MAX, resp_mapinfo_policy, nltb, msg_type)) { pr_info("%s: Unable to parse nlmsg\n", __func__); return ret; } iwpm_name = (char *)nla_data(nltb[IWPM_NLA_MAPINFO_ULIB_NAME]); iwpm_version = nla_get_u16(nltb[IWPM_NLA_MAPINFO_ULIB_VER]); if (strcmp(iwpm_ulib_name, iwpm_name) || iwpm_version < IWPM_UABI_VERSION_MIN) { pr_info("%s: Invalid port mapper name = %s version = %u\n", __func__, iwpm_name, iwpm_version); return ret; } nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type); iwpm_set_registration(nl_client, IWPM_REG_INCOMPL); atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); iwpm_user_pid = cb->nlh->nlmsg_pid; if (iwpm_ulib_version < IWPM_UABI_VERSION) pr_warn_once("%s: Down level iwpmd/pid %d. 
Continuing...", __func__, iwpm_user_pid); if (!iwpm_mapinfo_available()) return 0; pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n", __func__, iwpm_user_pid); ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid); return ret; } /* netlink attribute policy for the received mapping info ack */ static const struct nla_policy ack_mapinfo_policy[IWPM_NLA_MAPINFO_NUM_MAX] = { [IWPM_NLA_MAPINFO_SEQ] = { .type = NLA_U32 }, [IWPM_NLA_MAPINFO_SEND_NUM] = { .type = NLA_U32 }, [IWPM_NLA_MAPINFO_ACK_NUM] = { .type = NLA_U32 } }; /** * iwpm_ack_mapping_info_cb - Process the port mapper ack for * the provided local mapping info records * @skb: The socket buffer * @cb: Contains the received message (payload and netlink header) */ int iwpm_ack_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *nltb[IWPM_NLA_MAPINFO_NUM_MAX]; u32 mapinfo_send, mapinfo_ack; const char *msg_type = "Mapping Info Ack"; if (iwpm_parse_nlmsg(cb, IWPM_NLA_MAPINFO_NUM_MAX, ack_mapinfo_policy, nltb, msg_type)) return -EINVAL; mapinfo_send = nla_get_u32(nltb[IWPM_NLA_MAPINFO_SEND_NUM]); mapinfo_ack = nla_get_u32(nltb[IWPM_NLA_MAPINFO_ACK_NUM]); if (mapinfo_ack != mapinfo_send) pr_info("%s: Invalid mapinfo number (sent = %u ack-ed = %u)\n", __func__, mapinfo_send, mapinfo_ack); atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); return 0; } /* netlink attribute policy for the received port mapper error message */ static const struct nla_policy map_error_policy[IWPM_NLA_ERR_MAX] = { [IWPM_NLA_ERR_SEQ] = { .type = NLA_U32 }, [IWPM_NLA_ERR_CODE] = { .type = NLA_U16 }, }; /** * iwpm_mapping_error_cb - Process port mapper notification for error * * @skb: The socket buffer * @cb: Contains the received message (payload and netlink header) */ int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb) { struct iwpm_nlmsg_request *nlmsg_request = NULL; int nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type); struct nlattr *nltb[IWPM_NLA_ERR_MAX]; u32 msg_seq; u16 err_code; const char *msg_type = "Mapping Error Msg"; if (iwpm_parse_nlmsg(cb, IWPM_NLA_ERR_MAX, map_error_policy, nltb, msg_type)) return -EINVAL; msg_seq = nla_get_u32(nltb[IWPM_NLA_ERR_SEQ]); err_code = nla_get_u16(nltb[IWPM_NLA_ERR_CODE]); pr_info("%s: Received msg seq = %u err code = %u client = %d\n", __func__, msg_seq, err_code, nl_client); /* look for nlmsg_request */ nlmsg_request = iwpm_find_nlmsg_request(msg_seq); if (!nlmsg_request) { /* not all errors have associated requests */ pr_debug("Could not find matching req (seq = %u)\n", msg_seq); return 0; } atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); nlmsg_request->err_code = err_code; nlmsg_request->request_done = 1; /* always for found request */ kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request); barrier(); up(&nlmsg_request->sem); return 0; } /* netlink attribute policy for the received hello request */ static const struct nla_policy hello_policy[IWPM_NLA_HELLO_MAX] = { [IWPM_NLA_HELLO_ABI_VERSION] = { .type = NLA_U16 } }; /** * iwpm_hello_cb - Process a hello message from iwpmd * * @skb: The socket buffer * @cb: Contains the received message (payload and netlink header) * * Using the received port mapper pid, send the kernel's abi_version * after adjusting it to support the iwpmd version. 
*/ int iwpm_hello_cb(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *nltb[IWPM_NLA_HELLO_MAX]; const char *msg_type = "Hello request"; u8 nl_client; u16 abi_version; int ret = -EINVAL; if (iwpm_parse_nlmsg(cb, IWPM_NLA_HELLO_MAX, hello_policy, nltb, msg_type)) { pr_info("%s: Unable to parse nlmsg\n", __func__); return ret; } abi_version = nla_get_u16(nltb[IWPM_NLA_HELLO_ABI_VERSION]); nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type); iwpm_set_registration(nl_client, IWPM_REG_INCOMPL); atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); iwpm_ulib_version = min_t(u16, IWPM_UABI_VERSION, abi_version); pr_debug("Using ABI version %u\n", iwpm_ulib_version); iwpm_user_pid = cb->nlh->nlmsg_pid; ret = iwpm_send_hello(nl_client, iwpm_user_pid, iwpm_ulib_version); return ret; }
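/*
 * Illustration (not part of the kernel sources above): every iwpm callback in
 * this file follows the same shape -- declare a struct nla_policy for the
 * attributes it expects, validate the message against it, then pull typed
 * values out with nla_get_*()/nla_data().  A minimal, self-contained sketch of
 * that pattern, using a hypothetical attribute space, might look like this.
 */
#include <net/netlink.h>
#include <linux/socket.h>
#include <linux/printk.h>

/* Hypothetical attribute numbering, for illustration only. */
enum {
	EX_NLA_UNSPEC,
	EX_NLA_SEQ,	/* u32 sequence number */
	EX_NLA_ADDR,	/* binary sockaddr_storage blob */
	EX_NLA_MAX
};

static const struct nla_policy ex_policy[EX_NLA_MAX] = {
	[EX_NLA_SEQ]  = { .type = NLA_U32 },
	[EX_NLA_ADDR] = { .len = sizeof(struct sockaddr_storage) },
};

/* Parse the attributes that follow the netlink header, then read them out. */
static int ex_parse(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[EX_NLA_MAX];
	struct sockaddr_storage *addr;
	u32 seq;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, EX_NLA_MAX - 1, ex_policy, NULL);
	if (ret)
		return ret;
	if (!tb[EX_NLA_SEQ] || !tb[EX_NLA_ADDR])
		return -EINVAL;

	seq = nla_get_u32(tb[EX_NLA_SEQ]);
	addr = (struct sockaddr_storage *)nla_data(tb[EX_NLA_ADDR]);
	pr_debug("seq = %u, addr family = %u\n", seq, addr->ss_family);
	return 0;
}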
linux-master
drivers/infiniband/core/iwpm_msg.c
/* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/file.h> #include <linux/cdev.h> #include <linux/anon_inodes.h> #include <linux/slab.h> #include <linux/sched/mm.h> #include <linux/uaccess.h> #include <rdma/ib.h> #include <rdma/uverbs_std_types.h> #include <rdma/rdma_netlink.h> #include "uverbs.h" #include "core_priv.h" #include "rdma_core.h" MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand userspace verbs access"); MODULE_LICENSE("Dual BSD/GPL"); enum { IB_UVERBS_MAJOR = 231, IB_UVERBS_BASE_MINOR = 192, IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS, IB_UVERBS_NUM_FIXED_MINOR = 32, IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR, }; #define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR) static dev_t dynamic_uverbs_dev; static DEFINE_IDA(uverbs_ida); static int ib_uverbs_add_one(struct ib_device *device); static void ib_uverbs_remove_one(struct ib_device *device, void *client_data); static char *uverbs_devnode(const struct device *dev, umode_t *mode) { if (mode) *mode = 0666; return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); } static const struct class uverbs_class = { .name = "infiniband_verbs", .devnode = uverbs_devnode, }; /* * Must be called with the ufile->device->disassociate_srcu held, and the lock * must be held until use of the ucontext is finished. */ struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile) { /* * We do not hold the hw_destroy_rwsem lock for this flow, instead * srcu is used. It does not matter if someone races this with * get_context, we get NULL or valid ucontext. 
*/ struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext); if (!srcu_dereference(ufile->device->ib_dev, &ufile->device->disassociate_srcu)) return ERR_PTR(-EIO); if (!ucontext) return ERR_PTR(-EINVAL); return ucontext; } EXPORT_SYMBOL(ib_uverbs_get_ucontext_file); int uverbs_dealloc_mw(struct ib_mw *mw) { struct ib_pd *pd = mw->pd; int ret; ret = mw->device->ops.dealloc_mw(mw); if (ret) return ret; atomic_dec(&pd->usecnt); kfree(mw); return ret; } static void ib_uverbs_release_dev(struct device *device) { struct ib_uverbs_device *dev = container_of(device, struct ib_uverbs_device, dev); uverbs_destroy_api(dev->uapi); cleanup_srcu_struct(&dev->disassociate_srcu); mutex_destroy(&dev->lists_mutex); mutex_destroy(&dev->xrcd_tree_mutex); kfree(dev); } void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file, struct ib_ucq_object *uobj) { struct ib_uverbs_event *evt, *tmp; if (ev_file) { spin_lock_irq(&ev_file->ev_queue.lock); list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) { list_del(&evt->list); kfree(evt); } spin_unlock_irq(&ev_file->ev_queue.lock); uverbs_uobject_put(&ev_file->uobj); } ib_uverbs_release_uevent(&uobj->uevent); } void ib_uverbs_release_uevent(struct ib_uevent_object *uobj) { struct ib_uverbs_async_event_file *async_file = uobj->event_file; struct ib_uverbs_event *evt, *tmp; if (!async_file) return; spin_lock_irq(&async_file->ev_queue.lock); list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) { list_del(&evt->list); kfree(evt); } spin_unlock_irq(&async_file->ev_queue.lock); uverbs_uobject_put(&async_file->uobj); } void ib_uverbs_detach_umcast(struct ib_qp *qp, struct ib_uqp_object *uobj) { struct ib_uverbs_mcast_entry *mcast, *tmp; list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) { ib_detach_mcast(qp, &mcast->gid, mcast->lid); list_del(&mcast->list); kfree(mcast); } } static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev) { complete(&dev->comp); } void ib_uverbs_release_file(struct kref *ref) { struct ib_uverbs_file *file = container_of(ref, struct ib_uverbs_file, ref); struct ib_device *ib_dev; int srcu_key; release_ufile_idr_uobject(file); srcu_key = srcu_read_lock(&file->device->disassociate_srcu); ib_dev = srcu_dereference(file->device->ib_dev, &file->device->disassociate_srcu); if (ib_dev && !ib_dev->ops.disassociate_ucontext) module_put(ib_dev->ops.owner); srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); if (refcount_dec_and_test(&file->device->refcount)) ib_uverbs_comp_dev(file->device); if (file->default_async_file) uverbs_uobject_put(&file->default_async_file->uobj); put_device(&file->device->dev); if (file->disassociate_page) __free_pages(file->disassociate_page, 0); mutex_destroy(&file->umap_lock); mutex_destroy(&file->ucontext_lock); kfree(file); } static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue, struct file *filp, char __user *buf, size_t count, loff_t *pos, size_t eventsz) { struct ib_uverbs_event *event; int ret = 0; spin_lock_irq(&ev_queue->lock); while (list_empty(&ev_queue->event_list)) { if (ev_queue->is_closed) { spin_unlock_irq(&ev_queue->lock); return -EIO; } spin_unlock_irq(&ev_queue->lock); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible(ev_queue->poll_wait, (!list_empty(&ev_queue->event_list) || ev_queue->is_closed))) return -ERESTARTSYS; spin_lock_irq(&ev_queue->lock); } event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list); if (eventsz > count) { ret = -EINVAL; event = NULL; } else { 
list_del(ev_queue->event_list.next); if (event->counter) { ++(*event->counter); list_del(&event->obj_list); } } spin_unlock_irq(&ev_queue->lock); if (event) { if (copy_to_user(buf, event, eventsz)) ret = -EFAULT; else ret = eventsz; } kfree(event); return ret; } static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct ib_uverbs_async_event_file *file = filp->private_data; return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos, sizeof(struct ib_uverbs_async_event_desc)); } static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct ib_uverbs_completion_event_file *comp_ev_file = filp->private_data; return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count, pos, sizeof(struct ib_uverbs_comp_event_desc)); } static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue, struct file *filp, struct poll_table_struct *wait) { __poll_t pollflags = 0; poll_wait(filp, &ev_queue->poll_wait, wait); spin_lock_irq(&ev_queue->lock); if (!list_empty(&ev_queue->event_list)) pollflags = EPOLLIN | EPOLLRDNORM; else if (ev_queue->is_closed) pollflags = EPOLLERR; spin_unlock_irq(&ev_queue->lock); return pollflags; } static __poll_t ib_uverbs_async_event_poll(struct file *filp, struct poll_table_struct *wait) { struct ib_uverbs_async_event_file *file = filp->private_data; return ib_uverbs_event_poll(&file->ev_queue, filp, wait); } static __poll_t ib_uverbs_comp_event_poll(struct file *filp, struct poll_table_struct *wait) { struct ib_uverbs_completion_event_file *comp_ev_file = filp->private_data; return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait); } static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on) { struct ib_uverbs_async_event_file *file = filp->private_data; return fasync_helper(fd, filp, on, &file->ev_queue.async_queue); } static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on) { struct ib_uverbs_completion_event_file *comp_ev_file = filp->private_data; return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue); } const struct file_operations uverbs_event_fops = { .owner = THIS_MODULE, .read = ib_uverbs_comp_event_read, .poll = ib_uverbs_comp_event_poll, .release = uverbs_uobject_fd_release, .fasync = ib_uverbs_comp_event_fasync, .llseek = no_llseek, }; const struct file_operations uverbs_async_event_fops = { .owner = THIS_MODULE, .read = ib_uverbs_async_event_read, .poll = ib_uverbs_async_event_poll, .release = uverbs_async_event_release, .fasync = ib_uverbs_async_event_fasync, .llseek = no_llseek, }; void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context) { struct ib_uverbs_event_queue *ev_queue = cq_context; struct ib_ucq_object *uobj; struct ib_uverbs_event *entry; unsigned long flags; if (!ev_queue) return; spin_lock_irqsave(&ev_queue->lock, flags); if (ev_queue->is_closed) { spin_unlock_irqrestore(&ev_queue->lock, flags); return; } entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) { spin_unlock_irqrestore(&ev_queue->lock, flags); return; } uobj = cq->uobject; entry->desc.comp.cq_handle = cq->uobject->uevent.uobject.user_handle; entry->counter = &uobj->comp_events_reported; list_add_tail(&entry->list, &ev_queue->event_list); list_add_tail(&entry->obj_list, &uobj->comp_list); spin_unlock_irqrestore(&ev_queue->lock, flags); wake_up_interruptible(&ev_queue->poll_wait); kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN); } void ib_uverbs_async_handler(struct 
ib_uverbs_async_event_file *async_file, __u64 element, __u64 event, struct list_head *obj_list, u32 *counter) { struct ib_uverbs_event *entry; unsigned long flags; if (!async_file) return; spin_lock_irqsave(&async_file->ev_queue.lock, flags); if (async_file->ev_queue.is_closed) { spin_unlock_irqrestore(&async_file->ev_queue.lock, flags); return; } entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) { spin_unlock_irqrestore(&async_file->ev_queue.lock, flags); return; } entry->desc.async.element = element; entry->desc.async.event_type = event; entry->desc.async.reserved = 0; entry->counter = counter; list_add_tail(&entry->list, &async_file->ev_queue.event_list); if (obj_list) list_add_tail(&entry->obj_list, obj_list); spin_unlock_irqrestore(&async_file->ev_queue.lock, flags); wake_up_interruptible(&async_file->ev_queue.poll_wait); kill_fasync(&async_file->ev_queue.async_queue, SIGIO, POLL_IN); } static void uverbs_uobj_event(struct ib_uevent_object *eobj, struct ib_event *event) { ib_uverbs_async_handler(eobj->event_file, eobj->uobject.user_handle, event->event, &eobj->event_list, &eobj->events_reported); } void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr) { uverbs_uobj_event(&event->element.cq->uobject->uevent, event); } void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr) { /* for XRC target qp's, check that qp is live */ if (!event->element.qp->uobject) return; uverbs_uobj_event(&event->element.qp->uobject->uevent, event); } void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr) { uverbs_uobj_event(&event->element.wq->uobject->uevent, event); } void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr) { uverbs_uobj_event(&event->element.srq->uobject->uevent, event); } static void ib_uverbs_event_handler(struct ib_event_handler *handler, struct ib_event *event) { ib_uverbs_async_handler( container_of(handler, struct ib_uverbs_async_event_file, event_handler), event->element.port_num, event->event, NULL, NULL); } void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue) { spin_lock_init(&ev_queue->lock); INIT_LIST_HEAD(&ev_queue->event_list); init_waitqueue_head(&ev_queue->poll_wait); ev_queue->is_closed = 0; ev_queue->async_queue = NULL; } void ib_uverbs_init_async_event_file( struct ib_uverbs_async_event_file *async_file) { struct ib_uverbs_file *uverbs_file = async_file->uobj.ufile; struct ib_device *ib_dev = async_file->uobj.context->device; ib_uverbs_init_event_queue(&async_file->ev_queue); /* The first async_event_file becomes the default one for the file. 
*/ mutex_lock(&uverbs_file->ucontext_lock); if (!uverbs_file->default_async_file) { /* Pairs with the put in ib_uverbs_release_file */ uverbs_uobject_get(&async_file->uobj); smp_store_release(&uverbs_file->default_async_file, async_file); } mutex_unlock(&uverbs_file->ucontext_lock); INIT_IB_EVENT_HANDLER(&async_file->event_handler, ib_dev, ib_uverbs_event_handler); ib_register_event_handler(&async_file->event_handler); } static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr, struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count, const struct uverbs_api_write_method *method_elm) { if (method_elm->is_ex) { count -= sizeof(*hdr) + sizeof(*ex_hdr); if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count) return -EINVAL; if (hdr->in_words * 8 < method_elm->req_size) return -ENOSPC; if (ex_hdr->cmd_hdr_reserved) return -EINVAL; if (ex_hdr->response) { if (!hdr->out_words && !ex_hdr->provider_out_words) return -EINVAL; if (hdr->out_words * 8 < method_elm->resp_size) return -ENOSPC; if (!access_ok(u64_to_user_ptr(ex_hdr->response), (hdr->out_words + ex_hdr->provider_out_words) * 8)) return -EFAULT; } else { if (hdr->out_words || ex_hdr->provider_out_words) return -EINVAL; } return 0; } /* not extended command */ if (hdr->in_words * 4 != count) return -EINVAL; if (count < method_elm->req_size + sizeof(hdr)) { /* * rdma-core v18 and v19 have a bug where they send DESTROY_CQ * with a 16 byte write instead of 24. Old kernels didn't * check the size so they allowed this. Now that the size is * checked provide a compatibility work around to not break * those userspaces. */ if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ && count == 16) { hdr->in_words = 6; return 0; } return -ENOSPC; } if (hdr->out_words * 4 < method_elm->resp_size) return -ENOSPC; return 0; } static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct ib_uverbs_file *file = filp->private_data; const struct uverbs_api_write_method *method_elm; struct uverbs_api *uapi = file->device->uapi; struct ib_uverbs_ex_cmd_hdr ex_hdr; struct ib_uverbs_cmd_hdr hdr; struct uverbs_attr_bundle bundle; int srcu_key; ssize_t ret; if (!ib_safe_file_access(filp)) { pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", task_tgid_vnr(current), current->comm); return -EACCES; } if (count < sizeof(hdr)) return -EINVAL; if (copy_from_user(&hdr, buf, sizeof(hdr))) return -EFAULT; method_elm = uapi_get_method(uapi, hdr.command); if (IS_ERR(method_elm)) return PTR_ERR(method_elm); if (method_elm->is_ex) { if (count < (sizeof(hdr) + sizeof(ex_hdr))) return -EINVAL; if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr))) return -EFAULT; } ret = verify_hdr(&hdr, &ex_hdr, count, method_elm); if (ret) return ret; srcu_key = srcu_read_lock(&file->device->disassociate_srcu); buf += sizeof(hdr); memset(bundle.attr_present, 0, sizeof(bundle.attr_present)); bundle.ufile = file; bundle.context = NULL; /* only valid if bundle has uobject */ bundle.uobject = NULL; if (!method_elm->is_ex) { size_t in_len = hdr.in_words * 4 - sizeof(hdr); size_t out_len = hdr.out_words * 4; u64 response = 0; if (method_elm->has_udata) { bundle.driver_udata.inlen = in_len - method_elm->req_size; in_len = method_elm->req_size; if (bundle.driver_udata.inlen) bundle.driver_udata.inbuf = buf + in_len; else bundle.driver_udata.inbuf = NULL; } else { memset(&bundle.driver_udata, 0, sizeof(bundle.driver_udata)); } if (method_elm->has_resp) { /* * The macros check that if 
has_resp is set * then the command request structure starts * with a '__aligned u64 response' member. */ ret = get_user(response, (const u64 __user *)buf); if (ret) goto out_unlock; if (method_elm->has_udata) { bundle.driver_udata.outlen = out_len - method_elm->resp_size; out_len = method_elm->resp_size; if (bundle.driver_udata.outlen) bundle.driver_udata.outbuf = u64_to_user_ptr(response + out_len); else bundle.driver_udata.outbuf = NULL; } } else { bundle.driver_udata.outlen = 0; bundle.driver_udata.outbuf = NULL; } ib_uverbs_init_udata_buf_or_null( &bundle.ucore, buf, u64_to_user_ptr(response), in_len, out_len); } else { buf += sizeof(ex_hdr); ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf, u64_to_user_ptr(ex_hdr.response), hdr.in_words * 8, hdr.out_words * 8); ib_uverbs_init_udata_buf_or_null( &bundle.driver_udata, buf + bundle.ucore.inlen, u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen, ex_hdr.provider_in_words * 8, ex_hdr.provider_out_words * 8); } ret = method_elm->handler(&bundle); if (bundle.uobject) uverbs_finalize_object(bundle.uobject, UVERBS_ACCESS_NEW, true, !ret, &bundle); out_unlock: srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); return (ret) ? : count; } static const struct vm_operations_struct rdma_umap_ops; static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) { struct ib_uverbs_file *file = filp->private_data; struct ib_ucontext *ucontext; int ret = 0; int srcu_key; srcu_key = srcu_read_lock(&file->device->disassociate_srcu); ucontext = ib_uverbs_get_ucontext_file(file); if (IS_ERR(ucontext)) { ret = PTR_ERR(ucontext); goto out; } vma->vm_ops = &rdma_umap_ops; ret = ucontext->device->ops.mmap(ucontext, vma); out: srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); return ret; } /* * The VMA has been dup'd, initialize the vm_private_data with a new tracking * struct */ static void rdma_umap_open(struct vm_area_struct *vma) { struct ib_uverbs_file *ufile = vma->vm_file->private_data; struct rdma_umap_priv *opriv = vma->vm_private_data; struct rdma_umap_priv *priv; if (!opriv) return; /* We are racing with disassociation */ if (!down_read_trylock(&ufile->hw_destroy_rwsem)) goto out_zap; /* * Disassociation already completed, the VMA should already be zapped. */ if (!ufile->ucontext) goto out_unlock; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) goto out_unlock; rdma_umap_priv_init(priv, vma, opriv->entry); up_read(&ufile->hw_destroy_rwsem); return; out_unlock: up_read(&ufile->hw_destroy_rwsem); out_zap: /* * We can't allow the VMA to be created with the actual IO pages, that * would break our API contract, and it can't be stopped at this * point, so zap it. */ vma->vm_private_data = NULL; zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); } static void rdma_umap_close(struct vm_area_struct *vma) { struct ib_uverbs_file *ufile = vma->vm_file->private_data; struct rdma_umap_priv *priv = vma->vm_private_data; if (!priv) return; /* * The vma holds a reference on the struct file that created it, which * in turn means that the ib_uverbs_file is guaranteed to exist at * this point. */ mutex_lock(&ufile->umap_lock); if (priv->entry) rdma_user_mmap_entry_put(priv->entry); list_del(&priv->list); mutex_unlock(&ufile->umap_lock); kfree(priv); } /* * Once the zap_vma_ptes has been called touches to the VMA will come here and * we return a dummy writable zero page for all the pfns. 
*/ static vm_fault_t rdma_umap_fault(struct vm_fault *vmf) { struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data; struct rdma_umap_priv *priv = vmf->vma->vm_private_data; vm_fault_t ret = 0; if (!priv) return VM_FAULT_SIGBUS; /* Read only pages can just use the system zero page. */ if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) { vmf->page = ZERO_PAGE(vmf->address); get_page(vmf->page); return 0; } mutex_lock(&ufile->umap_lock); if (!ufile->disassociate_page) ufile->disassociate_page = alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0); if (ufile->disassociate_page) { /* * This VMA is forced to always be shared so this doesn't have * to worry about COW. */ vmf->page = ufile->disassociate_page; get_page(vmf->page); } else { ret = VM_FAULT_SIGBUS; } mutex_unlock(&ufile->umap_lock); return ret; } static const struct vm_operations_struct rdma_umap_ops = { .open = rdma_umap_open, .close = rdma_umap_close, .fault = rdma_umap_fault, }; void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) { struct rdma_umap_priv *priv, *next_priv; lockdep_assert_held(&ufile->hw_destroy_rwsem); while (1) { struct mm_struct *mm = NULL; /* Get an arbitrary mm pointer that hasn't been cleaned yet */ mutex_lock(&ufile->umap_lock); while (!list_empty(&ufile->umaps)) { int ret; priv = list_first_entry(&ufile->umaps, struct rdma_umap_priv, list); mm = priv->vma->vm_mm; ret = mmget_not_zero(mm); if (!ret) { list_del_init(&priv->list); if (priv->entry) { rdma_user_mmap_entry_put(priv->entry); priv->entry = NULL; } mm = NULL; continue; } break; } mutex_unlock(&ufile->umap_lock); if (!mm) return; /* * The umap_lock is nested under mmap_lock since it used within * the vma_ops callbacks, so we have to clean the list one mm * at a time to get the lock ordering right. Typically there * will only be one mm, so no big deal. */ mmap_read_lock(mm); mutex_lock(&ufile->umap_lock); list_for_each_entry_safe (priv, next_priv, &ufile->umaps, list) { struct vm_area_struct *vma = priv->vma; if (vma->vm_mm != mm) continue; list_del_init(&priv->list); zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); if (priv->entry) { rdma_user_mmap_entry_put(priv->entry); priv->entry = NULL; } } mutex_unlock(&ufile->umap_lock); mmap_read_unlock(mm); mmput(mm); } } /* * ib_uverbs_open() does not need the BKL: * * - the ib_uverbs_device structures are properly reference counted and * everything else is purely local to the file being created, so * races against other open calls are not a problem; * - there is no ioctl method to race against; * - the open method will either immediately run -ENXIO, or all * required initialization will be done. */ static int ib_uverbs_open(struct inode *inode, struct file *filp) { struct ib_uverbs_device *dev; struct ib_uverbs_file *file; struct ib_device *ib_dev; int ret; int module_dependent; int srcu_key; dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev); if (!refcount_inc_not_zero(&dev->refcount)) return -ENXIO; get_device(&dev->dev); srcu_key = srcu_read_lock(&dev->disassociate_srcu); mutex_lock(&dev->lists_mutex); ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu); if (!ib_dev) { ret = -EIO; goto err; } if (!rdma_dev_access_netns(ib_dev, current->nsproxy->net_ns)) { ret = -EPERM; goto err; } /* In case IB device supports disassociate ucontext, there is no hard * dependency between uverbs device and its low level device. 
*/ module_dependent = !(ib_dev->ops.disassociate_ucontext); if (module_dependent) { if (!try_module_get(ib_dev->ops.owner)) { ret = -ENODEV; goto err; } } file = kzalloc(sizeof(*file), GFP_KERNEL); if (!file) { ret = -ENOMEM; if (module_dependent) goto err_module; goto err; } file->device = dev; kref_init(&file->ref); mutex_init(&file->ucontext_lock); spin_lock_init(&file->uobjects_lock); INIT_LIST_HEAD(&file->uobjects); init_rwsem(&file->hw_destroy_rwsem); mutex_init(&file->umap_lock); INIT_LIST_HEAD(&file->umaps); filp->private_data = file; list_add_tail(&file->list, &dev->uverbs_file_list); mutex_unlock(&dev->lists_mutex); srcu_read_unlock(&dev->disassociate_srcu, srcu_key); setup_ufile_idr_uobject(file); return stream_open(inode, filp); err_module: module_put(ib_dev->ops.owner); err: mutex_unlock(&dev->lists_mutex); srcu_read_unlock(&dev->disassociate_srcu, srcu_key); if (refcount_dec_and_test(&dev->refcount)) ib_uverbs_comp_dev(dev); put_device(&dev->dev); return ret; } static int ib_uverbs_close(struct inode *inode, struct file *filp) { struct ib_uverbs_file *file = filp->private_data; uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE); mutex_lock(&file->device->lists_mutex); list_del_init(&file->list); mutex_unlock(&file->device->lists_mutex); kref_put(&file->ref, ib_uverbs_release_file); return 0; } static const struct file_operations uverbs_fops = { .owner = THIS_MODULE, .write = ib_uverbs_write, .open = ib_uverbs_open, .release = ib_uverbs_close, .llseek = no_llseek, .unlocked_ioctl = ib_uverbs_ioctl, .compat_ioctl = compat_ptr_ioctl, }; static const struct file_operations uverbs_mmap_fops = { .owner = THIS_MODULE, .write = ib_uverbs_write, .mmap = ib_uverbs_mmap, .open = ib_uverbs_open, .release = ib_uverbs_close, .llseek = no_llseek, .unlocked_ioctl = ib_uverbs_ioctl, .compat_ioctl = compat_ptr_ioctl, }; static int ib_uverbs_get_nl_info(struct ib_device *ibdev, void *client_data, struct ib_client_nl_info *res) { struct ib_uverbs_device *uverbs_dev = client_data; int ret; if (res->port != -1) return -EINVAL; res->abi = ibdev->ops.uverbs_abi_ver; res->cdev = &uverbs_dev->dev; /* * To support DRIVER_ID binding in userspace some of the driver need * upgrading to expose their PCI dependent revision information * through get_context instead of relying on modalias matching. When * the drivers are fixed they can drop this flag. 
*/ if (!ibdev->ops.uverbs_no_driver_id_binding) { ret = nla_put_u32(res->nl_msg, RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID, ibdev->ops.driver_id); if (ret) return ret; } return 0; } static struct ib_client uverbs_client = { .name = "uverbs", .no_kverbs_req = true, .add = ib_uverbs_add_one, .remove = ib_uverbs_remove_one, .get_nl_info = ib_uverbs_get_nl_info, }; MODULE_ALIAS_RDMA_CLIENT("uverbs"); static ssize_t ibdev_show(struct device *device, struct device_attribute *attr, char *buf) { struct ib_uverbs_device *dev = container_of(device, struct ib_uverbs_device, dev); int ret = -ENODEV; int srcu_key; struct ib_device *ib_dev; srcu_key = srcu_read_lock(&dev->disassociate_srcu); ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu); if (ib_dev) ret = sysfs_emit(buf, "%s\n", dev_name(&ib_dev->dev)); srcu_read_unlock(&dev->disassociate_srcu, srcu_key); return ret; } static DEVICE_ATTR_RO(ibdev); static ssize_t abi_version_show(struct device *device, struct device_attribute *attr, char *buf) { struct ib_uverbs_device *dev = container_of(device, struct ib_uverbs_device, dev); int ret = -ENODEV; int srcu_key; struct ib_device *ib_dev; srcu_key = srcu_read_lock(&dev->disassociate_srcu); ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu); if (ib_dev) ret = sysfs_emit(buf, "%u\n", ib_dev->ops.uverbs_abi_ver); srcu_read_unlock(&dev->disassociate_srcu, srcu_key); return ret; } static DEVICE_ATTR_RO(abi_version); static struct attribute *ib_dev_attrs[] = { &dev_attr_abi_version.attr, &dev_attr_ibdev.attr, NULL, }; static const struct attribute_group dev_attr_group = { .attrs = ib_dev_attrs, }; static CLASS_ATTR_STRING(abi_version, S_IRUGO, __stringify(IB_USER_VERBS_ABI_VERSION)); static int ib_uverbs_create_uapi(struct ib_device *device, struct ib_uverbs_device *uverbs_dev) { struct uverbs_api *uapi; uapi = uverbs_alloc_api(device); if (IS_ERR(uapi)) return PTR_ERR(uapi); uverbs_dev->uapi = uapi; return 0; } static int ib_uverbs_add_one(struct ib_device *device) { int devnum; dev_t base; struct ib_uverbs_device *uverbs_dev; int ret; if (!device->ops.alloc_ucontext) return -EOPNOTSUPP; uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL); if (!uverbs_dev) return -ENOMEM; ret = init_srcu_struct(&uverbs_dev->disassociate_srcu); if (ret) { kfree(uverbs_dev); return -ENOMEM; } device_initialize(&uverbs_dev->dev); uverbs_dev->dev.class = &uverbs_class; uverbs_dev->dev.parent = device->dev.parent; uverbs_dev->dev.release = ib_uverbs_release_dev; uverbs_dev->groups[0] = &dev_attr_group; uverbs_dev->dev.groups = uverbs_dev->groups; refcount_set(&uverbs_dev->refcount, 1); init_completion(&uverbs_dev->comp); uverbs_dev->xrcd_tree = RB_ROOT; mutex_init(&uverbs_dev->xrcd_tree_mutex); mutex_init(&uverbs_dev->lists_mutex); INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list); rcu_assign_pointer(uverbs_dev->ib_dev, device); uverbs_dev->num_comp_vectors = device->num_comp_vectors; devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1, GFP_KERNEL); if (devnum < 0) { ret = -ENOMEM; goto err; } uverbs_dev->devnum = devnum; if (devnum >= IB_UVERBS_NUM_FIXED_MINOR) base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR; else base = IB_UVERBS_BASE_DEV + devnum; ret = ib_uverbs_create_uapi(device, uverbs_dev); if (ret) goto err_uapi; uverbs_dev->dev.devt = base; dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum); cdev_init(&uverbs_dev->cdev, device->ops.mmap ? 
&uverbs_mmap_fops : &uverbs_fops); uverbs_dev->cdev.owner = THIS_MODULE; ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev); if (ret) goto err_uapi; ib_set_client_data(device, &uverbs_client, uverbs_dev); return 0; err_uapi: ida_free(&uverbs_ida, devnum); err: if (refcount_dec_and_test(&uverbs_dev->refcount)) ib_uverbs_comp_dev(uverbs_dev); wait_for_completion(&uverbs_dev->comp); put_device(&uverbs_dev->dev); return ret; } static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, struct ib_device *ib_dev) { struct ib_uverbs_file *file; /* Pending running commands to terminate */ uverbs_disassociate_api_pre(uverbs_dev); mutex_lock(&uverbs_dev->lists_mutex); while (!list_empty(&uverbs_dev->uverbs_file_list)) { file = list_first_entry(&uverbs_dev->uverbs_file_list, struct ib_uverbs_file, list); list_del_init(&file->list); kref_get(&file->ref); /* We must release the mutex before going ahead and calling * uverbs_cleanup_ufile, as it might end up indirectly calling * uverbs_close, for example due to freeing the resources (e.g * mmput). */ mutex_unlock(&uverbs_dev->lists_mutex); uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE); kref_put(&file->ref, ib_uverbs_release_file); mutex_lock(&uverbs_dev->lists_mutex); } mutex_unlock(&uverbs_dev->lists_mutex); uverbs_disassociate_api(uverbs_dev->uapi); } static void ib_uverbs_remove_one(struct ib_device *device, void *client_data) { struct ib_uverbs_device *uverbs_dev = client_data; int wait_clients = 1; cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev); ida_free(&uverbs_ida, uverbs_dev->devnum); if (device->ops.disassociate_ucontext) { /* We disassociate HW resources and immediately return. * Userspace will see a EIO errno for all future access. * Upon returning, ib_device may be freed internally and is not * valid any more. * uverbs_device is still available until all clients close * their files, then the uverbs device ref count will be zero * and its resources will be freed. * Note: At this point no more files can be opened since the * cdev was deleted, however active clients can still issue * commands and close their open files. 
*/ ib_uverbs_free_hw_resources(uverbs_dev, device); wait_clients = 0; } if (refcount_dec_and_test(&uverbs_dev->refcount)) ib_uverbs_comp_dev(uverbs_dev); if (wait_clients) wait_for_completion(&uverbs_dev->comp); put_device(&uverbs_dev->dev); } static int __init ib_uverbs_init(void) { int ret; ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_NUM_FIXED_MINOR, "infiniband_verbs"); if (ret) { pr_err("user_verbs: couldn't register device number\n"); goto out; } ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0, IB_UVERBS_NUM_DYNAMIC_MINOR, "infiniband_verbs"); if (ret) { pr_err("couldn't register dynamic device number\n"); goto out_alloc; } ret = class_register(&uverbs_class); if (ret) { pr_err("user_verbs: couldn't create class infiniband_verbs\n"); goto out_chrdev; } ret = class_create_file(&uverbs_class, &class_attr_abi_version.attr); if (ret) { pr_err("user_verbs: couldn't create abi_version attribute\n"); goto out_class; } ret = ib_register_client(&uverbs_client); if (ret) { pr_err("user_verbs: couldn't register client\n"); goto out_class; } return 0; out_class: class_unregister(&uverbs_class); out_chrdev: unregister_chrdev_region(dynamic_uverbs_dev, IB_UVERBS_NUM_DYNAMIC_MINOR); out_alloc: unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_NUM_FIXED_MINOR); out: return ret; } static void __exit ib_uverbs_cleanup(void) { ib_unregister_client(&uverbs_client); class_unregister(&uverbs_class); unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_NUM_FIXED_MINOR); unregister_chrdev_region(dynamic_uverbs_dev, IB_UVERBS_NUM_DYNAMIC_MINOR); mmu_notifier_synchronize(); } module_init(ib_uverbs_init); module_exit(ib_uverbs_cleanup);
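/*
 * Illustration (not part of the kernel sources above): uverbs_main.c guards
 * nearly every use of the underlying ib_device with SRCU so that readers can
 * race safely against hot-unplug/disassociation.  A stripped-down sketch of
 * that read-side pattern, with a hypothetical my_dev standing in for
 * struct ib_uverbs_device, might look like this.
 */
#include <linux/srcu.h>
#include <linux/errno.h>
#include <rdma/ib_verbs.h>

struct my_dev {
	struct srcu_struct disassociate_srcu;
	struct ib_device __rcu *ib_dev;	/* cleared when the HW device goes away */
};

/* Read side: pin a consistent view of ib_dev for the duration of the call. */
static int my_dev_query(struct my_dev *dev)
{
	struct ib_device *ib_dev;
	int srcu_key, ret = 0;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;	/* already disassociated */
		goto out;
	}
	/* ... it is now safe to call into ib_dev->ops here ... */
out:
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
	return ret;
}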
linux-master
drivers/infiniband/core/uverbs_main.c
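/*
 * Illustration (not part of the kernel sources above): ib_uverbs_init() splits
 * its minor number space into a fixed, historical range plus a dynamically
 * allocated overflow range.  The same registration pattern for a hypothetical
 * driver, with made-up names and numbers, might look like this.
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>

#define EX_FIXED_MINORS		32
#define EX_TOTAL_DEVICES	128
#define EX_BASE_DEV		MKDEV(240, 0)	/* example-only major/minor */

static dev_t ex_dynamic_base;

static int __init ex_chrdev_init(void)
{
	int ret;

	/* Claim the well-known, fixed minor range first... */
	ret = register_chrdev_region(EX_BASE_DEV, EX_FIXED_MINORS, "example");
	if (ret)
		return ret;

	/* ...then let the kernel pick a region for the remaining devices. */
	ret = alloc_chrdev_region(&ex_dynamic_base, 0,
				  EX_TOTAL_DEVICES - EX_FIXED_MINORS, "example");
	if (ret)
		unregister_chrdev_region(EX_BASE_DEV, EX_FIXED_MINORS);
	return ret;
}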
/* * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. * Copyright (c) 2020, Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "rdma_core.h" #include "uverbs.h" #include <rdma/uverbs_std_types.h> #include "restrack.h" static int uverbs_free_mr(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { return ib_dereg_mr_user((struct ib_mr *)uobject->object, &attrs->driver_udata); } static int UVERBS_HANDLER(UVERBS_METHOD_ADVISE_MR)( struct uverbs_attr_bundle *attrs) { struct ib_pd *pd = uverbs_attr_get_obj(attrs, UVERBS_ATTR_ADVISE_MR_PD_HANDLE); enum ib_uverbs_advise_mr_advice advice; struct ib_device *ib_dev = pd->device; struct ib_sge *sg_list; int num_sge; u32 flags; int ret; /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. 
*/ if (!ib_dev->ops.advise_mr) return -EOPNOTSUPP; ret = uverbs_get_const(&advice, attrs, UVERBS_ATTR_ADVISE_MR_ADVICE); if (ret) return ret; ret = uverbs_get_flags32(&flags, attrs, UVERBS_ATTR_ADVISE_MR_FLAGS, IB_UVERBS_ADVISE_MR_FLAG_FLUSH); if (ret) return ret; num_sge = uverbs_attr_ptr_get_array_size( attrs, UVERBS_ATTR_ADVISE_MR_SGE_LIST, sizeof(struct ib_sge)); if (num_sge <= 0) return num_sge; sg_list = uverbs_attr_get_alloced_ptr(attrs, UVERBS_ATTR_ADVISE_MR_SGE_LIST); return ib_dev->ops.advise_mr(pd, advice, flags, sg_list, num_sge, attrs); } static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)( struct uverbs_attr_bundle *attrs) { struct ib_dm_mr_attr attr = {}; struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE); struct ib_dm *dm = uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_DM_HANDLE); struct ib_pd *pd = uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_PD_HANDLE); struct ib_device *ib_dev = pd->device; struct ib_mr *mr; int ret; if (!ib_dev->ops.reg_dm_mr) return -EOPNOTSUPP; ret = uverbs_copy_from(&attr.offset, attrs, UVERBS_ATTR_REG_DM_MR_OFFSET); if (ret) return ret; ret = uverbs_copy_from(&attr.length, attrs, UVERBS_ATTR_REG_DM_MR_LENGTH); if (ret) return ret; ret = uverbs_get_flags32(&attr.access_flags, attrs, UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS, IB_ACCESS_SUPPORTED); if (ret) return ret; if (!(attr.access_flags & IB_ZERO_BASED)) return -EINVAL; ret = ib_check_mr_access(ib_dev, attr.access_flags); if (ret) return ret; if (attr.offset > dm->length || attr.length > dm->length || attr.length > dm->length - attr.offset) return -EINVAL; mr = pd->device->ops.reg_dm_mr(pd, dm, &attr, attrs); if (IS_ERR(mr)) return PTR_ERR(mr); mr->device = pd->device; mr->pd = pd; mr->type = IB_MR_TYPE_DM; mr->dm = dm; mr->uobject = uobj; atomic_inc(&pd->usecnt); atomic_inc(&dm->usecnt); rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); rdma_restrack_set_name(&mr->res, NULL); rdma_restrack_add(&mr->res); uobj->object = mr; uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE); ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DM_MR_RESP_LKEY, &mr->lkey, sizeof(mr->lkey)); if (ret) return ret; ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DM_MR_RESP_RKEY, &mr->rkey, sizeof(mr->rkey)); return ret; } static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_MR)( struct uverbs_attr_bundle *attrs) { struct ib_mr *mr = uverbs_attr_get_obj(attrs, UVERBS_ATTR_QUERY_MR_HANDLE); int ret; ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_LKEY, &mr->lkey, sizeof(mr->lkey)); if (ret) return ret; ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_RKEY, &mr->rkey, sizeof(mr->rkey)); if (ret) return ret; ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_LENGTH, &mr->length, sizeof(mr->length)); if (ret) return ret; ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_IOVA, &mr->iova, sizeof(mr->iova)); return IS_UVERBS_COPY_ERR(ret) ? 
ret : 0; } static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)( struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE); struct ib_pd *pd = uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DMABUF_MR_PD_HANDLE); struct ib_device *ib_dev = pd->device; u64 offset, length, iova; u32 fd, access_flags; struct ib_mr *mr; int ret; if (!ib_dev->ops.reg_user_mr_dmabuf) return -EOPNOTSUPP; ret = uverbs_copy_from(&offset, attrs, UVERBS_ATTR_REG_DMABUF_MR_OFFSET); if (ret) return ret; ret = uverbs_copy_from(&length, attrs, UVERBS_ATTR_REG_DMABUF_MR_LENGTH); if (ret) return ret; ret = uverbs_copy_from(&iova, attrs, UVERBS_ATTR_REG_DMABUF_MR_IOVA); if (ret) return ret; if ((offset & ~PAGE_MASK) != (iova & ~PAGE_MASK)) return -EINVAL; ret = uverbs_copy_from(&fd, attrs, UVERBS_ATTR_REG_DMABUF_MR_FD); if (ret) return ret; ret = uverbs_get_flags32(&access_flags, attrs, UVERBS_ATTR_REG_DMABUF_MR_ACCESS_FLAGS, IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_RELAXED_ORDERING); if (ret) return ret; ret = ib_check_mr_access(ib_dev, access_flags); if (ret) return ret; mr = pd->device->ops.reg_user_mr_dmabuf(pd, offset, length, iova, fd, access_flags, &attrs->driver_udata); if (IS_ERR(mr)) return PTR_ERR(mr); mr->device = pd->device; mr->pd = pd; mr->type = IB_MR_TYPE_USER; mr->uobject = uobj; atomic_inc(&pd->usecnt); rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); rdma_restrack_set_name(&mr->res, NULL); rdma_restrack_add(&mr->res); uobj->object = mr; uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE); ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DMABUF_MR_RESP_LKEY, &mr->lkey, sizeof(mr->lkey)); if (ret) return ret; ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY, &mr->rkey, sizeof(mr->rkey)); return ret; } DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_ADVISE_MR, UVERBS_ATTR_IDR(UVERBS_ATTR_ADVISE_MR_PD_HANDLE, UVERBS_OBJECT_PD, UVERBS_ACCESS_READ, UA_MANDATORY), UVERBS_ATTR_CONST_IN(UVERBS_ATTR_ADVISE_MR_ADVICE, enum ib_uverbs_advise_mr_advice, UA_MANDATORY), UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_ADVISE_MR_FLAGS, enum ib_uverbs_advise_mr_flag, UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ADVISE_MR_SGE_LIST, UVERBS_ATTR_MIN_SIZE(sizeof(struct ib_uverbs_sge)), UA_MANDATORY, UA_ALLOC_AND_COPY)); DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_QUERY_MR, UVERBS_ATTR_IDR(UVERBS_ATTR_QUERY_MR_HANDLE, UVERBS_OBJECT_MR, UVERBS_ACCESS_READ, UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_RKEY, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_LKEY, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_LENGTH, UVERBS_ATTR_TYPE(u64), UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_IOVA, UVERBS_ATTR_TYPE(u64), UA_OPTIONAL)); DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_DM_MR_REG, UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_HANDLE, UVERBS_OBJECT_MR, UVERBS_ACCESS_NEW, UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_OFFSET, UVERBS_ATTR_TYPE(u64), UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_LENGTH, UVERBS_ATTR_TYPE(u64), UA_MANDATORY), UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_PD_HANDLE, UVERBS_OBJECT_PD, UVERBS_ACCESS_READ, UA_MANDATORY), UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS, enum ib_access_flags), UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_DM_HANDLE, UVERBS_OBJECT_DM, UVERBS_ACCESS_READ, UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_LKEY, 
UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_RKEY, UVERBS_ATTR_TYPE(u32), UA_MANDATORY)); DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_REG_DMABUF_MR, UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DMABUF_MR_HANDLE, UVERBS_OBJECT_MR, UVERBS_ACCESS_NEW, UA_MANDATORY), UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DMABUF_MR_PD_HANDLE, UVERBS_OBJECT_PD, UVERBS_ACCESS_READ, UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_OFFSET, UVERBS_ATTR_TYPE(u64), UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_LENGTH, UVERBS_ATTR_TYPE(u64), UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_IOVA, UVERBS_ATTR_TYPE(u64), UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_FD, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_REG_DMABUF_MR_ACCESS_FLAGS, enum ib_access_flags), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DMABUF_MR_RESP_LKEY, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY, UVERBS_ATTR_TYPE(u32), UA_MANDATORY)); DECLARE_UVERBS_NAMED_METHOD_DESTROY( UVERBS_METHOD_MR_DESTROY, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MR_HANDLE, UVERBS_OBJECT_MR, UVERBS_ACCESS_DESTROY, UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_MR, UVERBS_TYPE_ALLOC_IDR(uverbs_free_mr), &UVERBS_METHOD(UVERBS_METHOD_ADVISE_MR), &UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG), &UVERBS_METHOD(UVERBS_METHOD_MR_DESTROY), &UVERBS_METHOD(UVERBS_METHOD_QUERY_MR), &UVERBS_METHOD(UVERBS_METHOD_REG_DMABUF_MR)); const struct uapi_definition uverbs_def_obj_mr[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MR, UAPI_DEF_OBJ_NEEDS_FN(dereg_mr)), {} };
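/*
 * Illustration (not part of the kernel sources above): the DM_MR_REG handler
 * validates that "offset + length fits inside dm->length" without ever
 * computing offset + length, so the check cannot wrap around.  The same idiom
 * in isolation, as a hypothetical helper, looks like this.
 */
#include <linux/types.h>

/* Overflow-safe "does [offset, offset + length) fit inside total?" check. */
static bool ex_range_fits(u64 offset, u64 length, u64 total)
{
	if (offset > total || length > total)
		return false;
	return length <= total - offset;	/* total - offset cannot underflow here */
}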
linux-master
drivers/infiniband/core/uverbs_std_types_mr.c
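/*
 * Illustration (not part of the kernel sources above): UVERBS_METHOD_REG_DMABUF_MR
 * is the kernel side of dma-buf memory registration.  Assuming rdma-core's
 * ibv_reg_dmabuf_mr() user-space wrapper (not defined in this file), a caller
 * that already owns a dma-buf fd might use it roughly as follows; names and
 * error handling here are illustrative only.
 */
#include <infiniband/verbs.h>
#include <stdio.h>
#include <stdint.h>

/* dmabuf_fd is assumed to come from an exporter such as a GPU driver. */
static struct ibv_mr *ex_register_dmabuf(struct ibv_pd *pd, int dmabuf_fd,
					  size_t len, uint64_t iova)
{
	struct ibv_mr *mr;

	/* offset 0 into the dma-buf; the kernel requires iova to share the
	 * same in-page offset as the dma-buf offset. */
	mr = ibv_reg_dmabuf_mr(pd, 0, len, iova, dmabuf_fd,
			       IBV_ACCESS_LOCAL_WRITE |
			       IBV_ACCESS_REMOTE_READ |
			       IBV_ACCESS_REMOTE_WRITE);
	if (!mr)
		perror("ibv_reg_dmabuf_mr");
	return mr;
}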
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2005 Voltaire Inc. All rights reserved. * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved. * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved. * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. */ #include <linux/completion.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/mutex.h> #include <linux/random.h> #include <linux/rbtree.h> #include <linux/igmp.h> #include <linux/xarray.h> #include <linux/inetdevice.h> #include <linux/slab.h> #include <linux/module.h> #include <net/route.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/netevent.h> #include <net/tcp.h> #include <net/ipv6.h> #include <net/ip_fib.h> #include <net/ip6_route.h> #include <rdma/rdma_cm.h> #include <rdma/rdma_cm_ib.h> #include <rdma/rdma_netlink.h> #include <rdma/ib.h> #include <rdma/ib_cache.h> #include <rdma/ib_cm.h> #include <rdma/ib_sa.h> #include <rdma/iw_cm.h> #include "core_priv.h" #include "cma_priv.h" #include "cma_trace.h" MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("Generic RDMA CM Agent"); MODULE_LICENSE("Dual BSD/GPL"); #define CMA_CM_RESPONSE_TIMEOUT 20 #define CMA_MAX_CM_RETRIES 15 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) #define CMA_IBOE_PACKET_LIFETIME 16 #define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP static const char * const cma_events[] = { [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved", [RDMA_CM_EVENT_ADDR_ERROR] = "address error", [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ", [RDMA_CM_EVENT_ROUTE_ERROR] = "route error", [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request", [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response", [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error", [RDMA_CM_EVENT_UNREACHABLE] = "unreachable", [RDMA_CM_EVENT_REJECTED] = "rejected", [RDMA_CM_EVENT_ESTABLISHED] = "established", [RDMA_CM_EVENT_DISCONNECTED] = "disconnected", [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal", [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join", [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error", [RDMA_CM_EVENT_ADDR_CHANGE] = "address change", [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", }; static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, enum ib_gid_type gid_type); const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) { size_t index = event; return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ? cma_events[index] : "unrecognized event"; } EXPORT_SYMBOL(rdma_event_msg); const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, int reason) { if (rdma_ib_or_roce(id->device, id->port_num)) return ibcm_reject_msg(reason); if (rdma_protocol_iwarp(id->device, id->port_num)) return iwcm_reject_msg(reason); WARN_ON_ONCE(1); return "unrecognized transport"; } EXPORT_SYMBOL(rdma_reject_msg); /** * rdma_is_consumer_reject - return true if the consumer rejected the connect * request. * @id: Communication identifier that received the REJECT event. * @reason: Value returned in the REJECT event status field. 
*/ static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason) { if (rdma_ib_or_roce(id->device, id->port_num)) return reason == IB_CM_REJ_CONSUMER_DEFINED; if (rdma_protocol_iwarp(id->device, id->port_num)) return reason == -ECONNREFUSED; WARN_ON_ONCE(1); return false; } const void *rdma_consumer_reject_data(struct rdma_cm_id *id, struct rdma_cm_event *ev, u8 *data_len) { const void *p; if (rdma_is_consumer_reject(id, ev->status)) { *data_len = ev->param.conn.private_data_len; p = ev->param.conn.private_data; } else { *data_len = 0; p = NULL; } return p; } EXPORT_SYMBOL(rdma_consumer_reject_data); /** * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id. * @id: Communication Identifier */ struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id) { struct rdma_id_private *id_priv; id_priv = container_of(id, struct rdma_id_private, id); if (id->device->node_type == RDMA_NODE_RNIC) return id_priv->cm_id.iw; return NULL; } EXPORT_SYMBOL(rdma_iw_cm_id); /** * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack. * @res: rdma resource tracking entry pointer */ struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res) { struct rdma_id_private *id_priv = container_of(res, struct rdma_id_private, res); return &id_priv->id; } EXPORT_SYMBOL(rdma_res_to_id); static int cma_add_one(struct ib_device *device); static void cma_remove_one(struct ib_device *device, void *client_data); static struct ib_client cma_client = { .name = "cma", .add = cma_add_one, .remove = cma_remove_one }; static struct ib_sa_client sa_client; static LIST_HEAD(dev_list); static LIST_HEAD(listen_any_list); static DEFINE_MUTEX(lock); static struct rb_root id_table = RB_ROOT; /* Serialize operations of id_table tree */ static DEFINE_SPINLOCK(id_table_lock); static struct workqueue_struct *cma_wq; static unsigned int cma_pernet_id; struct cma_pernet { struct xarray tcp_ps; struct xarray udp_ps; struct xarray ipoib_ps; struct xarray ib_ps; }; static struct cma_pernet *cma_pernet(struct net *net) { return net_generic(net, cma_pernet_id); } static struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps) { struct cma_pernet *pernet = cma_pernet(net); switch (ps) { case RDMA_PS_TCP: return &pernet->tcp_ps; case RDMA_PS_UDP: return &pernet->udp_ps; case RDMA_PS_IPOIB: return &pernet->ipoib_ps; case RDMA_PS_IB: return &pernet->ib_ps; default: return NULL; } } struct id_table_entry { struct list_head id_list; struct rb_node rb_node; }; struct cma_device { struct list_head list; struct ib_device *device; struct completion comp; refcount_t refcount; struct list_head id_list; enum ib_gid_type *default_gid_type; u8 *default_roce_tos; }; struct rdma_bind_list { enum rdma_ucm_port_space ps; struct hlist_head owners; unsigned short port; }; static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps, struct rdma_bind_list *bind_list, int snum) { struct xarray *xa = cma_pernet_xa(net, ps); return xa_insert(xa, snum, bind_list, GFP_KERNEL); } static struct rdma_bind_list *cma_ps_find(struct net *net, enum rdma_ucm_port_space ps, int snum) { struct xarray *xa = cma_pernet_xa(net, ps); return xa_load(xa, snum); } static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps, int snum) { struct xarray *xa = cma_pernet_xa(net, ps); xa_erase(xa, snum); } enum { CMA_OPTION_AFONLY, }; void cma_dev_get(struct cma_device *cma_dev) { refcount_inc(&cma_dev->refcount); } void cma_dev_put(struct cma_device *cma_dev) { if (refcount_dec_and_test(&cma_dev->refcount)) 
complete(&cma_dev->comp); } struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, void *cookie) { struct cma_device *cma_dev; struct cma_device *found_cma_dev = NULL; mutex_lock(&lock); list_for_each_entry(cma_dev, &dev_list, list) if (filter(cma_dev->device, cookie)) { found_cma_dev = cma_dev; break; } if (found_cma_dev) cma_dev_get(found_cma_dev); mutex_unlock(&lock); return found_cma_dev; } int cma_get_default_gid_type(struct cma_device *cma_dev, u32 port) { if (!rdma_is_port_valid(cma_dev->device, port)) return -EINVAL; return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)]; } int cma_set_default_gid_type(struct cma_device *cma_dev, u32 port, enum ib_gid_type default_gid_type) { unsigned long supported_gids; if (!rdma_is_port_valid(cma_dev->device, port)) return -EINVAL; if (default_gid_type == IB_GID_TYPE_IB && rdma_protocol_roce_eth_encap(cma_dev->device, port)) default_gid_type = IB_GID_TYPE_ROCE; supported_gids = roce_gid_type_mask_support(cma_dev->device, port); if (!(supported_gids & 1 << default_gid_type)) return -EINVAL; cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] = default_gid_type; return 0; } int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port) { if (!rdma_is_port_valid(cma_dev->device, port)) return -EINVAL; return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)]; } int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port, u8 default_roce_tos) { if (!rdma_is_port_valid(cma_dev->device, port)) return -EINVAL; cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] = default_roce_tos; return 0; } struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev) { return cma_dev->device; } /* * Device removal can occur at anytime, so we need extra handling to * serialize notifying the user of device removal with other callbacks. * We do this by disabling removal notification while a callback is in process, * and reporting it after the callback completes. */ struct cma_multicast { struct rdma_id_private *id_priv; union { struct ib_sa_multicast *sa_mc; struct { struct work_struct work; struct rdma_cm_event event; } iboe_join; }; struct list_head list; void *context; struct sockaddr_storage addr; u8 join_state; }; struct cma_work { struct work_struct work; struct rdma_id_private *id; enum rdma_cm_state old_state; enum rdma_cm_state new_state; struct rdma_cm_event event; }; union cma_ip_addr { struct in6_addr ip6; struct { __be32 pad[3]; __be32 addr; } ip4; }; struct cma_hdr { u8 cma_version; u8 ip_version; /* IP version: 7:4 */ __be16 port; union cma_ip_addr src_addr; union cma_ip_addr dst_addr; }; #define CMA_VERSION 0x00 struct cma_req_info { struct sockaddr_storage listen_addr_storage; struct sockaddr_storage src_addr_storage; struct ib_device *device; union ib_gid local_gid; __be64 service_id; int port; bool has_gid; u16 pkey; }; static int cma_comp_exch(struct rdma_id_private *id_priv, enum rdma_cm_state comp, enum rdma_cm_state exch) { unsigned long flags; int ret; /* * The FSM uses a funny double locking where state is protected by both * the handler_mutex and the spinlock. State is not allowed to change * to/from a handler_mutex protected value without also holding * handler_mutex. 
*/ if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT) lockdep_assert_held(&id_priv->handler_mutex); spin_lock_irqsave(&id_priv->lock, flags); if ((ret = (id_priv->state == comp))) id_priv->state = exch; spin_unlock_irqrestore(&id_priv->lock, flags); return ret; } static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr) { return hdr->ip_version >> 4; } static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver) { hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF); } static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv) { return (struct sockaddr *)&id_priv->id.route.addr.src_addr; } static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv) { return (struct sockaddr *)&id_priv->id.route.addr.dst_addr; } static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join) { struct in_device *in_dev = NULL; if (ndev) { rtnl_lock(); in_dev = __in_dev_get_rtnl(ndev); if (in_dev) { if (join) ip_mc_inc_group(in_dev, *(__be32 *)(mgid->raw + 12)); else ip_mc_dec_group(in_dev, *(__be32 *)(mgid->raw + 12)); } rtnl_unlock(); } return (in_dev) ? 0 : -ENODEV; } static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa, struct id_table_entry *entry_b) { struct rdma_id_private *id_priv = list_first_entry( &entry_b->id_list, struct rdma_id_private, id_list_entry); int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if; struct sockaddr *sb = cma_dst_addr(id_priv); if (ifindex_a != ifindex_b) return (ifindex_a > ifindex_b) ? 1 : -1; if (sa->sa_family != sb->sa_family) return sa->sa_family - sb->sa_family; if (sa->sa_family == AF_INET && __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) { return memcmp(&((struct sockaddr_in *)sa)->sin_addr, &((struct sockaddr_in *)sb)->sin_addr, sizeof(((struct sockaddr_in *)sa)->sin_addr)); } if (sa->sa_family == AF_INET6 && __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) { return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr, &((struct sockaddr_in6 *)sb)->sin6_addr); } return -1; } static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv) { struct rb_node **new, *parent = NULL; struct id_table_entry *this, *node; unsigned long flags; int result; node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOMEM; spin_lock_irqsave(&id_table_lock, flags); new = &id_table.rb_node; while (*new) { this = container_of(*new, struct id_table_entry, rb_node); result = compare_netdev_and_ip( node_id_priv->id.route.addr.dev_addr.bound_dev_if, cma_dst_addr(node_id_priv), this); parent = *new; if (result < 0) new = &((*new)->rb_left); else if (result > 0) new = &((*new)->rb_right); else { list_add_tail(&node_id_priv->id_list_entry, &this->id_list); kfree(node); goto unlock; } } INIT_LIST_HEAD(&node->id_list); list_add_tail(&node_id_priv->id_list_entry, &node->id_list); rb_link_node(&node->rb_node, parent, new); rb_insert_color(&node->rb_node, &id_table); unlock: spin_unlock_irqrestore(&id_table_lock, flags); return 0; } static struct id_table_entry * node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa) { struct rb_node *node = root->rb_node; struct id_table_entry *data; int result; while (node) { data = container_of(node, struct id_table_entry, rb_node); result = compare_netdev_and_ip(ifindex, sa, data); if (result < 0) node = node->rb_left; else if (result > 0) node = node->rb_right; else return data; } return NULL; } static void cma_remove_id_from_tree(struct rdma_id_private *id_priv) { struct id_table_entry *data; unsigned long flags; 
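	/* Locate the rb-tree node keyed by this id's bound ifindex and
	 * destination address, unlink the id from the node's list, and
	 * free the node once its list drains.
	 */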
spin_lock_irqsave(&id_table_lock, flags); if (list_empty(&id_priv->id_list_entry)) goto out; data = node_from_ndev_ip(&id_table, id_priv->id.route.addr.dev_addr.bound_dev_if, cma_dst_addr(id_priv)); if (!data) goto out; list_del_init(&id_priv->id_list_entry); if (list_empty(&data->id_list)) { rb_erase(&data->rb_node, &id_table); kfree(data); } out: spin_unlock_irqrestore(&id_table_lock, flags); } static void _cma_attach_to_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev) { cma_dev_get(cma_dev); id_priv->cma_dev = cma_dev; id_priv->id.device = cma_dev->device; id_priv->id.route.addr.dev_addr.transport = rdma_node_get_transport(cma_dev->device->node_type); list_add_tail(&id_priv->device_item, &cma_dev->id_list); trace_cm_id_attach(id_priv, cma_dev->device); } static void cma_attach_to_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev) { _cma_attach_to_dev(id_priv, cma_dev); id_priv->gid_type = cma_dev->default_gid_type[id_priv->id.port_num - rdma_start_port(cma_dev->device)]; } static void cma_release_dev(struct rdma_id_private *id_priv) { mutex_lock(&lock); list_del_init(&id_priv->device_item); cma_dev_put(id_priv->cma_dev); id_priv->cma_dev = NULL; id_priv->id.device = NULL; if (id_priv->id.route.addr.dev_addr.sgid_attr) { rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr); id_priv->id.route.addr.dev_addr.sgid_attr = NULL; } mutex_unlock(&lock); } static inline unsigned short cma_family(struct rdma_id_private *id_priv) { return id_priv->id.route.addr.src_addr.ss_family; } static int cma_set_default_qkey(struct rdma_id_private *id_priv) { struct ib_sa_mcmember_rec rec; int ret = 0; switch (id_priv->id.ps) { case RDMA_PS_UDP: case RDMA_PS_IB: id_priv->qkey = RDMA_UDP_QKEY; break; case RDMA_PS_IPOIB: ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, &rec.mgid, &rec); if (!ret) id_priv->qkey = be32_to_cpu(rec.qkey); break; default: break; } return ret; } static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) { if (!qkey || (id_priv->qkey && (id_priv->qkey != qkey))) return -EINVAL; id_priv->qkey = qkey; return 0; } static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr) { dev_addr->dev_type = ARPHRD_INFINIBAND; rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr); ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey)); } static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) { int ret; if (addr->sa_family != AF_IB) { ret = rdma_translate_ip(addr, dev_addr); } else { cma_translate_ib((struct sockaddr_ib *) addr, dev_addr); ret = 0; } return ret; } static const struct ib_gid_attr * cma_validate_port(struct ib_device *device, u32 port, enum ib_gid_type gid_type, union ib_gid *gid, struct rdma_id_private *id_priv) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; const struct ib_gid_attr *sgid_attr = ERR_PTR(-ENODEV); int bound_if_index = dev_addr->bound_dev_if; int dev_type = dev_addr->dev_type; struct net_device *ndev = NULL; if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net)) goto out; if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port)) goto out; if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) goto out; /* * For drivers that do not associate more than one net device with * their gid tables, such as iWARP drivers, it is sufficient to * return the first table entry. 
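 * That is what the iWARP branch below does: it takes GID index 0 via
 * rdma_get_gid_attr() and only verifies that the attached netdevice
 * and its namespace match what the id is bound to.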
* * Other driver classes might be included in the future. */ if (rdma_protocol_iwarp(device, port)) { sgid_attr = rdma_get_gid_attr(device, port, 0); if (IS_ERR(sgid_attr)) goto out; rcu_read_lock(); ndev = rcu_dereference(sgid_attr->ndev); if (!net_eq(dev_net(ndev), dev_addr->net) || ndev->ifindex != bound_if_index) sgid_attr = ERR_PTR(-ENODEV); rcu_read_unlock(); goto out; } if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) { ndev = dev_get_by_index(dev_addr->net, bound_if_index); if (!ndev) goto out; } else { gid_type = IB_GID_TYPE_IB; } sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev); dev_put(ndev); out: return sgid_attr; } static void cma_bind_sgid_attr(struct rdma_id_private *id_priv, const struct ib_gid_attr *sgid_attr) { WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr); id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr; } /** * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute * based on source ip address. * @id_priv: cm_id which should be bound to cma device * * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute * based on source IP address. It returns 0 on success or error code otherwise. * It is applicable to active and passive side cm_id. */ static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; const struct ib_gid_attr *sgid_attr; union ib_gid gid, iboe_gid, *gidp; struct cma_device *cma_dev; enum ib_gid_type gid_type; int ret = -ENODEV; u32 port; if (dev_addr->dev_type != ARPHRD_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) return -EINVAL; rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, &iboe_gid); memcpy(&gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof(gid)); mutex_lock(&lock); list_for_each_entry(cma_dev, &dev_list, list) { rdma_for_each_port (cma_dev->device, port) { gidp = rdma_protocol_roce(cma_dev->device, port) ? &iboe_gid : &gid; gid_type = cma_dev->default_gid_type[port - 1]; sgid_attr = cma_validate_port(cma_dev->device, port, gid_type, gidp, id_priv); if (!IS_ERR(sgid_attr)) { id_priv->id.port_num = port; cma_bind_sgid_attr(id_priv, sgid_attr); cma_attach_to_dev(id_priv, cma_dev); ret = 0; goto out; } } } out: mutex_unlock(&lock); return ret; } /** * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute * @id_priv: cm id to bind to cma device * @listen_id_priv: listener cm id to match against * @req: Pointer to req structure containaining incoming * request information * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when * rdma device matches for listen_id and incoming request. It also verifies * that a GID table entry is present for the source address. * Returns 0 on success, or returns error code otherwise. 
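 *
 * Note: for RoCE ports the GID that is validated is derived from the
 * request's source IP address; otherwise the device's raw source GID
 * is used, much like the per-port selection done by
 * cma_acquire_dev_by_src_ip().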
*/ static int cma_ib_acquire_dev(struct rdma_id_private *id_priv, const struct rdma_id_private *listen_id_priv, struct cma_req_info *req) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; const struct ib_gid_attr *sgid_attr; enum ib_gid_type gid_type; union ib_gid gid; if (dev_addr->dev_type != ARPHRD_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) return -EINVAL; if (rdma_protocol_roce(req->device, req->port)) rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, &gid); else memcpy(&gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof(gid)); gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1]; sgid_attr = cma_validate_port(req->device, req->port, gid_type, &gid, id_priv); if (IS_ERR(sgid_attr)) return PTR_ERR(sgid_attr); id_priv->id.port_num = req->port; cma_bind_sgid_attr(id_priv, sgid_attr); /* Need to acquire lock to protect against reader * of cma_dev->id_list such as cma_netdev_callback() and * cma_process_remove(). */ mutex_lock(&lock); cma_attach_to_dev(id_priv, listen_id_priv->cma_dev); mutex_unlock(&lock); rdma_restrack_add(&id_priv->res); return 0; } static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, const struct rdma_id_private *listen_id_priv) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; const struct ib_gid_attr *sgid_attr; struct cma_device *cma_dev; enum ib_gid_type gid_type; int ret = -ENODEV; union ib_gid gid; u32 port; if (dev_addr->dev_type != ARPHRD_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) return -EINVAL; memcpy(&gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof(gid)); mutex_lock(&lock); cma_dev = listen_id_priv->cma_dev; port = listen_id_priv->id.port_num; gid_type = listen_id_priv->gid_type; sgid_attr = cma_validate_port(cma_dev->device, port, gid_type, &gid, id_priv); if (!IS_ERR(sgid_attr)) { id_priv->id.port_num = port; cma_bind_sgid_attr(id_priv, sgid_attr); ret = 0; goto out; } list_for_each_entry(cma_dev, &dev_list, list) { rdma_for_each_port (cma_dev->device, port) { if (listen_id_priv->cma_dev == cma_dev && listen_id_priv->id.port_num == port) continue; gid_type = cma_dev->default_gid_type[port - 1]; sgid_attr = cma_validate_port(cma_dev->device, port, gid_type, &gid, id_priv); if (!IS_ERR(sgid_attr)) { id_priv->id.port_num = port; cma_bind_sgid_attr(id_priv, sgid_attr); ret = 0; goto out; } } } out: if (!ret) { cma_attach_to_dev(id_priv, cma_dev); rdma_restrack_add(&id_priv->res); } mutex_unlock(&lock); return ret; } /* * Select the source IB device and address to reach the destination IB address. 
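 * The search below walks every AF_IB capable port of every registered
 * device, requires the destination P_Key to be present in the port's
 * P_Key table, and settles on the first port whose GID table holds
 * either the destination GID itself or, on an active port, a GID that
 * shares its subnet prefix.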
*/ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) { struct cma_device *cma_dev, *cur_dev; struct sockaddr_ib *addr; union ib_gid gid, sgid, *dgid; unsigned int p; u16 pkey, index; enum ib_port_state port_state; int ret; int i; cma_dev = NULL; addr = (struct sockaddr_ib *) cma_dst_addr(id_priv); dgid = (union ib_gid *) &addr->sib_addr; pkey = ntohs(addr->sib_pkey); mutex_lock(&lock); list_for_each_entry(cur_dev, &dev_list, list) { rdma_for_each_port (cur_dev->device, p) { if (!rdma_cap_af_ib(cur_dev->device, p)) continue; if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index)) continue; if (ib_get_cached_port_state(cur_dev->device, p, &port_state)) continue; for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len; ++i) { ret = rdma_query_gid(cur_dev->device, p, i, &gid); if (ret) continue; if (!memcmp(&gid, dgid, sizeof(gid))) { cma_dev = cur_dev; sgid = gid; id_priv->id.port_num = p; goto found; } if (!cma_dev && (gid.global.subnet_prefix == dgid->global.subnet_prefix) && port_state == IB_PORT_ACTIVE) { cma_dev = cur_dev; sgid = gid; id_priv->id.port_num = p; goto found; } } } } mutex_unlock(&lock); return -ENODEV; found: cma_attach_to_dev(id_priv, cma_dev); rdma_restrack_add(&id_priv->res); mutex_unlock(&lock); addr = (struct sockaddr_ib *)cma_src_addr(id_priv); memcpy(&addr->sib_addr, &sgid, sizeof(sgid)); cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); return 0; } static void cma_id_get(struct rdma_id_private *id_priv) { refcount_inc(&id_priv->refcount); } static void cma_id_put(struct rdma_id_private *id_priv) { if (refcount_dec_and_test(&id_priv->refcount)) complete(&id_priv->comp); } static struct rdma_id_private * __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler, void *context, enum rdma_ucm_port_space ps, enum ib_qp_type qp_type, const struct rdma_id_private *parent) { struct rdma_id_private *id_priv; id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL); if (!id_priv) return ERR_PTR(-ENOMEM); id_priv->state = RDMA_CM_IDLE; id_priv->id.context = context; id_priv->id.event_handler = event_handler; id_priv->id.ps = ps; id_priv->id.qp_type = qp_type; id_priv->tos_set = false; id_priv->timeout_set = false; id_priv->min_rnr_timer_set = false; id_priv->gid_type = IB_GID_TYPE_IB; spin_lock_init(&id_priv->lock); mutex_init(&id_priv->qp_mutex); init_completion(&id_priv->comp); refcount_set(&id_priv->refcount, 1); mutex_init(&id_priv->handler_mutex); INIT_LIST_HEAD(&id_priv->device_item); INIT_LIST_HEAD(&id_priv->id_list_entry); INIT_LIST_HEAD(&id_priv->listen_list); INIT_LIST_HEAD(&id_priv->mc_list); get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); id_priv->id.route.addr.dev_addr.net = get_net(net); id_priv->seq_num &= 0x00ffffff; rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID); if (parent) rdma_restrack_parent_name(&id_priv->res, &parent->res); return id_priv; } struct rdma_cm_id * __rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler, void *context, enum rdma_ucm_port_space ps, enum ib_qp_type qp_type, const char *caller) { struct rdma_id_private *ret; ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL); if (IS_ERR(ret)) return ERR_CAST(ret); rdma_restrack_set_name(&ret->res, caller); return &ret->id; } EXPORT_SYMBOL(__rdma_create_kernel_id); struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler, void *context, enum rdma_ucm_port_space ps, enum ib_qp_type qp_type) { struct rdma_id_private *ret; ret = __rdma_create_id(current->nsproxy->net_ns, 
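				/* user-created ids are scoped to the caller's net namespace */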
event_handler, context, ps, qp_type, NULL); if (IS_ERR(ret)) return ERR_CAST(ret); rdma_restrack_set_name(&ret->res, NULL); return &ret->id; } EXPORT_SYMBOL(rdma_create_user_id); static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) { struct ib_qp_attr qp_attr; int qp_attr_mask, ret; qp_attr.qp_state = IB_QPS_INIT; ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); if (ret) return ret; ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); if (ret) return ret; qp_attr.qp_state = IB_QPS_RTR; ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE); if (ret) return ret; qp_attr.qp_state = IB_QPS_RTS; qp_attr.sq_psn = 0; ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN); return ret; } static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) { struct ib_qp_attr qp_attr; int qp_attr_mask, ret; qp_attr.qp_state = IB_QPS_INIT; ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); if (ret) return ret; return ib_modify_qp(qp, &qp_attr, qp_attr_mask); } int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, struct ib_qp_init_attr *qp_init_attr) { struct rdma_id_private *id_priv; struct ib_qp *qp; int ret; id_priv = container_of(id, struct rdma_id_private, id); if (id->device != pd->device) { ret = -EINVAL; goto out_err; } qp_init_attr->port_num = id->port_num; qp = ib_create_qp(pd, qp_init_attr); if (IS_ERR(qp)) { ret = PTR_ERR(qp); goto out_err; } if (id->qp_type == IB_QPT_UD) ret = cma_init_ud_qp(id_priv, qp); else ret = cma_init_conn_qp(id_priv, qp); if (ret) goto out_destroy; id->qp = qp; id_priv->qp_num = qp->qp_num; id_priv->srq = (qp->srq != NULL); trace_cm_qp_create(id_priv, pd, qp_init_attr, 0); return 0; out_destroy: ib_destroy_qp(qp); out_err: trace_cm_qp_create(id_priv, pd, qp_init_attr, ret); return ret; } EXPORT_SYMBOL(rdma_create_qp); void rdma_destroy_qp(struct rdma_cm_id *id) { struct rdma_id_private *id_priv; id_priv = container_of(id, struct rdma_id_private, id); trace_cm_qp_destroy(id_priv); mutex_lock(&id_priv->qp_mutex); ib_destroy_qp(id_priv->id.qp); id_priv->id.qp = NULL; mutex_unlock(&id_priv->qp_mutex); } EXPORT_SYMBOL(rdma_destroy_qp); static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct ib_qp_attr qp_attr; int qp_attr_mask, ret; mutex_lock(&id_priv->qp_mutex); if (!id_priv->id.qp) { ret = 0; goto out; } /* Need to update QP attributes from default values. 
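	 * The QP is moved INIT -> RTR below; when the caller supplied
	 * connection parameters, responder_resources is applied as
	 * max_dest_rd_atomic before the final modify.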
*/ qp_attr.qp_state = IB_QPS_INIT; ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); if (ret) goto out; ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); if (ret) goto out; qp_attr.qp_state = IB_QPS_RTR; ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); if (ret) goto out; BUG_ON(id_priv->cma_dev->device != id_priv->id.device); if (conn_param) qp_attr.max_dest_rd_atomic = conn_param->responder_resources; ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); out: mutex_unlock(&id_priv->qp_mutex); return ret; } static int cma_modify_qp_rts(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct ib_qp_attr qp_attr; int qp_attr_mask, ret; mutex_lock(&id_priv->qp_mutex); if (!id_priv->id.qp) { ret = 0; goto out; } qp_attr.qp_state = IB_QPS_RTS; ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); if (ret) goto out; if (conn_param) qp_attr.max_rd_atomic = conn_param->initiator_depth; ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); out: mutex_unlock(&id_priv->qp_mutex); return ret; } static int cma_modify_qp_err(struct rdma_id_private *id_priv) { struct ib_qp_attr qp_attr; int ret; mutex_lock(&id_priv->qp_mutex); if (!id_priv->id.qp) { ret = 0; goto out; } qp_attr.qp_state = IB_QPS_ERR; ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); out: mutex_unlock(&id_priv->qp_mutex); return ret; } static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; int ret; u16 pkey; if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num)) pkey = 0xffff; else pkey = ib_addr_get_pkey(dev_addr); ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, pkey, &qp_attr->pkey_index); if (ret) return ret; qp_attr->port_num = id_priv->id.port_num; *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; if (id_priv->id.qp_type == IB_QPT_UD) { ret = cma_set_default_qkey(id_priv); if (ret) return ret; qp_attr->qkey = id_priv->qkey; *qp_attr_mask |= IB_QP_QKEY; } else { qp_attr->qp_access_flags = 0; *qp_attr_mask |= IB_QP_ACCESS_FLAGS; } return 0; } int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { struct rdma_id_private *id_priv; int ret = 0; id_priv = container_of(id, struct rdma_id_private, id); if (rdma_cap_ib_cm(id->device, id->port_num)) { if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); else ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, qp_attr_mask); if (qp_attr->qp_state == IB_QPS_RTR) qp_attr->rq_psn = id_priv->seq_num; } else if (rdma_cap_iw_cm(id->device, id->port_num)) { if (!id_priv->cm_id.iw) { qp_attr->qp_access_flags = 0; *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; } else ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, qp_attr_mask); qp_attr->port_num = id_priv->id.port_num; *qp_attr_mask |= IB_QP_PORT; } else { ret = -ENOSYS; } if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set) qp_attr->timeout = id_priv->timeout; if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set) qp_attr->min_rnr_timer = id_priv->min_rnr_timer; return ret; } EXPORT_SYMBOL(rdma_init_qp_attr); static inline bool cma_zero_addr(const struct sockaddr *addr) { switch (addr->sa_family) { case AF_INET: return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr); case AF_INET6: return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr); 
case AF_IB: return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr); default: return false; } } static inline bool cma_loopback_addr(const struct sockaddr *addr) { switch (addr->sa_family) { case AF_INET: return ipv4_is_loopback( ((struct sockaddr_in *)addr)->sin_addr.s_addr); case AF_INET6: return ipv6_addr_loopback( &((struct sockaddr_in6 *)addr)->sin6_addr); case AF_IB: return ib_addr_loopback( &((struct sockaddr_ib *)addr)->sib_addr); default: return false; } } static inline bool cma_any_addr(const struct sockaddr *addr) { return cma_zero_addr(addr) || cma_loopback_addr(addr); } static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst) { if (src->sa_family != dst->sa_family) return -1; switch (src->sa_family) { case AF_INET: return ((struct sockaddr_in *)src)->sin_addr.s_addr != ((struct sockaddr_in *)dst)->sin_addr.s_addr; case AF_INET6: { struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src; struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst; bool link_local; if (ipv6_addr_cmp(&src_addr6->sin6_addr, &dst_addr6->sin6_addr)) return 1; link_local = ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL; /* Link local must match their scope_ids */ return link_local ? (src_addr6->sin6_scope_id != dst_addr6->sin6_scope_id) : 0; } default: return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr, &((struct sockaddr_ib *) dst)->sib_addr); } } static __be16 cma_port(const struct sockaddr *addr) { struct sockaddr_ib *sib; switch (addr->sa_family) { case AF_INET: return ((struct sockaddr_in *) addr)->sin_port; case AF_INET6: return ((struct sockaddr_in6 *) addr)->sin6_port; case AF_IB: sib = (struct sockaddr_ib *) addr; return htons((u16) (be64_to_cpu(sib->sib_sid) & be64_to_cpu(sib->sib_sid_mask))); default: return 0; } } static inline int cma_any_port(const struct sockaddr *addr) { return !cma_port(addr); } static void cma_save_ib_info(struct sockaddr *src_addr, struct sockaddr *dst_addr, const struct rdma_cm_id *listen_id, const struct sa_path_rec *path) { struct sockaddr_ib *listen_ib, *ib; listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr; if (src_addr) { ib = (struct sockaddr_ib *)src_addr; ib->sib_family = AF_IB; if (path) { ib->sib_pkey = path->pkey; ib->sib_flowinfo = path->flow_label; memcpy(&ib->sib_addr, &path->sgid, 16); ib->sib_sid = path->service_id; ib->sib_scope_id = 0; } else { ib->sib_pkey = listen_ib->sib_pkey; ib->sib_flowinfo = listen_ib->sib_flowinfo; ib->sib_addr = listen_ib->sib_addr; ib->sib_sid = listen_ib->sib_sid; ib->sib_scope_id = listen_ib->sib_scope_id; } ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL); } if (dst_addr) { ib = (struct sockaddr_ib *)dst_addr; ib->sib_family = AF_IB; if (path) { ib->sib_pkey = path->pkey; ib->sib_flowinfo = path->flow_label; memcpy(&ib->sib_addr, &path->dgid, 16); } } } static void cma_save_ip4_info(struct sockaddr_in *src_addr, struct sockaddr_in *dst_addr, struct cma_hdr *hdr, __be16 local_port) { if (src_addr) { *src_addr = (struct sockaddr_in) { .sin_family = AF_INET, .sin_addr.s_addr = hdr->dst_addr.ip4.addr, .sin_port = local_port, }; } if (dst_addr) { *dst_addr = (struct sockaddr_in) { .sin_family = AF_INET, .sin_addr.s_addr = hdr->src_addr.ip4.addr, .sin_port = hdr->port, }; } } static void cma_save_ip6_info(struct sockaddr_in6 *src_addr, struct sockaddr_in6 *dst_addr, struct cma_hdr *hdr, __be16 local_port) { if (src_addr) { *src_addr = (struct sockaddr_in6) { .sin6_family = AF_INET6, .sin6_addr = hdr->dst_addr.ip6, .sin6_port = local_port, }; } if 
(dst_addr) { *dst_addr = (struct sockaddr_in6) { .sin6_family = AF_INET6, .sin6_addr = hdr->src_addr.ip6, .sin6_port = hdr->port, }; } } static u16 cma_port_from_service_id(__be64 service_id) { return (u16)be64_to_cpu(service_id); } static int cma_save_ip_info(struct sockaddr *src_addr, struct sockaddr *dst_addr, const struct ib_cm_event *ib_event, __be64 service_id) { struct cma_hdr *hdr; __be16 port; hdr = ib_event->private_data; if (hdr->cma_version != CMA_VERSION) return -EINVAL; port = htons(cma_port_from_service_id(service_id)); switch (cma_get_ip_ver(hdr)) { case 4: cma_save_ip4_info((struct sockaddr_in *)src_addr, (struct sockaddr_in *)dst_addr, hdr, port); break; case 6: cma_save_ip6_info((struct sockaddr_in6 *)src_addr, (struct sockaddr_in6 *)dst_addr, hdr, port); break; default: return -EAFNOSUPPORT; } return 0; } static int cma_save_net_info(struct sockaddr *src_addr, struct sockaddr *dst_addr, const struct rdma_cm_id *listen_id, const struct ib_cm_event *ib_event, sa_family_t sa_family, __be64 service_id) { if (sa_family == AF_IB) { if (ib_event->event == IB_CM_REQ_RECEIVED) cma_save_ib_info(src_addr, dst_addr, listen_id, ib_event->param.req_rcvd.primary_path); else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) cma_save_ib_info(src_addr, dst_addr, listen_id, NULL); return 0; } return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id); } static int cma_save_req_info(const struct ib_cm_event *ib_event, struct cma_req_info *req) { const struct ib_cm_req_event_param *req_param = &ib_event->param.req_rcvd; const struct ib_cm_sidr_req_event_param *sidr_param = &ib_event->param.sidr_req_rcvd; switch (ib_event->event) { case IB_CM_REQ_RECEIVED: req->device = req_param->listen_id->device; req->port = req_param->port; memcpy(&req->local_gid, &req_param->primary_path->sgid, sizeof(req->local_gid)); req->has_gid = true; req->service_id = req_param->primary_path->service_id; req->pkey = be16_to_cpu(req_param->primary_path->pkey); if (req->pkey != req_param->bth_pkey) pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" "RDMA CMA: in the future this may cause the request to be dropped\n", req_param->bth_pkey, req->pkey); break; case IB_CM_SIDR_REQ_RECEIVED: req->device = sidr_param->listen_id->device; req->port = sidr_param->port; req->has_gid = false; req->service_id = sidr_param->service_id; req->pkey = sidr_param->pkey; if (req->pkey != sidr_param->bth_pkey) pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n" "RDMA CMA: in the future this may cause the request to be dropped\n", sidr_param->bth_pkey, req->pkey); break; default: return -EINVAL; } return 0; } static bool validate_ipv4_net_dev(struct net_device *net_dev, const struct sockaddr_in *dst_addr, const struct sockaddr_in *src_addr) { __be32 daddr = dst_addr->sin_addr.s_addr, saddr = src_addr->sin_addr.s_addr; struct fib_result res; struct flowi4 fl4; int err; bool ret; if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) || ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) || ipv4_is_loopback(saddr)) return false; memset(&fl4, 0, sizeof(fl4)); fl4.flowi4_oif = net_dev->ifindex; fl4.daddr = daddr; fl4.saddr = saddr; rcu_read_lock(); err = fib_lookup(dev_net(net_dev), &fl4, &res, 0); ret = err == 0 && FIB_RES_DEV(res) == net_dev; rcu_read_unlock(); return ret; } static bool validate_ipv6_net_dev(struct net_device *net_dev, const struct sockaddr_in6 *dst_addr, const struct 
sockaddr_in6 *src_addr) { #if IS_ENABLED(CONFIG_IPV6) const int strict = ipv6_addr_type(&dst_addr->sin6_addr) & IPV6_ADDR_LINKLOCAL; struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr, &src_addr->sin6_addr, net_dev->ifindex, NULL, strict); bool ret; if (!rt) return false; ret = rt->rt6i_idev->dev == net_dev; ip6_rt_put(rt); return ret; #else return false; #endif } static bool validate_net_dev(struct net_device *net_dev, const struct sockaddr *daddr, const struct sockaddr *saddr) { const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr; const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr; const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr; const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr; switch (daddr->sa_family) { case AF_INET: return saddr->sa_family == AF_INET && validate_ipv4_net_dev(net_dev, daddr4, saddr4); case AF_INET6: return saddr->sa_family == AF_INET6 && validate_ipv6_net_dev(net_dev, daddr6, saddr6); default: return false; } } static struct net_device * roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event) { const struct ib_gid_attr *sgid_attr = NULL; struct net_device *ndev; if (ib_event->event == IB_CM_REQ_RECEIVED) sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr; else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr; if (!sgid_attr) return NULL; rcu_read_lock(); ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr); if (IS_ERR(ndev)) ndev = NULL; else dev_hold(ndev); rcu_read_unlock(); return ndev; } static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event, struct cma_req_info *req) { struct sockaddr *listen_addr = (struct sockaddr *)&req->listen_addr_storage; struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage; struct net_device *net_dev; const union ib_gid *gid = req->has_gid ? 
&req->local_gid : NULL; int err; err = cma_save_ip_info(listen_addr, src_addr, ib_event, req->service_id); if (err) return ERR_PTR(err); if (rdma_protocol_roce(req->device, req->port)) net_dev = roce_get_net_dev_by_cm_event(ib_event); else net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey, gid, listen_addr); if (!net_dev) return ERR_PTR(-ENODEV); return net_dev; } static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id) { return (be64_to_cpu(service_id) >> 16) & 0xffff; } static bool cma_match_private_data(struct rdma_id_private *id_priv, const struct cma_hdr *hdr) { struct sockaddr *addr = cma_src_addr(id_priv); __be32 ip4_addr; struct in6_addr ip6_addr; if (cma_any_addr(addr) && !id_priv->afonly) return true; switch (addr->sa_family) { case AF_INET: ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr; if (cma_get_ip_ver(hdr) != 4) return false; if (!cma_any_addr(addr) && hdr->dst_addr.ip4.addr != ip4_addr) return false; break; case AF_INET6: ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr; if (cma_get_ip_ver(hdr) != 6) return false; if (!cma_any_addr(addr) && memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr))) return false; break; case AF_IB: return true; default: return false; } return true; } static bool cma_protocol_roce(const struct rdma_cm_id *id) { struct ib_device *device = id->device; const u32 port_num = id->port_num ?: rdma_start_port(device); return rdma_protocol_roce(device, port_num); } static bool cma_is_req_ipv6_ll(const struct cma_req_info *req) { const struct sockaddr *daddr = (const struct sockaddr *)&req->listen_addr_storage; const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr; /* Returns true if the req is for IPv6 link local */ return (daddr->sa_family == AF_INET6 && (ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)); } static bool cma_match_net_dev(const struct rdma_cm_id *id, const struct net_device *net_dev, const struct cma_req_info *req) { const struct rdma_addr *addr = &id->route.addr; if (!net_dev) /* This request is an AF_IB request */ return (!id->port_num || id->port_num == req->port) && (addr->src_addr.ss_family == AF_IB); /* * If the request is not for IPv6 link local, allow matching * request to any netdevice of the one or multiport rdma device. */ if (!cma_is_req_ipv6_ll(req)) return true; /* * Net namespaces must match, and if the listner is listening * on a specific netdevice than netdevice must match as well. 
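 * In other words, a listener with a non-zero bound_dev_if only matches
 * when it equals net_dev->ifindex, while a listener that is not bound
 * to a device matches any netdevice in the right namespace.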
*/ if (net_eq(dev_net(net_dev), addr->dev_addr.net) && (!!addr->dev_addr.bound_dev_if == (addr->dev_addr.bound_dev_if == net_dev->ifindex))) return true; else return false; } static struct rdma_id_private *cma_find_listener( const struct rdma_bind_list *bind_list, const struct ib_cm_id *cm_id, const struct ib_cm_event *ib_event, const struct cma_req_info *req, const struct net_device *net_dev) { struct rdma_id_private *id_priv, *id_priv_dev; lockdep_assert_held(&lock); if (!bind_list) return ERR_PTR(-EINVAL); hlist_for_each_entry(id_priv, &bind_list->owners, node) { if (cma_match_private_data(id_priv, ib_event->private_data)) { if (id_priv->id.device == cm_id->device && cma_match_net_dev(&id_priv->id, net_dev, req)) return id_priv; list_for_each_entry(id_priv_dev, &id_priv->listen_list, listen_item) { if (id_priv_dev->id.device == cm_id->device && cma_match_net_dev(&id_priv_dev->id, net_dev, req)) return id_priv_dev; } } } return ERR_PTR(-EINVAL); } static struct rdma_id_private * cma_ib_id_from_event(struct ib_cm_id *cm_id, const struct ib_cm_event *ib_event, struct cma_req_info *req, struct net_device **net_dev) { struct rdma_bind_list *bind_list; struct rdma_id_private *id_priv; int err; err = cma_save_req_info(ib_event, req); if (err) return ERR_PTR(err); *net_dev = cma_get_net_dev(ib_event, req); if (IS_ERR(*net_dev)) { if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) { /* Assuming the protocol is AF_IB */ *net_dev = NULL; } else { return ERR_CAST(*net_dev); } } mutex_lock(&lock); /* * Net namespace might be getting deleted while route lookup, * cm_id lookup is in progress. Therefore, perform netdevice * validation, cm_id lookup under rcu lock. * RCU lock along with netdevice state check, synchronizes with * netdevice migrating to different net namespace and also avoids * case where net namespace doesn't get deleted while lookup is in * progress. * If the device state is not IFF_UP, its properties such as ifindex * and nd_net cannot be trusted to remain valid without rcu lock. * net/core/dev.c change_net_namespace() ensures to synchronize with * ongoing operations on net device after device is closed using * synchronize_net(). */ rcu_read_lock(); if (*net_dev) { /* * If netdevice is down, it is likely that it is administratively * down or it might be migrating to different namespace. * In that case avoid further processing, as the net namespace * or ifindex may change. */ if (((*net_dev)->flags & IFF_UP) == 0) { id_priv = ERR_PTR(-EHOSTUNREACH); goto err; } if (!validate_net_dev(*net_dev, (struct sockaddr *)&req->src_addr_storage, (struct sockaddr *)&req->listen_addr_storage)) { id_priv = ERR_PTR(-EHOSTUNREACH); goto err; } } bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net, rdma_ps_from_service_id(req->service_id), cma_port_from_service_id(req->service_id)); id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev); err: rcu_read_unlock(); mutex_unlock(&lock); if (IS_ERR(id_priv) && *net_dev) { dev_put(*net_dev); *net_dev = NULL; } return id_priv; } static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv) { return cma_family(id_priv) == AF_IB ? 
0 : sizeof(struct cma_hdr); } static void cma_cancel_route(struct rdma_id_private *id_priv) { if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) { if (id_priv->query) ib_sa_cancel_query(id_priv->query_id, id_priv->query); } } static void _cma_cancel_listens(struct rdma_id_private *id_priv) { struct rdma_id_private *dev_id_priv; lockdep_assert_held(&lock); /* * Remove from listen_any_list to prevent added devices from spawning * additional listen requests. */ list_del_init(&id_priv->listen_any_item); while (!list_empty(&id_priv->listen_list)) { dev_id_priv = list_first_entry(&id_priv->listen_list, struct rdma_id_private, listen_item); /* sync with device removal to avoid duplicate destruction */ list_del_init(&dev_id_priv->device_item); list_del_init(&dev_id_priv->listen_item); mutex_unlock(&lock); rdma_destroy_id(&dev_id_priv->id); mutex_lock(&lock); } } static void cma_cancel_listens(struct rdma_id_private *id_priv) { mutex_lock(&lock); _cma_cancel_listens(id_priv); mutex_unlock(&lock); } static void cma_cancel_operation(struct rdma_id_private *id_priv, enum rdma_cm_state state) { switch (state) { case RDMA_CM_ADDR_QUERY: /* * We can avoid doing the rdma_addr_cancel() based on state, * only RDMA_CM_ADDR_QUERY has a work that could still execute. * Notice that the addr_handler work could still be exiting * outside this state, however due to the interaction with the * handler_mutex the work is guaranteed not to touch id_priv * during exit. */ rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); break; case RDMA_CM_ROUTE_QUERY: cma_cancel_route(id_priv); break; case RDMA_CM_LISTEN: if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) cma_cancel_listens(id_priv); break; default: break; } } static void cma_release_port(struct rdma_id_private *id_priv) { struct rdma_bind_list *bind_list = id_priv->bind_list; struct net *net = id_priv->id.route.addr.dev_addr.net; if (!bind_list) return; mutex_lock(&lock); hlist_del(&id_priv->node); if (hlist_empty(&bind_list->owners)) { cma_ps_remove(net, bind_list->ps, bind_list->port); kfree(bind_list); } mutex_unlock(&lock); } static void destroy_mc(struct rdma_id_private *id_priv, struct cma_multicast *mc) { bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) ib_sa_free_multicast(mc->sa_mc); if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct net_device *ndev = NULL; if (dev_addr->bound_dev_if) ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); if (ndev && !send_only) { enum ib_gid_type gid_type; union ib_gid mgid; gid_type = id_priv->cma_dev->default_gid_type [id_priv->id.port_num - rdma_start_port( id_priv->cma_dev->device)]; cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid, gid_type); cma_igmp_send(ndev, &mgid, false); } dev_put(ndev); cancel_work_sync(&mc->iboe_join.work); } kfree(mc); } static void cma_leave_mc_groups(struct rdma_id_private *id_priv) { struct cma_multicast *mc; while (!list_empty(&id_priv->mc_list)) { mc = list_first_entry(&id_priv->mc_list, struct cma_multicast, list); list_del(&mc->list); destroy_mc(id_priv, mc); } } static void _destroy_id(struct rdma_id_private *id_priv, enum rdma_cm_state state) { cma_cancel_operation(id_priv, state); rdma_restrack_del(&id_priv->res); cma_remove_id_from_tree(id_priv); if (id_priv->cma_dev) { if (rdma_cap_ib_cm(id_priv->id.device, 1)) { if (id_priv->cm_id.ib) ib_destroy_cm_id(id_priv->cm_id.ib); } 
else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { if (id_priv->cm_id.iw) iw_destroy_cm_id(id_priv->cm_id.iw); } cma_leave_mc_groups(id_priv); cma_release_dev(id_priv); } cma_release_port(id_priv); cma_id_put(id_priv); wait_for_completion(&id_priv->comp); if (id_priv->internal_id) cma_id_put(id_priv->id.context); kfree(id_priv->id.route.path_rec); kfree(id_priv->id.route.path_rec_inbound); kfree(id_priv->id.route.path_rec_outbound); put_net(id_priv->id.route.addr.dev_addr.net); kfree(id_priv); } /* * destroy an ID from within the handler_mutex. This ensures that no other * handlers can start running concurrently. */ static void destroy_id_handler_unlock(struct rdma_id_private *id_priv) __releases(&idprv->handler_mutex) { enum rdma_cm_state state; unsigned long flags; trace_cm_id_destroy(id_priv); /* * Setting the state to destroyed under the handler mutex provides a * fence against calling handler callbacks. If this is invoked due to * the failure of a handler callback then it guarentees that no future * handlers will be called. */ lockdep_assert_held(&id_priv->handler_mutex); spin_lock_irqsave(&id_priv->lock, flags); state = id_priv->state; id_priv->state = RDMA_CM_DESTROYING; spin_unlock_irqrestore(&id_priv->lock, flags); mutex_unlock(&id_priv->handler_mutex); _destroy_id(id_priv, state); } void rdma_destroy_id(struct rdma_cm_id *id) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); mutex_lock(&id_priv->handler_mutex); destroy_id_handler_unlock(id_priv); } EXPORT_SYMBOL(rdma_destroy_id); static int cma_rep_recv(struct rdma_id_private *id_priv) { int ret; ret = cma_modify_qp_rtr(id_priv, NULL); if (ret) goto reject; ret = cma_modify_qp_rts(id_priv, NULL); if (ret) goto reject; trace_cm_send_rtu(id_priv); ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); if (ret) goto reject; return 0; reject: pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. 
status %d\n", ret); cma_modify_qp_err(id_priv); trace_cm_send_rej(id_priv); ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); return ret; } static void cma_set_rep_event_data(struct rdma_cm_event *event, const struct ib_cm_rep_event_param *rep_data, void *private_data) { event->param.conn.private_data = private_data; event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; event->param.conn.responder_resources = rep_data->responder_resources; event->param.conn.initiator_depth = rep_data->initiator_depth; event->param.conn.flow_control = rep_data->flow_control; event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; event->param.conn.srq = rep_data->srq; event->param.conn.qp_num = rep_data->remote_qpn; event->ece.vendor_id = rep_data->ece.vendor_id; event->ece.attr_mod = rep_data->ece.attr_mod; } static int cma_cm_event_handler(struct rdma_id_private *id_priv, struct rdma_cm_event *event) { int ret; lockdep_assert_held(&id_priv->handler_mutex); trace_cm_event_handler(id_priv, event); ret = id_priv->id.event_handler(&id_priv->id, event); trace_cm_event_done(id_priv, event, ret); return ret; } static int cma_ib_handler(struct ib_cm_id *cm_id, const struct ib_cm_event *ib_event) { struct rdma_id_private *id_priv = cm_id->context; struct rdma_cm_event event = {}; enum rdma_cm_state state; int ret; mutex_lock(&id_priv->handler_mutex); state = READ_ONCE(id_priv->state); if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && state != RDMA_CM_CONNECT) || (ib_event->event == IB_CM_TIMEWAIT_EXIT && state != RDMA_CM_DISCONNECT)) goto out; switch (ib_event->event) { case IB_CM_REQ_ERROR: case IB_CM_REP_ERROR: event.event = RDMA_CM_EVENT_UNREACHABLE; event.status = -ETIMEDOUT; break; case IB_CM_REP_RECEIVED: if (state == RDMA_CM_CONNECT && (id_priv->id.qp_type != IB_QPT_UD)) { trace_cm_send_mra(id_priv); ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); } if (id_priv->id.qp) { event.status = cma_rep_recv(id_priv); event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR : RDMA_CM_EVENT_ESTABLISHED; } else { event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; } cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, ib_event->private_data); break; case IB_CM_RTU_RECEIVED: case IB_CM_USER_ESTABLISHED: event.event = RDMA_CM_EVENT_ESTABLISHED; break; case IB_CM_DREQ_ERROR: event.status = -ETIMEDOUT; fallthrough; case IB_CM_DREQ_RECEIVED: case IB_CM_DREP_RECEIVED: if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_DISCONNECT)) goto out; event.event = RDMA_CM_EVENT_DISCONNECTED; break; case IB_CM_TIMEWAIT_EXIT: event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; break; case IB_CM_MRA_RECEIVED: /* ignore event */ goto out; case IB_CM_REJ_RECEIVED: pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id, ib_event->param.rej_rcvd.reason)); cma_modify_qp_err(id_priv); event.status = ib_event->param.rej_rcvd.reason; event.event = RDMA_CM_EVENT_REJECTED; event.param.conn.private_data = ib_event->private_data; event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; break; default: pr_err("RDMA CMA: unexpected IB CM event: %d\n", ib_event->event); goto out; } ret = cma_cm_event_handler(id_priv, &event); if (ret) { /* Destroy the CM ID by returning a non-zero value. 
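	 * Returning non-zero also makes the IB CM destroy cm_id itself,
	 * so clear our pointer to it here to keep _destroy_id() from
	 * calling ib_destroy_cm_id() on it a second time.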
*/ id_priv->cm_id.ib = NULL; destroy_id_handler_unlock(id_priv); return ret; } out: mutex_unlock(&id_priv->handler_mutex); return 0; } static struct rdma_id_private * cma_ib_new_conn_id(const struct rdma_cm_id *listen_id, const struct ib_cm_event *ib_event, struct net_device *net_dev) { struct rdma_id_private *listen_id_priv; struct rdma_id_private *id_priv; struct rdma_cm_id *id; struct rdma_route *rt; const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; const __be64 service_id = ib_event->param.req_rcvd.primary_path->service_id; int ret; listen_id_priv = container_of(listen_id, struct rdma_id_private, id); id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net, listen_id->event_handler, listen_id->context, listen_id->ps, ib_event->param.req_rcvd.qp_type, listen_id_priv); if (IS_ERR(id_priv)) return NULL; id = &id_priv->id; if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, (struct sockaddr *)&id->route.addr.dst_addr, listen_id, ib_event, ss_family, service_id)) goto err; rt = &id->route; rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1; rt->path_rec = kmalloc_array(rt->num_pri_alt_paths, sizeof(*rt->path_rec), GFP_KERNEL); if (!rt->path_rec) goto err; rt->path_rec[0] = *path; if (rt->num_pri_alt_paths == 2) rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; if (net_dev) { rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev); } else { if (!cma_protocol_roce(listen_id) && cma_any_addr(cma_src_addr(id_priv))) { rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); } else if (!cma_any_addr(cma_src_addr(id_priv))) { ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); if (ret) goto err; } } rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); id_priv->state = RDMA_CM_CONNECT; return id_priv; err: rdma_destroy_id(id); return NULL; } static struct rdma_id_private * cma_ib_new_udp_id(const struct rdma_cm_id *listen_id, const struct ib_cm_event *ib_event, struct net_device *net_dev) { const struct rdma_id_private *listen_id_priv; struct rdma_id_private *id_priv; struct rdma_cm_id *id; const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; struct net *net = listen_id->route.addr.dev_addr.net; int ret; listen_id_priv = container_of(listen_id, struct rdma_id_private, id); id_priv = __rdma_create_id(net, listen_id->event_handler, listen_id->context, listen_id->ps, IB_QPT_UD, listen_id_priv); if (IS_ERR(id_priv)) return NULL; id = &id_priv->id; if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, (struct sockaddr *)&id->route.addr.dst_addr, listen_id, ib_event, ss_family, ib_event->param.sidr_req_rcvd.service_id)) goto err; if (net_dev) { rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev); } else { if (!cma_any_addr(cma_src_addr(id_priv))) { ret = cma_translate_addr(cma_src_addr(id_priv), &id->route.addr.dev_addr); if (ret) goto err; } } id_priv->state = RDMA_CM_CONNECT; return id_priv; err: rdma_destroy_id(id); return NULL; } static void cma_set_req_event_data(struct rdma_cm_event *event, const struct ib_cm_req_event_param *req_data, void *private_data, int offset) { event->param.conn.private_data = private_data + offset; event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; event->param.conn.responder_resources = req_data->responder_resources; 
event->param.conn.initiator_depth = req_data->initiator_depth; event->param.conn.flow_control = req_data->flow_control; event->param.conn.retry_count = req_data->retry_count; event->param.conn.rnr_retry_count = req_data->rnr_retry_count; event->param.conn.srq = req_data->srq; event->param.conn.qp_num = req_data->remote_qpn; event->ece.vendor_id = req_data->ece.vendor_id; event->ece.attr_mod = req_data->ece.attr_mod; } static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id, const struct ib_cm_event *ib_event) { return (((ib_event->event == IB_CM_REQ_RECEIVED) && (ib_event->param.req_rcvd.qp_type == id->qp_type)) || ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && (id->qp_type == IB_QPT_UD)) || (!id->qp_type)); } static int cma_ib_req_handler(struct ib_cm_id *cm_id, const struct ib_cm_event *ib_event) { struct rdma_id_private *listen_id, *conn_id = NULL; struct rdma_cm_event event = {}; struct cma_req_info req = {}; struct net_device *net_dev; u8 offset; int ret; listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev); if (IS_ERR(listen_id)) return PTR_ERR(listen_id); trace_cm_req_handler(listen_id, ib_event->event); if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) { ret = -EINVAL; goto net_dev_put; } mutex_lock(&listen_id->handler_mutex); if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) { ret = -ECONNABORTED; goto err_unlock; } offset = cma_user_data_offset(listen_id); event.event = RDMA_CM_EVENT_CONNECT_REQUEST; if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev); event.param.ud.private_data = ib_event->private_data + offset; event.param.ud.private_data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; } else { conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev); cma_set_req_event_data(&event, &ib_event->param.req_rcvd, ib_event->private_data, offset); } if (!conn_id) { ret = -ENOMEM; goto err_unlock; } mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); ret = cma_ib_acquire_dev(conn_id, listen_id, &req); if (ret) { destroy_id_handler_unlock(conn_id); goto err_unlock; } conn_id->cm_id.ib = cm_id; cm_id->context = conn_id; cm_id->cm_handler = cma_ib_handler; ret = cma_cm_event_handler(conn_id, &event); if (ret) { /* Destroy the CM ID by returning a non-zero value. 
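	 * As in cma_ib_handler(): the IB CM frees cm_id once we return
	 * non-zero, so forget it here and release the listener's
	 * handler_mutex before tearing down the freshly created conn_id.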
*/ conn_id->cm_id.ib = NULL; mutex_unlock(&listen_id->handler_mutex); destroy_id_handler_unlock(conn_id); goto net_dev_put; } if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT && conn_id->id.qp_type != IB_QPT_UD) { trace_cm_send_mra(cm_id->context); ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); } mutex_unlock(&conn_id->handler_mutex); err_unlock: mutex_unlock(&listen_id->handler_mutex); net_dev_put: dev_put(net_dev); return ret; } __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) { if (addr->sa_family == AF_IB) return ((struct sockaddr_ib *) addr)->sib_sid; return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); } EXPORT_SYMBOL(rdma_get_service_id); void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid, union ib_gid *dgid) { struct rdma_addr *addr = &cm_id->route.addr; if (!cm_id->device) { if (sgid) memset(sgid, 0, sizeof(*sgid)); if (dgid) memset(dgid, 0, sizeof(*dgid)); return; } if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) { if (sgid) rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid); if (dgid) rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid); } else { if (sgid) rdma_addr_get_sgid(&addr->dev_addr, sgid); if (dgid) rdma_addr_get_dgid(&addr->dev_addr, dgid); } } EXPORT_SYMBOL(rdma_read_gids); static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) { struct rdma_id_private *id_priv = iw_id->context; struct rdma_cm_event event = {}; int ret = 0; struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; mutex_lock(&id_priv->handler_mutex); if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) goto out; switch (iw_event->event) { case IW_CM_EVENT_CLOSE: event.event = RDMA_CM_EVENT_DISCONNECTED; break; case IW_CM_EVENT_CONNECT_REPLY: memcpy(cma_src_addr(id_priv), laddr, rdma_addr_size(laddr)); memcpy(cma_dst_addr(id_priv), raddr, rdma_addr_size(raddr)); switch (iw_event->status) { case 0: event.event = RDMA_CM_EVENT_ESTABLISHED; event.param.conn.initiator_depth = iw_event->ird; event.param.conn.responder_resources = iw_event->ord; break; case -ECONNRESET: case -ECONNREFUSED: event.event = RDMA_CM_EVENT_REJECTED; break; case -ETIMEDOUT: event.event = RDMA_CM_EVENT_UNREACHABLE; break; default: event.event = RDMA_CM_EVENT_CONNECT_ERROR; break; } break; case IW_CM_EVENT_ESTABLISHED: event.event = RDMA_CM_EVENT_ESTABLISHED; event.param.conn.initiator_depth = iw_event->ird; event.param.conn.responder_resources = iw_event->ord; break; default: goto out; } event.status = iw_event->status; event.param.conn.private_data = iw_event->private_data; event.param.conn.private_data_len = iw_event->private_data_len; ret = cma_cm_event_handler(id_priv, &event); if (ret) { /* Destroy the CM ID by returning a non-zero value. 
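	 * Same pattern on the iWARP side: forget the iw_cm_id and
	 * destroy the rdma_cm id because the ULP rejected the event.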
*/ id_priv->cm_id.iw = NULL; destroy_id_handler_unlock(id_priv); return ret; } out: mutex_unlock(&id_priv->handler_mutex); return ret; } static int iw_conn_req_handler(struct iw_cm_id *cm_id, struct iw_cm_event *iw_event) { struct rdma_id_private *listen_id, *conn_id; struct rdma_cm_event event = {}; int ret = -ECONNABORTED; struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; event.event = RDMA_CM_EVENT_CONNECT_REQUEST; event.param.conn.private_data = iw_event->private_data; event.param.conn.private_data_len = iw_event->private_data_len; event.param.conn.initiator_depth = iw_event->ird; event.param.conn.responder_resources = iw_event->ord; listen_id = cm_id->context; mutex_lock(&listen_id->handler_mutex); if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) goto out; /* Create a new RDMA id for the new IW CM ID */ conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net, listen_id->id.event_handler, listen_id->id.context, RDMA_PS_TCP, IB_QPT_RC, listen_id); if (IS_ERR(conn_id)) { ret = -ENOMEM; goto out; } mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); conn_id->state = RDMA_CM_CONNECT; ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr); if (ret) { mutex_unlock(&listen_id->handler_mutex); destroy_id_handler_unlock(conn_id); return ret; } ret = cma_iw_acquire_dev(conn_id, listen_id); if (ret) { mutex_unlock(&listen_id->handler_mutex); destroy_id_handler_unlock(conn_id); return ret; } conn_id->cm_id.iw = cm_id; cm_id->context = conn_id; cm_id->cm_handler = cma_iw_handler; memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr)); memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr)); ret = cma_cm_event_handler(conn_id, &event); if (ret) { /* User wants to destroy the CM ID */ conn_id->cm_id.iw = NULL; mutex_unlock(&listen_id->handler_mutex); destroy_id_handler_unlock(conn_id); return ret; } mutex_unlock(&conn_id->handler_mutex); out: mutex_unlock(&listen_id->handler_mutex); return ret; } static int cma_ib_listen(struct rdma_id_private *id_priv) { struct sockaddr *addr; struct ib_cm_id *id; __be64 svc_id; addr = cma_src_addr(id_priv); svc_id = rdma_get_service_id(&id_priv->id, addr); id = ib_cm_insert_listen(id_priv->id.device, cma_ib_req_handler, svc_id); if (IS_ERR(id)) return PTR_ERR(id); id_priv->cm_id.ib = id; return 0; } static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) { int ret; struct iw_cm_id *id; id = iw_create_cm_id(id_priv->id.device, iw_conn_req_handler, id_priv); if (IS_ERR(id)) return PTR_ERR(id); mutex_lock(&id_priv->qp_mutex); id->tos = id_priv->tos; id->tos_set = id_priv->tos_set; mutex_unlock(&id_priv->qp_mutex); id->afonly = id_priv->afonly; id_priv->cm_id.iw = id; memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), rdma_addr_size(cma_src_addr(id_priv))); ret = iw_cm_listen(id_priv->cm_id.iw, backlog); if (ret) { iw_destroy_cm_id(id_priv->cm_id.iw); id_priv->cm_id.iw = NULL; } return ret; } static int cma_listen_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) { struct rdma_id_private *id_priv = id->context; /* Listening IDs are always destroyed on removal */ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) return -1; id->context = id_priv->id.context; id->event_handler = id_priv->id.event_handler; trace_cm_event_handler(id_priv, event); return id_priv->id.event_handler(id, event); } static int cma_listen_on_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev, struct rdma_id_private 
**to_destroy) { struct rdma_id_private *dev_id_priv; struct net *net = id_priv->id.route.addr.dev_addr.net; int ret; lockdep_assert_held(&lock); *to_destroy = NULL; if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) return 0; dev_id_priv = __rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps, id_priv->id.qp_type, id_priv); if (IS_ERR(dev_id_priv)) return PTR_ERR(dev_id_priv); dev_id_priv->state = RDMA_CM_ADDR_BOUND; memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), rdma_addr_size(cma_src_addr(id_priv))); _cma_attach_to_dev(dev_id_priv, cma_dev); rdma_restrack_add(&dev_id_priv->res); cma_id_get(id_priv); dev_id_priv->internal_id = 1; dev_id_priv->afonly = id_priv->afonly; mutex_lock(&id_priv->qp_mutex); dev_id_priv->tos_set = id_priv->tos_set; dev_id_priv->tos = id_priv->tos; mutex_unlock(&id_priv->qp_mutex); ret = rdma_listen(&dev_id_priv->id, id_priv->backlog); if (ret) goto err_listen; list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list); return 0; err_listen: /* Caller must destroy this after releasing lock */ *to_destroy = dev_id_priv; dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret); return ret; } static int cma_listen_on_all(struct rdma_id_private *id_priv) { struct rdma_id_private *to_destroy; struct cma_device *cma_dev; int ret; mutex_lock(&lock); list_add_tail(&id_priv->listen_any_item, &listen_any_list); list_for_each_entry(cma_dev, &dev_list, list) { ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); if (ret) { /* Prevent racing with cma_process_remove() */ if (to_destroy) list_del_init(&to_destroy->device_item); goto err_listen; } } mutex_unlock(&lock); return 0; err_listen: _cma_cancel_listens(id_priv); mutex_unlock(&lock); if (to_destroy) rdma_destroy_id(&to_destroy->id); return ret; } void rdma_set_service_type(struct rdma_cm_id *id, int tos) { struct rdma_id_private *id_priv; id_priv = container_of(id, struct rdma_id_private, id); mutex_lock(&id_priv->qp_mutex); id_priv->tos = (u8) tos; id_priv->tos_set = true; mutex_unlock(&id_priv->qp_mutex); } EXPORT_SYMBOL(rdma_set_service_type); /** * rdma_set_ack_timeout() - Set the ack timeout of QP associated * with a connection identifier. * @id: Communication identifier to associated with service type. * @timeout: Ack timeout to set a QP, expressed as 4.096 * 2^(timeout) usec. * * This function should be called before rdma_connect() on active side, * and on passive side before rdma_accept(). It is applicable to primary * path only. The timeout will affect the local side of the QP, it is not * negotiated with remote side and zero disables the timer. In case it is * set before rdma_resolve_route, the value will also be used to determine * PacketLifeTime for RoCE. * * Return: 0 for success */ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout) { struct rdma_id_private *id_priv; if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI) return -EINVAL; id_priv = container_of(id, struct rdma_id_private, id); mutex_lock(&id_priv->qp_mutex); id_priv->timeout = timeout; id_priv->timeout_set = true; mutex_unlock(&id_priv->qp_mutex); return 0; } EXPORT_SYMBOL(rdma_set_ack_timeout); /** * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the * QP associated with a connection identifier. * @id: Communication identifier to associated with service type. * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK * Timer Field" in the IBTA specification. 
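 *	Encodings 1-31 select increasingly long delays (1 is the shortest)
 *	and 0 selects the largest value in the table (about 655 ms, as noted
 *	below). As an illustrative, hypothetical use only, a passive-side
 *	caller might issue rdma_set_min_rnr_timer(id, 1) before
 *	rdma_accept() to request the shortest minimum delay.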
* * This function should be called before rdma_connect() on active * side, and on passive side before rdma_accept(). The timer value * will be associated with the local QP. When it receives a send it is * not read to handle, typically if the receive queue is empty, an RNR * Retry NAK is returned to the requester with the min_rnr_timer * encoded. The requester will then wait at least the time specified * in the NAK before retrying. The default is zero, which translates * to a minimum RNR Timer value of 655 ms. * * Return: 0 for success */ int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer) { struct rdma_id_private *id_priv; /* It is a five-bit value */ if (min_rnr_timer & 0xe0) return -EINVAL; if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT)) return -EINVAL; id_priv = container_of(id, struct rdma_id_private, id); mutex_lock(&id_priv->qp_mutex); id_priv->min_rnr_timer = min_rnr_timer; id_priv->min_rnr_timer_set = true; mutex_unlock(&id_priv->qp_mutex); return 0; } EXPORT_SYMBOL(rdma_set_min_rnr_timer); static int route_set_path_rec_inbound(struct cma_work *work, struct sa_path_rec *path_rec) { struct rdma_route *route = &work->id->id.route; if (!route->path_rec_inbound) { route->path_rec_inbound = kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL); if (!route->path_rec_inbound) return -ENOMEM; } *route->path_rec_inbound = *path_rec; return 0; } static int route_set_path_rec_outbound(struct cma_work *work, struct sa_path_rec *path_rec) { struct rdma_route *route = &work->id->id.route; if (!route->path_rec_outbound) { route->path_rec_outbound = kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL); if (!route->path_rec_outbound) return -ENOMEM; } *route->path_rec_outbound = *path_rec; return 0; } static void cma_query_handler(int status, struct sa_path_rec *path_rec, unsigned int num_prs, void *context) { struct cma_work *work = context; struct rdma_route *route; int i; route = &work->id->id.route; if (status) goto fail; for (i = 0; i < num_prs; i++) { if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP)) *route->path_rec = path_rec[i]; else if (path_rec[i].flags & IB_PATH_INBOUND) status = route_set_path_rec_inbound(work, &path_rec[i]); else if (path_rec[i].flags & IB_PATH_OUTBOUND) status = route_set_path_rec_outbound(work, &path_rec[i]); else status = -EINVAL; if (status) goto fail; } route->num_pri_alt_paths = 1; queue_work(cma_wq, &work->work); return; fail: work->old_state = RDMA_CM_ROUTE_QUERY; work->new_state = RDMA_CM_ADDR_RESOLVED; work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; work->event.status = status; pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. 
status %d\n", status); queue_work(cma_wq, &work->work); } static int cma_query_ib_route(struct rdma_id_private *id_priv, unsigned long timeout_ms, struct cma_work *work) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct sa_path_rec path_rec; ib_sa_comp_mask comp_mask; struct sockaddr_in6 *sin6; struct sockaddr_ib *sib; memset(&path_rec, 0, sizeof path_rec); if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num)) path_rec.rec_type = SA_PATH_REC_TYPE_OPA; else path_rec.rec_type = SA_PATH_REC_TYPE_IB; rdma_addr_get_sgid(dev_addr, &path_rec.sgid); rdma_addr_get_dgid(dev_addr, &path_rec.dgid); path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); path_rec.numb_path = 1; path_rec.reversible = 1; path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; switch (cma_family(id_priv)) { case AF_INET: path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); comp_mask |= IB_SA_PATH_REC_QOS_CLASS; break; case AF_INET6: sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; break; case AF_IB: sib = (struct sockaddr_ib *) cma_src_addr(id_priv); path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20); comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; break; } id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, id_priv->id.port_num, &path_rec, comp_mask, timeout_ms, GFP_KERNEL, cma_query_handler, work, &id_priv->query); return (id_priv->query_id < 0) ? id_priv->query_id : 0; } static void cma_iboe_join_work_handler(struct work_struct *work) { struct cma_multicast *mc = container_of(work, struct cma_multicast, iboe_join.work); struct rdma_cm_event *event = &mc->iboe_join.event; struct rdma_id_private *id_priv = mc->id_priv; int ret; mutex_lock(&id_priv->handler_mutex); if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) goto out_unlock; ret = cma_cm_event_handler(id_priv, event); WARN_ON(ret); out_unlock: mutex_unlock(&id_priv->handler_mutex); if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN) rdma_destroy_ah_attr(&event->param.ud.ah_attr); } static void cma_work_handler(struct work_struct *_work) { struct cma_work *work = container_of(_work, struct cma_work, work); struct rdma_id_private *id_priv = work->id; mutex_lock(&id_priv->handler_mutex); if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) goto out_unlock; if (work->old_state != 0 || work->new_state != 0) { if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) goto out_unlock; } if (cma_cm_event_handler(id_priv, &work->event)) { cma_id_put(id_priv); destroy_id_handler_unlock(id_priv); goto out_free; } out_unlock: mutex_unlock(&id_priv->handler_mutex); cma_id_put(id_priv); out_free: if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN) rdma_destroy_ah_attr(&work->event.param.ud.ah_attr); kfree(work); } static void cma_init_resolve_route_work(struct cma_work *work, struct rdma_id_private *id_priv) { work->id = id_priv; INIT_WORK(&work->work, cma_work_handler); work->old_state = RDMA_CM_ROUTE_QUERY; work->new_state = RDMA_CM_ROUTE_RESOLVED; work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; } static void enqueue_resolve_addr_work(struct cma_work *work, struct rdma_id_private *id_priv) { 
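	/* Hand the ADDR_QUERY -> ADDR_RESOLVED transition off to cma_wq:
	 * cma_work_handler() performs the state change and delivers
	 * RDMA_CM_EVENT_ADDR_RESOLVED to the ULP outside of this call chain.
	 */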
/* Balances with cma_id_put() in cma_work_handler */ cma_id_get(id_priv); work->id = id_priv; INIT_WORK(&work->work, cma_work_handler); work->old_state = RDMA_CM_ADDR_QUERY; work->new_state = RDMA_CM_ADDR_RESOLVED; work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; queue_work(cma_wq, &work->work); } static int cma_resolve_ib_route(struct rdma_id_private *id_priv, unsigned long timeout_ms) { struct rdma_route *route = &id_priv->id.route; struct cma_work *work; int ret; work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; cma_init_resolve_route_work(work, id_priv); if (!route->path_rec) route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); if (!route->path_rec) { ret = -ENOMEM; goto err1; } ret = cma_query_ib_route(id_priv, timeout_ms, work); if (ret) goto err2; return 0; err2: kfree(route->path_rec); route->path_rec = NULL; err1: kfree(work); return ret; } static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, unsigned long supported_gids, enum ib_gid_type default_gid) { if ((network_type == RDMA_NETWORK_IPV4 || network_type == RDMA_NETWORK_IPV6) && test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) return IB_GID_TYPE_ROCE_UDP_ENCAP; return default_gid; } /* * cma_iboe_set_path_rec_l2_fields() is helper function which sets * path record type based on GID type. * It also sets up other L2 fields which includes destination mac address * netdev ifindex, of the path record. * It returns the netdev of the bound interface for this path record entry. */ static struct net_device * cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv) { struct rdma_route *route = &id_priv->id.route; enum ib_gid_type gid_type = IB_GID_TYPE_ROCE; struct rdma_addr *addr = &route->addr; unsigned long supported_gids; struct net_device *ndev; if (!addr->dev_addr.bound_dev_if) return NULL; ndev = dev_get_by_index(addr->dev_addr.net, addr->dev_addr.bound_dev_if); if (!ndev) return NULL; supported_gids = roce_gid_type_mask_support(id_priv->id.device, id_priv->id.port_num); gid_type = cma_route_gid_type(addr->dev_addr.network, supported_gids, id_priv->gid_type); /* Use the hint from IP Stack to select GID Type */ if (gid_type < ib_network_to_gid_type(addr->dev_addr.network)) gid_type = ib_network_to_gid_type(addr->dev_addr.network); route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type); route->path_rec->roce.route_resolved = true; sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr); return ndev; } int rdma_set_ib_path(struct rdma_cm_id *id, struct sa_path_rec *path_rec) { struct rdma_id_private *id_priv; struct net_device *ndev; int ret; id_priv = container_of(id, struct rdma_id_private, id); if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_RESOLVED)) return -EINVAL; id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec), GFP_KERNEL); if (!id->route.path_rec) { ret = -ENOMEM; goto err; } if (rdma_protocol_roce(id->device, id->port_num)) { ndev = cma_iboe_set_path_rec_l2_fields(id_priv); if (!ndev) { ret = -ENODEV; goto err_free; } dev_put(ndev); } id->route.num_pri_alt_paths = 1; return 0; err_free: kfree(id->route.path_rec); id->route.path_rec = NULL; err: cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); return ret; } EXPORT_SYMBOL(rdma_set_ib_path); static int cma_resolve_iw_route(struct rdma_id_private *id_priv) { struct cma_work *work; work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; cma_init_resolve_route_work(work, id_priv); queue_work(cma_wq, &work->work); return 0; } static 
int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio) { struct net_device *dev; dev = vlan_dev_real_dev(vlan_ndev); if (dev->num_tc) return netdev_get_prio_tc_map(dev, prio); return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; } struct iboe_prio_tc_map { int input_prio; int output_tc; bool found; }; static int get_lower_vlan_dev_tc(struct net_device *dev, struct netdev_nested_priv *priv) { struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data; if (is_vlan_dev(dev)) map->output_tc = get_vlan_ndev_tc(dev, map->input_prio); else if (dev->num_tc) map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio); else map->output_tc = 0; /* We are interested only in first level VLAN device, so always * return 1 to stop iterating over next level devices. */ map->found = true; return 1; } static int iboe_tos_to_sl(struct net_device *ndev, int tos) { struct iboe_prio_tc_map prio_tc_map = {}; int prio = rt_tos2priority(tos); struct netdev_nested_priv priv; /* If VLAN device, get it directly from the VLAN netdev */ if (is_vlan_dev(ndev)) return get_vlan_ndev_tc(ndev, prio); prio_tc_map.input_prio = prio; priv.data = (void *)&prio_tc_map; rcu_read_lock(); netdev_walk_all_lower_dev_rcu(ndev, get_lower_vlan_dev_tc, &priv); rcu_read_unlock(); /* If map is found from lower device, use it; Otherwise * continue with the current netdevice to get priority to tc map. */ if (prio_tc_map.found) return prio_tc_map.output_tc; else if (ndev->num_tc) return netdev_get_prio_tc_map(ndev, prio); else return 0; } static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv) { struct sockaddr_in6 *addr6; u16 dport, sport; u32 hash, fl; addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv); fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK; if ((cma_family(id_priv) != AF_INET6) || !fl) { dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv))); sport = be16_to_cpu(cma_port(cma_src_addr(id_priv))); hash = (u32)sport * 31 + dport; fl = hash & IB_GRH_FLOWLABEL_MASK; } return cpu_to_be32(fl); } static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) { struct rdma_route *route = &id_priv->id.route; struct rdma_addr *addr = &route->addr; struct cma_work *work; int ret; struct net_device *ndev; u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num - rdma_start_port(id_priv->cma_dev->device)]; u8 tos; mutex_lock(&id_priv->qp_mutex); tos = id_priv->tos_set ? 
id_priv->tos : default_roce_tos; mutex_unlock(&id_priv->qp_mutex); work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); if (!route->path_rec) { ret = -ENOMEM; goto err1; } route->num_pri_alt_paths = 1; ndev = cma_iboe_set_path_rec_l2_fields(id_priv); if (!ndev) { ret = -ENODEV; goto err2; } rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, &route->path_rec->sgid); rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, &route->path_rec->dgid); if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) /* TODO: get the hoplimit from the inet/inet6 device */ route->path_rec->hop_limit = addr->dev_addr.hoplimit; else route->path_rec->hop_limit = 1; route->path_rec->reversible = 1; route->path_rec->pkey = cpu_to_be16(0xffff); route->path_rec->mtu_selector = IB_SA_EQ; route->path_rec->sl = iboe_tos_to_sl(ndev, tos); route->path_rec->traffic_class = tos; route->path_rec->mtu = iboe_get_mtu(ndev->mtu); route->path_rec->rate_selector = IB_SA_EQ; route->path_rec->rate = IB_RATE_PORT_CURRENT; dev_put(ndev); route->path_rec->packet_life_time_selector = IB_SA_EQ; /* In case ACK timeout is set, use this value to calculate * PacketLifeTime. As per IBTA 12.7.34, * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay). * Assuming a negligible local ACK delay, we can use * PacketLifeTime = local ACK timeout/2 * as a reasonable approximation for RoCE networks. */ mutex_lock(&id_priv->qp_mutex); if (id_priv->timeout_set && id_priv->timeout) route->path_rec->packet_life_time = id_priv->timeout - 1; else route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; mutex_unlock(&id_priv->qp_mutex); if (!route->path_rec->mtu) { ret = -EINVAL; goto err2; } if (rdma_protocol_roce_udp_encap(id_priv->id.device, id_priv->id.port_num)) route->path_rec->flow_label = cma_get_roce_udp_flow_label(id_priv); cma_init_resolve_route_work(work, id_priv); queue_work(cma_wq, &work->work); return 0; err2: kfree(route->path_rec); route->path_rec = NULL; route->num_pri_alt_paths = 0; err1: kfree(work); return ret; } int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms) { struct rdma_id_private *id_priv; int ret; if (!timeout_ms) return -EINVAL; id_priv = container_of(id, struct rdma_id_private, id); if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) return -EINVAL; cma_id_get(id_priv); if (rdma_cap_ib_sa(id->device, id->port_num)) ret = cma_resolve_ib_route(id_priv, timeout_ms); else if (rdma_protocol_roce(id->device, id->port_num)) { ret = cma_resolve_iboe_route(id_priv); if (!ret) cma_add_id_to_tree(id_priv); } else if (rdma_protocol_iwarp(id->device, id->port_num)) ret = cma_resolve_iw_route(id_priv); else ret = -ENOSYS; if (ret) goto err; return 0; err: cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); cma_id_put(id_priv); return ret; } EXPORT_SYMBOL(rdma_resolve_route); static void cma_set_loopback(struct sockaddr *addr) { switch (addr->sa_family) { case AF_INET: ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); break; case AF_INET6: ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, 0, 0, 0, htonl(1)); break; default: ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, 0, 0, 0, htonl(1)); break; } } static int cma_bind_loopback(struct rdma_id_private *id_priv) { struct cma_device *cma_dev, *cur_dev; union ib_gid gid; enum ib_port_state port_state; unsigned int p; u16 pkey; int ret; cma_dev = NULL; 
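	/* Loopback device selection: prefer the first device with a port in
	 * the ACTIVE state; if none is found, fall back to the first usable
	 * device and port 1.
	 */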
mutex_lock(&lock); list_for_each_entry(cur_dev, &dev_list, list) { if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cur_dev->device, 1)) continue; if (!cma_dev) cma_dev = cur_dev; rdma_for_each_port (cur_dev->device, p) { if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) && port_state == IB_PORT_ACTIVE) { cma_dev = cur_dev; goto port_found; } } } if (!cma_dev) { ret = -ENODEV; goto out; } p = 1; port_found: ret = rdma_query_gid(cma_dev->device, p, 0, &gid); if (ret) goto out; ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); if (ret) goto out; id_priv->id.route.addr.dev_addr.dev_type = (rdma_protocol_ib(cma_dev->device, p)) ? ARPHRD_INFINIBAND : ARPHRD_ETHER; rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); id_priv->id.port_num = p; cma_attach_to_dev(id_priv, cma_dev); rdma_restrack_add(&id_priv->res); cma_set_loopback(cma_src_addr(id_priv)); out: mutex_unlock(&lock); return ret; } static void addr_handler(int status, struct sockaddr *src_addr, struct rdma_dev_addr *dev_addr, void *context) { struct rdma_id_private *id_priv = context; struct rdma_cm_event event = {}; struct sockaddr *addr; struct sockaddr_storage old_addr; mutex_lock(&id_priv->handler_mutex); if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_RESOLVED)) goto out; /* * Store the previous src address, so that if we fail to acquire * matching rdma device, old address can be restored back, which helps * to cancel the cma listen operation correctly. */ addr = cma_src_addr(id_priv); memcpy(&old_addr, addr, rdma_addr_size(addr)); memcpy(addr, src_addr, rdma_addr_size(src_addr)); if (!status && !id_priv->cma_dev) { status = cma_acquire_dev_by_src_ip(id_priv); if (status) pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n", status); rdma_restrack_add(&id_priv->res); } else if (status) { pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. 
status %d\n", status); } if (status) { memcpy(addr, &old_addr, rdma_addr_size((struct sockaddr *)&old_addr)); if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ADDR_BOUND)) goto out; event.event = RDMA_CM_EVENT_ADDR_ERROR; event.status = status; } else event.event = RDMA_CM_EVENT_ADDR_RESOLVED; if (cma_cm_event_handler(id_priv, &event)) { destroy_id_handler_unlock(id_priv); return; } out: mutex_unlock(&id_priv->handler_mutex); } static int cma_resolve_loopback(struct rdma_id_private *id_priv) { struct cma_work *work; union ib_gid gid; int ret; work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; if (!id_priv->cma_dev) { ret = cma_bind_loopback(id_priv); if (ret) goto err; } rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); enqueue_resolve_addr_work(work, id_priv); return 0; err: kfree(work); return ret; } static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) { struct cma_work *work; int ret; work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; if (!id_priv->cma_dev) { ret = cma_resolve_ib_dev(id_priv); if (ret) goto err; } rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); enqueue_resolve_addr_work(work, id_priv); return 0; err: kfree(work); return ret; } int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) { struct rdma_id_private *id_priv; unsigned long flags; int ret; id_priv = container_of(id, struct rdma_id_private, id); spin_lock_irqsave(&id_priv->lock, flags); if ((reuse && id_priv->state != RDMA_CM_LISTEN) || id_priv->state == RDMA_CM_IDLE) { id_priv->reuseaddr = reuse; ret = 0; } else { ret = -EINVAL; } spin_unlock_irqrestore(&id_priv->lock, flags); return ret; } EXPORT_SYMBOL(rdma_set_reuseaddr); int rdma_set_afonly(struct rdma_cm_id *id, int afonly) { struct rdma_id_private *id_priv; unsigned long flags; int ret; id_priv = container_of(id, struct rdma_id_private, id); spin_lock_irqsave(&id_priv->lock, flags); if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { id_priv->options |= (1 << CMA_OPTION_AFONLY); id_priv->afonly = afonly; ret = 0; } else { ret = -EINVAL; } spin_unlock_irqrestore(&id_priv->lock, flags); return ret; } EXPORT_SYMBOL(rdma_set_afonly); static void cma_bind_port(struct rdma_bind_list *bind_list, struct rdma_id_private *id_priv) { struct sockaddr *addr; struct sockaddr_ib *sib; u64 sid, mask; __be16 port; lockdep_assert_held(&lock); addr = cma_src_addr(id_priv); port = htons(bind_list->port); switch (addr->sa_family) { case AF_INET: ((struct sockaddr_in *) addr)->sin_port = port; break; case AF_INET6: ((struct sockaddr_in6 *) addr)->sin6_port = port; break; case AF_IB: sib = (struct sockaddr_ib *) addr; sid = be64_to_cpu(sib->sib_sid); mask = be64_to_cpu(sib->sib_sid_mask); sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); sib->sib_sid_mask = cpu_to_be64(~0ULL); break; } id_priv->bind_list = bind_list; hlist_add_head(&id_priv->node, &bind_list->owners); } static int cma_alloc_port(enum rdma_ucm_port_space ps, struct rdma_id_private *id_priv, unsigned short snum) { struct rdma_bind_list *bind_list; int ret; lockdep_assert_held(&lock); bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); if (!bind_list) return -ENOMEM; ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, snum); if (ret < 0) goto err; bind_list->ps = ps; bind_list->port = snum; cma_bind_port(bind_list, id_priv); return 0; err: 
kfree(bind_list); return ret == -ENOSPC ? -EADDRNOTAVAIL : ret; } static int cma_port_is_unique(struct rdma_bind_list *bind_list, struct rdma_id_private *id_priv) { struct rdma_id_private *cur_id; struct sockaddr *daddr = cma_dst_addr(id_priv); struct sockaddr *saddr = cma_src_addr(id_priv); __be16 dport = cma_port(daddr); lockdep_assert_held(&lock); hlist_for_each_entry(cur_id, &bind_list->owners, node) { struct sockaddr *cur_daddr = cma_dst_addr(cur_id); struct sockaddr *cur_saddr = cma_src_addr(cur_id); __be16 cur_dport = cma_port(cur_daddr); if (id_priv == cur_id) continue; /* different dest port -> unique */ if (!cma_any_port(daddr) && !cma_any_port(cur_daddr) && (dport != cur_dport)) continue; /* different src address -> unique */ if (!cma_any_addr(saddr) && !cma_any_addr(cur_saddr) && cma_addr_cmp(saddr, cur_saddr)) continue; /* different dst address -> unique */ if (!cma_any_addr(daddr) && !cma_any_addr(cur_daddr) && cma_addr_cmp(daddr, cur_daddr)) continue; return -EADDRNOTAVAIL; } return 0; } static int cma_alloc_any_port(enum rdma_ucm_port_space ps, struct rdma_id_private *id_priv) { static unsigned int last_used_port; int low, high, remaining; unsigned int rover; struct net *net = id_priv->id.route.addr.dev_addr.net; lockdep_assert_held(&lock); inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rover = get_random_u32_inclusive(low, remaining + low - 1); retry: if (last_used_port != rover) { struct rdma_bind_list *bind_list; int ret; bind_list = cma_ps_find(net, ps, (unsigned short)rover); if (!bind_list) { ret = cma_alloc_port(ps, id_priv, rover); } else { ret = cma_port_is_unique(bind_list, id_priv); if (!ret) cma_bind_port(bind_list, id_priv); } /* * Remember previously used port number in order to avoid * re-using same port immediately after it is closed. */ if (!ret) last_used_port = rover; if (ret != -EADDRNOTAVAIL) return ret; } if (--remaining) { rover++; if ((rover < low) || (rover > high)) rover = low; goto retry; } return -EADDRNOTAVAIL; } /* * Check that the requested port is available. This is called when trying to * bind to a specific port, or when trying to listen on a bound port. In * the latter case, the provided id_priv may already be on the bind_list, but * we still need to check that it's okay to start listening. 
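 * Two ids may share a port only if both requested reuseaddr (or both are
 * afonly and bound to different address families); otherwise a wildcard or
 * an identical source address already on the list makes the port unusable.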
*/ static int cma_check_port(struct rdma_bind_list *bind_list, struct rdma_id_private *id_priv, uint8_t reuseaddr) { struct rdma_id_private *cur_id; struct sockaddr *addr, *cur_addr; lockdep_assert_held(&lock); addr = cma_src_addr(id_priv); hlist_for_each_entry(cur_id, &bind_list->owners, node) { if (id_priv == cur_id) continue; if (reuseaddr && cur_id->reuseaddr) continue; cur_addr = cma_src_addr(cur_id); if (id_priv->afonly && cur_id->afonly && (addr->sa_family != cur_addr->sa_family)) continue; if (cma_any_addr(addr) || cma_any_addr(cur_addr)) return -EADDRNOTAVAIL; if (!cma_addr_cmp(addr, cur_addr)) return -EADDRINUSE; } return 0; } static int cma_use_port(enum rdma_ucm_port_space ps, struct rdma_id_private *id_priv) { struct rdma_bind_list *bind_list; unsigned short snum; int ret; lockdep_assert_held(&lock); snum = ntohs(cma_port(cma_src_addr(id_priv))); if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); if (!bind_list) { ret = cma_alloc_port(ps, id_priv, snum); } else { ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); if (!ret) cma_bind_port(bind_list, id_priv); } return ret; } static enum rdma_ucm_port_space cma_select_inet_ps(struct rdma_id_private *id_priv) { switch (id_priv->id.ps) { case RDMA_PS_TCP: case RDMA_PS_UDP: case RDMA_PS_IPOIB: case RDMA_PS_IB: return id_priv->id.ps; default: return 0; } } static enum rdma_ucm_port_space cma_select_ib_ps(struct rdma_id_private *id_priv) { enum rdma_ucm_port_space ps = 0; struct sockaddr_ib *sib; u64 sid_ps, mask, sid; sib = (struct sockaddr_ib *) cma_src_addr(id_priv); mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; sid = be64_to_cpu(sib->sib_sid) & mask; if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { sid_ps = RDMA_IB_IP_PS_IB; ps = RDMA_PS_IB; } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && (sid == (RDMA_IB_IP_PS_TCP & mask))) { sid_ps = RDMA_IB_IP_PS_TCP; ps = RDMA_PS_TCP; } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && (sid == (RDMA_IB_IP_PS_UDP & mask))) { sid_ps = RDMA_IB_IP_PS_UDP; ps = RDMA_PS_UDP; } if (ps) { sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | be64_to_cpu(sib->sib_sid_mask)); } return ps; } static int cma_get_port(struct rdma_id_private *id_priv) { enum rdma_ucm_port_space ps; int ret; if (cma_family(id_priv) != AF_IB) ps = cma_select_inet_ps(id_priv); else ps = cma_select_ib_ps(id_priv); if (!ps) return -EPROTONOSUPPORT; mutex_lock(&lock); if (cma_any_port(cma_src_addr(id_priv))) ret = cma_alloc_any_port(ps, id_priv); else ret = cma_use_port(ps, id_priv); mutex_unlock(&lock); return ret; } static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, struct sockaddr *addr) { #if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 *sin6; if (addr->sa_family != AF_INET6) return 0; sin6 = (struct sockaddr_in6 *) addr; if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) return 0; if (!sin6->sin6_scope_id) return -EINVAL; dev_addr->bound_dev_if = sin6->sin6_scope_id; #endif return 0; } int rdma_listen(struct rdma_cm_id *id, int backlog) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); int ret; if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) { struct sockaddr_in any_in = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_ANY), }; /* For a well behaved ULP state will 
be RDMA_CM_IDLE */ ret = rdma_bind_addr(id, (struct sockaddr *)&any_in); if (ret) return ret; if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))) return -EINVAL; } /* * Once the ID reaches RDMA_CM_LISTEN it is not allowed to be reusable * any more, and has to be unique in the bind list. */ if (id_priv->reuseaddr) { mutex_lock(&lock); ret = cma_check_port(id_priv->bind_list, id_priv, 0); if (!ret) id_priv->reuseaddr = 0; mutex_unlock(&lock); if (ret) goto err; } id_priv->backlog = backlog; if (id_priv->cma_dev) { if (rdma_cap_ib_cm(id->device, 1)) { ret = cma_ib_listen(id_priv); if (ret) goto err; } else if (rdma_cap_iw_cm(id->device, 1)) { ret = cma_iw_listen(id_priv, backlog); if (ret) goto err; } else { ret = -ENOSYS; goto err; } } else { ret = cma_listen_on_all(id_priv); if (ret) goto err; } return 0; err: id_priv->backlog = 0; /* * All the failure paths that lead here will not allow the req_handler's * to have run. */ cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); return ret; } EXPORT_SYMBOL(rdma_listen); static int rdma_bind_addr_dst(struct rdma_id_private *id_priv, struct sockaddr *addr, const struct sockaddr *daddr) { struct sockaddr *id_daddr; int ret; if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && addr->sa_family != AF_IB) return -EAFNOSUPPORT; if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) return -EINVAL; ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr); if (ret) goto err1; memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); if (!cma_any_addr(addr)) { ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr); if (ret) goto err1; ret = cma_acquire_dev_by_src_ip(id_priv); if (ret) goto err1; } if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { if (addr->sa_family == AF_INET) id_priv->afonly = 1; #if IS_ENABLED(CONFIG_IPV6) else if (addr->sa_family == AF_INET6) { struct net *net = id_priv->id.route.addr.dev_addr.net; id_priv->afonly = net->ipv6.sysctl.bindv6only; } #endif } id_daddr = cma_dst_addr(id_priv); if (daddr != id_daddr) memcpy(id_daddr, daddr, rdma_addr_size(addr)); id_daddr->sa_family = addr->sa_family; ret = cma_get_port(id_priv); if (ret) goto err2; if (!cma_any_addr(addr)) rdma_restrack_add(&id_priv->res); return 0; err2: if (id_priv->cma_dev) cma_release_dev(id_priv); err1: cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); return ret; } static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, const struct sockaddr *dst_addr) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); struct sockaddr_storage zero_sock = {}; if (src_addr && src_addr->sa_family) return rdma_bind_addr_dst(id_priv, src_addr, dst_addr); /* * When the src_addr is not specified, automatically supply an any addr */ zero_sock.ss_family = dst_addr->sa_family; if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)&zero_sock; struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst_addr; src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; } else if (dst_addr->sa_family == AF_IB) { ((struct sockaddr_ib *)&zero_sock)->sib_pkey = ((struct sockaddr_ib *)dst_addr)->sib_pkey; } return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr); } /* * If required, resolve the source address for bind and leave the id_priv in * state 
RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior * calls made by ULP, a previously bound ID will not be re-bound and src_addr is * ignored. */ static int resolve_prepare_src(struct rdma_id_private *id_priv, struct sockaddr *src_addr, const struct sockaddr *dst_addr) { int ret; if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) { /* For a well behaved ULP state will be RDMA_CM_IDLE */ ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr); if (ret) return ret; if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))) return -EINVAL; } else { memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); } if (cma_family(id_priv) != dst_addr->sa_family) { ret = -EINVAL; goto err_state; } return 0; err_state: cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); return ret; } int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, const struct sockaddr *dst_addr, unsigned long timeout_ms) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); int ret; ret = resolve_prepare_src(id_priv, src_addr, dst_addr); if (ret) return ret; if (cma_any_addr(dst_addr)) { ret = cma_resolve_loopback(id_priv); } else { if (dst_addr->sa_family == AF_IB) { ret = cma_resolve_ib_addr(id_priv); } else { /* * The FSM can return back to RDMA_CM_ADDR_BOUND after * rdma_resolve_ip() is called, eg through the error * path in addr_handler(). If this happens the existing * request must be canceled before issuing a new one. * Since canceling a request is a bit slow and this * oddball path is rare, keep track once a request has * been issued. The track turns out to be a permanent * state since this is the only cancel as it is * immediately before rdma_resolve_ip(). */ if (id_priv->used_resolve_ip) rdma_addr_cancel(&id->route.addr.dev_addr); else id_priv->used_resolve_ip = 1; ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr, &id->route.addr.dev_addr, timeout_ms, addr_handler, false, id_priv); } } if (ret) goto err; return 0; err: cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); return ret; } EXPORT_SYMBOL(rdma_resolve_addr); int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv)); } EXPORT_SYMBOL(rdma_bind_addr); static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) { struct cma_hdr *cma_hdr; cma_hdr = hdr; cma_hdr->cma_version = CMA_VERSION; if (cma_family(id_priv) == AF_INET) { struct sockaddr_in *src4, *dst4; src4 = (struct sockaddr_in *) cma_src_addr(id_priv); dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); cma_set_ip_ver(cma_hdr, 4); cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; cma_hdr->port = src4->sin_port; } else if (cma_family(id_priv) == AF_INET6) { struct sockaddr_in6 *src6, *dst6; src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); cma_set_ip_ver(cma_hdr, 6); cma_hdr->src_addr.ip6 = src6->sin6_addr; cma_hdr->dst_addr.ip6 = dst6->sin6_addr; cma_hdr->port = src6->sin6_port; } return 0; } static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, const struct ib_cm_event *ib_event) { struct rdma_id_private *id_priv = cm_id->context; struct rdma_cm_event event = {}; const struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; int ret; mutex_lock(&id_priv->handler_mutex); if 
(READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) goto out; switch (ib_event->event) { case IB_CM_SIDR_REQ_ERROR: event.event = RDMA_CM_EVENT_UNREACHABLE; event.status = -ETIMEDOUT; break; case IB_CM_SIDR_REP_RECEIVED: event.param.ud.private_data = ib_event->private_data; event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; if (rep->status != IB_SIDR_SUCCESS) { event.event = RDMA_CM_EVENT_UNREACHABLE; event.status = ib_event->param.sidr_rep_rcvd.status; pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n", event.status); break; } ret = cma_set_qkey(id_priv, rep->qkey); if (ret) { pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret); event.event = RDMA_CM_EVENT_ADDR_ERROR; event.status = ret; break; } ib_init_ah_attr_from_path(id_priv->id.device, id_priv->id.port_num, id_priv->id.route.path_rec, &event.param.ud.ah_attr, rep->sgid_attr); event.param.ud.qp_num = rep->qpn; event.param.ud.qkey = rep->qkey; event.event = RDMA_CM_EVENT_ESTABLISHED; event.status = 0; break; default: pr_err("RDMA CMA: unexpected IB CM event: %d\n", ib_event->event); goto out; } ret = cma_cm_event_handler(id_priv, &event); rdma_destroy_ah_attr(&event.param.ud.ah_attr); if (ret) { /* Destroy the CM ID by returning a non-zero value. */ id_priv->cm_id.ib = NULL; destroy_id_handler_unlock(id_priv); return ret; } out: mutex_unlock(&id_priv->handler_mutex); return 0; } static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct ib_cm_sidr_req_param req; struct ib_cm_id *id; void *private_data; u8 offset; int ret; memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) return -EINVAL; if (req.private_data_len) { private_data = kzalloc(req.private_data_len, GFP_ATOMIC); if (!private_data) return -ENOMEM; } else { private_data = NULL; } if (conn_param->private_data && conn_param->private_data_len) memcpy(private_data + offset, conn_param->private_data, conn_param->private_data_len); if (private_data) { ret = cma_format_hdr(private_data, id_priv); if (ret) goto out; req.private_data = private_data; } id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, id_priv); if (IS_ERR(id)) { ret = PTR_ERR(id); goto out; } id_priv->cm_id.ib = id; req.path = id_priv->id.route.path_rec; req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); req.max_cm_retries = CMA_MAX_CM_RETRIES; trace_cm_send_sidr_req(id_priv); ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); if (ret) { ib_destroy_cm_id(id_priv->cm_id.ib); id_priv->cm_id.ib = NULL; } out: kfree(private_data); return ret; } static int cma_connect_ib(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct ib_cm_req_param req; struct rdma_route *route; void *private_data; struct ib_cm_id *id; u8 offset; int ret; memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) return -EINVAL; if (req.private_data_len) { private_data = kzalloc(req.private_data_len, GFP_ATOMIC); if (!private_data) return -ENOMEM; } else { private_data = NULL; } if (conn_param->private_data && conn_param->private_data_len) memcpy(private_data + offset, conn_param->private_data, conn_param->private_data_len); id = ib_create_cm_id(id_priv->id.device, 
cma_ib_handler, id_priv); if (IS_ERR(id)) { ret = PTR_ERR(id); goto out; } id_priv->cm_id.ib = id; route = &id_priv->id.route; if (private_data) { ret = cma_format_hdr(private_data, id_priv); if (ret) goto out; req.private_data = private_data; } req.primary_path = &route->path_rec[0]; req.primary_path_inbound = route->path_rec_inbound; req.primary_path_outbound = route->path_rec_outbound; if (route->num_pri_alt_paths == 2) req.alternate_path = &route->path_rec[1]; req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; /* Alternate path SGID attribute currently unsupported */ req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); req.qp_num = id_priv->qp_num; req.qp_type = id_priv->id.qp_type; req.starting_psn = id_priv->seq_num; req.responder_resources = conn_param->responder_resources; req.initiator_depth = conn_param->initiator_depth; req.flow_control = conn_param->flow_control; req.retry_count = min_t(u8, 7, conn_param->retry_count); req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; req.max_cm_retries = CMA_MAX_CM_RETRIES; req.srq = id_priv->srq ? 1 : 0; req.ece.vendor_id = id_priv->ece.vendor_id; req.ece.attr_mod = id_priv->ece.attr_mod; trace_cm_send_req(id_priv); ret = ib_send_cm_req(id_priv->cm_id.ib, &req); out: if (ret && !IS_ERR(id)) { ib_destroy_cm_id(id); id_priv->cm_id.ib = NULL; } kfree(private_data); return ret; } static int cma_connect_iw(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct iw_cm_id *cm_id; int ret; struct iw_cm_conn_param iw_param; cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); if (IS_ERR(cm_id)) return PTR_ERR(cm_id); mutex_lock(&id_priv->qp_mutex); cm_id->tos = id_priv->tos; cm_id->tos_set = id_priv->tos_set; mutex_unlock(&id_priv->qp_mutex); id_priv->cm_id.iw = cm_id; memcpy(&cm_id->local_addr, cma_src_addr(id_priv), rdma_addr_size(cma_src_addr(id_priv))); memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), rdma_addr_size(cma_dst_addr(id_priv))); ret = cma_modify_qp_rtr(id_priv, conn_param); if (ret) goto out; if (conn_param) { iw_param.ord = conn_param->initiator_depth; iw_param.ird = conn_param->responder_resources; iw_param.private_data = conn_param->private_data; iw_param.private_data_len = conn_param->private_data_len; iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; } else { memset(&iw_param, 0, sizeof iw_param); iw_param.qpn = id_priv->qp_num; } ret = iw_cm_connect(cm_id, &iw_param); out: if (ret) { iw_destroy_cm_id(cm_id); id_priv->cm_id.iw = NULL; } return ret; } /** * rdma_connect_locked - Initiate an active connection request. * @id: Connection identifier to connect. * @conn_param: Connection information used for connected QPs. * * Same as rdma_connect() but can only be called from the * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback. 
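 *
 * A minimal sketch of the intended call site (hypothetical ULP event
 * handler; QP setup is elided and the names and parameter values are
 * illustrative only):
 *
 *	static int ulp_cm_handler(struct rdma_cm_id *id,
 *				  struct rdma_cm_event *ev)
 *	{
 *		struct rdma_conn_param param = { .retry_count = 7 };
 *
 *		if (ev->event == RDMA_CM_EVENT_ROUTE_RESOLVED)
 *			return rdma_connect_locked(id, &param);
 *		return 0;
 *	}
 *
 * Returning the (non-zero) error from the handler makes the CM core
 * destroy the id, matching the usual error convention in this file.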
*/ int rdma_connect_locked(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); int ret; if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) return -EINVAL; if (!id->qp) { id_priv->qp_num = conn_param->qp_num; id_priv->srq = conn_param->srq; } if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) ret = cma_resolve_ib_udp(id_priv, conn_param); else ret = cma_connect_ib(id_priv, conn_param); } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = cma_connect_iw(id_priv, conn_param); } else { ret = -ENOSYS; } if (ret) goto err_state; return 0; err_state: cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); return ret; } EXPORT_SYMBOL(rdma_connect_locked); /** * rdma_connect - Initiate an active connection request. * @id: Connection identifier to connect. * @conn_param: Connection information used for connected QPs. * * Users must have resolved a route for the rdma_cm_id to connect with by having * called rdma_resolve_route before calling this routine. * * This call will either connect to a remote QP or obtain remote QP information * for unconnected rdma_cm_id's. The actual operation is based on the * rdma_cm_id's port space. */ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); int ret; mutex_lock(&id_priv->handler_mutex); ret = rdma_connect_locked(id, conn_param); mutex_unlock(&id_priv->handler_mutex); return ret; } EXPORT_SYMBOL(rdma_connect); /** * rdma_connect_ece - Initiate an active connection request with ECE data. * @id: Connection identifier to connect. * @conn_param: Connection information used for connected QPs. * @ece: ECE parameters * * See rdma_connect() explanation. */ int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, struct rdma_ucm_ece *ece) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); id_priv->ece.vendor_id = ece->vendor_id; id_priv->ece.attr_mod = ece->attr_mod; return rdma_connect(id, conn_param); } EXPORT_SYMBOL(rdma_connect_ece); static int cma_accept_ib(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct ib_cm_rep_param rep; int ret; ret = cma_modify_qp_rtr(id_priv, conn_param); if (ret) goto out; ret = cma_modify_qp_rts(id_priv, conn_param); if (ret) goto out; memset(&rep, 0, sizeof rep); rep.qp_num = id_priv->qp_num; rep.starting_psn = id_priv->seq_num; rep.private_data = conn_param->private_data; rep.private_data_len = conn_param->private_data_len; rep.responder_resources = conn_param->responder_resources; rep.initiator_depth = conn_param->initiator_depth; rep.failover_accepted = 0; rep.flow_control = conn_param->flow_control; rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); rep.srq = id_priv->srq ? 
1 : 0; rep.ece.vendor_id = id_priv->ece.vendor_id; rep.ece.attr_mod = id_priv->ece.attr_mod; trace_cm_send_rep(id_priv); ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); out: return ret; } static int cma_accept_iw(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct iw_cm_conn_param iw_param; int ret; if (!conn_param) return -EINVAL; ret = cma_modify_qp_rtr(id_priv, conn_param); if (ret) return ret; iw_param.ord = conn_param->initiator_depth; iw_param.ird = conn_param->responder_resources; iw_param.private_data = conn_param->private_data; iw_param.private_data_len = conn_param->private_data_len; if (id_priv->id.qp) iw_param.qpn = id_priv->qp_num; else iw_param.qpn = conn_param->qp_num; return iw_cm_accept(id_priv->cm_id.iw, &iw_param); } static int cma_send_sidr_rep(struct rdma_id_private *id_priv, enum ib_cm_sidr_status status, u32 qkey, const void *private_data, int private_data_len) { struct ib_cm_sidr_rep_param rep; int ret; memset(&rep, 0, sizeof rep); rep.status = status; if (status == IB_SIDR_SUCCESS) { if (qkey) ret = cma_set_qkey(id_priv, qkey); else ret = cma_set_default_qkey(id_priv); if (ret) return ret; rep.qp_num = id_priv->qp_num; rep.qkey = id_priv->qkey; rep.ece.vendor_id = id_priv->ece.vendor_id; rep.ece.attr_mod = id_priv->ece.attr_mod; } rep.private_data = private_data; rep.private_data_len = private_data_len; trace_cm_send_sidr_rep(id_priv); return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); } /** * rdma_accept - Called to accept a connection request or response. * @id: Connection identifier associated with the request. * @conn_param: Information needed to establish the connection. This must be * provided if accepting a connection request. If accepting a connection * response, this parameter must be NULL. * * Typically, this routine is only called by the listener to accept a connection * request. It must also be called on the active side of a connection if the * user is performing their own QP transitions. * * In the case of error, a reject message is sent to the remote side and the * state of the qp associated with the id is modified to error, such that any * previously posted receive buffers would be flushed. * * This function is for use by kernel ULPs and must be called from under the * handler callback. 
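 *
 * A minimal sketch (hypothetical handler on the passive side; QP creation
 * is elided and the parameter values are illustrative only):
 *
 *	static int ulp_listen_handler(struct rdma_cm_id *id,
 *				      struct rdma_cm_event *ev)
 *	{
 *		struct rdma_conn_param param = {
 *			.responder_resources = 1,
 *			.initiator_depth = 1,
 *		};
 *
 *		if (ev->event == RDMA_CM_EVENT_CONNECT_REQUEST)
 *			return rdma_accept(id, &param);
 *		return 0;
 *	}
 *
 * On failure rdma_accept() has already moved the QP to the error state and
 * sent a reject, so a ULP that wants the id torn down can simply return
 * the error.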
*/ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); int ret; lockdep_assert_held(&id_priv->handler_mutex); if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) return -EINVAL; if (!id->qp && conn_param) { id_priv->qp_num = conn_param->qp_num; id_priv->srq = conn_param->srq; } if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) { if (conn_param) ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, conn_param->qkey, conn_param->private_data, conn_param->private_data_len); else ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 0, NULL, 0); } else { if (conn_param) ret = cma_accept_ib(id_priv, conn_param); else ret = cma_rep_recv(id_priv); } } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = cma_accept_iw(id_priv, conn_param); } else { ret = -ENOSYS; } if (ret) goto reject; return 0; reject: cma_modify_qp_err(id_priv); rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); return ret; } EXPORT_SYMBOL(rdma_accept); int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, struct rdma_ucm_ece *ece) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); id_priv->ece.vendor_id = ece->vendor_id; id_priv->ece.attr_mod = ece->attr_mod; return rdma_accept(id, conn_param); } EXPORT_SYMBOL(rdma_accept_ece); void rdma_lock_handler(struct rdma_cm_id *id) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); mutex_lock(&id_priv->handler_mutex); } EXPORT_SYMBOL(rdma_lock_handler); void rdma_unlock_handler(struct rdma_cm_id *id) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); mutex_unlock(&id_priv->handler_mutex); } EXPORT_SYMBOL(rdma_unlock_handler); int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) { struct rdma_id_private *id_priv; int ret; id_priv = container_of(id, struct rdma_id_private, id); if (!id_priv->cm_id.ib) return -EINVAL; switch (id->device->node_type) { case RDMA_NODE_IB_CA: ret = ib_cm_notify(id_priv->cm_id.ib, event); break; default: ret = 0; break; } return ret; } EXPORT_SYMBOL(rdma_notify); int rdma_reject(struct rdma_cm_id *id, const void *private_data, u8 private_data_len, u8 reason) { struct rdma_id_private *id_priv; int ret; id_priv = container_of(id, struct rdma_id_private, id); if (!id_priv->cm_id.ib) return -EINVAL; if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) { ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, private_data, private_data_len); } else { trace_cm_send_rej(id_priv); ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0, private_data, private_data_len); } } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = iw_cm_reject(id_priv->cm_id.iw, private_data, private_data_len); } else { ret = -ENOSYS; } return ret; } EXPORT_SYMBOL(rdma_reject); int rdma_disconnect(struct rdma_cm_id *id) { struct rdma_id_private *id_priv; int ret; id_priv = container_of(id, struct rdma_id_private, id); if (!id_priv->cm_id.ib) return -EINVAL; if (rdma_cap_ib_cm(id->device, id->port_num)) { ret = cma_modify_qp_err(id_priv); if (ret) goto out; /* Initiate or respond to a disconnect. 
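 * Try to send a DREQ first; if the IB CM refuses (typically because a
 * DREQ from the peer is already being processed), reply with a DREP
 * instead.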
*/ trace_cm_disconnect(id_priv); if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) { if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0)) trace_cm_sent_drep(id_priv); } else { trace_cm_sent_dreq(id_priv); } } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); } else ret = -EINVAL; out: return ret; } EXPORT_SYMBOL(rdma_disconnect); static void cma_make_mc_event(int status, struct rdma_id_private *id_priv, struct ib_sa_multicast *multicast, struct rdma_cm_event *event, struct cma_multicast *mc) { struct rdma_dev_addr *dev_addr; enum ib_gid_type gid_type; struct net_device *ndev; if (status) pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n", status); event->status = status; event->param.ud.private_data = mc->context; if (status) { event->event = RDMA_CM_EVENT_MULTICAST_ERROR; return; } dev_addr = &id_priv->id.route.addr.dev_addr; ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); gid_type = id_priv->cma_dev ->default_gid_type[id_priv->id.port_num - rdma_start_port( id_priv->cma_dev->device)]; event->event = RDMA_CM_EVENT_MULTICAST_JOIN; if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num, &multicast->rec, ndev, gid_type, &event->param.ud.ah_attr)) { event->event = RDMA_CM_EVENT_MULTICAST_ERROR; goto out; } event->param.ud.qp_num = 0xFFFFFF; event->param.ud.qkey = id_priv->qkey; out: dev_put(ndev); } static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) { struct cma_multicast *mc = multicast->context; struct rdma_id_private *id_priv = mc->id_priv; struct rdma_cm_event event = {}; int ret = 0; mutex_lock(&id_priv->handler_mutex); if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL || READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING) goto out; ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); if (!ret) { cma_make_mc_event(status, id_priv, multicast, &event, mc); ret = cma_cm_event_handler(id_priv, &event); } rdma_destroy_ah_attr(&event.param.ud.ah_attr); WARN_ON(ret); out: mutex_unlock(&id_priv->handler_mutex); return 0; } static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr, union ib_gid *mgid) { unsigned char mc_map[MAX_ADDR_LEN]; struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct sockaddr_in *sin = (struct sockaddr_in *) addr; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; if (cma_any_addr(addr)) { memset(mgid, 0, sizeof *mgid); } else if ((addr->sa_family == AF_INET6) && ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == 0xFF10A01B)) { /* IPv6 address is an SA assigned MGID. 
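 * (the check above matched the ff1x:a01b prefix), so the address is used
 * verbatim as the MGID.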
*/ memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); } else if (addr->sa_family == AF_IB) { memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid); } else if (addr->sa_family == AF_INET6) { ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); if (id_priv->id.ps == RDMA_PS_UDP) mc_map[7] = 0x01; /* Use RDMA CM signature */ *mgid = *(union ib_gid *) (mc_map + 4); } else { ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); if (id_priv->id.ps == RDMA_PS_UDP) mc_map[7] = 0x01; /* Use RDMA CM signature */ *mgid = *(union ib_gid *) (mc_map + 4); } } static int cma_join_ib_multicast(struct rdma_id_private *id_priv, struct cma_multicast *mc) { struct ib_sa_mcmember_rec rec; struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; ib_sa_comp_mask comp_mask; int ret; ib_addr_get_mgid(dev_addr, &rec.mgid); ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, &rec.mgid, &rec); if (ret) return ret; if (!id_priv->qkey) { ret = cma_set_default_qkey(id_priv); if (ret) return ret; } cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); rec.qkey = cpu_to_be32(id_priv->qkey); rdma_addr_get_sgid(dev_addr, &rec.port_gid); rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); rec.join_state = mc->join_state; comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL | IB_SA_MCMEMBER_REC_FLOW_LABEL | IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; if (id_priv->id.ps == RDMA_PS_IPOIB) comp_mask |= IB_SA_MCMEMBER_REC_RATE | IB_SA_MCMEMBER_REC_RATE_SELECTOR | IB_SA_MCMEMBER_REC_MTU_SELECTOR | IB_SA_MCMEMBER_REC_MTU | IB_SA_MCMEMBER_REC_HOP_LIMIT; mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device, id_priv->id.port_num, &rec, comp_mask, GFP_KERNEL, cma_ib_mc_handler, mc); return PTR_ERR_OR_ZERO(mc->sa_mc); } static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, enum ib_gid_type gid_type) { struct sockaddr_in *sin = (struct sockaddr_in *)addr; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; if (cma_any_addr(addr)) { memset(mgid, 0, sizeof *mgid); } else if (addr->sa_family == AF_INET6) { memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); } else { mgid->raw[0] = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff; mgid->raw[1] = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 
0 : 0x0e; mgid->raw[2] = 0; mgid->raw[3] = 0; mgid->raw[4] = 0; mgid->raw[5] = 0; mgid->raw[6] = 0; mgid->raw[7] = 0; mgid->raw[8] = 0; mgid->raw[9] = 0; mgid->raw[10] = 0xff; mgid->raw[11] = 0xff; *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr; } } static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, struct cma_multicast *mc) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; int err = 0; struct sockaddr *addr = (struct sockaddr *)&mc->addr; struct net_device *ndev = NULL; struct ib_sa_multicast ib; enum ib_gid_type gid_type; bool send_only; send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); if (cma_zero_addr(addr)) return -EINVAL; gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - rdma_start_port(id_priv->cma_dev->device)]; cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type); ib.rec.pkey = cpu_to_be16(0xffff); if (dev_addr->bound_dev_if) ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); if (!ndev) return -ENODEV; ib.rec.rate = IB_RATE_PORT_CURRENT; ib.rec.hop_limit = 1; ib.rec.mtu = iboe_get_mtu(ndev->mtu); if (addr->sa_family == AF_INET) { if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; if (!send_only) { err = cma_igmp_send(ndev, &ib.rec.mgid, true); } } } else { if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) err = -ENOTSUPP; } dev_put(ndev); if (err || !ib.rec.mtu) return err ?: -EINVAL; if (!id_priv->qkey) cma_set_default_qkey(id_priv); rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, &ib.rec.port_gid); INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler); cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc); queue_work(cma_wq, &mc->iboe_join.work); return 0; } int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, u8 join_state, void *context) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); struct cma_multicast *mc; int ret; /* Not supported for kernel QPs */ if (WARN_ON(id->qp)) return -EINVAL; /* ULP is calling this wrong. 
*/ if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND && READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED)) return -EINVAL; if (id_priv->id.qp_type != IB_QPT_UD) return -EINVAL; mc = kzalloc(sizeof(*mc), GFP_KERNEL); if (!mc) return -ENOMEM; memcpy(&mc->addr, addr, rdma_addr_size(addr)); mc->context = context; mc->id_priv = id_priv; mc->join_state = join_state; if (rdma_protocol_roce(id->device, id->port_num)) { ret = cma_iboe_join_multicast(id_priv, mc); if (ret) goto out_err; } else if (rdma_cap_ib_mcast(id->device, id->port_num)) { ret = cma_join_ib_multicast(id_priv, mc); if (ret) goto out_err; } else { ret = -ENOSYS; goto out_err; } spin_lock(&id_priv->lock); list_add(&mc->list, &id_priv->mc_list); spin_unlock(&id_priv->lock); return 0; out_err: kfree(mc); return ret; } EXPORT_SYMBOL(rdma_join_multicast); void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) { struct rdma_id_private *id_priv; struct cma_multicast *mc; id_priv = container_of(id, struct rdma_id_private, id); spin_lock_irq(&id_priv->lock); list_for_each_entry(mc, &id_priv->mc_list, list) { if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0) continue; list_del(&mc->list); spin_unlock_irq(&id_priv->lock); WARN_ON(id_priv->cma_dev->device != id->device); destroy_mc(id_priv, mc); return; } spin_unlock_irq(&id_priv->lock); } EXPORT_SYMBOL(rdma_leave_multicast); static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) { struct rdma_dev_addr *dev_addr; struct cma_work *work; dev_addr = &id_priv->id.route.addr.dev_addr; if ((dev_addr->bound_dev_if == ndev->ifindex) && (net_eq(dev_net(ndev), dev_addr->net)) && memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { pr_info("RDMA CM addr change for ndev %s used by id %p\n", ndev->name, &id_priv->id); work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; INIT_WORK(&work->work, cma_work_handler); work->id = id_priv; work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; cma_id_get(id_priv); queue_work(cma_wq, &work->work); } return 0; } static int cma_netdev_callback(struct notifier_block *self, unsigned long event, void *ptr) { struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct cma_device *cma_dev; struct rdma_id_private *id_priv; int ret = NOTIFY_DONE; if (event != NETDEV_BONDING_FAILOVER) return NOTIFY_DONE; if (!netif_is_bond_master(ndev)) return NOTIFY_DONE; mutex_lock(&lock); list_for_each_entry(cma_dev, &dev_list, list) list_for_each_entry(id_priv, &cma_dev->id_list, device_item) { ret = cma_netdev_change(ndev, id_priv); if (ret) goto out; } out: mutex_unlock(&lock); return ret; } static void cma_netevent_work_handler(struct work_struct *_work) { struct rdma_id_private *id_priv = container_of(_work, struct rdma_id_private, id.net_work); struct rdma_cm_event event = {}; mutex_lock(&id_priv->handler_mutex); if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) goto out_unlock; event.event = RDMA_CM_EVENT_UNREACHABLE; event.status = -ETIMEDOUT; if (cma_cm_event_handler(id_priv, &event)) { __acquire(&id_priv->handler_mutex); id_priv->cm_id.ib = NULL; cma_id_put(id_priv); destroy_id_handler_unlock(id_priv); return; } out_unlock: mutex_unlock(&id_priv->handler_mutex); cma_id_put(id_priv); } static int cma_netevent_callback(struct notifier_block *self, unsigned long event, void *ctx) { struct id_table_entry *ips_node = NULL; struct rdma_id_private *current_id; struct neighbour *neigh = ctx; unsigned long flags; if (event != 
NETEVENT_NEIGH_UPDATE) return NOTIFY_DONE; spin_lock_irqsave(&id_table_lock, flags); if (neigh->tbl->family == AF_INET6) { struct sockaddr_in6 neigh_sock_6; neigh_sock_6.sin6_family = AF_INET6; neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key; ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex, (struct sockaddr *)&neigh_sock_6); } else if (neigh->tbl->family == AF_INET) { struct sockaddr_in neigh_sock_4; neigh_sock_4.sin_family = AF_INET; neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key); ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex, (struct sockaddr *)&neigh_sock_4); } else goto out; if (!ips_node) goto out; list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) { if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr, neigh->ha, ETH_ALEN)) continue; INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler); cma_id_get(current_id); queue_work(cma_wq, &current_id->id.net_work); } out: spin_unlock_irqrestore(&id_table_lock, flags); return NOTIFY_DONE; } static struct notifier_block cma_nb = { .notifier_call = cma_netdev_callback }; static struct notifier_block cma_netevent_cb = { .notifier_call = cma_netevent_callback }; static void cma_send_device_removal_put(struct rdma_id_private *id_priv) { struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL }; enum rdma_cm_state state; unsigned long flags; mutex_lock(&id_priv->handler_mutex); /* Record that we want to remove the device */ spin_lock_irqsave(&id_priv->lock, flags); state = id_priv->state; if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) { spin_unlock_irqrestore(&id_priv->lock, flags); mutex_unlock(&id_priv->handler_mutex); cma_id_put(id_priv); return; } id_priv->state = RDMA_CM_DEVICE_REMOVAL; spin_unlock_irqrestore(&id_priv->lock, flags); if (cma_cm_event_handler(id_priv, &event)) { /* * At this point the ULP promises it won't call * rdma_destroy_id() concurrently */ cma_id_put(id_priv); mutex_unlock(&id_priv->handler_mutex); trace_cm_id_destroy(id_priv); _destroy_id(id_priv, state); return; } mutex_unlock(&id_priv->handler_mutex); /* * If this races with destroy then the thread that first assigns state * to a destroying does the cancel. 
*/ cma_cancel_operation(id_priv, state); cma_id_put(id_priv); } static void cma_process_remove(struct cma_device *cma_dev) { mutex_lock(&lock); while (!list_empty(&cma_dev->id_list)) { struct rdma_id_private *id_priv = list_first_entry( &cma_dev->id_list, struct rdma_id_private, device_item); list_del_init(&id_priv->listen_item); list_del_init(&id_priv->device_item); cma_id_get(id_priv); mutex_unlock(&lock); cma_send_device_removal_put(id_priv); mutex_lock(&lock); } mutex_unlock(&lock); cma_dev_put(cma_dev); wait_for_completion(&cma_dev->comp); } static bool cma_supported(struct ib_device *device) { u32 i; rdma_for_each_port(device, i) { if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i)) return true; } return false; } static int cma_add_one(struct ib_device *device) { struct rdma_id_private *to_destroy; struct cma_device *cma_dev; struct rdma_id_private *id_priv; unsigned long supported_gids = 0; int ret; u32 i; if (!cma_supported(device)) return -EOPNOTSUPP; cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL); if (!cma_dev) return -ENOMEM; cma_dev->device = device; cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, sizeof(*cma_dev->default_gid_type), GFP_KERNEL); if (!cma_dev->default_gid_type) { ret = -ENOMEM; goto free_cma_dev; } cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt, sizeof(*cma_dev->default_roce_tos), GFP_KERNEL); if (!cma_dev->default_roce_tos) { ret = -ENOMEM; goto free_gid_type; } rdma_for_each_port (device, i) { supported_gids = roce_gid_type_mask_support(device, i); WARN_ON(!supported_gids); if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE)) cma_dev->default_gid_type[i - rdma_start_port(device)] = CMA_PREFERRED_ROCE_GID_TYPE; else cma_dev->default_gid_type[i - rdma_start_port(device)] = find_first_bit(&supported_gids, BITS_PER_LONG); cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0; } init_completion(&cma_dev->comp); refcount_set(&cma_dev->refcount, 1); INIT_LIST_HEAD(&cma_dev->id_list); ib_set_client_data(device, &cma_client, cma_dev); mutex_lock(&lock); list_add_tail(&cma_dev->list, &dev_list); list_for_each_entry(id_priv, &listen_any_list, listen_any_item) { ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); if (ret) goto free_listen; } mutex_unlock(&lock); trace_cm_add_one(device); return 0; free_listen: list_del(&cma_dev->list); mutex_unlock(&lock); /* cma_process_remove() will delete to_destroy */ cma_process_remove(cma_dev); kfree(cma_dev->default_roce_tos); free_gid_type: kfree(cma_dev->default_gid_type); free_cma_dev: kfree(cma_dev); return ret; } static void cma_remove_one(struct ib_device *device, void *client_data) { struct cma_device *cma_dev = client_data; trace_cm_remove_one(device); mutex_lock(&lock); list_del(&cma_dev->list); mutex_unlock(&lock); cma_process_remove(cma_dev); kfree(cma_dev->default_roce_tos); kfree(cma_dev->default_gid_type); kfree(cma_dev); } static int cma_init_net(struct net *net) { struct cma_pernet *pernet = cma_pernet(net); xa_init(&pernet->tcp_ps); xa_init(&pernet->udp_ps); xa_init(&pernet->ipoib_ps); xa_init(&pernet->ib_ps); return 0; } static void cma_exit_net(struct net *net) { struct cma_pernet *pernet = cma_pernet(net); WARN_ON(!xa_empty(&pernet->tcp_ps)); WARN_ON(!xa_empty(&pernet->udp_ps)); WARN_ON(!xa_empty(&pernet->ipoib_ps)); WARN_ON(!xa_empty(&pernet->ib_ps)); } static struct pernet_operations cma_pernet_operations = { .init = cma_init_net, .exit = cma_exit_net, .id = &cma_pernet_id, .size = sizeof(struct cma_pernet), }; static int __init cma_init(void) { int ret; /* * There 
is a rare lock ordering dependency in cma_netdev_callback() * that only happens when bonding is enabled. Teach lockdep that rtnl * must never be nested under lock so it can find these without having * to test with bonding. */ if (IS_ENABLED(CONFIG_LOCKDEP)) { rtnl_lock(); mutex_lock(&lock); mutex_unlock(&lock); rtnl_unlock(); } cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM); if (!cma_wq) return -ENOMEM; ret = register_pernet_subsys(&cma_pernet_operations); if (ret) goto err_wq; ib_sa_register_client(&sa_client); register_netdevice_notifier(&cma_nb); register_netevent_notifier(&cma_netevent_cb); ret = ib_register_client(&cma_client); if (ret) goto err; ret = cma_configfs_init(); if (ret) goto err_ib; return 0; err_ib: ib_unregister_client(&cma_client); err: unregister_netevent_notifier(&cma_netevent_cb); unregister_netdevice_notifier(&cma_nb); ib_sa_unregister_client(&sa_client); unregister_pernet_subsys(&cma_pernet_operations); err_wq: destroy_workqueue(cma_wq); return ret; } static void __exit cma_cleanup(void) { cma_configfs_exit(); ib_unregister_client(&cma_client); unregister_netevent_notifier(&cma_netevent_cb); unregister_netdevice_notifier(&cma_nb); ib_sa_unregister_client(&sa_client); unregister_pernet_subsys(&cma_pernet_operations); destroy_workqueue(cma_wq); } module_init(cma_init); module_exit(cma_cleanup);
linux-master
drivers/infiniband/core/cma.c
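Illustrative note (not part of cma.c above): rdma_join_multicast() and rdma_leave_multicast() are meant to be driven from a kernel ULP's cm event handler on a UD cm_id that has no kernel QP attached. The sketch below is a minimal outline of that flow under stated assumptions: the demo_* names and demo_mcast_addr are made up, address resolution (rdma_resolve_addr() against the group address) is assumed to be started elsewhere, and a real ULP would go on to create its UD QP and attach it using the qkey/qp_num reported with RDMA_CM_EVENT_MULTICAST_JOIN.

#include <linux/err.h>
#include <linux/in.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>
#include <rdma/ib_sa.h>		/* FULLMEMBER_JOIN */

/* Hypothetical multicast address, filled in by the caller before use. */
static struct sockaddr_storage demo_mcast_addr;
static struct rdma_cm_id *demo_id;

static int demo_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		/* id is now bound to a device/port, so joining is legal.
		 * Returning non-zero from a handler asks the core to tear
		 * the id down, which doubles as error handling here.
		 */
		return rdma_join_multicast(id,
					   (struct sockaddr *)&demo_mcast_addr,
					   BIT(FULLMEMBER_JOIN), NULL);
	case RDMA_CM_EVENT_MULTICAST_JOIN:
		/* param.ud carries the AH attributes, the multicast QPN
		 * (0xFFFFFF) and the qkey the UD QP must be attached with.
		 */
		pr_info("demo: joined, qkey 0x%x qpn 0x%x\n",
			event->param.ud.qkey, event->param.ud.qp_num);
		return 0;
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		pr_warn("demo: multicast join failed, status %d\n",
			event->status);
		return 0;
	default:
		return 0;
	}
}

static int demo_start(void)
{
	/* The cm_id is created without a kernel QP; rdma_join_multicast()
	 * refuses ids that already own one. The caller is assumed to kick
	 * off rdma_resolve_addr() afterwards.
	 */
	demo_id = rdma_create_id(&init_net, demo_cm_handler, NULL,
				 RDMA_PS_UDP, IB_QPT_UD);
	return PTR_ERR_OR_ZERO(demo_id);
}

static void demo_stop(void)
{
	rdma_leave_multicast(demo_id, (struct sockaddr *)&demo_mcast_addr);
	rdma_destroy_id(demo_id);
}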
/* * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/export.h> #include <linux/string.h> #include <rdma/ib_pack.h> static u64 value_read(int offset, int size, void *structure) { switch (size) { case 1: return *(u8 *) (structure + offset); case 2: return be16_to_cpup((__be16 *) (structure + offset)); case 4: return be32_to_cpup((__be32 *) (structure + offset)); case 8: return be64_to_cpup((__be64 *) (structure + offset)); default: pr_warn("Field size %d bits not handled\n", size * 8); return 0; } } /** * ib_pack - Pack a structure into a buffer * @desc:Array of structure field descriptions * @desc_len:Number of entries in @desc * @structure:Structure to pack from * @buf:Buffer to pack into * * ib_pack() packs a list of structure fields into a buffer, * controlled by the array of fields in @desc. 
*/ void ib_pack(const struct ib_field *desc, int desc_len, void *structure, void *buf) { int i; for (i = 0; i < desc_len; ++i) { if (desc[i].size_bits <= 32) { int shift; u32 val; __be32 mask; __be32 *addr; shift = 32 - desc[i].offset_bits - desc[i].size_bits; if (desc[i].struct_size_bytes) val = value_read(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, structure) << shift; else val = 0; mask = cpu_to_be32(((1ull << desc[i].size_bits) - 1) << shift); addr = (__be32 *) buf + desc[i].offset_words; *addr = (*addr & ~mask) | (cpu_to_be32(val) & mask); } else if (desc[i].size_bits <= 64) { int shift; u64 val; __be64 mask; __be64 *addr; shift = 64 - desc[i].offset_bits - desc[i].size_bits; if (desc[i].struct_size_bytes) val = value_read(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, structure) << shift; else val = 0; mask = cpu_to_be64((~0ull >> (64 - desc[i].size_bits)) << shift); addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words); *addr = (*addr & ~mask) | (cpu_to_be64(val) & mask); } else { if (desc[i].offset_bits % 8 || desc[i].size_bits % 8) { pr_warn("Structure field %s of size %d bits is not byte-aligned\n", desc[i].field_name, desc[i].size_bits); } if (desc[i].struct_size_bytes) memcpy(buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8, structure + desc[i].struct_offset_bytes, desc[i].size_bits / 8); else memset(buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8, 0, desc[i].size_bits / 8); } } } EXPORT_SYMBOL(ib_pack); static void value_write(int offset, int size, u64 val, void *structure) { switch (size * 8) { case 8: *( u8 *) (structure + offset) = val; break; case 16: *(__be16 *) (structure + offset) = cpu_to_be16(val); break; case 32: *(__be32 *) (structure + offset) = cpu_to_be32(val); break; case 64: *(__be64 *) (structure + offset) = cpu_to_be64(val); break; default: pr_warn("Field size %d bits not handled\n", size * 8); } } /** * ib_unpack - Unpack a buffer into a structure * @desc:Array of structure field descriptions * @desc_len:Number of entries in @desc * @buf:Buffer to unpack from * @structure:Structure to unpack into * * ib_pack() unpacks a list of structure fields from a buffer, * controlled by the array of fields in @desc. */ void ib_unpack(const struct ib_field *desc, int desc_len, void *buf, void *structure) { int i; for (i = 0; i < desc_len; ++i) { if (!desc[i].struct_size_bytes) continue; if (desc[i].size_bits <= 32) { int shift; u32 val; u32 mask; __be32 *addr; shift = 32 - desc[i].offset_bits - desc[i].size_bits; mask = ((1ull << desc[i].size_bits) - 1) << shift; addr = (__be32 *) buf + desc[i].offset_words; val = (be32_to_cpup(addr) & mask) >> shift; value_write(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, val, structure); } else if (desc[i].size_bits <= 64) { int shift; u64 val; u64 mask; __be64 *addr; shift = 64 - desc[i].offset_bits - desc[i].size_bits; mask = (~0ull >> (64 - desc[i].size_bits)) << shift; addr = (__be64 *) buf + desc[i].offset_words; val = (be64_to_cpup(addr) & mask) >> shift; value_write(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, val, structure); } else { if (desc[i].offset_bits % 8 || desc[i].size_bits % 8) { pr_warn("Structure field %s of size %d bits is not byte-aligned\n", desc[i].field_name, desc[i].size_bits); } memcpy(structure + desc[i].struct_offset_bytes, buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8, desc[i].size_bits / 8); } } } EXPORT_SYMBOL(ib_unpack);
linux-master
drivers/infiniband/core/packer.c
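Usage illustration (not part of packer.c above): ib_pack()/ib_unpack() are driven by a table of struct ib_field entries, each naming a field's word/bit offset and width on the wire and, when struct_size_bytes is non-zero, where to read or write it in the unpacked structure. The demo_hdr layout and names below are hypothetical; the in-tree header tables (e.g., the LRH/BTH tables used by the UD header code) follow the same pattern.

#include <linux/stddef.h>	/* offsetof(), sizeof_field() */
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <rdma/ib_pack.h>

/* Hypothetical 4-byte wire header: 4-bit version, 4-bit opcode, 8 reserved
 * bits, 16-bit payload length. Multi-byte fields of the unpacked structure
 * are kept in network byte order, as value_read()/value_write() expect.
 */
struct demo_hdr {
	u8	version;
	u8	opcode;
	__be16	payload_len;
};

static const struct ib_field demo_hdr_table[] = {
	{ .struct_offset_bytes	= offsetof(struct demo_hdr, version),
	  .struct_size_bytes	= sizeof_field(struct demo_hdr, version),
	  .offset_words = 0, .offset_bits = 0, .size_bits = 4,
	  .field_name = "version" },
	{ .struct_offset_bytes	= offsetof(struct demo_hdr, opcode),
	  .struct_size_bytes	= sizeof_field(struct demo_hdr, opcode),
	  .offset_words = 0, .offset_bits = 4, .size_bits = 4,
	  .field_name = "opcode" },
	{ /* no struct backing: packed as zero, skipped by ib_unpack() */
	  .offset_words = 0, .offset_bits = 8, .size_bits = 8,
	  .field_name = "reserved" },
	{ .struct_offset_bytes	= offsetof(struct demo_hdr, payload_len),
	  .struct_size_bytes	= sizeof_field(struct demo_hdr, payload_len),
	  .offset_words = 0, .offset_bits = 16, .size_bits = 16,
	  .field_name = "payload_len" },
};

static void demo_pack_unpack(void)
{
	struct demo_hdr hdr = {
		.version	= 1,
		.opcode		= 7,
		.payload_len	= cpu_to_be16(256),
	};
	u8 wire[4];

	/* wire[] becomes 0x17 0x00 0x01 0x00: big endian on the wire */
	ib_pack(demo_hdr_table, ARRAY_SIZE(demo_hdr_table), &hdr, wire);

	/* unpacking restores the structure fields from the buffer */
	ib_unpack(demo_hdr_table, ARRAY_SIZE(demo_hdr_table), wire, &hdr);
}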
/* * Copyright (c) 2014 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/types.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/task.h> #include <linux/pid.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/vmalloc.h> #include <linux/hugetlb.h> #include <linux/interval_tree.h> #include <linux/hmm.h> #include <linux/pagemap.h> #include <rdma/ib_umem_odp.h> #include "uverbs.h" static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp, const struct mmu_interval_notifier_ops *ops) { int ret; umem_odp->umem.is_odp = 1; mutex_init(&umem_odp->umem_mutex); if (!umem_odp->is_implicit_odp) { size_t page_size = 1UL << umem_odp->page_shift; unsigned long start; unsigned long end; size_t ndmas, npfns; start = ALIGN_DOWN(umem_odp->umem.address, page_size); if (check_add_overflow(umem_odp->umem.address, (unsigned long)umem_odp->umem.length, &end)) return -EOVERFLOW; end = ALIGN(end, page_size); if (unlikely(end < page_size)) return -EOVERFLOW; ndmas = (end - start) >> umem_odp->page_shift; if (!ndmas) return -EINVAL; npfns = (end - start) >> PAGE_SHIFT; umem_odp->pfn_list = kvcalloc( npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL); if (!umem_odp->pfn_list) return -ENOMEM; umem_odp->dma_list = kvcalloc( ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL); if (!umem_odp->dma_list) { ret = -ENOMEM; goto out_pfn_list; } ret = mmu_interval_notifier_insert(&umem_odp->notifier, umem_odp->umem.owning_mm, start, end - start, ops); if (ret) goto out_dma_list; } return 0; out_dma_list: kvfree(umem_odp->dma_list); out_pfn_list: kvfree(umem_odp->pfn_list); return ret; } /** * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem * * Implicit ODP umems do not have a VA range and do not have any page lists. * They exist only to hold the per_mm reference to help the driver create * children umems. 
* * @device: IB device to create UMEM * @access: ib_reg_mr access flags */ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device, int access) { struct ib_umem *umem; struct ib_umem_odp *umem_odp; int ret; if (access & IB_ACCESS_HUGETLB) return ERR_PTR(-EINVAL); umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL); if (!umem_odp) return ERR_PTR(-ENOMEM); umem = &umem_odp->umem; umem->ibdev = device; umem->writable = ib_access_writable(access); umem->owning_mm = current->mm; umem_odp->is_implicit_odp = 1; umem_odp->page_shift = PAGE_SHIFT; umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID); ret = ib_init_umem_odp(umem_odp, NULL); if (ret) { put_pid(umem_odp->tgid); kfree(umem_odp); return ERR_PTR(ret); } return umem_odp; } EXPORT_SYMBOL(ib_umem_odp_alloc_implicit); /** * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit * parent ODP umem * * @root: The parent umem enclosing the child. This must be allocated using * ib_alloc_implicit_odp_umem() * @addr: The starting userspace VA * @size: The length of the userspace VA * @ops: MMU interval ops, currently only @invalidate */ struct ib_umem_odp * ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr, size_t size, const struct mmu_interval_notifier_ops *ops) { /* * Caller must ensure that root cannot be freed during the call to * ib_alloc_odp_umem. */ struct ib_umem_odp *odp_data; struct ib_umem *umem; int ret; if (WARN_ON(!root->is_implicit_odp)) return ERR_PTR(-EINVAL); odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL); if (!odp_data) return ERR_PTR(-ENOMEM); umem = &odp_data->umem; umem->ibdev = root->umem.ibdev; umem->length = size; umem->address = addr; umem->writable = root->umem.writable; umem->owning_mm = root->umem.owning_mm; odp_data->page_shift = PAGE_SHIFT; odp_data->notifier.ops = ops; /* * A mmget must be held when registering a notifier, the owming_mm only * has a mm_grab at this point. */ if (!mmget_not_zero(umem->owning_mm)) { ret = -EFAULT; goto out_free; } odp_data->tgid = get_pid(root->tgid); ret = ib_init_umem_odp(odp_data, ops); if (ret) goto out_tgid; mmput(umem->owning_mm); return odp_data; out_tgid: put_pid(odp_data->tgid); mmput(umem->owning_mm); out_free: kfree(odp_data); return ERR_PTR(ret); } EXPORT_SYMBOL(ib_umem_odp_alloc_child); /** * ib_umem_odp_get - Create a umem_odp for a userspace va * * @device: IB device struct to get UMEM * @addr: userspace virtual address to start at * @size: length of region to pin * @access: IB_ACCESS_xxx flags for memory being pinned * @ops: MMU interval ops, currently only @invalidate * * The driver should use when the access flags indicate ODP memory. It avoids * pinning, instead, stores the mm for future page fault handling in * conjunction with MMU notifiers. 
*/ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size, int access, const struct mmu_interval_notifier_ops *ops) { struct ib_umem_odp *umem_odp; int ret; if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND))) return ERR_PTR(-EINVAL); umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL); if (!umem_odp) return ERR_PTR(-ENOMEM); umem_odp->umem.ibdev = device; umem_odp->umem.length = size; umem_odp->umem.address = addr; umem_odp->umem.writable = ib_access_writable(access); umem_odp->umem.owning_mm = current->mm; umem_odp->notifier.ops = ops; umem_odp->page_shift = PAGE_SHIFT; #ifdef CONFIG_HUGETLB_PAGE if (access & IB_ACCESS_HUGETLB) umem_odp->page_shift = HPAGE_SHIFT; #endif umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID); ret = ib_init_umem_odp(umem_odp, ops); if (ret) goto err_put_pid; return umem_odp; err_put_pid: put_pid(umem_odp->tgid); kfree(umem_odp); return ERR_PTR(ret); } EXPORT_SYMBOL(ib_umem_odp_get); void ib_umem_odp_release(struct ib_umem_odp *umem_odp) { /* * Ensure that no more pages are mapped in the umem. * * It is the driver's responsibility to ensure, before calling us, * that the hardware will not attempt to access the MR any more. */ if (!umem_odp->is_implicit_odp) { mutex_lock(&umem_odp->umem_mutex); ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp)); mutex_unlock(&umem_odp->umem_mutex); mmu_interval_notifier_remove(&umem_odp->notifier); kvfree(umem_odp->dma_list); kvfree(umem_odp->pfn_list); } put_pid(umem_odp->tgid); kfree(umem_odp); } EXPORT_SYMBOL(ib_umem_odp_release); /* * Map for DMA and insert a single page into the on-demand paging page tables. * * @umem: the umem to insert the page to. * @dma_index: index in the umem to add the dma to. * @page: the page struct to map and add. * @access_mask: access permissions needed for this page. * * The function returns -EFAULT if the DMA mapping operation fails. * */ static int ib_umem_odp_map_dma_single_page( struct ib_umem_odp *umem_odp, unsigned int dma_index, struct page *page, u64 access_mask) { struct ib_device *dev = umem_odp->umem.ibdev; dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index]; if (*dma_addr) { /* * If the page is already dma mapped it means it went through * a non-invalidating trasition, like read-only to writable. * Resync the flags. */ *dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask; return 0; } *dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift, DMA_BIDIRECTIONAL); if (ib_dma_mapping_error(dev, *dma_addr)) { *dma_addr = 0; return -EFAULT; } umem_odp->npages++; *dma_addr |= access_mask; return 0; } /** * ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it. * * Maps the range passed in the argument to DMA addresses. * The DMA addresses of the mapped pages is updated in umem_odp->dma_list. * Upon success the ODP MR will be locked to let caller complete its device * page table update. * * Returns the number of pages mapped in success, negative error code * for failure. * @umem_odp: the umem to map and pin * @user_virt: the address from which we need to map. * @bcnt: the minimal number of bytes to pin and map. The mapping might be * bigger due to alignment, and may also be smaller in case of an error * pinning or mapping a page. The actual pages mapped is returned in * the return value. * @access_mask: bit mask of the requested access permissions for the given * range. 
* @fault: is faulting required for the given range */ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt, u64 bcnt, u64 access_mask, bool fault) __acquires(&umem_odp->umem_mutex) { struct task_struct *owning_process = NULL; struct mm_struct *owning_mm = umem_odp->umem.owning_mm; int pfn_index, dma_index, ret = 0, start_idx; unsigned int page_shift, hmm_order, pfn_start_idx; unsigned long num_pfns, current_seq; struct hmm_range range = {}; unsigned long timeout; if (access_mask == 0) return -EINVAL; if (user_virt < ib_umem_start(umem_odp) || user_virt + bcnt > ib_umem_end(umem_odp)) return -EFAULT; page_shift = umem_odp->page_shift; /* * owning_process is allowed to be NULL, this means somehow the mm is * existing beyond the lifetime of the originating process.. Presumably * mmget_not_zero will fail in this case. */ owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID); if (!owning_process || !mmget_not_zero(owning_mm)) { ret = -EINVAL; goto out_put_task; } range.notifier = &umem_odp->notifier; range.start = ALIGN_DOWN(user_virt, 1UL << page_shift); range.end = ALIGN(user_virt + bcnt, 1UL << page_shift); pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT; num_pfns = (range.end - range.start) >> PAGE_SHIFT; if (fault) { range.default_flags = HMM_PFN_REQ_FAULT; if (access_mask & ODP_WRITE_ALLOWED_BIT) range.default_flags |= HMM_PFN_REQ_WRITE; } range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]); timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); retry: current_seq = range.notifier_seq = mmu_interval_read_begin(&umem_odp->notifier); mmap_read_lock(owning_mm); ret = hmm_range_fault(&range); mmap_read_unlock(owning_mm); if (unlikely(ret)) { if (ret == -EBUSY && !time_after(jiffies, timeout)) goto retry; goto out_put_mm; } start_idx = (range.start - ib_umem_start(umem_odp)) >> page_shift; dma_index = start_idx; mutex_lock(&umem_odp->umem_mutex); if (mmu_interval_read_retry(&umem_odp->notifier, current_seq)) { mutex_unlock(&umem_odp->umem_mutex); goto retry; } for (pfn_index = 0; pfn_index < num_pfns; pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) { if (fault) { /* * Since we asked for hmm_range_fault() to populate * pages it shouldn't return an error entry on success. */ WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR); WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)); } else { if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) { WARN_ON(umem_odp->dma_list[dma_index]); continue; } access_mask = ODP_READ_ALLOWED_BIT; if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE) access_mask |= ODP_WRITE_ALLOWED_BIT; } hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]); /* If a hugepage was detected and ODP wasn't set for, the umem * page_shift will be used, the opposite case is an error. 
*/ if (hmm_order + PAGE_SHIFT < page_shift) { ret = -EINVAL; ibdev_dbg(umem_odp->umem.ibdev, "%s: un-expected hmm_order %u, page_shift %u\n", __func__, hmm_order, page_shift); break; } ret = ib_umem_odp_map_dma_single_page( umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]), access_mask); if (ret < 0) { ibdev_dbg(umem_odp->umem.ibdev, "ib_umem_odp_map_dma_single_page failed with error %d\n", ret); break; } } /* upon success lock should stay on hold for the callee */ if (!ret) ret = dma_index - start_idx; else mutex_unlock(&umem_odp->umem_mutex); out_put_mm: mmput_async(owning_mm); out_put_task: if (owning_process) put_task_struct(owning_process); return ret; } EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock); void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt, u64 bound) { dma_addr_t dma_addr; dma_addr_t dma; int idx; u64 addr; struct ib_device *dev = umem_odp->umem.ibdev; lockdep_assert_held(&umem_odp->umem_mutex); virt = max_t(u64, virt, ib_umem_start(umem_odp)); bound = min_t(u64, bound, ib_umem_end(umem_odp)); for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) { idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; dma = umem_odp->dma_list[idx]; /* The access flags guaranteed a valid DMA address in case was NULL */ if (dma) { unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT; struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]); dma_addr = dma & ODP_DMA_ADDR_MASK; ib_dma_unmap_page(dev, dma_addr, BIT(umem_odp->page_shift), DMA_BIDIRECTIONAL); if (dma & ODP_WRITE_ALLOWED_BIT) { struct page *head_page = compound_head(page); /* * set_page_dirty prefers being called with * the page lock. However, MMU notifiers are * called sometimes with and sometimes without * the lock. We rely on the umem_mutex instead * to prevent other mmu notifiers from * continuing and allowing the page mapping to * be removed. */ set_page_dirty(head_page); } umem_odp->dma_list[idx] = 0; umem_odp->npages--; } } } EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
linux-master
drivers/infiniband/core/umem_odp.c
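Usage illustration (not part of umem_odp.c above): drivers normally wire these ODP helpers together in two places, an mmu_interval_notifier invalidate callback that tears down mappings under umem_mutex, and a page-fault path that calls ib_umem_odp_map_dma_and_lock() and programs the device from dma_list[] before dropping the mutex. The outline below is only a sketch of that contract: the demo_* names are made up, device-specific steps are reduced to comments, and an in-tree driver such as mlx5 is the authoritative reference.

#include <linux/err.h>
#include <linux/mmu_notifier.h>
#include <rdma/ib_umem_odp.h>

static bool demo_invalidate(struct mmu_interval_notifier *mni,
			    const struct mmu_notifier_range *range,
			    unsigned long cur_seq)
{
	struct ib_umem_odp *umem_odp =
		container_of(mni, struct ib_umem_odp, notifier);

	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&umem_odp->umem_mutex);
	mmu_interval_set_seq(mni, cur_seq);
	/* A real driver would zap its HW page tables for this range here,
	 * before the DMA mappings are released.
	 */
	ib_umem_odp_unmap_dma_pages(umem_odp, range->start, range->end);
	mutex_unlock(&umem_odp->umem_mutex);
	return true;
}

static const struct mmu_interval_notifier_ops demo_mn_ops = {
	.invalidate = demo_invalidate,
};

/* Hypothetical registration plus first fault for a user VA range. */
static int demo_fault_mr(struct ib_device *ibdev, unsigned long addr,
			 size_t len)
{
	struct ib_umem_odp *odp;
	int npages;

	odp = ib_umem_odp_get(ibdev, addr, len,
			      IB_ACCESS_ON_DEMAND | IB_ACCESS_LOCAL_WRITE,
			      &demo_mn_ops);
	if (IS_ERR(odp))
		return PTR_ERR(odp);

	npages = ib_umem_odp_map_dma_and_lock(odp, addr, len,
					      ODP_READ_ALLOWED_BIT |
					      ODP_WRITE_ALLOWED_BIT,
					      true /* fault pages in */);
	if (npages < 0) {
		ib_umem_odp_release(odp);
		return npages;
	}

	/* umem_mutex is still held on success: program the device
	 * translation from odp->dma_list[] here, then drop the lock.
	 * MR teardown would later call ib_umem_odp_release().
	 */
	mutex_unlock(&odp->umem_mutex);
	return 0;
}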
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/netdevice.h> #include <net/net_namespace.h> #include <linux/security.h> #include <linux/notifier.h> #include <linux/hashtable.h> #include <rdma/rdma_netlink.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> #include <rdma/rdma_counter.h> #include "core_priv.h" #include "restrack.h" MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("core kernel InfiniBand API"); MODULE_LICENSE("Dual BSD/GPL"); struct workqueue_struct *ib_comp_wq; struct workqueue_struct *ib_comp_unbound_wq; struct workqueue_struct *ib_wq; EXPORT_SYMBOL_GPL(ib_wq); static struct workqueue_struct *ib_unreg_wq; /* * Each of the three rwsem locks (devices, clients, client_data) protects the * xarray of the same name. Specifically it allows the caller to assert that * the MARK will/will not be changing under the lock, and for devices and * clients, that the value in the xarray is still a valid pointer. Change of * the MARK is linked to the object state, so holding the lock and testing the * MARK also asserts that the contained object is in a certain state. * * This is used to build a two stage register/unregister flow where objects * can continue to be in the xarray even though they are still in progress to * register/unregister. * * The xarray itself provides additional locking, and restartable iteration, * which is also relied on. * * Locks should not be nested, with the exception of client_data, which is * allowed to nest under the read side of the other two locks. * * The devices_rwsem also protects the device name list, any change or * assignment of device name must also hold the write side to guarantee unique * names. */ /* * devices contains devices that have had their names assigned. The * devices may not be registered. Users that care about the registration * status need to call ib_device_try_get() on the device to ensure it is * registered, and keep it registered, for the required duration. 
* */ static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC); static DECLARE_RWSEM(devices_rwsem); #define DEVICE_REGISTERED XA_MARK_1 static u32 highest_client_id; #define CLIENT_REGISTERED XA_MARK_1 static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC); static DECLARE_RWSEM(clients_rwsem); static void ib_client_put(struct ib_client *client) { if (refcount_dec_and_test(&client->uses)) complete(&client->uses_zero); } /* * If client_data is registered then the corresponding client must also still * be registered. */ #define CLIENT_DATA_REGISTERED XA_MARK_1 unsigned int rdma_dev_net_id; /* * A list of net namespaces is maintained in an xarray. This is necessary * because we can't get the locking right using the existing net ns list. We * would require a init_net callback after the list is updated. */ static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC); /* * rwsem to protect accessing the rdma_nets xarray entries. */ static DECLARE_RWSEM(rdma_nets_rwsem); bool ib_devices_shared_netns = true; module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444); MODULE_PARM_DESC(netns_mode, "Share device among net namespaces; default=1 (shared)"); /** * rdma_dev_access_netns() - Return whether an rdma device can be accessed * from a specified net namespace or not. * @dev: Pointer to rdma device which needs to be checked * @net: Pointer to net namesapce for which access to be checked * * When the rdma device is in shared mode, it ignores the net namespace. * When the rdma device is exclusive to a net namespace, rdma device net * namespace is checked against the specified one. */ bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net) { return (ib_devices_shared_netns || net_eq(read_pnet(&dev->coredev.rdma_net), net)); } EXPORT_SYMBOL(rdma_dev_access_netns); /* * xarray has this behavior where it won't iterate over NULL values stored in * allocated arrays. So we need our own iterator to see all values stored in * the array. This does the same thing as xa_for_each except that it also * returns NULL valued entries if the array is allocating. Simplified to only * work on simple xarrays. 
*/ static void *xan_find_marked(struct xarray *xa, unsigned long *indexp, xa_mark_t filter) { XA_STATE(xas, xa, *indexp); void *entry; rcu_read_lock(); do { entry = xas_find_marked(&xas, ULONG_MAX, filter); if (xa_is_zero(entry)) break; } while (xas_retry(&xas, entry)); rcu_read_unlock(); if (entry) { *indexp = xas.xa_index; if (xa_is_zero(entry)) return NULL; return entry; } return XA_ERROR(-ENOENT); } #define xan_for_each_marked(xa, index, entry, filter) \ for (index = 0, entry = xan_find_marked(xa, &(index), filter); \ !xa_is_err(entry); \ (index)++, entry = xan_find_marked(xa, &(index), filter)) /* RCU hash table mapping netdevice pointers to struct ib_port_data */ static DEFINE_SPINLOCK(ndev_hash_lock); static DECLARE_HASHTABLE(ndev_hash, 5); static void free_netdevs(struct ib_device *ib_dev); static void ib_unregister_work(struct work_struct *work); static void __ib_unregister_device(struct ib_device *device); static int ib_security_change(struct notifier_block *nb, unsigned long event, void *lsm_data); static void ib_policy_change_task(struct work_struct *work); static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task); static void __ibdev_printk(const char *level, const struct ib_device *ibdev, struct va_format *vaf) { if (ibdev && ibdev->dev.parent) dev_printk_emit(level[1] - '0', ibdev->dev.parent, "%s %s %s: %pV", dev_driver_string(ibdev->dev.parent), dev_name(ibdev->dev.parent), dev_name(&ibdev->dev), vaf); else if (ibdev) printk("%s%s: %pV", level, dev_name(&ibdev->dev), vaf); else printk("%s(NULL ib_device): %pV", level, vaf); } void ibdev_printk(const char *level, const struct ib_device *ibdev, const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; __ibdev_printk(level, ibdev, &vaf); va_end(args); } EXPORT_SYMBOL(ibdev_printk); #define define_ibdev_printk_level(func, level) \ void func(const struct ib_device *ibdev, const char *fmt, ...) 
\ { \ struct va_format vaf; \ va_list args; \ \ va_start(args, fmt); \ \ vaf.fmt = fmt; \ vaf.va = &args; \ \ __ibdev_printk(level, ibdev, &vaf); \ \ va_end(args); \ } \ EXPORT_SYMBOL(func); define_ibdev_printk_level(ibdev_emerg, KERN_EMERG); define_ibdev_printk_level(ibdev_alert, KERN_ALERT); define_ibdev_printk_level(ibdev_crit, KERN_CRIT); define_ibdev_printk_level(ibdev_err, KERN_ERR); define_ibdev_printk_level(ibdev_warn, KERN_WARNING); define_ibdev_printk_level(ibdev_notice, KERN_NOTICE); define_ibdev_printk_level(ibdev_info, KERN_INFO); static struct notifier_block ibdev_lsm_nb = { .notifier_call = ib_security_change, }; static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net, struct net *net); /* Pointer to the RCU head at the start of the ib_port_data array */ struct ib_port_data_rcu { struct rcu_head rcu_head; struct ib_port_data pdata[]; }; static void ib_device_check_mandatory(struct ib_device *device) { #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x } static const struct { size_t offset; char *name; } mandatory_table[] = { IB_MANDATORY_FUNC(query_device), IB_MANDATORY_FUNC(query_port), IB_MANDATORY_FUNC(alloc_pd), IB_MANDATORY_FUNC(dealloc_pd), IB_MANDATORY_FUNC(create_qp), IB_MANDATORY_FUNC(modify_qp), IB_MANDATORY_FUNC(destroy_qp), IB_MANDATORY_FUNC(post_send), IB_MANDATORY_FUNC(post_recv), IB_MANDATORY_FUNC(create_cq), IB_MANDATORY_FUNC(destroy_cq), IB_MANDATORY_FUNC(poll_cq), IB_MANDATORY_FUNC(req_notify_cq), IB_MANDATORY_FUNC(get_dma_mr), IB_MANDATORY_FUNC(reg_user_mr), IB_MANDATORY_FUNC(dereg_mr), IB_MANDATORY_FUNC(get_port_immutable) }; int i; device->kverbs_provider = true; for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) { if (!*(void **) ((void *) &device->ops + mandatory_table[i].offset)) { device->kverbs_provider = false; break; } } } /* * Caller must perform ib_device_put() to return the device reference count * when ib_device_get_by_index() returns valid device pointer. */ struct ib_device *ib_device_get_by_index(const struct net *net, u32 index) { struct ib_device *device; down_read(&devices_rwsem); device = xa_load(&devices, index); if (device) { if (!rdma_dev_access_netns(device, net)) { device = NULL; goto out; } if (!ib_device_try_get(device)) device = NULL; } out: up_read(&devices_rwsem); return device; } /** * ib_device_put - Release IB device reference * @device: device whose reference to be released * * ib_device_put() releases reference to the IB device to allow it to be * unregistered and eventually free. */ void ib_device_put(struct ib_device *device) { if (refcount_dec_and_test(&device->refcount)) complete(&device->unreg_completion); } EXPORT_SYMBOL(ib_device_put); static struct ib_device *__ib_device_get_by_name(const char *name) { struct ib_device *device; unsigned long index; xa_for_each (&devices, index, device) if (!strcmp(name, dev_name(&device->dev))) return device; return NULL; } /** * ib_device_get_by_name - Find an IB device by name * @name: The name to look for * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all) * * Find and hold an ib_device by its name. The caller must call * ib_device_put() on the returned pointer. 
*/ struct ib_device *ib_device_get_by_name(const char *name, enum rdma_driver_id driver_id) { struct ib_device *device; down_read(&devices_rwsem); device = __ib_device_get_by_name(name); if (device && driver_id != RDMA_DRIVER_UNKNOWN && device->ops.driver_id != driver_id) device = NULL; if (device) { if (!ib_device_try_get(device)) device = NULL; } up_read(&devices_rwsem); return device; } EXPORT_SYMBOL(ib_device_get_by_name); static int rename_compat_devs(struct ib_device *device) { struct ib_core_device *cdev; unsigned long index; int ret = 0; mutex_lock(&device->compat_devs_mutex); xa_for_each (&device->compat_devs, index, cdev) { ret = device_rename(&cdev->dev, dev_name(&device->dev)); if (ret) { dev_warn(&cdev->dev, "Fail to rename compatdev to new name %s\n", dev_name(&device->dev)); break; } } mutex_unlock(&device->compat_devs_mutex); return ret; } int ib_device_rename(struct ib_device *ibdev, const char *name) { unsigned long index; void *client_data; int ret; down_write(&devices_rwsem); if (!strcmp(name, dev_name(&ibdev->dev))) { up_write(&devices_rwsem); return 0; } if (__ib_device_get_by_name(name)) { up_write(&devices_rwsem); return -EEXIST; } ret = device_rename(&ibdev->dev, name); if (ret) { up_write(&devices_rwsem); return ret; } strscpy(ibdev->name, name, IB_DEVICE_NAME_MAX); ret = rename_compat_devs(ibdev); downgrade_write(&devices_rwsem); down_read(&ibdev->client_data_rwsem); xan_for_each_marked(&ibdev->client_data, index, client_data, CLIENT_DATA_REGISTERED) { struct ib_client *client = xa_load(&clients, index); if (!client || !client->rename) continue; client->rename(ibdev, client_data); } up_read(&ibdev->client_data_rwsem); up_read(&devices_rwsem); return 0; } int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim) { if (use_dim > 1) return -EINVAL; ibdev->use_cq_dim = use_dim; return 0; } static int alloc_name(struct ib_device *ibdev, const char *name) { struct ib_device *device; unsigned long index; struct ida inuse; int rc; int i; lockdep_assert_held_write(&devices_rwsem); ida_init(&inuse); xa_for_each (&devices, index, device) { char buf[IB_DEVICE_NAME_MAX]; if (sscanf(dev_name(&device->dev), name, &i) != 1) continue; if (i < 0 || i >= INT_MAX) continue; snprintf(buf, sizeof buf, name, i); if (strcmp(buf, dev_name(&device->dev)) != 0) continue; rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL); if (rc < 0) goto out; } rc = ida_alloc(&inuse, GFP_KERNEL); if (rc < 0) goto out; rc = dev_set_name(&ibdev->dev, name, rc); out: ida_destroy(&inuse); return rc; } static void ib_device_release(struct device *device) { struct ib_device *dev = container_of(device, struct ib_device, dev); free_netdevs(dev); WARN_ON(refcount_read(&dev->refcount)); if (dev->hw_stats_data) ib_device_release_hw_stats(dev->hw_stats_data); if (dev->port_data) { ib_cache_release_one(dev); ib_security_release_port_pkey_list(dev); rdma_counter_release(dev); kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu, pdata[0]), rcu_head); } mutex_destroy(&dev->unregistration_lock); mutex_destroy(&dev->compat_devs_mutex); xa_destroy(&dev->compat_devs); xa_destroy(&dev->client_data); kfree_rcu(dev, rcu_head); } static int ib_device_uevent(const struct device *device, struct kobj_uevent_env *env) { if (add_uevent_var(env, "NAME=%s", dev_name(device))) return -ENOMEM; /* * It would be nice to pass the node GUID with the event... 
*/ return 0; } static const void *net_namespace(const struct device *d) { const struct ib_core_device *coredev = container_of(d, struct ib_core_device, dev); return read_pnet(&coredev->rdma_net); } static struct class ib_class = { .name = "infiniband", .dev_release = ib_device_release, .dev_uevent = ib_device_uevent, .ns_type = &net_ns_type_operations, .namespace = net_namespace, }; static void rdma_init_coredev(struct ib_core_device *coredev, struct ib_device *dev, struct net *net) { /* This BUILD_BUG_ON is intended to catch layout change * of union of ib_core_device and device. * dev must be the first element as ib_core and providers * driver uses it. Adding anything in ib_core_device before * device will break this assumption. */ BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) != offsetof(struct ib_device, dev)); coredev->dev.class = &ib_class; coredev->dev.groups = dev->groups; device_initialize(&coredev->dev); coredev->owner = dev; INIT_LIST_HEAD(&coredev->port_list); write_pnet(&coredev->rdma_net, net); } /** * _ib_alloc_device - allocate an IB device struct * @size:size of structure to allocate * * Low-level drivers should use ib_alloc_device() to allocate &struct * ib_device. @size is the size of the structure to be allocated, * including any private data used by the low-level driver. * ib_dealloc_device() must be used to free structures allocated with * ib_alloc_device(). */ struct ib_device *_ib_alloc_device(size_t size) { struct ib_device *device; unsigned int i; if (WARN_ON(size < sizeof(struct ib_device))) return NULL; device = kzalloc(size, GFP_KERNEL); if (!device) return NULL; if (rdma_restrack_init(device)) { kfree(device); return NULL; } rdma_init_coredev(&device->coredev, device, &init_net); INIT_LIST_HEAD(&device->event_handler_list); spin_lock_init(&device->qp_open_list_lock); init_rwsem(&device->event_handler_rwsem); mutex_init(&device->unregistration_lock); /* * client_data needs to be alloc because we don't want our mark to be * destroyed if the user stores NULL in the client data. 
*/ xa_init_flags(&device->client_data, XA_FLAGS_ALLOC); init_rwsem(&device->client_data_rwsem); xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC); mutex_init(&device->compat_devs_mutex); init_completion(&device->unreg_completion); INIT_WORK(&device->unregistration_work, ib_unregister_work); spin_lock_init(&device->cq_pools_lock); for (i = 0; i < ARRAY_SIZE(device->cq_pools); i++) INIT_LIST_HEAD(&device->cq_pools[i]); rwlock_init(&device->cache_lock); device->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) | BIT_ULL(IB_USER_VERBS_CMD_CLOSE_XRCD) | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ) | BIT_ULL(IB_USER_VERBS_CMD_CREATE_XSRQ) | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ) | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST) | BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ) | BIT_ULL(IB_USER_VERBS_CMD_OPEN_QP) | BIT_ULL(IB_USER_VERBS_CMD_OPEN_XRCD) | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ) | BIT_ULL(IB_USER_VERBS_CMD_REG_MR) | BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ); return device; } EXPORT_SYMBOL(_ib_alloc_device); /** * ib_dealloc_device - free an IB device struct * @device:structure to free * * Free a structure allocated with ib_alloc_device(). */ void ib_dealloc_device(struct ib_device *device) { if (device->ops.dealloc_driver) device->ops.dealloc_driver(device); /* * ib_unregister_driver() requires all devices to remain in the xarray * while their ops are callable. The last op we call is dealloc_driver * above. This is needed to create a fence on op callbacks prior to * allowing the driver module to unload. */ down_write(&devices_rwsem); if (xa_load(&devices, device->index) == device) xa_erase(&devices, device->index); up_write(&devices_rwsem); /* Expedite releasing netdev references */ free_netdevs(device); WARN_ON(!xa_empty(&device->compat_devs)); WARN_ON(!xa_empty(&device->client_data)); WARN_ON(refcount_read(&device->refcount)); rdma_restrack_clean(device); /* Balances with device_initialize */ put_device(&device->dev); } EXPORT_SYMBOL(ib_dealloc_device); /* * add_client_context() and remove_client_context() must be safe against * parallel calls on the same device - registration/unregistration of both the * device and client can be occurring in parallel. * * The routines need to be a fence, any caller must not return until the add * or remove is fully completed. */ static int add_client_context(struct ib_device *device, struct ib_client *client) { int ret = 0; if (!device->kverbs_provider && !client->no_kverbs_req) return 0; down_write(&device->client_data_rwsem); /* * So long as the client is registered hold both the client and device * unregistration locks. */ if (!refcount_inc_not_zero(&client->uses)) goto out_unlock; refcount_inc(&device->refcount); /* * Another caller to add_client_context got here first and has already * completely initialized context. 
*/ if (xa_get_mark(&device->client_data, client->client_id, CLIENT_DATA_REGISTERED)) goto out; ret = xa_err(xa_store(&device->client_data, client->client_id, NULL, GFP_KERNEL)); if (ret) goto out; downgrade_write(&device->client_data_rwsem); if (client->add) { if (client->add(device)) { /* * If a client fails to add then the error code is * ignored, but we won't call any more ops on this * client. */ xa_erase(&device->client_data, client->client_id); up_read(&device->client_data_rwsem); ib_device_put(device); ib_client_put(client); return 0; } } /* Readers shall not see a client until add has been completed */ xa_set_mark(&device->client_data, client->client_id, CLIENT_DATA_REGISTERED); up_read(&device->client_data_rwsem); return 0; out: ib_device_put(device); ib_client_put(client); out_unlock: up_write(&device->client_data_rwsem); return ret; } static void remove_client_context(struct ib_device *device, unsigned int client_id) { struct ib_client *client; void *client_data; down_write(&device->client_data_rwsem); if (!xa_get_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED)) { up_write(&device->client_data_rwsem); return; } client_data = xa_load(&device->client_data, client_id); xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED); client = xa_load(&clients, client_id); up_write(&device->client_data_rwsem); /* * Notice we cannot be holding any exclusive locks when calling the * remove callback as the remove callback can recurse back into any * public functions in this module and thus try for any locks those * functions take. * * For this reason clients and drivers should not call the * unregistration functions will holdling any locks. */ if (client->remove) client->remove(device, client_data); xa_erase(&device->client_data, client_id); ib_device_put(device); ib_client_put(client); } static int alloc_port_data(struct ib_device *device) { struct ib_port_data_rcu *pdata_rcu; u32 port; if (device->port_data) return 0; /* This can only be called once the physical port range is defined */ if (WARN_ON(!device->phys_port_cnt)) return -EINVAL; /* Reserve U32_MAX so the logic to go over all the ports is sane */ if (WARN_ON(device->phys_port_cnt == U32_MAX)) return -EINVAL; /* * device->port_data is indexed directly by the port number to make * access to this data as efficient as possible. * * Therefore port_data is declared as a 1 based array with potential * empty slots at the beginning. */ pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata, rdma_end_port(device) + 1), GFP_KERNEL); if (!pdata_rcu) return -ENOMEM; /* * The rcu_head is put in front of the port data array and the stored * pointer is adjusted since we never need to see that member until * kfree_rcu. 
*/ device->port_data = pdata_rcu->pdata; rdma_for_each_port (device, port) { struct ib_port_data *pdata = &device->port_data[port]; pdata->ib_dev = device; spin_lock_init(&pdata->pkey_list_lock); INIT_LIST_HEAD(&pdata->pkey_list); spin_lock_init(&pdata->netdev_lock); INIT_HLIST_NODE(&pdata->ndev_hash_link); } return 0; } static int verify_immutable(const struct ib_device *dev, u32 port) { return WARN_ON(!rdma_cap_ib_mad(dev, port) && rdma_max_mad_size(dev, port) != 0); } static int setup_port_data(struct ib_device *device) { u32 port; int ret; ret = alloc_port_data(device); if (ret) return ret; rdma_for_each_port (device, port) { struct ib_port_data *pdata = &device->port_data[port]; ret = device->ops.get_port_immutable(device, port, &pdata->immutable); if (ret) return ret; if (verify_immutable(device, port)) return -EINVAL; } return 0; } /** * ib_port_immutable_read() - Read rdma port's immutable data * @dev: IB device * @port: port number whose immutable data to read. It starts with index 1 and * valid upto including rdma_end_port(). */ const struct ib_port_immutable* ib_port_immutable_read(struct ib_device *dev, unsigned int port) { WARN_ON(!rdma_is_port_valid(dev, port)); return &dev->port_data[port].immutable; } EXPORT_SYMBOL(ib_port_immutable_read); void ib_get_device_fw_str(struct ib_device *dev, char *str) { if (dev->ops.get_dev_fw_str) dev->ops.get_dev_fw_str(dev, str); else str[0] = '\0'; } EXPORT_SYMBOL(ib_get_device_fw_str); static void ib_policy_change_task(struct work_struct *work) { struct ib_device *dev; unsigned long index; down_read(&devices_rwsem); xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { unsigned int i; rdma_for_each_port (dev, i) { u64 sp; ib_get_cached_subnet_prefix(dev, i, &sp); ib_security_cache_change(dev, i, sp); } } up_read(&devices_rwsem); } static int ib_security_change(struct notifier_block *nb, unsigned long event, void *lsm_data) { if (event != LSM_POLICY_CHANGE) return NOTIFY_DONE; schedule_work(&ib_policy_change_work); ib_mad_agent_security_change(); return NOTIFY_OK; } static void compatdev_release(struct device *dev) { struct ib_core_device *cdev = container_of(dev, struct ib_core_device, dev); kfree(cdev); } static int add_one_compat_dev(struct ib_device *device, struct rdma_dev_net *rnet) { struct ib_core_device *cdev; int ret; lockdep_assert_held(&rdma_nets_rwsem); if (!ib_devices_shared_netns) return 0; /* * Create and add compat device in all namespaces other than where it * is currently bound to. */ if (net_eq(read_pnet(&rnet->net), read_pnet(&device->coredev.rdma_net))) return 0; /* * The first of init_net() or ib_register_device() to take the * compat_devs_mutex wins and gets to add the device. Others will wait * for completion here. 
*/ mutex_lock(&device->compat_devs_mutex); cdev = xa_load(&device->compat_devs, rnet->id); if (cdev) { ret = 0; goto done; } ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL); if (ret) goto done; cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (!cdev) { ret = -ENOMEM; goto cdev_err; } cdev->dev.parent = device->dev.parent; rdma_init_coredev(cdev, device, read_pnet(&rnet->net)); cdev->dev.release = compatdev_release; ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev)); if (ret) goto add_err; ret = device_add(&cdev->dev); if (ret) goto add_err; ret = ib_setup_port_attrs(cdev); if (ret) goto port_err; ret = xa_err(xa_store(&device->compat_devs, rnet->id, cdev, GFP_KERNEL)); if (ret) goto insert_err; mutex_unlock(&device->compat_devs_mutex); return 0; insert_err: ib_free_port_attrs(cdev); port_err: device_del(&cdev->dev); add_err: put_device(&cdev->dev); cdev_err: xa_release(&device->compat_devs, rnet->id); done: mutex_unlock(&device->compat_devs_mutex); return ret; } static void remove_one_compat_dev(struct ib_device *device, u32 id) { struct ib_core_device *cdev; mutex_lock(&device->compat_devs_mutex); cdev = xa_erase(&device->compat_devs, id); mutex_unlock(&device->compat_devs_mutex); if (cdev) { ib_free_port_attrs(cdev); device_del(&cdev->dev); put_device(&cdev->dev); } } static void remove_compat_devs(struct ib_device *device) { struct ib_core_device *cdev; unsigned long index; xa_for_each (&device->compat_devs, index, cdev) remove_one_compat_dev(device, index); } static int add_compat_devs(struct ib_device *device) { struct rdma_dev_net *rnet; unsigned long index; int ret = 0; lockdep_assert_held(&devices_rwsem); down_read(&rdma_nets_rwsem); xa_for_each (&rdma_nets, index, rnet) { ret = add_one_compat_dev(device, rnet); if (ret) break; } up_read(&rdma_nets_rwsem); return ret; } static void remove_all_compat_devs(void) { struct ib_compat_device *cdev; struct ib_device *dev; unsigned long index; down_read(&devices_rwsem); xa_for_each (&devices, index, dev) { unsigned long c_index = 0; /* Hold nets_rwsem so that any other thread modifying this * system param can sync with this thread. */ down_read(&rdma_nets_rwsem); xa_for_each (&dev->compat_devs, c_index, cdev) remove_one_compat_dev(dev, c_index); up_read(&rdma_nets_rwsem); } up_read(&devices_rwsem); } static int add_all_compat_devs(void) { struct rdma_dev_net *rnet; struct ib_device *dev; unsigned long index; int ret = 0; down_read(&devices_rwsem); xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { unsigned long net_index = 0; /* Hold nets_rwsem so that any other thread modifying this * system param can sync with this thread. */ down_read(&rdma_nets_rwsem); xa_for_each (&rdma_nets, net_index, rnet) { ret = add_one_compat_dev(dev, rnet); if (ret) break; } up_read(&rdma_nets_rwsem); } up_read(&devices_rwsem); if (ret) remove_all_compat_devs(); return ret; } int rdma_compatdev_set(u8 enable) { struct rdma_dev_net *rnet; unsigned long index; int ret = 0; down_write(&rdma_nets_rwsem); if (ib_devices_shared_netns == enable) { up_write(&rdma_nets_rwsem); return 0; } /* enable/disable of compat devices is not supported * when more than default init_net exists. 
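 * The xa_for_each() below is only an existence check: it increments
 * ret and breaks on the first entry, so ret stays 0 when only init_net
 * exists and the request is rejected with -EBUSY otherwise.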
*/ xa_for_each (&rdma_nets, index, rnet) { ret++; break; } if (!ret) ib_devices_shared_netns = enable; up_write(&rdma_nets_rwsem); if (ret) return -EBUSY; if (enable) ret = add_all_compat_devs(); else remove_all_compat_devs(); return ret; } static void rdma_dev_exit_net(struct net *net) { struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); struct ib_device *dev; unsigned long index; int ret; down_write(&rdma_nets_rwsem); /* * Prevent the ID from being re-used and hide the id from xa_for_each. */ ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL)); WARN_ON(ret); up_write(&rdma_nets_rwsem); down_read(&devices_rwsem); xa_for_each (&devices, index, dev) { get_device(&dev->dev); /* * Release the devices_rwsem so that pontentially blocking * device_del, doesn't hold the devices_rwsem for too long. */ up_read(&devices_rwsem); remove_one_compat_dev(dev, rnet->id); /* * If the real device is in the NS then move it back to init. */ rdma_dev_change_netns(dev, net, &init_net); put_device(&dev->dev); down_read(&devices_rwsem); } up_read(&devices_rwsem); rdma_nl_net_exit(rnet); xa_erase(&rdma_nets, rnet->id); } static __net_init int rdma_dev_init_net(struct net *net) { struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); unsigned long index; struct ib_device *dev; int ret; write_pnet(&rnet->net, net); ret = rdma_nl_net_init(rnet); if (ret) return ret; /* No need to create any compat devices in default init_net. */ if (net_eq(net, &init_net)) return 0; ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL); if (ret) { rdma_nl_net_exit(rnet); return ret; } down_read(&devices_rwsem); xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { /* Hold nets_rwsem so that netlink command cannot change * system configuration for device sharing mode. */ down_read(&rdma_nets_rwsem); ret = add_one_compat_dev(dev, rnet); up_read(&rdma_nets_rwsem); if (ret) break; } up_read(&devices_rwsem); if (ret) rdma_dev_exit_net(net); return ret; } /* * Assign the unique string device name and the unique device index. This is * undone by ib_dealloc_device. */ static int assign_name(struct ib_device *device, const char *name) { static u32 last_id; int ret; down_write(&devices_rwsem); /* Assign a unique name to the device */ if (strchr(name, '%')) ret = alloc_name(device, name); else ret = dev_set_name(&device->dev, name); if (ret) goto out; if (__ib_device_get_by_name(dev_name(&device->dev))) { ret = -ENFILE; goto out; } strscpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX); ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b, &last_id, GFP_KERNEL); if (ret > 0) ret = 0; out: up_write(&devices_rwsem); return ret; } /* * setup_device() allocates memory and sets up data that requires calling the * device ops, this is the only reason these actions are not done during * ib_alloc_device. It is undone by ib_dealloc_device(). 
*/ static int setup_device(struct ib_device *device) { struct ib_udata uhw = {.outlen = 0, .inlen = 0}; int ret; ib_device_check_mandatory(device); ret = setup_port_data(device); if (ret) { dev_warn(&device->dev, "Couldn't create per-port data\n"); return ret; } memset(&device->attrs, 0, sizeof(device->attrs)); ret = device->ops.query_device(device, &device->attrs, &uhw); if (ret) { dev_warn(&device->dev, "Couldn't query the device attributes\n"); return ret; } return 0; } static void disable_device(struct ib_device *device) { u32 cid; WARN_ON(!refcount_read(&device->refcount)); down_write(&devices_rwsem); xa_clear_mark(&devices, device->index, DEVICE_REGISTERED); up_write(&devices_rwsem); /* * Remove clients in LIFO order, see assign_client_id. This could be * more efficient if xarray learns to reverse iterate. Since no new * clients can be added to this ib_device past this point we only need * the maximum possible client_id value here. */ down_read(&clients_rwsem); cid = highest_client_id; up_read(&clients_rwsem); while (cid) { cid--; remove_client_context(device, cid); } ib_cq_pool_cleanup(device); /* Pairs with refcount_set in enable_device */ ib_device_put(device); wait_for_completion(&device->unreg_completion); /* * compat devices must be removed after device refcount drops to zero. * Otherwise init_net() may add more compatdevs after removing compat * devices and before device is disabled. */ remove_compat_devs(device); } /* * An enabled device is visible to all clients and to all the public facing * APIs that return a device pointer. This always returns with a new get, even * if it fails. */ static int enable_device_and_get(struct ib_device *device) { struct ib_client *client; unsigned long index; int ret = 0; /* * One ref belongs to the xa and the other belongs to this * thread. This is needed to guard against parallel unregistration. */ refcount_set(&device->refcount, 2); down_write(&devices_rwsem); xa_set_mark(&devices, device->index, DEVICE_REGISTERED); /* * By using downgrade_write() we ensure that no other thread can clear * DEVICE_REGISTERED while we are completing the client setup. */ downgrade_write(&devices_rwsem); if (device->ops.enable_driver) { ret = device->ops.enable_driver(device); if (ret) goto out; } down_read(&clients_rwsem); xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) { ret = add_client_context(device, client); if (ret) break; } up_read(&clients_rwsem); if (!ret) ret = add_compat_devs(device); out: up_read(&devices_rwsem); return ret; } static void prevent_dealloc_device(struct ib_device *ib_dev) { } /** * ib_register_device - Register an IB device with IB core * @device: Device to register * @name: unique string device name. This may include a '%' which will * cause a unique index to be added to the passed device name. * @dma_device: pointer to a DMA-capable device. If %NULL, then the IB * device will be used. In this case the caller should fully * setup the ibdev for DMA. This usually means using dma_virt_ops. * * Low-level drivers use ib_register_device() to register their * devices with the IB core. All registered clients will receive a * callback for each device that is added. @device must be allocated * with ib_alloc_device(). * * If the driver uses ops.dealloc_driver and calls any ib_unregister_device() * asynchronously then the device pointer may become freed as soon as this * function returns. 
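 *
 * A minimal driver-side sketch (illustrative; "my_dev", "ibdev" and the
 * device name are hypothetical):
 *
 *      dev = ib_alloc_device(my_dev, ibdev);
 *      if (!dev)
 *              return -ENOMEM;
 *      ... fill in dev->ibdev.ops, phys_port_cnt, etc ...
 *      ret = ib_register_device(&dev->ibdev, "my_hca%d", dma_device);
 *      if (ret)
 *              ib_dealloc_device(&dev->ibdev);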
*/ int ib_register_device(struct ib_device *device, const char *name, struct device *dma_device) { int ret; ret = assign_name(device, name); if (ret) return ret; /* * If the caller does not provide a DMA capable device then the IB core * will set up ib_sge and scatterlist structures that stash the kernel * virtual address into the address field. */ WARN_ON(dma_device && !dma_device->dma_parms); device->dma_device = dma_device; ret = setup_device(device); if (ret) return ret; ret = ib_cache_setup_one(device); if (ret) { dev_warn(&device->dev, "Couldn't set up InfiniBand P_Key/GID cache\n"); return ret; } device->groups[0] = &ib_dev_attr_group; device->groups[1] = device->ops.device_group; ret = ib_setup_device_attrs(device); if (ret) goto cache_cleanup; ib_device_register_rdmacg(device); rdma_counter_init(device); /* * Ensure that ADD uevent is not fired because it * is too early amd device is not initialized yet. */ dev_set_uevent_suppress(&device->dev, true); ret = device_add(&device->dev); if (ret) goto cg_cleanup; ret = ib_setup_port_attrs(&device->coredev); if (ret) { dev_warn(&device->dev, "Couldn't register device with driver model\n"); goto dev_cleanup; } ret = enable_device_and_get(device); if (ret) { void (*dealloc_fn)(struct ib_device *); /* * If we hit this error flow then we don't want to * automatically dealloc the device since the caller is * expected to call ib_dealloc_device() after * ib_register_device() fails. This is tricky due to the * possibility for a parallel unregistration along with this * error flow. Since we have a refcount here we know any * parallel flow is stopped in disable_device and will see the * special dealloc_driver pointer, causing the responsibility to * ib_dealloc_device() to revert back to this thread. */ dealloc_fn = device->ops.dealloc_driver; device->ops.dealloc_driver = prevent_dealloc_device; ib_device_put(device); __ib_unregister_device(device); device->ops.dealloc_driver = dealloc_fn; dev_set_uevent_suppress(&device->dev, false); return ret; } dev_set_uevent_suppress(&device->dev, false); /* Mark for userspace that device is ready */ kobject_uevent(&device->dev.kobj, KOBJ_ADD); ib_device_put(device); return 0; dev_cleanup: device_del(&device->dev); cg_cleanup: dev_set_uevent_suppress(&device->dev, false); ib_device_unregister_rdmacg(device); cache_cleanup: ib_cache_cleanup_one(device); return ret; } EXPORT_SYMBOL(ib_register_device); /* Callers must hold a get on the device. */ static void __ib_unregister_device(struct ib_device *ib_dev) { /* * We have a registration lock so that all the calls to unregister are * fully fenced, once any unregister returns the device is truely * unregistered even if multiple callers are unregistering it at the * same time. This also interacts with the registration flow and * provides sane semantics if register and unregister are racing. */ mutex_lock(&ib_dev->unregistration_lock); if (!refcount_read(&ib_dev->refcount)) goto out; disable_device(ib_dev); /* Expedite removing unregistered pointers from the hash table */ free_netdevs(ib_dev); ib_free_port_attrs(&ib_dev->coredev); device_del(&ib_dev->dev); ib_device_unregister_rdmacg(ib_dev); ib_cache_cleanup_one(ib_dev); /* * Drivers using the new flow may not call ib_dealloc_device except * in error unwind prior to registration success. 
*/ if (ib_dev->ops.dealloc_driver && ib_dev->ops.dealloc_driver != prevent_dealloc_device) { WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1); ib_dealloc_device(ib_dev); } out: mutex_unlock(&ib_dev->unregistration_lock); } /** * ib_unregister_device - Unregister an IB device * @ib_dev: The device to unregister * * Unregister an IB device. All clients will receive a remove callback. * * Callers should call this routine only once, and protect against races with * registration. Typically it should only be called as part of a remove * callback in an implementation of driver core's struct device_driver and * related. * * If ops.dealloc_driver is used then ib_dev will be freed upon return from * this function. */ void ib_unregister_device(struct ib_device *ib_dev) { get_device(&ib_dev->dev); __ib_unregister_device(ib_dev); put_device(&ib_dev->dev); } EXPORT_SYMBOL(ib_unregister_device); /** * ib_unregister_device_and_put - Unregister a device while holding a 'get' * @ib_dev: The device to unregister * * This is the same as ib_unregister_device(), except it includes an internal * ib_device_put() that should match a 'get' obtained by the caller. * * It is safe to call this routine concurrently from multiple threads while * holding the 'get'. When the function returns the device is fully * unregistered. * * Drivers using this flow MUST use the driver_unregister callback to clean up * their resources associated with the device and dealloc it. */ void ib_unregister_device_and_put(struct ib_device *ib_dev) { WARN_ON(!ib_dev->ops.dealloc_driver); get_device(&ib_dev->dev); ib_device_put(ib_dev); __ib_unregister_device(ib_dev); put_device(&ib_dev->dev); } EXPORT_SYMBOL(ib_unregister_device_and_put); /** * ib_unregister_driver - Unregister all IB devices for a driver * @driver_id: The driver to unregister * * This implements a fence for device unregistration. It only returns once all * devices associated with the driver_id have fully completed their * unregistration and returned from ib_unregister_device*(). * * If device's are not yet unregistered it goes ahead and starts unregistering * them. * * This does not block creation of new devices with the given driver_id, that * is the responsibility of the caller. */ void ib_unregister_driver(enum rdma_driver_id driver_id) { struct ib_device *ib_dev; unsigned long index; down_read(&devices_rwsem); xa_for_each (&devices, index, ib_dev) { if (ib_dev->ops.driver_id != driver_id) continue; get_device(&ib_dev->dev); up_read(&devices_rwsem); WARN_ON(!ib_dev->ops.dealloc_driver); __ib_unregister_device(ib_dev); put_device(&ib_dev->dev); down_read(&devices_rwsem); } up_read(&devices_rwsem); } EXPORT_SYMBOL(ib_unregister_driver); static void ib_unregister_work(struct work_struct *work) { struct ib_device *ib_dev = container_of(work, struct ib_device, unregistration_work); __ib_unregister_device(ib_dev); put_device(&ib_dev->dev); } /** * ib_unregister_device_queued - Unregister a device using a work queue * @ib_dev: The device to unregister * * This schedules an asynchronous unregistration using a WQ for the device. A * driver should use this to avoid holding locks while doing unregistration, * such as holding the RTNL lock. * * Drivers using this API must use ib_unregister_driver before module unload * to ensure that all scheduled unregistrations have completed. 
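 *
 * A typical module-exit sketch (illustrative; MY_DRIVER_ID stands for
 * whatever enum rdma_driver_id the driver registered with):
 *
 *      static void __exit my_driver_exit(void)
 *      {
 *              ib_unregister_driver(MY_DRIVER_ID);
 *              ... other cleanup ...
 *      }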
*/ void ib_unregister_device_queued(struct ib_device *ib_dev) { WARN_ON(!refcount_read(&ib_dev->refcount)); WARN_ON(!ib_dev->ops.dealloc_driver); get_device(&ib_dev->dev); if (!queue_work(ib_unreg_wq, &ib_dev->unregistration_work)) put_device(&ib_dev->dev); } EXPORT_SYMBOL(ib_unregister_device_queued); /* * The caller must pass in a device that has the kref held and the refcount * released. If the device is in cur_net and still registered then it is moved * into net. */ static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net, struct net *net) { int ret2 = -EINVAL; int ret; mutex_lock(&device->unregistration_lock); /* * If a device not under ib_device_get() or if the unregistration_lock * is not held, the namespace can be changed, or it can be unregistered. * Check again under the lock. */ if (refcount_read(&device->refcount) == 0 || !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) { ret = -ENODEV; goto out; } kobject_uevent(&device->dev.kobj, KOBJ_REMOVE); disable_device(device); /* * At this point no one can be using the device, so it is safe to * change the namespace. */ write_pnet(&device->coredev.rdma_net, net); down_read(&devices_rwsem); /* * Currently rdma devices are system wide unique. So the device name * is guaranteed free in the new namespace. Publish the new namespace * at the sysfs level. */ ret = device_rename(&device->dev, dev_name(&device->dev)); up_read(&devices_rwsem); if (ret) { dev_warn(&device->dev, "%s: Couldn't rename device after namespace change\n", __func__); /* Try and put things back and re-enable the device */ write_pnet(&device->coredev.rdma_net, cur_net); } ret2 = enable_device_and_get(device); if (ret2) { /* * This shouldn't really happen, but if it does, let the user * retry at later point. So don't disable the device. */ dev_warn(&device->dev, "%s: Couldn't re-enable device after namespace change\n", __func__); } kobject_uevent(&device->dev.kobj, KOBJ_ADD); ib_device_put(device); out: mutex_unlock(&device->unregistration_lock); if (ret) return ret; return ret2; } int ib_device_set_netns_put(struct sk_buff *skb, struct ib_device *dev, u32 ns_fd) { struct net *net; int ret; net = get_net_ns_by_fd(ns_fd); if (IS_ERR(net)) { ret = PTR_ERR(net); goto net_err; } if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { ret = -EPERM; goto ns_err; } /* * All the ib_clients, including uverbs, are reset when the namespace is * changed and this cannot be blocked waiting for userspace to do * something, so disassociation is mandatory. */ if (!dev->ops.disassociate_ucontext || ib_devices_shared_netns) { ret = -EOPNOTSUPP; goto ns_err; } get_device(&dev->dev); ib_device_put(dev); ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net); put_device(&dev->dev); put_net(net); return ret; ns_err: put_net(net); net_err: ib_device_put(dev); return ret; } static struct pernet_operations rdma_dev_net_ops = { .init = rdma_dev_init_net, .exit = rdma_dev_exit_net, .id = &rdma_dev_net_id, .size = sizeof(struct rdma_dev_net), }; static int assign_client_id(struct ib_client *client) { int ret; down_write(&clients_rwsem); /* * The add/remove callbacks must be called in FIFO/LIFO order. To * achieve this we assign client_ids so they are sorted in * registration order. 
*/ client->client_id = highest_client_id; ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL); if (ret) goto out; highest_client_id++; xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED); out: up_write(&clients_rwsem); return ret; } static void remove_client_id(struct ib_client *client) { down_write(&clients_rwsem); xa_erase(&clients, client->client_id); for (; highest_client_id; highest_client_id--) if (xa_load(&clients, highest_client_id - 1)) break; up_write(&clients_rwsem); } /** * ib_register_client - Register an IB client * @client:Client to register * * Upper level users of the IB drivers can use ib_register_client() to * register callbacks for IB device addition and removal. When an IB * device is added, each registered client's add method will be called * (in the order the clients were registered), and when a device is * removed, each client's remove method will be called (in the reverse * order that clients were registered). In addition, when * ib_register_client() is called, the client will receive an add * callback for all devices already registered. */ int ib_register_client(struct ib_client *client) { struct ib_device *device; unsigned long index; int ret; refcount_set(&client->uses, 1); init_completion(&client->uses_zero); ret = assign_client_id(client); if (ret) return ret; down_read(&devices_rwsem); xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) { ret = add_client_context(device, client); if (ret) { up_read(&devices_rwsem); ib_unregister_client(client); return ret; } } up_read(&devices_rwsem); return 0; } EXPORT_SYMBOL(ib_register_client); /** * ib_unregister_client - Unregister an IB client * @client:Client to unregister * * Upper level users use ib_unregister_client() to remove their client * registration. When ib_unregister_client() is called, the client * will receive a remove callback for each IB device still registered. * * This is a full fence, once it returns no client callbacks will be called, * or are running in another thread. */ void ib_unregister_client(struct ib_client *client) { struct ib_device *device; unsigned long index; down_write(&clients_rwsem); ib_client_put(client); xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED); up_write(&clients_rwsem); /* We do not want to have locks while calling client->remove() */ rcu_read_lock(); xa_for_each (&devices, index, device) { if (!ib_device_try_get(device)) continue; rcu_read_unlock(); remove_client_context(device, client->client_id); ib_device_put(device); rcu_read_lock(); } rcu_read_unlock(); /* * remove_client_context() is not a fence, it can return even though a * removal is ongoing. Wait until all removals are completed. 
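 * (The final reference is expected to be dropped by ib_client_put(),
 * which completes uses_zero once client->uses reaches zero.)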
*/ wait_for_completion(&client->uses_zero); remove_client_id(client); } EXPORT_SYMBOL(ib_unregister_client); static int __ib_get_global_client_nl_info(const char *client_name, struct ib_client_nl_info *res) { struct ib_client *client; unsigned long index; int ret = -ENOENT; down_read(&clients_rwsem); xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) { if (strcmp(client->name, client_name) != 0) continue; if (!client->get_global_nl_info) { ret = -EOPNOTSUPP; break; } ret = client->get_global_nl_info(res); if (WARN_ON(ret == -ENOENT)) ret = -EINVAL; if (!ret && res->cdev) get_device(res->cdev); break; } up_read(&clients_rwsem); return ret; } static int __ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name, struct ib_client_nl_info *res) { unsigned long index; void *client_data; int ret = -ENOENT; down_read(&ibdev->client_data_rwsem); xan_for_each_marked (&ibdev->client_data, index, client_data, CLIENT_DATA_REGISTERED) { struct ib_client *client = xa_load(&clients, index); if (!client || strcmp(client->name, client_name) != 0) continue; if (!client->get_nl_info) { ret = -EOPNOTSUPP; break; } ret = client->get_nl_info(ibdev, client_data, res); if (WARN_ON(ret == -ENOENT)) ret = -EINVAL; /* * The cdev is guaranteed valid as long as we are inside the * client_data_rwsem as remove_one can't be called. Keep it * valid for the caller. */ if (!ret && res->cdev) get_device(res->cdev); break; } up_read(&ibdev->client_data_rwsem); return ret; } /** * ib_get_client_nl_info - Fetch the nl_info from a client * @ibdev: IB device * @client_name: Name of the client * @res: Result of the query */ int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name, struct ib_client_nl_info *res) { int ret; if (ibdev) ret = __ib_get_client_nl_info(ibdev, client_name, res); else ret = __ib_get_global_client_nl_info(client_name, res); #ifdef CONFIG_MODULES if (ret == -ENOENT) { request_module("rdma-client-%s", client_name); if (ibdev) ret = __ib_get_client_nl_info(ibdev, client_name, res); else ret = __ib_get_global_client_nl_info(client_name, res); } #endif if (ret) { if (ret == -ENOENT) return -EOPNOTSUPP; return ret; } if (WARN_ON(!res->cdev)) return -EINVAL; return 0; } /** * ib_set_client_data - Set IB client context * @device:Device to set context for * @client:Client to set context for * @data:Context to set * * ib_set_client_data() sets client context data that can be retrieved with * ib_get_client_data(). This can only be called while the client is * registered to the device, once the ib_client remove() callback returns this * cannot be called. */ void ib_set_client_data(struct ib_device *device, struct ib_client *client, void *data) { void *rc; if (WARN_ON(IS_ERR(data))) data = NULL; rc = xa_store(&device->client_data, client->client_id, data, GFP_KERNEL); WARN_ON(xa_is_err(rc)); } EXPORT_SYMBOL(ib_set_client_data); /** * ib_register_event_handler - Register an IB event handler * @event_handler:Handler to register * * ib_register_event_handler() registers an event handler that will be * called back when asynchronous IB events occur (as defined in * chapter 11 of the InfiniBand Architecture Specification). This * callback occurs in workqueue context. 
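 *
 * A minimal sketch (illustrative; my_event_fn is a hypothetical
 * handler function):
 *
 *      INIT_IB_EVENT_HANDLER(&priv->event_handler, ibdev, my_event_fn);
 *      ib_register_event_handler(&priv->event_handler);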
*/ void ib_register_event_handler(struct ib_event_handler *event_handler) { down_write(&event_handler->device->event_handler_rwsem); list_add_tail(&event_handler->list, &event_handler->device->event_handler_list); up_write(&event_handler->device->event_handler_rwsem); } EXPORT_SYMBOL(ib_register_event_handler); /** * ib_unregister_event_handler - Unregister an event handler * @event_handler:Handler to unregister * * Unregister an event handler registered with * ib_register_event_handler(). */ void ib_unregister_event_handler(struct ib_event_handler *event_handler) { down_write(&event_handler->device->event_handler_rwsem); list_del(&event_handler->list); up_write(&event_handler->device->event_handler_rwsem); } EXPORT_SYMBOL(ib_unregister_event_handler); void ib_dispatch_event_clients(struct ib_event *event) { struct ib_event_handler *handler; down_read(&event->device->event_handler_rwsem); list_for_each_entry(handler, &event->device->event_handler_list, list) handler->handler(handler, event); up_read(&event->device->event_handler_rwsem); } static int iw_query_port(struct ib_device *device, u32 port_num, struct ib_port_attr *port_attr) { struct in_device *inetdev; struct net_device *netdev; memset(port_attr, 0, sizeof(*port_attr)); netdev = ib_device_get_netdev(device, port_num); if (!netdev) return -ENODEV; port_attr->max_mtu = IB_MTU_4096; port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu); if (!netif_carrier_ok(netdev)) { port_attr->state = IB_PORT_DOWN; port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; } else { rcu_read_lock(); inetdev = __in_dev_get_rcu(netdev); if (inetdev && inetdev->ifa_list) { port_attr->state = IB_PORT_ACTIVE; port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; } else { port_attr->state = IB_PORT_INIT; port_attr->phys_state = IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING; } rcu_read_unlock(); } dev_put(netdev); return device->ops.query_port(device, port_num, port_attr); } static int __ib_query_port(struct ib_device *device, u32 port_num, struct ib_port_attr *port_attr) { int err; memset(port_attr, 0, sizeof(*port_attr)); err = device->ops.query_port(device, port_num, port_attr); if (err || port_attr->subnet_prefix) return err; if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND) return 0; ib_get_cached_subnet_prefix(device, port_num, &port_attr->subnet_prefix); return 0; } /** * ib_query_port - Query IB port attributes * @device:Device to query * @port_num:Port number to query * @port_attr:Port attributes * * ib_query_port() returns the attributes of a port through the * @port_attr pointer. 
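 *
 * A minimal sketch (illustrative):
 *
 *      struct ib_port_attr attr;
 *
 *      if (!ib_query_port(device, port_num, &attr))
 *              ... inspect attr.state, attr.active_mtu, attr.gid_tbl_len ...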
*/ int ib_query_port(struct ib_device *device, u32 port_num, struct ib_port_attr *port_attr) { if (!rdma_is_port_valid(device, port_num)) return -EINVAL; if (rdma_protocol_iwarp(device, port_num)) return iw_query_port(device, port_num, port_attr); else return __ib_query_port(device, port_num, port_attr); } EXPORT_SYMBOL(ib_query_port); static void add_ndev_hash(struct ib_port_data *pdata) { unsigned long flags; might_sleep(); spin_lock_irqsave(&ndev_hash_lock, flags); if (hash_hashed(&pdata->ndev_hash_link)) { hash_del_rcu(&pdata->ndev_hash_link); spin_unlock_irqrestore(&ndev_hash_lock, flags); /* * We cannot do hash_add_rcu after a hash_del_rcu until the * grace period */ synchronize_rcu(); spin_lock_irqsave(&ndev_hash_lock, flags); } if (pdata->netdev) hash_add_rcu(ndev_hash, &pdata->ndev_hash_link, (uintptr_t)pdata->netdev); spin_unlock_irqrestore(&ndev_hash_lock, flags); } /** * ib_device_set_netdev - Associate the ib_dev with an underlying net_device * @ib_dev: Device to modify * @ndev: net_device to affiliate, may be NULL * @port: IB port the net_device is connected to * * Drivers should use this to link the ib_device to a netdev so the netdev * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be * affiliated with any port. * * The caller must ensure that the given ndev is not unregistered or * unregistering, and that either the ib_device is unregistered or * ib_device_set_netdev() is called with NULL when the ndev sends a * NETDEV_UNREGISTER event. */ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, u32 port) { struct net_device *old_ndev; struct ib_port_data *pdata; unsigned long flags; int ret; /* * Drivers wish to call this before ib_register_driver, so we have to * setup the port data early. 
*/ ret = alloc_port_data(ib_dev); if (ret) return ret; if (!rdma_is_port_valid(ib_dev, port)) return -EINVAL; pdata = &ib_dev->port_data[port]; spin_lock_irqsave(&pdata->netdev_lock, flags); old_ndev = rcu_dereference_protected( pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); if (old_ndev == ndev) { spin_unlock_irqrestore(&pdata->netdev_lock, flags); return 0; } if (old_ndev) netdev_tracker_free(ndev, &pdata->netdev_tracker); if (ndev) netdev_hold(ndev, &pdata->netdev_tracker, GFP_ATOMIC); rcu_assign_pointer(pdata->netdev, ndev); spin_unlock_irqrestore(&pdata->netdev_lock, flags); add_ndev_hash(pdata); if (old_ndev) __dev_put(old_ndev); return 0; } EXPORT_SYMBOL(ib_device_set_netdev); static void free_netdevs(struct ib_device *ib_dev) { unsigned long flags; u32 port; if (!ib_dev->port_data) return; rdma_for_each_port (ib_dev, port) { struct ib_port_data *pdata = &ib_dev->port_data[port]; struct net_device *ndev; spin_lock_irqsave(&pdata->netdev_lock, flags); ndev = rcu_dereference_protected( pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); if (ndev) { spin_lock(&ndev_hash_lock); hash_del_rcu(&pdata->ndev_hash_link); spin_unlock(&ndev_hash_lock); /* * If this is the last dev_put there is still a * synchronize_rcu before the netdev is kfreed, so we * can continue to rely on unlocked pointer * comparisons after the put */ rcu_assign_pointer(pdata->netdev, NULL); netdev_put(ndev, &pdata->netdev_tracker); } spin_unlock_irqrestore(&pdata->netdev_lock, flags); } } struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, u32 port) { struct ib_port_data *pdata; struct net_device *res; if (!rdma_is_port_valid(ib_dev, port)) return NULL; pdata = &ib_dev->port_data[port]; /* * New drivers should use ib_device_set_netdev() not the legacy * get_netdev(). */ if (ib_dev->ops.get_netdev) res = ib_dev->ops.get_netdev(ib_dev, port); else { spin_lock(&pdata->netdev_lock); res = rcu_dereference_protected( pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); if (res) dev_hold(res); spin_unlock(&pdata->netdev_lock); } /* * If we are starting to unregister expedite things by preventing * propagation of an unregistering netdev. */ if (res && res->reg_state != NETREG_REGISTERED) { dev_put(res); return NULL; } return res; } /** * ib_device_get_by_netdev - Find an IB device associated with a netdev * @ndev: netdev to locate * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all) * * Find and hold an ib_device that is associated with a netdev via * ib_device_set_netdev(). The caller must call ib_device_put() on the * returned pointer. */ struct ib_device *ib_device_get_by_netdev(struct net_device *ndev, enum rdma_driver_id driver_id) { struct ib_device *res = NULL; struct ib_port_data *cur; rcu_read_lock(); hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link, (uintptr_t)ndev) { if (rcu_access_pointer(cur->netdev) == ndev && (driver_id == RDMA_DRIVER_UNKNOWN || cur->ib_dev->ops.driver_id == driver_id) && ib_device_try_get(cur->ib_dev)) { res = cur->ib_dev; break; } } rcu_read_unlock(); return res; } EXPORT_SYMBOL(ib_device_get_by_netdev); /** * ib_enum_roce_netdev - enumerate all RoCE ports * @ib_dev : IB device we want to query * @filter: Should we call the callback? 
* @filter_cookie: Cookie passed to filter * @cb: Callback to call for each found RoCE ports * @cookie: Cookie passed back to the callback * * Enumerates all of the physical RoCE ports of ib_dev * which are related to netdevice and calls callback() on each * device for which filter() function returns non zero. */ void ib_enum_roce_netdev(struct ib_device *ib_dev, roce_netdev_filter filter, void *filter_cookie, roce_netdev_callback cb, void *cookie) { u32 port; rdma_for_each_port (ib_dev, port) if (rdma_protocol_roce(ib_dev, port)) { struct net_device *idev = ib_device_get_netdev(ib_dev, port); if (filter(ib_dev, port, idev, filter_cookie)) cb(ib_dev, port, idev, cookie); if (idev) dev_put(idev); } } /** * ib_enum_all_roce_netdevs - enumerate all RoCE devices * @filter: Should we call the callback? * @filter_cookie: Cookie passed to filter * @cb: Callback to call for each found RoCE ports * @cookie: Cookie passed back to the callback * * Enumerates all RoCE devices' physical ports which are related * to netdevices and calls callback() on each device for which * filter() function returns non zero. */ void ib_enum_all_roce_netdevs(roce_netdev_filter filter, void *filter_cookie, roce_netdev_callback cb, void *cookie) { struct ib_device *dev; unsigned long index; down_read(&devices_rwsem); xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie); up_read(&devices_rwsem); } /* * ib_enum_all_devs - enumerate all ib_devices * @cb: Callback to call for each found ib_device * * Enumerates all ib_devices and calls callback() on each device. */ int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb, struct netlink_callback *cb) { unsigned long index; struct ib_device *dev; unsigned int idx = 0; int ret = 0; down_read(&devices_rwsem); xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { if (!rdma_dev_access_netns(dev, sock_net(skb->sk))) continue; ret = nldev_cb(dev, skb, cb, idx); if (ret) break; idx++; } up_read(&devices_rwsem); return ret; } /** * ib_query_pkey - Get P_Key table entry * @device:Device to query * @port_num:Port number to query * @index:P_Key table index to query * @pkey:Returned P_Key * * ib_query_pkey() fetches the specified P_Key table entry. */ int ib_query_pkey(struct ib_device *device, u32 port_num, u16 index, u16 *pkey) { if (!rdma_is_port_valid(device, port_num)) return -EINVAL; if (!device->ops.query_pkey) return -EOPNOTSUPP; return device->ops.query_pkey(device, port_num, index, pkey); } EXPORT_SYMBOL(ib_query_pkey); /** * ib_modify_device - Change IB device attributes * @device:Device to modify * @device_modify_mask:Mask of attributes to change * @device_modify:New attribute values * * ib_modify_device() changes a device's attributes as specified by * the @device_modify_mask and @device_modify structure. */ int ib_modify_device(struct ib_device *device, int device_modify_mask, struct ib_device_modify *device_modify) { if (!device->ops.modify_device) return -EOPNOTSUPP; return device->ops.modify_device(device, device_modify_mask, device_modify); } EXPORT_SYMBOL(ib_modify_device); /** * ib_modify_port - Modifies the attributes for the specified port. * @device: The device to modify. * @port_num: The number of the port to modify. * @port_modify_mask: Mask used to specify which attributes of the port * to change. * @port_modify: New attribute values for the port. * * ib_modify_port() changes a port's attributes as specified by the * @port_modify_mask and @port_modify structure. 
*/ int ib_modify_port(struct ib_device *device, u32 port_num, int port_modify_mask, struct ib_port_modify *port_modify) { int rc; if (!rdma_is_port_valid(device, port_num)) return -EINVAL; if (device->ops.modify_port) rc = device->ops.modify_port(device, port_num, port_modify_mask, port_modify); else if (rdma_protocol_roce(device, port_num) && ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 || (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0)) rc = 0; else rc = -EOPNOTSUPP; return rc; } EXPORT_SYMBOL(ib_modify_port); /** * ib_find_gid - Returns the port number and GID table index where * a specified GID value occurs. Its searches only for IB link layer. * @device: The device to query. * @gid: The GID value to search for. * @port_num: The port number of the device where the GID value was found. * @index: The index into the GID table where the GID was found. This * parameter may be NULL. */ int ib_find_gid(struct ib_device *device, union ib_gid *gid, u32 *port_num, u16 *index) { union ib_gid tmp_gid; u32 port; int ret, i; rdma_for_each_port (device, port) { if (!rdma_protocol_ib(device, port)) continue; for (i = 0; i < device->port_data[port].immutable.gid_tbl_len; ++i) { ret = rdma_query_gid(device, port, i, &tmp_gid); if (ret) continue; if (!memcmp(&tmp_gid, gid, sizeof *gid)) { *port_num = port; if (index) *index = i; return 0; } } } return -ENOENT; } EXPORT_SYMBOL(ib_find_gid); /** * ib_find_pkey - Returns the PKey table index where a specified * PKey value occurs. * @device: The device to query. * @port_num: The port number of the device to search for the PKey. * @pkey: The PKey value to search for. * @index: The index into the PKey table where the PKey was found. */ int ib_find_pkey(struct ib_device *device, u32 port_num, u16 pkey, u16 *index) { int ret, i; u16 tmp_pkey; int partial_ix = -1; for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len; ++i) { ret = ib_query_pkey(device, port_num, i, &tmp_pkey); if (ret) return ret; if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) { /* if there is full-member pkey take it.*/ if (tmp_pkey & 0x8000) { *index = i; return 0; } if (partial_ix < 0) partial_ix = i; } } /*no full-member, if exists take the limited*/ if (partial_ix >= 0) { *index = partial_ix; return 0; } return -ENOENT; } EXPORT_SYMBOL(ib_find_pkey); /** * ib_get_net_dev_by_params() - Return the appropriate net_dev * for a received CM request * @dev: An RDMA device on which the request has been received. * @port: Port number on the RDMA device. * @pkey: The Pkey the request came on. * @gid: A GID that the net_dev uses to communicate. * @addr: Contains the IP address that the request specified as its * destination. 
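 *
 * The matching client is expected to return the net_dev with a
 * reference held; the caller is then responsible for releasing that
 * reference with dev_put() when done.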
* */ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port, u16 pkey, const union ib_gid *gid, const struct sockaddr *addr) { struct net_device *net_dev = NULL; unsigned long index; void *client_data; if (!rdma_protocol_ib(dev, port)) return NULL; /* * Holding the read side guarantees that the client will not become * unregistered while we are calling get_net_dev_by_params() */ down_read(&dev->client_data_rwsem); xan_for_each_marked (&dev->client_data, index, client_data, CLIENT_DATA_REGISTERED) { struct ib_client *client = xa_load(&clients, index); if (!client || !client->get_net_dev_by_params) continue; net_dev = client->get_net_dev_by_params(dev, port, pkey, gid, addr, client_data); if (net_dev) break; } up_read(&dev->client_data_rwsem); return net_dev; } EXPORT_SYMBOL(ib_get_net_dev_by_params); void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) { struct ib_device_ops *dev_ops = &dev->ops; #define SET_DEVICE_OP(ptr, name) \ do { \ if (ops->name) \ if (!((ptr)->name)) \ (ptr)->name = ops->name; \ } while (0) #define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name) if (ops->driver_id != RDMA_DRIVER_UNKNOWN) { WARN_ON(dev_ops->driver_id != RDMA_DRIVER_UNKNOWN && dev_ops->driver_id != ops->driver_id); dev_ops->driver_id = ops->driver_id; } if (ops->owner) { WARN_ON(dev_ops->owner && dev_ops->owner != ops->owner); dev_ops->owner = ops->owner; } if (ops->uverbs_abi_ver) dev_ops->uverbs_abi_ver = ops->uverbs_abi_ver; dev_ops->uverbs_no_driver_id_binding |= ops->uverbs_no_driver_id_binding; SET_DEVICE_OP(dev_ops, add_gid); SET_DEVICE_OP(dev_ops, advise_mr); SET_DEVICE_OP(dev_ops, alloc_dm); SET_DEVICE_OP(dev_ops, alloc_hw_device_stats); SET_DEVICE_OP(dev_ops, alloc_hw_port_stats); SET_DEVICE_OP(dev_ops, alloc_mr); SET_DEVICE_OP(dev_ops, alloc_mr_integrity); SET_DEVICE_OP(dev_ops, alloc_mw); SET_DEVICE_OP(dev_ops, alloc_pd); SET_DEVICE_OP(dev_ops, alloc_rdma_netdev); SET_DEVICE_OP(dev_ops, alloc_ucontext); SET_DEVICE_OP(dev_ops, alloc_xrcd); SET_DEVICE_OP(dev_ops, attach_mcast); SET_DEVICE_OP(dev_ops, check_mr_status); SET_DEVICE_OP(dev_ops, counter_alloc_stats); SET_DEVICE_OP(dev_ops, counter_bind_qp); SET_DEVICE_OP(dev_ops, counter_dealloc); SET_DEVICE_OP(dev_ops, counter_unbind_qp); SET_DEVICE_OP(dev_ops, counter_update_stats); SET_DEVICE_OP(dev_ops, create_ah); SET_DEVICE_OP(dev_ops, create_counters); SET_DEVICE_OP(dev_ops, create_cq); SET_DEVICE_OP(dev_ops, create_flow); SET_DEVICE_OP(dev_ops, create_qp); SET_DEVICE_OP(dev_ops, create_rwq_ind_table); SET_DEVICE_OP(dev_ops, create_srq); SET_DEVICE_OP(dev_ops, create_user_ah); SET_DEVICE_OP(dev_ops, create_wq); SET_DEVICE_OP(dev_ops, dealloc_dm); SET_DEVICE_OP(dev_ops, dealloc_driver); SET_DEVICE_OP(dev_ops, dealloc_mw); SET_DEVICE_OP(dev_ops, dealloc_pd); SET_DEVICE_OP(dev_ops, dealloc_ucontext); SET_DEVICE_OP(dev_ops, dealloc_xrcd); SET_DEVICE_OP(dev_ops, del_gid); SET_DEVICE_OP(dev_ops, dereg_mr); SET_DEVICE_OP(dev_ops, destroy_ah); SET_DEVICE_OP(dev_ops, destroy_counters); SET_DEVICE_OP(dev_ops, destroy_cq); SET_DEVICE_OP(dev_ops, destroy_flow); SET_DEVICE_OP(dev_ops, destroy_flow_action); SET_DEVICE_OP(dev_ops, destroy_qp); SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table); SET_DEVICE_OP(dev_ops, destroy_srq); SET_DEVICE_OP(dev_ops, destroy_wq); SET_DEVICE_OP(dev_ops, device_group); SET_DEVICE_OP(dev_ops, detach_mcast); SET_DEVICE_OP(dev_ops, disassociate_ucontext); SET_DEVICE_OP(dev_ops, drain_rq); SET_DEVICE_OP(dev_ops, drain_sq); SET_DEVICE_OP(dev_ops, enable_driver); 
SET_DEVICE_OP(dev_ops, fill_res_cm_id_entry); SET_DEVICE_OP(dev_ops, fill_res_cq_entry); SET_DEVICE_OP(dev_ops, fill_res_cq_entry_raw); SET_DEVICE_OP(dev_ops, fill_res_mr_entry); SET_DEVICE_OP(dev_ops, fill_res_mr_entry_raw); SET_DEVICE_OP(dev_ops, fill_res_qp_entry); SET_DEVICE_OP(dev_ops, fill_res_qp_entry_raw); SET_DEVICE_OP(dev_ops, fill_stat_mr_entry); SET_DEVICE_OP(dev_ops, get_dev_fw_str); SET_DEVICE_OP(dev_ops, get_dma_mr); SET_DEVICE_OP(dev_ops, get_hw_stats); SET_DEVICE_OP(dev_ops, get_link_layer); SET_DEVICE_OP(dev_ops, get_netdev); SET_DEVICE_OP(dev_ops, get_numa_node); SET_DEVICE_OP(dev_ops, get_port_immutable); SET_DEVICE_OP(dev_ops, get_vector_affinity); SET_DEVICE_OP(dev_ops, get_vf_config); SET_DEVICE_OP(dev_ops, get_vf_guid); SET_DEVICE_OP(dev_ops, get_vf_stats); SET_DEVICE_OP(dev_ops, iw_accept); SET_DEVICE_OP(dev_ops, iw_add_ref); SET_DEVICE_OP(dev_ops, iw_connect); SET_DEVICE_OP(dev_ops, iw_create_listen); SET_DEVICE_OP(dev_ops, iw_destroy_listen); SET_DEVICE_OP(dev_ops, iw_get_qp); SET_DEVICE_OP(dev_ops, iw_reject); SET_DEVICE_OP(dev_ops, iw_rem_ref); SET_DEVICE_OP(dev_ops, map_mr_sg); SET_DEVICE_OP(dev_ops, map_mr_sg_pi); SET_DEVICE_OP(dev_ops, mmap); SET_DEVICE_OP(dev_ops, mmap_free); SET_DEVICE_OP(dev_ops, modify_ah); SET_DEVICE_OP(dev_ops, modify_cq); SET_DEVICE_OP(dev_ops, modify_device); SET_DEVICE_OP(dev_ops, modify_hw_stat); SET_DEVICE_OP(dev_ops, modify_port); SET_DEVICE_OP(dev_ops, modify_qp); SET_DEVICE_OP(dev_ops, modify_srq); SET_DEVICE_OP(dev_ops, modify_wq); SET_DEVICE_OP(dev_ops, peek_cq); SET_DEVICE_OP(dev_ops, poll_cq); SET_DEVICE_OP(dev_ops, port_groups); SET_DEVICE_OP(dev_ops, post_recv); SET_DEVICE_OP(dev_ops, post_send); SET_DEVICE_OP(dev_ops, post_srq_recv); SET_DEVICE_OP(dev_ops, process_mad); SET_DEVICE_OP(dev_ops, query_ah); SET_DEVICE_OP(dev_ops, query_device); SET_DEVICE_OP(dev_ops, query_gid); SET_DEVICE_OP(dev_ops, query_pkey); SET_DEVICE_OP(dev_ops, query_port); SET_DEVICE_OP(dev_ops, query_qp); SET_DEVICE_OP(dev_ops, query_srq); SET_DEVICE_OP(dev_ops, query_ucontext); SET_DEVICE_OP(dev_ops, rdma_netdev_get_params); SET_DEVICE_OP(dev_ops, read_counters); SET_DEVICE_OP(dev_ops, reg_dm_mr); SET_DEVICE_OP(dev_ops, reg_user_mr); SET_DEVICE_OP(dev_ops, reg_user_mr_dmabuf); SET_DEVICE_OP(dev_ops, req_notify_cq); SET_DEVICE_OP(dev_ops, rereg_user_mr); SET_DEVICE_OP(dev_ops, resize_cq); SET_DEVICE_OP(dev_ops, set_vf_guid); SET_DEVICE_OP(dev_ops, set_vf_link_state); SET_OBJ_SIZE(dev_ops, ib_ah); SET_OBJ_SIZE(dev_ops, ib_counters); SET_OBJ_SIZE(dev_ops, ib_cq); SET_OBJ_SIZE(dev_ops, ib_mw); SET_OBJ_SIZE(dev_ops, ib_pd); SET_OBJ_SIZE(dev_ops, ib_qp); SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table); SET_OBJ_SIZE(dev_ops, ib_srq); SET_OBJ_SIZE(dev_ops, ib_ucontext); SET_OBJ_SIZE(dev_ops, ib_xrcd); } EXPORT_SYMBOL(ib_set_device_ops); #ifdef CONFIG_INFINIBAND_VIRT_DMA int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents) { struct scatterlist *s; int i; for_each_sg(sg, s, nents, i) { sg_dma_address(s) = (uintptr_t)sg_virt(s); sg_dma_len(s) = s->length; } return nents; } EXPORT_SYMBOL(ib_dma_virt_map_sg); #endif /* CONFIG_INFINIBAND_VIRT_DMA */ static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = { [RDMA_NL_LS_OP_RESOLVE] = { .doit = ib_nl_handle_resolve_resp, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NL_LS_OP_SET_TIMEOUT] = { .doit = ib_nl_handle_set_timeout, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NL_LS_OP_IP_RESOLVE] = { .doit = ib_nl_handle_ip_res_resp, .flags = RDMA_NL_ADMIN_PERM, }, }; static int __init 
ib_core_init(void) { int ret = -ENOMEM; ib_wq = alloc_workqueue("infiniband", 0, 0); if (!ib_wq) return -ENOMEM; ib_unreg_wq = alloc_workqueue("ib-unreg-wq", WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); if (!ib_unreg_wq) goto err; ib_comp_wq = alloc_workqueue("ib-comp-wq", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0); if (!ib_comp_wq) goto err_unbound; ib_comp_unbound_wq = alloc_workqueue("ib-comp-unb-wq", WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE); if (!ib_comp_unbound_wq) goto err_comp; ret = class_register(&ib_class); if (ret) { pr_warn("Couldn't create InfiniBand device class\n"); goto err_comp_unbound; } rdma_nl_init(); ret = addr_init(); if (ret) { pr_warn("Couldn't init IB address resolution\n"); goto err_ibnl; } ret = ib_mad_init(); if (ret) { pr_warn("Couldn't init IB MAD\n"); goto err_addr; } ret = ib_sa_init(); if (ret) { pr_warn("Couldn't init SA\n"); goto err_mad; } ret = register_blocking_lsm_notifier(&ibdev_lsm_nb); if (ret) { pr_warn("Couldn't register LSM notifier. ret %d\n", ret); goto err_sa; } ret = register_pernet_device(&rdma_dev_net_ops); if (ret) { pr_warn("Couldn't init compat dev. ret %d\n", ret); goto err_compat; } nldev_init(); rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table); ret = roce_gid_mgmt_init(); if (ret) { pr_warn("Couldn't init RoCE GID management\n"); goto err_parent; } return 0; err_parent: rdma_nl_unregister(RDMA_NL_LS); nldev_exit(); unregister_pernet_device(&rdma_dev_net_ops); err_compat: unregister_blocking_lsm_notifier(&ibdev_lsm_nb); err_sa: ib_sa_cleanup(); err_mad: ib_mad_cleanup(); err_addr: addr_cleanup(); err_ibnl: class_unregister(&ib_class); err_comp_unbound: destroy_workqueue(ib_comp_unbound_wq); err_comp: destroy_workqueue(ib_comp_wq); err_unbound: destroy_workqueue(ib_unreg_wq); err: destroy_workqueue(ib_wq); return ret; } static void __exit ib_core_cleanup(void) { roce_gid_mgmt_cleanup(); rdma_nl_unregister(RDMA_NL_LS); nldev_exit(); unregister_pernet_device(&rdma_dev_net_ops); unregister_blocking_lsm_notifier(&ibdev_lsm_nb); ib_sa_cleanup(); ib_mad_cleanup(); addr_cleanup(); rdma_nl_exit(); class_unregister(&ib_class); destroy_workqueue(ib_comp_unbound_wq); destroy_workqueue(ib_comp_wq); /* Make sure that any pending umem accounting work is done. */ destroy_workqueue(ib_wq); destroy_workqueue(ib_unreg_wq); WARN_ON(!xa_empty(&clients)); WARN_ON(!xa_empty(&devices)); } MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4); /* ib core relies on netdev stack to first register net_ns_type_operations * ns kobject type before ib_core initialization. */ fs_initcall(ib_core_init); module_exit(ib_core_cleanup);
linux-master
drivers/infiniband/core/device.c
/* * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <rdma/uverbs_std_types.h> #include "rdma_core.h" #include "uverbs.h" #include "restrack.h" static int uverbs_free_cq(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) { struct ib_cq *cq = uobject->object; struct ib_uverbs_event_queue *ev_queue = cq->cq_context; struct ib_ucq_object *ucq = container_of(uobject, struct ib_ucq_object, uevent.uobject); int ret; ret = ib_destroy_cq_user(cq, &attrs->driver_udata); if (ret) return ret; ib_uverbs_release_ucq( ev_queue ? 
container_of(ev_queue, struct ib_uverbs_completion_event_file, ev_queue) : NULL, ucq); return 0; } static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)( struct uverbs_attr_bundle *attrs) { struct ib_ucq_object *obj = container_of( uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE), typeof(*obj), uevent.uobject); struct ib_device *ib_dev = attrs->context->device; int ret; u64 user_handle; struct ib_cq_init_attr attr = {}; struct ib_cq *cq; struct ib_uverbs_completion_event_file *ev_file = NULL; struct ib_uobject *ev_file_uobj; if (!ib_dev->ops.create_cq || !ib_dev->ops.destroy_cq) return -EOPNOTSUPP; ret = uverbs_copy_from(&attr.comp_vector, attrs, UVERBS_ATTR_CREATE_CQ_COMP_VECTOR); if (!ret) ret = uverbs_copy_from(&attr.cqe, attrs, UVERBS_ATTR_CREATE_CQ_CQE); if (!ret) ret = uverbs_copy_from(&user_handle, attrs, UVERBS_ATTR_CREATE_CQ_USER_HANDLE); if (ret) return ret; ret = uverbs_get_flags32(&attr.flags, attrs, UVERBS_ATTR_CREATE_CQ_FLAGS, IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION | IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN); if (ret) return ret; ev_file_uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL); if (!IS_ERR(ev_file_uobj)) { ev_file = container_of(ev_file_uobj, struct ib_uverbs_completion_event_file, uobj); uverbs_uobject_get(ev_file_uobj); } obj->uevent.event_file = ib_uverbs_get_async_event( attrs, UVERBS_ATTR_CREATE_CQ_EVENT_FD); if (attr.comp_vector >= attrs->ufile->device->num_comp_vectors) { ret = -EINVAL; goto err_event_file; } INIT_LIST_HEAD(&obj->comp_list); INIT_LIST_HEAD(&obj->uevent.event_list); cq = rdma_zalloc_drv_obj(ib_dev, ib_cq); if (!cq) { ret = -ENOMEM; goto err_event_file; } cq->device = ib_dev; cq->uobject = obj; cq->comp_handler = ib_uverbs_comp_handler; cq->event_handler = ib_uverbs_cq_event_handler; cq->cq_context = ev_file ? 
&ev_file->ev_queue : NULL; atomic_set(&cq->usecnt, 0); rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); rdma_restrack_set_name(&cq->res, NULL); ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata); if (ret) goto err_free; obj->uevent.uobject.object = cq; obj->uevent.uobject.user_handle = user_handle; rdma_restrack_add(&cq->res); uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE); ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe, sizeof(cq->cqe)); return ret; err_free: rdma_restrack_put(&cq->res); kfree(cq); err_event_file: if (obj->uevent.event_file) uverbs_uobject_put(&obj->uevent.event_file->uobj); if (ev_file) uverbs_uobject_put(ev_file_uobj); return ret; }; DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_CQ_CREATE, UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_CQ_HANDLE, UVERBS_OBJECT_CQ, UVERBS_ACCESS_NEW, UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_CQE, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_USER_HANDLE, UVERBS_ATTR_TYPE(u64), UA_MANDATORY), UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL, UVERBS_OBJECT_COMP_CHANNEL, UVERBS_ACCESS_READ, UA_OPTIONAL), UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_COMP_VECTOR, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_CQ_FLAGS, enum ib_uverbs_ex_create_cq_flags), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_CQ_RESP_CQE, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_CQ_EVENT_FD, UVERBS_OBJECT_ASYNC_EVENT, UVERBS_ACCESS_READ, UA_OPTIONAL), UVERBS_ATTR_UHW()); static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)( struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_CQ_HANDLE); struct ib_ucq_object *obj = container_of(uobj, struct ib_ucq_object, uevent.uobject); struct ib_uverbs_destroy_cq_resp resp = { .comp_events_reported = obj->comp_events_reported, .async_events_reported = obj->uevent.events_reported }; return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_CQ_RESP, &resp, sizeof(resp)); } DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_CQ_DESTROY, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_CQ_HANDLE, UVERBS_OBJECT_CQ, UVERBS_ACCESS_DESTROY, UA_MANDATORY), UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_CQ_RESP, UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_cq_resp), UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_CQ, UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object), uverbs_free_cq), &UVERBS_METHOD(UVERBS_METHOD_CQ_CREATE), &UVERBS_METHOD(UVERBS_METHOD_CQ_DESTROY) ); const struct uapi_definition uverbs_def_obj_cq[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_CQ, UAPI_DEF_OBJ_NEEDS_FN(destroy_cq)), {} };
linux-master
drivers/infiniband/core/uverbs_std_types_cq.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2015 HGST, a Western Digital Company. */ #include <linux/err.h> #include <linux/slab.h> #include <rdma/ib_verbs.h> #include "core_priv.h" #include <trace/events/rdma_core.h> /* Max size for shared CQ, may require tuning */ #define IB_MAX_SHARED_CQ_SZ 4096U /* # of WCs to poll for with a single call to ib_poll_cq */ #define IB_POLL_BATCH 16 #define IB_POLL_BATCH_DIRECT 8 /* # of WCs to iterate over before yielding */ #define IB_POLL_BUDGET_IRQ 256 #define IB_POLL_BUDGET_WORKQUEUE 65536 #define IB_POLL_FLAGS \ (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) static const struct dim_cq_moder rdma_dim_prof[RDMA_DIM_PARAMS_NUM_PROFILES] = { {1, 0, 1, 0}, {1, 0, 4, 0}, {2, 0, 4, 0}, {2, 0, 8, 0}, {4, 0, 8, 0}, {16, 0, 8, 0}, {16, 0, 16, 0}, {32, 0, 16, 0}, {32, 0, 32, 0}, }; static void ib_cq_rdma_dim_work(struct work_struct *w) { struct dim *dim = container_of(w, struct dim, work); struct ib_cq *cq = dim->priv; u16 usec = rdma_dim_prof[dim->profile_ix].usec; u16 comps = rdma_dim_prof[dim->profile_ix].comps; dim->state = DIM_START_MEASURE; trace_cq_modify(cq, comps, usec); cq->device->ops.modify_cq(cq, comps, usec); } static void rdma_dim_init(struct ib_cq *cq) { struct dim *dim; if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim || cq->poll_ctx == IB_POLL_DIRECT) return; dim = kzalloc(sizeof(struct dim), GFP_KERNEL); if (!dim) return; dim->state = DIM_START_MEASURE; dim->tune_state = DIM_GOING_RIGHT; dim->profile_ix = RDMA_DIM_START_PROFILE; dim->priv = cq; cq->dim = dim; INIT_WORK(&dim->work, ib_cq_rdma_dim_work); } static void rdma_dim_destroy(struct ib_cq *cq) { if (!cq->dim) return; cancel_work_sync(&cq->dim->work); kfree(cq->dim); } static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc) { int rc; rc = ib_poll_cq(cq, num_entries, wc); trace_cq_poll(cq, num_entries, rc); return rc; } static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, int batch) { int i, n, completed = 0; trace_cq_process(cq); /* * budget might be (-1) if the caller does not * want to bound this call, thus we need unsigned * minimum here. */ while ((n = __poll_cq(cq, min_t(u32, batch, budget - completed), wcs)) > 0) { for (i = 0; i < n; i++) { struct ib_wc *wc = &wcs[i]; if (wc->wr_cqe) wc->wr_cqe->done(cq, wc); else WARN_ON_ONCE(wc->status == IB_WC_SUCCESS); } completed += n; if (n != batch || (budget != -1 && completed >= budget)) break; } return completed; } /** * ib_process_cq_direct - process a CQ in caller context * @cq: CQ to process * @budget: number of CQEs to poll for * * This function is used to process all outstanding CQ entries. * It does not offload CQ processing to a different context and does * not ask for completion interrupts from the HCA. * Using direct processing on CQ with non IB_POLL_DIRECT type may trigger * concurrent processing. * * Note: do not pass -1 as %budget unless it is guaranteed that the number * of completions that will be processed is small. 
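 *
 * A minimal caller-context polling sketch (illustrative):
 *
 *      do {
 *              n = ib_process_cq_direct(cq, budget);
 *              ... completions were dispatched via wc->wr_cqe->done() ...
 *      } while (n > 0);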
*/ int ib_process_cq_direct(struct ib_cq *cq, int budget) { struct ib_wc wcs[IB_POLL_BATCH_DIRECT]; return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT); } EXPORT_SYMBOL(ib_process_cq_direct); static void ib_cq_completion_direct(struct ib_cq *cq, void *private) { WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq); } static int ib_poll_handler(struct irq_poll *iop, int budget) { struct ib_cq *cq = container_of(iop, struct ib_cq, iop); struct dim *dim = cq->dim; int completed; completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH); if (completed < budget) { irq_poll_complete(&cq->iop); if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) { trace_cq_reschedule(cq); irq_poll_sched(&cq->iop); } } if (dim) rdma_dim(dim, completed); return completed; } static void ib_cq_completion_softirq(struct ib_cq *cq, void *private) { trace_cq_schedule(cq); irq_poll_sched(&cq->iop); } static void ib_cq_poll_work(struct work_struct *work) { struct ib_cq *cq = container_of(work, struct ib_cq, work); int completed; completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc, IB_POLL_BATCH); if (completed >= IB_POLL_BUDGET_WORKQUEUE || ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) queue_work(cq->comp_wq, &cq->work); else if (cq->dim) rdma_dim(cq->dim, completed); } static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private) { trace_cq_schedule(cq); queue_work(cq->comp_wq, &cq->work); } /** * __ib_alloc_cq - allocate a completion queue * @dev: device to allocate the CQ for * @private: driver private data, accessible from cq->cq_context * @nr_cqe: number of CQEs to allocate * @comp_vector: HCA completion vectors for this CQ * @poll_ctx: context to poll the CQ from. * @caller: module owner name. * * This is the proper interface to allocate a CQ for in-kernel users. A * CQ allocated with this interface will automatically be polled from the * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id * to use this CQ abstraction. */ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx, const char *caller) { struct ib_cq_init_attr cq_attr = { .cqe = nr_cqe, .comp_vector = comp_vector, }; struct ib_cq *cq; int ret = -ENOMEM; cq = rdma_zalloc_drv_obj(dev, ib_cq); if (!cq) return ERR_PTR(ret); cq->device = dev; cq->cq_context = private; cq->poll_ctx = poll_ctx; atomic_set(&cq->usecnt, 0); cq->comp_vector = comp_vector; cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL); if (!cq->wc) goto out_free_cq; rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); rdma_restrack_set_name(&cq->res, caller); ret = dev->ops.create_cq(cq, &cq_attr, NULL); if (ret) goto out_free_wc; rdma_dim_init(cq); switch (cq->poll_ctx) { case IB_POLL_DIRECT: cq->comp_handler = ib_cq_completion_direct; break; case IB_POLL_SOFTIRQ: cq->comp_handler = ib_cq_completion_softirq; irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler); ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); break; case IB_POLL_WORKQUEUE: case IB_POLL_UNBOUND_WORKQUEUE: cq->comp_handler = ib_cq_completion_workqueue; INIT_WORK(&cq->work, ib_cq_poll_work); ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ? 
ib_comp_wq : ib_comp_unbound_wq; break; default: ret = -EINVAL; goto out_destroy_cq; } rdma_restrack_add(&cq->res); trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx); return cq; out_destroy_cq: rdma_dim_destroy(cq); cq->device->ops.destroy_cq(cq, NULL); out_free_wc: rdma_restrack_put(&cq->res); kfree(cq->wc); out_free_cq: kfree(cq); trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret); return ERR_PTR(ret); } EXPORT_SYMBOL(__ib_alloc_cq); /** * __ib_alloc_cq_any - allocate a completion queue * @dev: device to allocate the CQ for * @private: driver private data, accessible from cq->cq_context * @nr_cqe: number of CQEs to allocate * @poll_ctx: context to poll the CQ from * @caller: module owner name * * Attempt to spread ULP Completion Queues over each device's interrupt * vectors. A simple best-effort mechanism is used. */ struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private, int nr_cqe, enum ib_poll_context poll_ctx, const char *caller) { static atomic_t counter; int comp_vector = 0; if (dev->num_comp_vectors > 1) comp_vector = atomic_inc_return(&counter) % min_t(int, dev->num_comp_vectors, num_online_cpus()); return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx, caller); } EXPORT_SYMBOL(__ib_alloc_cq_any); /** * ib_free_cq - free a completion queue * @cq: completion queue to free. */ void ib_free_cq(struct ib_cq *cq) { int ret; if (WARN_ON_ONCE(atomic_read(&cq->usecnt))) return; if (WARN_ON_ONCE(cq->cqe_used)) return; switch (cq->poll_ctx) { case IB_POLL_DIRECT: break; case IB_POLL_SOFTIRQ: irq_poll_disable(&cq->iop); break; case IB_POLL_WORKQUEUE: case IB_POLL_UNBOUND_WORKQUEUE: cancel_work_sync(&cq->work); break; default: WARN_ON_ONCE(1); } rdma_dim_destroy(cq); trace_cq_free(cq); ret = cq->device->ops.destroy_cq(cq, NULL); WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail"); rdma_restrack_del(&cq->res); kfree(cq->wc); kfree(cq); } EXPORT_SYMBOL(ib_free_cq); void ib_cq_pool_cleanup(struct ib_device *dev) { struct ib_cq *cq, *n; unsigned int i; for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++) { list_for_each_entry_safe(cq, n, &dev->cq_pools[i], pool_entry) { WARN_ON(cq->cqe_used); list_del(&cq->pool_entry); cq->shared = false; ib_free_cq(cq); } } } static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes, enum ib_poll_context poll_ctx) { LIST_HEAD(tmp_list); unsigned int nr_cqs, i; struct ib_cq *cq, *n; int ret; if (poll_ctx > IB_POLL_LAST_POOL_TYPE) { WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE); return -EINVAL; } /* * Allocate at least as many CQEs as requested, and otherwise * a reasonable batch size so that we can share CQs between * multiple users instead of allocating a larger number of CQs. */ nr_cqes = min_t(unsigned int, dev->attrs.max_cqe, max(nr_cqes, IB_MAX_SHARED_CQ_SZ)); nr_cqs = min_t(unsigned int, dev->num_comp_vectors, num_online_cpus()); for (i = 0; i < nr_cqs; i++) { cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx); if (IS_ERR(cq)) { ret = PTR_ERR(cq); goto out_free_cqs; } cq->shared = true; list_add_tail(&cq->pool_entry, &tmp_list); } spin_lock_irq(&dev->cq_pools_lock); list_splice(&tmp_list, &dev->cq_pools[poll_ctx]); spin_unlock_irq(&dev->cq_pools_lock); return 0; out_free_cqs: list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) { cq->shared = false; ib_free_cq(cq); } return ret; } /** * ib_cq_pool_get() - Find the least used completion queue that matches * a given cpu hint (or least used for wild card affinity) and fits * nr_cqe. 
* @dev: rdma device * @nr_cqe: number of needed cqe entries * @comp_vector_hint: completion vector hint (-1) for the driver to assign * a comp vector based on internal counter * @poll_ctx: cq polling context * * Finds a cq that satisfies @comp_vector_hint and @nr_cqe requirements and * claim entries in it for us. In case there is no available cq, allocate * a new cq with the requirements and add it to the device pool. * IB_POLL_DIRECT cannot be used for shared cqs so it is not a valid value * for @poll_ctx. */ struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe, int comp_vector_hint, enum ib_poll_context poll_ctx) { static unsigned int default_comp_vector; unsigned int vector, num_comp_vectors; struct ib_cq *cq, *found = NULL; int ret; if (poll_ctx > IB_POLL_LAST_POOL_TYPE) { WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE); return ERR_PTR(-EINVAL); } num_comp_vectors = min_t(unsigned int, dev->num_comp_vectors, num_online_cpus()); /* Project the affinty to the device completion vector range */ if (comp_vector_hint < 0) { comp_vector_hint = (READ_ONCE(default_comp_vector) + 1) % num_comp_vectors; WRITE_ONCE(default_comp_vector, comp_vector_hint); } vector = comp_vector_hint % num_comp_vectors; /* * Find the least used CQ with correct affinity and * enough free CQ entries */ while (!found) { spin_lock_irq(&dev->cq_pools_lock); list_for_each_entry(cq, &dev->cq_pools[poll_ctx], pool_entry) { /* * Check to see if we have found a CQ with the * correct completion vector */ if (vector != cq->comp_vector) continue; if (cq->cqe_used + nr_cqe > cq->cqe) continue; found = cq; break; } if (found) { found->cqe_used += nr_cqe; spin_unlock_irq(&dev->cq_pools_lock); return found; } spin_unlock_irq(&dev->cq_pools_lock); /* * Didn't find a match or ran out of CQs in the device * pool, allocate a new array of CQs. */ ret = ib_alloc_cqs(dev, nr_cqe, poll_ctx); if (ret) return ERR_PTR(ret); } return found; } EXPORT_SYMBOL(ib_cq_pool_get); /** * ib_cq_pool_put - Return a CQ taken from a shared pool. * @cq: The CQ to return. * @nr_cqe: The max number of cqes that the user had requested. */ void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe) { if (WARN_ON_ONCE(nr_cqe > cq->cqe_used)) return; spin_lock_irq(&cq->device->cq_pools_lock); cq->cqe_used -= nr_cqe; spin_unlock_irq(&cq->device->cq_pools_lock); } EXPORT_SYMBOL(ib_cq_pool_put);
linux-master
drivers/infiniband/core/cq.c
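The comment on __ib_alloc_cq() above requires in-kernel users to set wr->wr_cqe instead of wr->wr_id so that __ib_process_cq() can dispatch completions through wr_cqe->done(). The following is a minimal, hypothetical ULP fragment sketching that pattern under stated assumptions: the QP, lkey and DMA-mapped buffer are assumed to exist, and in a real ULP the CQ allocated here would be passed as .recv_cq in struct ib_qp_init_attr when the QP is created (it is allocated inline only to keep the sketch short).

/* Hypothetical ULP fragment: allocate a CQ with ib_alloc_cq() and receive
 * completions through the wr_cqe->done() callback that cq.c invokes.
 */
#include <rdma/ib_verbs.h>

struct demo_rx {
	struct ib_cqe	cqe;		/* embedded so done() can recover us */
	u64		dma_addr;	/* assumed already DMA-mapped */
	u32		length;
};

static void demo_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct demo_rx *rx = container_of(wc->wr_cqe, struct demo_rx, cqe);

	if (wc->status != IB_WC_SUCCESS) {
		pr_err("recv failed: %s\n", ib_wc_status_msg(wc->status));
		return;
	}
	pr_info("received %u bytes into buffer at %#llx (len %u)\n",
		wc->byte_len, rx->dma_addr, rx->length);
}

/* @qp and @lkey are assumed to come from the real ULP's setup path. */
static int demo_setup(struct ib_device *dev, struct ib_qp *qp,
		      struct demo_rx *rx, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= rx->dma_addr,
		.length	= rx->length,
		.lkey	= lkey,
	};
	struct ib_recv_wr wr = {
		.wr_cqe		= &rx->cqe,	/* instead of wr_id */
		.sg_list	= &sge,
		.num_sge	= 1,
	};
	struct ib_cq *cq;

	rx->cqe.done = demo_recv_done;

	/* Polled from a workqueue; comp_vector 0 is an arbitrary choice.
	 * A real ULP would create its QP with this CQ as recv_cq.
	 */
	cq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_WORKQUEUE);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	return ib_post_recv(qp, &wr, NULL);
}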
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Intel Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/if_vlan.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/netdevice.h> #include <net/addrconf.h> #include <rdma/ib_cache.h> #include "core_priv.h" struct ib_pkey_cache { int table_len; u16 table[]; }; struct ib_update_work { struct work_struct work; struct ib_event event; bool enforce_security; }; union ib_gid zgid; EXPORT_SYMBOL(zgid); enum gid_attr_find_mask { GID_ATTR_FIND_MASK_GID = 1UL << 0, GID_ATTR_FIND_MASK_NETDEV = 1UL << 1, GID_ATTR_FIND_MASK_DEFAULT = 1UL << 2, GID_ATTR_FIND_MASK_GID_TYPE = 1UL << 3, }; enum gid_table_entry_state { GID_TABLE_ENTRY_INVALID = 1, GID_TABLE_ENTRY_VALID = 2, /* * Indicates that entry is pending to be removed, there may * be active users of this GID entry. * When last user of the GID entry releases reference to it, * GID entry is detached from the table. */ GID_TABLE_ENTRY_PENDING_DEL = 3, }; struct roce_gid_ndev_storage { struct rcu_head rcu_head; struct net_device *ndev; }; struct ib_gid_table_entry { struct kref kref; struct work_struct del_work; struct ib_gid_attr attr; void *context; /* Store the ndev pointer to release reference later on in * call_rcu context because by that time gid_table_entry * and attr might be already freed. So keep a copy of it. * ndev_storage is freed by rcu callback. */ struct roce_gid_ndev_storage *ndev_storage; enum gid_table_entry_state state; }; struct ib_gid_table { int sz; /* In RoCE, adding a GID to the table requires: * (a) Find if this GID is already exists. * (b) Find a free space. * (c) Write the new GID * * Delete requires different set of operations: * (a) Find the GID * (b) Delete it. * **/ /* Any writer to data_vec must hold this lock and the write side of * rwlock. Readers must hold only rwlock. All writers must be in a * sleepable context. */ struct mutex lock; /* rwlock protects data_vec[ix]->state and entry pointer. 
*/ rwlock_t rwlock; struct ib_gid_table_entry **data_vec; /* bit field, each bit indicates the index of default GID */ u32 default_gid_indices; }; static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port) { struct ib_event event; event.device = ib_dev; event.element.port_num = port; event.event = IB_EVENT_GID_CHANGE; ib_dispatch_event_clients(&event); } static const char * const gid_type_str[] = { /* IB/RoCE v1 value is set for IB_GID_TYPE_IB and IB_GID_TYPE_ROCE for * user space compatibility reasons. */ [IB_GID_TYPE_IB] = "IB/RoCE v1", [IB_GID_TYPE_ROCE] = "IB/RoCE v1", [IB_GID_TYPE_ROCE_UDP_ENCAP] = "RoCE v2", }; const char *ib_cache_gid_type_str(enum ib_gid_type gid_type) { if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type]) return gid_type_str[gid_type]; return "Invalid GID type"; } EXPORT_SYMBOL(ib_cache_gid_type_str); /** rdma_is_zero_gid - Check if given GID is zero or not. * @gid: GID to check * Returns true if given GID is zero, returns false otherwise. */ bool rdma_is_zero_gid(const union ib_gid *gid) { return !memcmp(gid, &zgid, sizeof(*gid)); } EXPORT_SYMBOL(rdma_is_zero_gid); /** is_gid_index_default - Check if a given index belongs to * reserved default GIDs or not. * @table: GID table pointer * @index: Index to check in GID table * Returns true if index is one of the reserved default GID index otherwise * returns false. */ static bool is_gid_index_default(const struct ib_gid_table *table, unsigned int index) { return index < 32 && (BIT(index) & table->default_gid_indices); } int ib_cache_gid_parse_type_str(const char *buf) { unsigned int i; size_t len; int err = -EINVAL; len = strlen(buf); if (len == 0) return -EINVAL; if (buf[len - 1] == '\n') len--; for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i) if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) && len == strlen(gid_type_str[i])) { err = i; break; } return err; } EXPORT_SYMBOL(ib_cache_gid_parse_type_str); static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port) { return device->port_data[port].cache.gid; } static bool is_gid_entry_free(const struct ib_gid_table_entry *entry) { return !entry; } static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry) { return entry && entry->state == GID_TABLE_ENTRY_VALID; } static void schedule_free_gid(struct kref *kref) { struct ib_gid_table_entry *entry = container_of(kref, struct ib_gid_table_entry, kref); queue_work(ib_wq, &entry->del_work); } static void put_gid_ndev(struct rcu_head *head) { struct roce_gid_ndev_storage *storage = container_of(head, struct roce_gid_ndev_storage, rcu_head); WARN_ON(!storage->ndev); /* At this point its safe to release netdev reference, * as all callers working on gid_attr->ndev are done * using this netdev. */ dev_put(storage->ndev); kfree(storage); } static void free_gid_entry_locked(struct ib_gid_table_entry *entry) { struct ib_device *device = entry->attr.device; u32 port_num = entry->attr.port_num; struct ib_gid_table *table = rdma_gid_table(device, port_num); dev_dbg(&device->dev, "%s port=%u index=%u gid %pI6\n", __func__, port_num, entry->attr.index, entry->attr.gid.raw); write_lock_irq(&table->rwlock); /* * The only way to avoid overwriting NULL in table is * by comparing if it is same entry in table or not! * If new entry in table is added by the time we free here, * don't overwrite the table entry. 
*/ if (entry == table->data_vec[entry->attr.index]) table->data_vec[entry->attr.index] = NULL; /* Now this index is ready to be allocated */ write_unlock_irq(&table->rwlock); if (entry->ndev_storage) call_rcu(&entry->ndev_storage->rcu_head, put_gid_ndev); kfree(entry); } static void free_gid_entry(struct kref *kref) { struct ib_gid_table_entry *entry = container_of(kref, struct ib_gid_table_entry, kref); free_gid_entry_locked(entry); } /** * free_gid_work - Release reference to the GID entry * @work: Work structure to refer to GID entry which needs to be * deleted. * * free_gid_work() frees the entry from the HCA's hardware table * if provider supports it. It releases reference to netdevice. */ static void free_gid_work(struct work_struct *work) { struct ib_gid_table_entry *entry = container_of(work, struct ib_gid_table_entry, del_work); struct ib_device *device = entry->attr.device; u32 port_num = entry->attr.port_num; struct ib_gid_table *table = rdma_gid_table(device, port_num); mutex_lock(&table->lock); free_gid_entry_locked(entry); mutex_unlock(&table->lock); } static struct ib_gid_table_entry * alloc_gid_entry(const struct ib_gid_attr *attr) { struct ib_gid_table_entry *entry; struct net_device *ndev; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return NULL; ndev = rcu_dereference_protected(attr->ndev, 1); if (ndev) { entry->ndev_storage = kzalloc(sizeof(*entry->ndev_storage), GFP_KERNEL); if (!entry->ndev_storage) { kfree(entry); return NULL; } dev_hold(ndev); entry->ndev_storage->ndev = ndev; } kref_init(&entry->kref); memcpy(&entry->attr, attr, sizeof(*attr)); INIT_WORK(&entry->del_work, free_gid_work); entry->state = GID_TABLE_ENTRY_INVALID; return entry; } static void store_gid_entry(struct ib_gid_table *table, struct ib_gid_table_entry *entry) { entry->state = GID_TABLE_ENTRY_VALID; dev_dbg(&entry->attr.device->dev, "%s port=%u index=%u gid %pI6\n", __func__, entry->attr.port_num, entry->attr.index, entry->attr.gid.raw); lockdep_assert_held(&table->lock); write_lock_irq(&table->rwlock); table->data_vec[entry->attr.index] = entry; write_unlock_irq(&table->rwlock); } static void get_gid_entry(struct ib_gid_table_entry *entry) { kref_get(&entry->kref); } static void put_gid_entry(struct ib_gid_table_entry *entry) { kref_put(&entry->kref, schedule_free_gid); } static void put_gid_entry_locked(struct ib_gid_table_entry *entry) { kref_put(&entry->kref, free_gid_entry); } static int add_roce_gid(struct ib_gid_table_entry *entry) { const struct ib_gid_attr *attr = &entry->attr; int ret; if (!attr->ndev) { dev_err(&attr->device->dev, "%s NULL netdev port=%u index=%u\n", __func__, attr->port_num, attr->index); return -EINVAL; } if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) { ret = attr->device->ops.add_gid(attr, &entry->context); if (ret) { dev_err(&attr->device->dev, "%s GID add failed port=%u index=%u\n", __func__, attr->port_num, attr->index); return ret; } } return 0; } /** * del_gid - Delete GID table entry * * @ib_dev: IB device whose GID entry to be deleted * @port: Port number of the IB device * @table: GID table of the IB device for a port * @ix: GID entry index to delete * */ static void del_gid(struct ib_device *ib_dev, u32 port, struct ib_gid_table *table, int ix) { struct roce_gid_ndev_storage *ndev_storage; struct ib_gid_table_entry *entry; lockdep_assert_held(&table->lock); dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port, ix, table->data_vec[ix]->attr.gid.raw); write_lock_irq(&table->rwlock); entry = table->data_vec[ix]; 
entry->state = GID_TABLE_ENTRY_PENDING_DEL; /* * For non RoCE protocol, GID entry slot is ready to use. */ if (!rdma_protocol_roce(ib_dev, port)) table->data_vec[ix] = NULL; write_unlock_irq(&table->rwlock); if (rdma_cap_roce_gid_table(ib_dev, port)) ib_dev->ops.del_gid(&entry->attr, &entry->context); ndev_storage = entry->ndev_storage; if (ndev_storage) { entry->ndev_storage = NULL; rcu_assign_pointer(entry->attr.ndev, NULL); call_rcu(&ndev_storage->rcu_head, put_gid_ndev); } put_gid_entry_locked(entry); } /** * add_modify_gid - Add or modify GID table entry * * @table: GID table in which GID to be added or modified * @attr: Attributes of the GID * * Returns 0 on success or appropriate error code. It accepts zero * GID addition for non RoCE ports for HCA's who report them as valid * GID. However such zero GIDs are not added to the cache. */ static int add_modify_gid(struct ib_gid_table *table, const struct ib_gid_attr *attr) { struct ib_gid_table_entry *entry; int ret = 0; /* * Invalidate any old entry in the table to make it safe to write to * this index. */ if (is_gid_entry_valid(table->data_vec[attr->index])) del_gid(attr->device, attr->port_num, table, attr->index); /* * Some HCA's report multiple GID entries with only one valid GID, and * leave other unused entries as the zero GID. Convert zero GIDs to * empty table entries instead of storing them. */ if (rdma_is_zero_gid(&attr->gid)) return 0; entry = alloc_gid_entry(attr); if (!entry) return -ENOMEM; if (rdma_protocol_roce(attr->device, attr->port_num)) { ret = add_roce_gid(entry); if (ret) goto done; } store_gid_entry(table, entry); return 0; done: put_gid_entry(entry); return ret; } /* rwlock should be read locked, or lock should be held */ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid, const struct ib_gid_attr *val, bool default_gid, unsigned long mask, int *pempty) { int i = 0; int found = -1; int empty = pempty ? -1 : 0; while (i < table->sz && (found < 0 || empty < 0)) { struct ib_gid_table_entry *data = table->data_vec[i]; struct ib_gid_attr *attr; int curr_index = i; i++; /* find_gid() is used during GID addition where it is expected * to return a free entry slot which is not duplicate. * Free entry slot is requested and returned if pempty is set, * so lookup free slot only if requested. */ if (pempty && empty < 0) { if (is_gid_entry_free(data) && default_gid == is_gid_index_default(table, curr_index)) { /* * Found an invalid (free) entry; allocate it. * If default GID is requested, then our * found slot must be one of the DEFAULT * reserved slots or we fail. * This ensures that only DEFAULT reserved * slots are used for default property GIDs. */ empty = curr_index; } } /* * Additionally find_gid() is used to find valid entry during * lookup operation; so ignore the entries which are marked as * pending for removal and the entries which are marked as * invalid. 
*/ if (!is_gid_entry_valid(data)) continue; if (found >= 0) continue; attr = &data->attr; if (mask & GID_ATTR_FIND_MASK_GID_TYPE && attr->gid_type != val->gid_type) continue; if (mask & GID_ATTR_FIND_MASK_GID && memcmp(gid, &data->attr.gid, sizeof(*gid))) continue; if (mask & GID_ATTR_FIND_MASK_NETDEV && attr->ndev != val->ndev) continue; if (mask & GID_ATTR_FIND_MASK_DEFAULT && is_gid_index_default(table, curr_index) != default_gid) continue; found = curr_index; } if (pempty) *pempty = empty; return found; } static void make_default_gid(struct net_device *dev, union ib_gid *gid) { gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); addrconf_ifid_eui48(&gid->raw[8], dev); } static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr, unsigned long mask, bool default_gid) { struct ib_gid_table *table; int ret = 0; int empty; int ix; /* Do not allow adding zero GID in support of * IB spec version 1.3 section 4.1.1 point (6) and * section 12.7.10 and section 12.7.20 */ if (rdma_is_zero_gid(gid)) return -EINVAL; table = rdma_gid_table(ib_dev, port); mutex_lock(&table->lock); ix = find_gid(table, gid, attr, default_gid, mask, &empty); if (ix >= 0) goto out_unlock; if (empty < 0) { ret = -ENOSPC; goto out_unlock; } attr->device = ib_dev; attr->index = empty; attr->port_num = port; attr->gid = *gid; ret = add_modify_gid(table, attr); if (!ret) dispatch_gid_change_event(ib_dev, port); out_unlock: mutex_unlock(&table->lock); if (ret) pr_warn("%s: unable to add gid %pI6 error=%d\n", __func__, gid->raw, ret); return ret; } int ib_cache_gid_add(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr) { unsigned long mask = GID_ATTR_FIND_MASK_GID | GID_ATTR_FIND_MASK_GID_TYPE | GID_ATTR_FIND_MASK_NETDEV; return __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false); } static int _ib_cache_gid_del(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr, unsigned long mask, bool default_gid) { struct ib_gid_table *table; int ret = 0; int ix; table = rdma_gid_table(ib_dev, port); mutex_lock(&table->lock); ix = find_gid(table, gid, attr, default_gid, mask, NULL); if (ix < 0) { ret = -EINVAL; goto out_unlock; } del_gid(ib_dev, port, table, ix); dispatch_gid_change_event(ib_dev, port); out_unlock: mutex_unlock(&table->lock); if (ret) pr_debug("%s: can't delete gid %pI6 error=%d\n", __func__, gid->raw, ret); return ret; } int ib_cache_gid_del(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr) { unsigned long mask = GID_ATTR_FIND_MASK_GID | GID_ATTR_FIND_MASK_GID_TYPE | GID_ATTR_FIND_MASK_DEFAULT | GID_ATTR_FIND_MASK_NETDEV; return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false); } int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port, struct net_device *ndev) { struct ib_gid_table *table; int ix; bool deleted = false; table = rdma_gid_table(ib_dev, port); mutex_lock(&table->lock); for (ix = 0; ix < table->sz; ix++) { if (is_gid_entry_valid(table->data_vec[ix]) && table->data_vec[ix]->attr.ndev == ndev) { del_gid(ib_dev, port, table, ix); deleted = true; } } mutex_unlock(&table->lock); if (deleted) dispatch_gid_change_event(ib_dev, port); return 0; } /** * rdma_find_gid_by_port - Returns the GID entry attributes when it finds * a valid GID entry for given search parameters. It searches for the specified * GID value in the local software cache. * @ib_dev: The device to query. * @gid: The GID value to search for. 
* @gid_type: The GID type to search for. * @port: The port number of the device where the GID value should be searched. * @ndev: In RoCE, the net device of the device. NULL means ignore. * * Returns sgid attributes if the GID is found with valid reference or * returns ERR_PTR for the error. * The caller must invoke rdma_put_gid_attr() to release the reference. */ const struct ib_gid_attr * rdma_find_gid_by_port(struct ib_device *ib_dev, const union ib_gid *gid, enum ib_gid_type gid_type, u32 port, struct net_device *ndev) { int local_index; struct ib_gid_table *table; unsigned long mask = GID_ATTR_FIND_MASK_GID | GID_ATTR_FIND_MASK_GID_TYPE; struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type}; const struct ib_gid_attr *attr; unsigned long flags; if (!rdma_is_port_valid(ib_dev, port)) return ERR_PTR(-ENOENT); table = rdma_gid_table(ib_dev, port); if (ndev) mask |= GID_ATTR_FIND_MASK_NETDEV; read_lock_irqsave(&table->rwlock, flags); local_index = find_gid(table, gid, &val, false, mask, NULL); if (local_index >= 0) { get_gid_entry(table->data_vec[local_index]); attr = &table->data_vec[local_index]->attr; read_unlock_irqrestore(&table->rwlock, flags); return attr; } read_unlock_irqrestore(&table->rwlock, flags); return ERR_PTR(-ENOENT); } EXPORT_SYMBOL(rdma_find_gid_by_port); /** * rdma_find_gid_by_filter - Returns the GID table attribute where a * specified GID value occurs * @ib_dev: The device to query. * @gid: The GID value to search for. * @port: The port number of the device where the GID value could be * searched. * @filter: The filter function is executed on any matching GID in the table. * If the filter function returns true, the corresponding index is returned, * otherwise, we continue searching the GID table. It's guaranteed that * while filter is executed, ndev field is valid and the structure won't * change. filter is executed in an atomic context. filter must not be NULL. * @context: Private data to pass into the call-back. * * rdma_find_gid_by_filter() searches for the specified GID value * of which the filter function returns true in the port's GID table. 
* */ const struct ib_gid_attr *rdma_find_gid_by_filter( struct ib_device *ib_dev, const union ib_gid *gid, u32 port, bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *, void *), void *context) { const struct ib_gid_attr *res = ERR_PTR(-ENOENT); struct ib_gid_table *table; unsigned long flags; unsigned int i; if (!rdma_is_port_valid(ib_dev, port)) return ERR_PTR(-EINVAL); table = rdma_gid_table(ib_dev, port); read_lock_irqsave(&table->rwlock, flags); for (i = 0; i < table->sz; i++) { struct ib_gid_table_entry *entry = table->data_vec[i]; if (!is_gid_entry_valid(entry)) continue; if (memcmp(gid, &entry->attr.gid, sizeof(*gid))) continue; if (filter(gid, &entry->attr, context)) { get_gid_entry(entry); res = &entry->attr; break; } } read_unlock_irqrestore(&table->rwlock, flags); return res; } static struct ib_gid_table *alloc_gid_table(int sz) { struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL); if (!table) return NULL; table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL); if (!table->data_vec) goto err_free_table; mutex_init(&table->lock); table->sz = sz; rwlock_init(&table->rwlock); return table; err_free_table: kfree(table); return NULL; } static void release_gid_table(struct ib_device *device, struct ib_gid_table *table) { bool leak = false; int i; if (!table) return; for (i = 0; i < table->sz; i++) { if (is_gid_entry_free(table->data_vec[i])) continue; if (kref_read(&table->data_vec[i]->kref) > 1) { dev_err(&device->dev, "GID entry ref leak for index %d ref=%u\n", i, kref_read(&table->data_vec[i]->kref)); leak = true; } } if (leak) return; mutex_destroy(&table->lock); kfree(table->data_vec); kfree(table); } static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port, struct ib_gid_table *table) { int i; if (!table) return; mutex_lock(&table->lock); for (i = 0; i < table->sz; ++i) { if (is_gid_entry_valid(table->data_vec[i])) del_gid(ib_dev, port, table, i); } mutex_unlock(&table->lock); } void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port, struct net_device *ndev, unsigned long gid_type_mask, enum ib_cache_gid_default_mode mode) { union ib_gid gid = { }; struct ib_gid_attr gid_attr; unsigned int gid_type; unsigned long mask; mask = GID_ATTR_FIND_MASK_GID_TYPE | GID_ATTR_FIND_MASK_DEFAULT | GID_ATTR_FIND_MASK_NETDEV; memset(&gid_attr, 0, sizeof(gid_attr)); gid_attr.ndev = ndev; for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) { if (1UL << gid_type & ~gid_type_mask) continue; gid_attr.gid_type = gid_type; if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) { make_default_gid(ndev, &gid); __ib_cache_gid_add(ib_dev, port, &gid, &gid_attr, mask, true); } else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) { _ib_cache_gid_del(ib_dev, port, &gid, &gid_attr, mask, true); } } } static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port, struct ib_gid_table *table) { unsigned int i; unsigned long roce_gid_type_mask; unsigned int num_default_gids; roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port); num_default_gids = hweight_long(roce_gid_type_mask); /* Reserve starting indices for default GIDs */ for (i = 0; i < num_default_gids && i < table->sz; i++) table->default_gid_indices |= BIT(i); } static void gid_table_release_one(struct ib_device *ib_dev) { u32 p; rdma_for_each_port (ib_dev, p) { release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid); ib_dev->port_data[p].cache.gid = NULL; } } static int _gid_table_setup_one(struct ib_device *ib_dev) { struct ib_gid_table *table; u32 rdma_port; 
rdma_for_each_port (ib_dev, rdma_port) { table = alloc_gid_table( ib_dev->port_data[rdma_port].immutable.gid_tbl_len); if (!table) goto rollback_table_setup; gid_table_reserve_default(ib_dev, rdma_port, table); ib_dev->port_data[rdma_port].cache.gid = table; } return 0; rollback_table_setup: gid_table_release_one(ib_dev); return -ENOMEM; } static void gid_table_cleanup_one(struct ib_device *ib_dev) { u32 p; rdma_for_each_port (ib_dev, p) cleanup_gid_table_port(ib_dev, p, ib_dev->port_data[p].cache.gid); } static int gid_table_setup_one(struct ib_device *ib_dev) { int err; err = _gid_table_setup_one(ib_dev); if (err) return err; rdma_roce_rescan_device(ib_dev); return err; } /** * rdma_query_gid - Read the GID content from the GID software cache * @device: Device to query the GID * @port_num: Port number of the device * @index: Index of the GID table entry to read * @gid: Pointer to GID where to store the entry's GID * * rdma_query_gid() only reads the GID entry content for requested device, * port and index. It reads for IB, RoCE and iWarp link layers. It doesn't * hold any reference to the GID table entry in the HCA or software cache. * * Returns 0 on success or appropriate error code. * */ int rdma_query_gid(struct ib_device *device, u32 port_num, int index, union ib_gid *gid) { struct ib_gid_table *table; unsigned long flags; int res; if (!rdma_is_port_valid(device, port_num)) return -EINVAL; table = rdma_gid_table(device, port_num); read_lock_irqsave(&table->rwlock, flags); if (index < 0 || index >= table->sz) { res = -EINVAL; goto done; } if (!is_gid_entry_valid(table->data_vec[index])) { res = -ENOENT; goto done; } memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid)); res = 0; done: read_unlock_irqrestore(&table->rwlock, flags); return res; } EXPORT_SYMBOL(rdma_query_gid); /** * rdma_read_gid_hw_context - Read the HW GID context from GID attribute * @attr: Potinter to the GID attribute * * rdma_read_gid_hw_context() reads the drivers GID HW context corresponding * to the SGID attr. Callers are required to already be holding the reference * to an existing GID entry. * * Returns the HW GID context * */ void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr) { return container_of(attr, struct ib_gid_table_entry, attr)->context; } EXPORT_SYMBOL(rdma_read_gid_hw_context); /** * rdma_find_gid - Returns SGID attributes if the matching GID is found. * @device: The device to query. * @gid: The GID value to search for. * @gid_type: The GID type to search for. * @ndev: In RoCE, the net device of the device. NULL means ignore. * * rdma_find_gid() searches for the specified GID value in the software cache. * * Returns GID attributes if a valid GID is found or returns ERR_PTR for the * error. The caller must invoke rdma_put_gid_attr() to release the reference. 
* */ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device, const union ib_gid *gid, enum ib_gid_type gid_type, struct net_device *ndev) { unsigned long mask = GID_ATTR_FIND_MASK_GID | GID_ATTR_FIND_MASK_GID_TYPE; struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type}; u32 p; if (ndev) mask |= GID_ATTR_FIND_MASK_NETDEV; rdma_for_each_port(device, p) { struct ib_gid_table *table; unsigned long flags; int index; table = device->port_data[p].cache.gid; read_lock_irqsave(&table->rwlock, flags); index = find_gid(table, gid, &gid_attr_val, false, mask, NULL); if (index >= 0) { const struct ib_gid_attr *attr; get_gid_entry(table->data_vec[index]); attr = &table->data_vec[index]->attr; read_unlock_irqrestore(&table->rwlock, flags); return attr; } read_unlock_irqrestore(&table->rwlock, flags); } return ERR_PTR(-ENOENT); } EXPORT_SYMBOL(rdma_find_gid); int ib_get_cached_pkey(struct ib_device *device, u32 port_num, int index, u16 *pkey) { struct ib_pkey_cache *cache; unsigned long flags; int ret = 0; if (!rdma_is_port_valid(device, port_num)) return -EINVAL; read_lock_irqsave(&device->cache_lock, flags); cache = device->port_data[port_num].cache.pkey; if (!cache || index < 0 || index >= cache->table_len) ret = -EINVAL; else *pkey = cache->table[index]; read_unlock_irqrestore(&device->cache_lock, flags); return ret; } EXPORT_SYMBOL(ib_get_cached_pkey); void ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num, u64 *sn_pfx) { unsigned long flags; read_lock_irqsave(&device->cache_lock, flags); *sn_pfx = device->port_data[port_num].cache.subnet_prefix; read_unlock_irqrestore(&device->cache_lock, flags); } EXPORT_SYMBOL(ib_get_cached_subnet_prefix); int ib_find_cached_pkey(struct ib_device *device, u32 port_num, u16 pkey, u16 *index) { struct ib_pkey_cache *cache; unsigned long flags; int i; int ret = -ENOENT; int partial_ix = -1; if (!rdma_is_port_valid(device, port_num)) return -EINVAL; read_lock_irqsave(&device->cache_lock, flags); cache = device->port_data[port_num].cache.pkey; if (!cache) { ret = -EINVAL; goto err; } *index = -1; for (i = 0; i < cache->table_len; ++i) if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) { if (cache->table[i] & 0x8000) { *index = i; ret = 0; break; } else { partial_ix = i; } } if (ret && partial_ix >= 0) { *index = partial_ix; ret = 0; } err: read_unlock_irqrestore(&device->cache_lock, flags); return ret; } EXPORT_SYMBOL(ib_find_cached_pkey); int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num, u16 pkey, u16 *index) { struct ib_pkey_cache *cache; unsigned long flags; int i; int ret = -ENOENT; if (!rdma_is_port_valid(device, port_num)) return -EINVAL; read_lock_irqsave(&device->cache_lock, flags); cache = device->port_data[port_num].cache.pkey; if (!cache) { ret = -EINVAL; goto err; } *index = -1; for (i = 0; i < cache->table_len; ++i) if (cache->table[i] == pkey) { *index = i; ret = 0; break; } err: read_unlock_irqrestore(&device->cache_lock, flags); return ret; } EXPORT_SYMBOL(ib_find_exact_cached_pkey); int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc) { unsigned long flags; int ret = 0; if (!rdma_is_port_valid(device, port_num)) return -EINVAL; read_lock_irqsave(&device->cache_lock, flags); *lmc = device->port_data[port_num].cache.lmc; read_unlock_irqrestore(&device->cache_lock, flags); return ret; } EXPORT_SYMBOL(ib_get_cached_lmc); int ib_get_cached_port_state(struct ib_device *device, u32 port_num, enum ib_port_state *port_state) { unsigned long flags; int ret = 0; if 
(!rdma_is_port_valid(device, port_num)) return -EINVAL; read_lock_irqsave(&device->cache_lock, flags); *port_state = device->port_data[port_num].cache.port_state; read_unlock_irqrestore(&device->cache_lock, flags); return ret; } EXPORT_SYMBOL(ib_get_cached_port_state); /** * rdma_get_gid_attr - Returns GID attributes for a port of a device * at a requested gid_index, if a valid GID entry exists. * @device: The device to query. * @port_num: The port number on the device where the GID value * is to be queried. * @index: Index of the GID table entry whose attributes are to * be queried. * * rdma_get_gid_attr() acquires reference count of gid attributes from the * cached GID table. Caller must invoke rdma_put_gid_attr() to release * reference to gid attribute regardless of link layer. * * Returns pointer to valid gid attribute or ERR_PTR for the appropriate error * code. */ const struct ib_gid_attr * rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index) { const struct ib_gid_attr *attr = ERR_PTR(-ENODATA); struct ib_gid_table *table; unsigned long flags; if (!rdma_is_port_valid(device, port_num)) return ERR_PTR(-EINVAL); table = rdma_gid_table(device, port_num); if (index < 0 || index >= table->sz) return ERR_PTR(-EINVAL); read_lock_irqsave(&table->rwlock, flags); if (!is_gid_entry_valid(table->data_vec[index])) goto done; get_gid_entry(table->data_vec[index]); attr = &table->data_vec[index]->attr; done: read_unlock_irqrestore(&table->rwlock, flags); return attr; } EXPORT_SYMBOL(rdma_get_gid_attr); /** * rdma_query_gid_table - Reads GID table entries of all the ports of a device up to max_entries. * @device: The device to query. * @entries: Entries where GID entries are returned. * @max_entries: Maximum number of entries that can be returned. * Entries array must be allocated to hold max_entries number of entries. * * Returns number of entries on success or appropriate error code. */ ssize_t rdma_query_gid_table(struct ib_device *device, struct ib_uverbs_gid_entry *entries, size_t max_entries) { const struct ib_gid_attr *gid_attr; ssize_t num_entries = 0, ret; struct ib_gid_table *table; u32 port_num, i; struct net_device *ndev; unsigned long flags; rdma_for_each_port(device, port_num) { table = rdma_gid_table(device, port_num); read_lock_irqsave(&table->rwlock, flags); for (i = 0; i < table->sz; i++) { if (!is_gid_entry_valid(table->data_vec[i])) continue; if (num_entries >= max_entries) { ret = -EINVAL; goto err; } gid_attr = &table->data_vec[i]->attr; memcpy(&entries->gid, &gid_attr->gid, sizeof(gid_attr->gid)); entries->gid_index = gid_attr->index; entries->port_num = gid_attr->port_num; entries->gid_type = gid_attr->gid_type; ndev = rcu_dereference_protected( gid_attr->ndev, lockdep_is_held(&table->rwlock)); if (ndev) entries->netdev_ifindex = ndev->ifindex; num_entries++; entries++; } read_unlock_irqrestore(&table->rwlock, flags); } return num_entries; err: read_unlock_irqrestore(&table->rwlock, flags); return ret; } EXPORT_SYMBOL(rdma_query_gid_table); /** * rdma_put_gid_attr - Release reference to the GID attribute * @attr: Pointer to the GID attribute whose reference * needs to be released. * * rdma_put_gid_attr() must be used to release reference whose * reference is acquired using rdma_get_gid_attr() or any APIs * which returns a pointer to the ib_gid_attr regardless of link layer * of IB or RoCE. 
* */ void rdma_put_gid_attr(const struct ib_gid_attr *attr) { struct ib_gid_table_entry *entry = container_of(attr, struct ib_gid_table_entry, attr); put_gid_entry(entry); } EXPORT_SYMBOL(rdma_put_gid_attr); /** * rdma_hold_gid_attr - Get reference to existing GID attribute * * @attr: Pointer to the GID attribute whose reference * needs to be taken. * * Increase the reference count to a GID attribute to keep it from being * freed. Callers are required to already be holding a reference to attribute. * */ void rdma_hold_gid_attr(const struct ib_gid_attr *attr) { struct ib_gid_table_entry *entry = container_of(attr, struct ib_gid_table_entry, attr); get_gid_entry(entry); } EXPORT_SYMBOL(rdma_hold_gid_attr); /** * rdma_read_gid_attr_ndev_rcu - Read GID attribute netdevice * which must be in UP state. * * @attr:Pointer to the GID attribute * * Returns pointer to netdevice if the netdevice was attached to GID and * netdevice is in UP state. Caller must hold RCU lock as this API * reads the netdev flags which can change while netdevice migrates to * different net namespace. Returns ERR_PTR with error code otherwise. * */ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr) { struct ib_gid_table_entry *entry = container_of(attr, struct ib_gid_table_entry, attr); struct ib_device *device = entry->attr.device; struct net_device *ndev = ERR_PTR(-EINVAL); u32 port_num = entry->attr.port_num; struct ib_gid_table *table; unsigned long flags; bool valid; table = rdma_gid_table(device, port_num); read_lock_irqsave(&table->rwlock, flags); valid = is_gid_entry_valid(table->data_vec[attr->index]); if (valid) { ndev = rcu_dereference(attr->ndev); if (!ndev) ndev = ERR_PTR(-ENODEV); } read_unlock_irqrestore(&table->rwlock, flags); return ndev; } EXPORT_SYMBOL(rdma_read_gid_attr_ndev_rcu); static int get_lower_dev_vlan(struct net_device *lower_dev, struct netdev_nested_priv *priv) { u16 *vlan_id = (u16 *)priv->data; if (is_vlan_dev(lower_dev)) *vlan_id = vlan_dev_vlan_id(lower_dev); /* We are interested only in first level vlan device, so * always return 1 to stop iterating over next level devices. */ return 1; } /** * rdma_read_gid_l2_fields - Read the vlan ID and source MAC address * of a GID entry. * * @attr: GID attribute pointer whose L2 fields to be read * @vlan_id: Pointer to vlan id to fill up if the GID entry has * vlan id. It is optional. * @smac: Pointer to smac to fill up for a GID entry. It is optional. * * rdma_read_gid_l2_fields() returns 0 on success and returns vlan id * (if gid entry has vlan) and source MAC, or returns error. */ int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr, u16 *vlan_id, u8 *smac) { struct netdev_nested_priv priv = { .data = (void *)vlan_id, }; struct net_device *ndev; rcu_read_lock(); ndev = rcu_dereference(attr->ndev); if (!ndev) { rcu_read_unlock(); return -ENODEV; } if (smac) ether_addr_copy(smac, ndev->dev_addr); if (vlan_id) { *vlan_id = 0xffff; if (is_vlan_dev(ndev)) { *vlan_id = vlan_dev_vlan_id(ndev); } else { /* If the netdev is upper device and if it's lower * device is vlan device, consider vlan id of * the lower vlan device for this gid entry. 
*/ netdev_walk_all_lower_dev_rcu(attr->ndev, get_lower_dev_vlan, &priv); } } rcu_read_unlock(); return 0; } EXPORT_SYMBOL(rdma_read_gid_l2_fields); static int config_non_roce_gid_cache(struct ib_device *device, u32 port, struct ib_port_attr *tprops) { struct ib_gid_attr gid_attr = {}; struct ib_gid_table *table; int ret = 0; int i; gid_attr.device = device; gid_attr.port_num = port; table = rdma_gid_table(device, port); mutex_lock(&table->lock); for (i = 0; i < tprops->gid_tbl_len; ++i) { if (!device->ops.query_gid) continue; ret = device->ops.query_gid(device, port, i, &gid_attr.gid); if (ret) { dev_warn(&device->dev, "query_gid failed (%d) for index %d\n", ret, i); goto err; } if (rdma_protocol_iwarp(device, port)) { struct net_device *ndev; ndev = ib_device_get_netdev(device, port); if (!ndev) continue; RCU_INIT_POINTER(gid_attr.ndev, ndev); dev_put(ndev); } gid_attr.index = i; tprops->subnet_prefix = be64_to_cpu(gid_attr.gid.global.subnet_prefix); add_modify_gid(table, &gid_attr); } err: mutex_unlock(&table->lock); return ret; } static int ib_cache_update(struct ib_device *device, u32 port, bool update_gids, bool update_pkeys, bool enforce_security) { struct ib_port_attr *tprops = NULL; struct ib_pkey_cache *pkey_cache = NULL; struct ib_pkey_cache *old_pkey_cache = NULL; int i; int ret; if (!rdma_is_port_valid(device, port)) return -EINVAL; tprops = kmalloc(sizeof *tprops, GFP_KERNEL); if (!tprops) return -ENOMEM; ret = ib_query_port(device, port, tprops); if (ret) { dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret); goto err; } if (!rdma_protocol_roce(device, port) && update_gids) { ret = config_non_roce_gid_cache(device, port, tprops); if (ret) goto err; } update_pkeys &= !!tprops->pkey_tbl_len; if (update_pkeys) { pkey_cache = kmalloc(struct_size(pkey_cache, table, tprops->pkey_tbl_len), GFP_KERNEL); if (!pkey_cache) { ret = -ENOMEM; goto err; } pkey_cache->table_len = tprops->pkey_tbl_len; for (i = 0; i < pkey_cache->table_len; ++i) { ret = ib_query_pkey(device, port, i, pkey_cache->table + i); if (ret) { dev_warn(&device->dev, "ib_query_pkey failed (%d) for index %d\n", ret, i); goto err; } } } write_lock_irq(&device->cache_lock); if (update_pkeys) { old_pkey_cache = device->port_data[port].cache.pkey; device->port_data[port].cache.pkey = pkey_cache; } device->port_data[port].cache.lmc = tprops->lmc; device->port_data[port].cache.port_state = tprops->state; device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix; write_unlock_irq(&device->cache_lock); if (enforce_security) ib_security_cache_change(device, port, tprops->subnet_prefix); kfree(old_pkey_cache); kfree(tprops); return 0; err: kfree(pkey_cache); kfree(tprops); return ret; } static void ib_cache_event_task(struct work_struct *_work) { struct ib_update_work *work = container_of(_work, struct ib_update_work, work); int ret; /* Before distributing the cache update event, first sync * the cache. */ ret = ib_cache_update(work->event.device, work->event.element.port_num, work->event.event == IB_EVENT_GID_CHANGE, work->event.event == IB_EVENT_PKEY_CHANGE, work->enforce_security); /* GID event is notified already for individual GID entries by * dispatch_gid_change_event(). Hence, notifiy for rest of the * events. 
*/ if (!ret && work->event.event != IB_EVENT_GID_CHANGE) ib_dispatch_event_clients(&work->event); kfree(work); } static void ib_generic_event_task(struct work_struct *_work) { struct ib_update_work *work = container_of(_work, struct ib_update_work, work); ib_dispatch_event_clients(&work->event); kfree(work); } static bool is_cache_update_event(const struct ib_event *event) { return (event->event == IB_EVENT_PORT_ERR || event->event == IB_EVENT_PORT_ACTIVE || event->event == IB_EVENT_LID_CHANGE || event->event == IB_EVENT_PKEY_CHANGE || event->event == IB_EVENT_CLIENT_REREGISTER || event->event == IB_EVENT_GID_CHANGE); } /** * ib_dispatch_event - Dispatch an asynchronous event * @event:Event to dispatch * * Low-level drivers must call ib_dispatch_event() to dispatch the * event to all registered event handlers when an asynchronous event * occurs. */ void ib_dispatch_event(const struct ib_event *event) { struct ib_update_work *work; work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) return; if (is_cache_update_event(event)) INIT_WORK(&work->work, ib_cache_event_task); else INIT_WORK(&work->work, ib_generic_event_task); work->event = *event; if (event->event == IB_EVENT_PKEY_CHANGE || event->event == IB_EVENT_GID_CHANGE) work->enforce_security = true; queue_work(ib_wq, &work->work); } EXPORT_SYMBOL(ib_dispatch_event); int ib_cache_setup_one(struct ib_device *device) { u32 p; int err; err = gid_table_setup_one(device); if (err) return err; rdma_for_each_port (device, p) { err = ib_cache_update(device, p, true, true, true); if (err) return err; } return 0; } void ib_cache_release_one(struct ib_device *device) { u32 p; /* * The release function frees all the cache elements. * This function should be called as part of freeing * all the device's resources when the cache could no * longer be accessed. */ rdma_for_each_port (device, p) kfree(device->port_data[p].cache.pkey); gid_table_release_one(device); } void ib_cache_cleanup_one(struct ib_device *device) { /* The cleanup function waits for all in-progress workqueue * elements and cleans up the GID cache. This function should be * called after the device was removed from the devices list and * all clients were removed, so the cache exists but is * non-functional and shouldn't be updated anymore. */ flush_workqueue(ib_wq); gid_table_cleanup_one(device); /* * Flush the wq second time for any pending GID delete work. */ flush_workqueue(ib_wq); }
linux-master
drivers/infiniband/core/cache.c
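cache.c above hands out reference-counted GID attributes: every pointer obtained from rdma_get_gid_attr() (or rdma_find_gid()/rdma_find_gid_by_port()) must be balanced with rdma_put_gid_attr(), while rdma_query_gid() only copies the GID content and takes no reference. A small, hypothetical kernel-side sketch of that discipline follows, using only functions exported above; the device pointer and port number are assumed valid, and GID index 0 is chosen arbitrarily.

/* Hypothetical helper: dump L2 details of GID index 0 on @port using the
 * reference-counted cache API implemented in cache.c above.
 */
#include <linux/if_ether.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

static void demo_dump_gid0(struct ib_device *device, u32 port)
{
	const struct ib_gid_attr *attr;
	union ib_gid gid;
	u8 smac[ETH_ALEN];
	u16 vlan_id;

	/* Plain content read: no reference is held on the table entry. */
	if (!rdma_query_gid(device, port, 0, &gid))
		pr_info("port %u gid[0] = %pI6\n", port, gid.raw);

	/* Attribute read: takes a reference that we must drop ourselves. */
	attr = rdma_get_gid_attr(device, port, 0);
	if (IS_ERR(attr))
		return;

	if (!rdma_read_gid_l2_fields(attr, &vlan_id, smac))
		pr_info("gid[0] type=%s vlan=%u smac=%pM\n",
			ib_cache_gid_type_str(attr->gid_type), vlan_id, smac);

	rdma_put_gid_attr(attr);	/* balance rdma_get_gid_attr() */
}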
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */ #include <rdma/uverbs_ioctl.h> #include <rdma/rdma_user_ioctl.h> #include <linux/bitops.h> #include "rdma_core.h" #include "uverbs.h" static int ib_uverbs_notsupp(struct uverbs_attr_bundle *attrs) { return -EOPNOTSUPP; } static void *uapi_add_elm(struct uverbs_api *uapi, u32 key, size_t alloc_size) { void *elm; int rc; if (key == UVERBS_API_KEY_ERR) return ERR_PTR(-EOVERFLOW); elm = kzalloc(alloc_size, GFP_KERNEL); if (!elm) return ERR_PTR(-ENOMEM); rc = radix_tree_insert(&uapi->radix, key, elm); if (rc) { kfree(elm); return ERR_PTR(rc); } return elm; } static void *uapi_add_get_elm(struct uverbs_api *uapi, u32 key, size_t alloc_size, bool *exists) { void *elm; elm = uapi_add_elm(uapi, key, alloc_size); if (!IS_ERR(elm)) { *exists = false; return elm; } if (elm != ERR_PTR(-EEXIST)) return elm; elm = radix_tree_lookup(&uapi->radix, key); if (WARN_ON(!elm)) return ERR_PTR(-EINVAL); *exists = true; return elm; } static int uapi_create_write(struct uverbs_api *uapi, struct ib_device *ibdev, const struct uapi_definition *def, u32 obj_key, u32 *cur_method_key) { struct uverbs_api_write_method *method_elm; u32 method_key = obj_key; bool exists; if (def->write.is_ex) method_key |= uapi_key_write_ex_method(def->write.command_num); else method_key |= uapi_key_write_method(def->write.command_num); method_elm = uapi_add_get_elm(uapi, method_key, sizeof(*method_elm), &exists); if (IS_ERR(method_elm)) return PTR_ERR(method_elm); if (WARN_ON(exists && (def->write.is_ex != method_elm->is_ex))) return -EINVAL; method_elm->is_ex = def->write.is_ex; method_elm->handler = def->func_write; if (!def->write.is_ex) method_elm->disabled = !(ibdev->uverbs_cmd_mask & BIT_ULL(def->write.command_num)); if (!def->write.is_ex && def->func_write) { method_elm->has_udata = def->write.has_udata; method_elm->has_resp = def->write.has_resp; method_elm->req_size = def->write.req_size; method_elm->resp_size = def->write.resp_size; } *cur_method_key = method_key; return 0; } static int uapi_merge_method(struct uverbs_api *uapi, struct uverbs_api_object *obj_elm, u32 obj_key, const struct uverbs_method_def *method, bool is_driver) { u32 method_key = obj_key | uapi_key_ioctl_method(method->id); struct uverbs_api_ioctl_method *method_elm; unsigned int i; bool exists; if (!method->attrs) return 0; method_elm = uapi_add_get_elm(uapi, method_key, sizeof(*method_elm), &exists); if (IS_ERR(method_elm)) return PTR_ERR(method_elm); if (exists) { /* * This occurs when a driver uses ADD_UVERBS_ATTRIBUTES_SIMPLE */ if (WARN_ON(method->handler)) return -EINVAL; } else { WARN_ON(!method->handler); rcu_assign_pointer(method_elm->handler, method->handler); if (method->handler != uverbs_destroy_def_handler) method_elm->driver_method = is_driver; } for (i = 0; i != method->num_attrs; i++) { const struct uverbs_attr_def *attr = (*method->attrs)[i]; struct uverbs_api_attr *attr_slot; if (!attr) continue; /* * ENUM_IN contains the 'ids' pointer to the driver's .rodata, * so if it is specified by a driver then it always makes this * into a driver method. 
*/ if (attr->attr.type == UVERBS_ATTR_TYPE_ENUM_IN) method_elm->driver_method |= is_driver; /* * Like other uobject based things we only support a single * uobject being NEW'd or DESTROY'd */ if (attr->attr.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) { u8 access = attr->attr.u2.objs_arr.access; if (WARN_ON(access == UVERBS_ACCESS_NEW || access == UVERBS_ACCESS_DESTROY)) return -EINVAL; } attr_slot = uapi_add_elm(uapi, method_key | uapi_key_attr(attr->id), sizeof(*attr_slot)); /* Attributes are not allowed to be modified by drivers */ if (IS_ERR(attr_slot)) return PTR_ERR(attr_slot); attr_slot->spec = attr->attr; } return 0; } static int uapi_merge_obj_tree(struct uverbs_api *uapi, const struct uverbs_object_def *obj, bool is_driver) { struct uverbs_api_object *obj_elm; unsigned int i; u32 obj_key; bool exists; int rc; obj_key = uapi_key_obj(obj->id); obj_elm = uapi_add_get_elm(uapi, obj_key, sizeof(*obj_elm), &exists); if (IS_ERR(obj_elm)) return PTR_ERR(obj_elm); if (obj->type_attrs) { if (WARN_ON(obj_elm->type_attrs)) return -EINVAL; obj_elm->id = obj->id; obj_elm->type_attrs = obj->type_attrs; obj_elm->type_class = obj->type_attrs->type_class; /* * Today drivers are only permitted to use idr_class and * fd_class types. We can revoke the IDR types during * disassociation, and the FD types require the driver to use * struct file_operations.owner to prevent the driver module * code from unloading while the file is open. This provides * enough safety that uverbs_uobject_fd_release() will * continue to work. Drivers using FD are responsible to * handle disassociation of the device on their own. */ if (WARN_ON(is_driver && obj->type_attrs->type_class != &uverbs_idr_class && obj->type_attrs->type_class != &uverbs_fd_class)) return -EINVAL; } if (!obj->methods) return 0; for (i = 0; i != obj->num_methods; i++) { const struct uverbs_method_def *method = (*obj->methods)[i]; if (!method) continue; rc = uapi_merge_method(uapi, obj_elm, obj_key, method, is_driver); if (rc) return rc; } return 0; } static int uapi_disable_elm(struct uverbs_api *uapi, const struct uapi_definition *def, u32 obj_key, u32 method_key) { bool exists; if (def->scope == UAPI_SCOPE_OBJECT) { struct uverbs_api_object *obj_elm; obj_elm = uapi_add_get_elm( uapi, obj_key, sizeof(*obj_elm), &exists); if (IS_ERR(obj_elm)) return PTR_ERR(obj_elm); obj_elm->disabled = 1; return 0; } if (def->scope == UAPI_SCOPE_METHOD && uapi_key_is_ioctl_method(method_key)) { struct uverbs_api_ioctl_method *method_elm; method_elm = uapi_add_get_elm(uapi, method_key, sizeof(*method_elm), &exists); if (IS_ERR(method_elm)) return PTR_ERR(method_elm); method_elm->disabled = 1; return 0; } if (def->scope == UAPI_SCOPE_METHOD && (uapi_key_is_write_method(method_key) || uapi_key_is_write_ex_method(method_key))) { struct uverbs_api_write_method *write_elm; write_elm = uapi_add_get_elm(uapi, method_key, sizeof(*write_elm), &exists); if (IS_ERR(write_elm)) return PTR_ERR(write_elm); write_elm->disabled = 1; return 0; } WARN_ON(true); return -EINVAL; } static int uapi_merge_def(struct uverbs_api *uapi, struct ib_device *ibdev, const struct uapi_definition *def_list, bool is_driver) { const struct uapi_definition *def = def_list; u32 cur_obj_key = UVERBS_API_KEY_ERR; u32 cur_method_key = UVERBS_API_KEY_ERR; bool exists; int rc; if (!def_list) return 0; for (;; def++) { switch ((enum uapi_definition_kind)def->kind) { case UAPI_DEF_CHAIN: rc = uapi_merge_def(uapi, ibdev, def->chain, is_driver); if (rc) return rc; continue; case UAPI_DEF_CHAIN_OBJ_TREE: if 
(WARN_ON(def->object_start.object_id != def->chain_obj_tree->id)) return -EINVAL; cur_obj_key = uapi_key_obj(def->object_start.object_id); rc = uapi_merge_obj_tree(uapi, def->chain_obj_tree, is_driver); if (rc) return rc; continue; case UAPI_DEF_END: return 0; case UAPI_DEF_IS_SUPPORTED_DEV_FN: { void **ibdev_fn = (void *)(&ibdev->ops) + def->needs_fn_offset; if (*ibdev_fn) continue; rc = uapi_disable_elm( uapi, def, cur_obj_key, cur_method_key); if (rc) return rc; continue; } case UAPI_DEF_IS_SUPPORTED_FUNC: if (def->func_is_supported(ibdev)) continue; rc = uapi_disable_elm( uapi, def, cur_obj_key, cur_method_key); if (rc) return rc; continue; case UAPI_DEF_OBJECT_START: { struct uverbs_api_object *obj_elm; cur_obj_key = uapi_key_obj(def->object_start.object_id); obj_elm = uapi_add_get_elm(uapi, cur_obj_key, sizeof(*obj_elm), &exists); if (IS_ERR(obj_elm)) return PTR_ERR(obj_elm); continue; } case UAPI_DEF_WRITE: rc = uapi_create_write( uapi, ibdev, def, cur_obj_key, &cur_method_key); if (rc) return rc; continue; } WARN_ON(true); return -EINVAL; } } static int uapi_finalize_ioctl_method(struct uverbs_api *uapi, struct uverbs_api_ioctl_method *method_elm, u32 method_key) { struct radix_tree_iter iter; unsigned int num_attrs = 0; unsigned int max_bkey = 0; bool single_uobj = false; void __rcu **slot; method_elm->destroy_bkey = UVERBS_API_ATTR_BKEY_LEN; radix_tree_for_each_slot (slot, &uapi->radix, &iter, uapi_key_attrs_start(method_key)) { struct uverbs_api_attr *elm = rcu_dereference_protected(*slot, true); u32 attr_key = iter.index & UVERBS_API_ATTR_KEY_MASK; u32 attr_bkey = uapi_bkey_attr(attr_key); u8 type = elm->spec.type; if (uapi_key_attr_to_ioctl_method(iter.index) != uapi_key_attr_to_ioctl_method(method_key)) break; if (elm->spec.mandatory) __set_bit(attr_bkey, method_elm->attr_mandatory); if (elm->spec.is_udata) method_elm->has_udata = true; if (type == UVERBS_ATTR_TYPE_IDR || type == UVERBS_ATTR_TYPE_FD) { u8 access = elm->spec.u.obj.access; /* * Verbs specs may only have one NEW/DESTROY, we don't * have the infrastructure to abort multiple NEW's or * cope with multiple DESTROY failure. 
*/ if (access == UVERBS_ACCESS_NEW || access == UVERBS_ACCESS_DESTROY) { if (WARN_ON(single_uobj)) return -EINVAL; single_uobj = true; if (WARN_ON(!elm->spec.mandatory)) return -EINVAL; } if (access == UVERBS_ACCESS_DESTROY) method_elm->destroy_bkey = attr_bkey; } max_bkey = max(max_bkey, attr_bkey); num_attrs++; } method_elm->key_bitmap_len = max_bkey + 1; WARN_ON(method_elm->key_bitmap_len > UVERBS_API_ATTR_BKEY_LEN); uapi_compute_bundle_size(method_elm, num_attrs); return 0; } static int uapi_finalize(struct uverbs_api *uapi) { const struct uverbs_api_write_method **data; unsigned long max_write_ex = 0; unsigned long max_write = 0; struct radix_tree_iter iter; void __rcu **slot; int rc; int i; radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) { struct uverbs_api_ioctl_method *method_elm = rcu_dereference_protected(*slot, true); if (uapi_key_is_ioctl_method(iter.index)) { rc = uapi_finalize_ioctl_method(uapi, method_elm, iter.index); if (rc) return rc; } if (uapi_key_is_write_method(iter.index)) max_write = max(max_write, iter.index & UVERBS_API_ATTR_KEY_MASK); if (uapi_key_is_write_ex_method(iter.index)) max_write_ex = max(max_write_ex, iter.index & UVERBS_API_ATTR_KEY_MASK); } uapi->notsupp_method.handler = ib_uverbs_notsupp; uapi->num_write = max_write + 1; uapi->num_write_ex = max_write_ex + 1; data = kmalloc_array(uapi->num_write + uapi->num_write_ex, sizeof(*uapi->write_methods), GFP_KERNEL); if (!data) return -ENOMEM; for (i = 0; i != uapi->num_write + uapi->num_write_ex; i++) data[i] = &uapi->notsupp_method; uapi->write_methods = data; uapi->write_ex_methods = data + uapi->num_write; radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) { if (uapi_key_is_write_method(iter.index)) uapi->write_methods[iter.index & UVERBS_API_ATTR_KEY_MASK] = rcu_dereference_protected(*slot, true); if (uapi_key_is_write_ex_method(iter.index)) uapi->write_ex_methods[iter.index & UVERBS_API_ATTR_KEY_MASK] = rcu_dereference_protected(*slot, true); } return 0; } static void uapi_remove_range(struct uverbs_api *uapi, u32 start, u32 last) { struct radix_tree_iter iter; void __rcu **slot; radix_tree_for_each_slot (slot, &uapi->radix, &iter, start) { if (iter.index > last) return; kfree(rcu_dereference_protected(*slot, true)); radix_tree_iter_delete(&uapi->radix, &iter, slot); } } static void uapi_remove_object(struct uverbs_api *uapi, u32 obj_key) { uapi_remove_range(uapi, obj_key, obj_key | UVERBS_API_METHOD_KEY_MASK | UVERBS_API_ATTR_KEY_MASK); } static void uapi_remove_method(struct uverbs_api *uapi, u32 method_key) { uapi_remove_range(uapi, method_key, method_key | UVERBS_API_ATTR_KEY_MASK); } static u32 uapi_get_obj_id(struct uverbs_attr_spec *spec) { if (spec->type == UVERBS_ATTR_TYPE_IDR || spec->type == UVERBS_ATTR_TYPE_FD) return spec->u.obj.obj_type; if (spec->type == UVERBS_ATTR_TYPE_IDRS_ARRAY) return spec->u2.objs_arr.obj_type; return UVERBS_API_KEY_ERR; } static void uapi_key_okay(u32 key) { unsigned int count = 0; if (uapi_key_is_object(key)) count++; if (uapi_key_is_ioctl_method(key)) count++; if (uapi_key_is_write_method(key)) count++; if (uapi_key_is_write_ex_method(key)) count++; if (uapi_key_is_attr(key)) count++; WARN(count != 1, "Bad count %u key=%x", count, key); } static void uapi_finalize_disable(struct uverbs_api *uapi) { struct radix_tree_iter iter; u32 starting_key = 0; bool scan_again = false; void __rcu **slot; again: radix_tree_for_each_slot (slot, &uapi->radix, &iter, starting_key) { uapi_key_okay(iter.index); if (uapi_key_is_object(iter.index)) { struct 
uverbs_api_object *obj_elm = rcu_dereference_protected(*slot, true); if (obj_elm->disabled) { /* Have to check all the attrs again */ scan_again = true; starting_key = iter.index; uapi_remove_object(uapi, iter.index); goto again; } continue; } if (uapi_key_is_ioctl_method(iter.index)) { struct uverbs_api_ioctl_method *method_elm = rcu_dereference_protected(*slot, true); if (method_elm->disabled) { starting_key = iter.index; uapi_remove_method(uapi, iter.index); goto again; } continue; } if (uapi_key_is_write_method(iter.index) || uapi_key_is_write_ex_method(iter.index)) { struct uverbs_api_write_method *method_elm = rcu_dereference_protected(*slot, true); if (method_elm->disabled) { kfree(method_elm); radix_tree_iter_delete(&uapi->radix, &iter, slot); } continue; } if (uapi_key_is_attr(iter.index)) { struct uverbs_api_attr *attr_elm = rcu_dereference_protected(*slot, true); const struct uverbs_api_object *tmp_obj; u32 obj_key; /* * If the method has a mandatory object handle * attribute which relies on an object which is not * present then the entire method is uncallable. */ if (!attr_elm->spec.mandatory) continue; obj_key = uapi_get_obj_id(&attr_elm->spec); if (obj_key == UVERBS_API_KEY_ERR) continue; tmp_obj = uapi_get_object(uapi, obj_key); if (IS_ERR(tmp_obj)) { if (PTR_ERR(tmp_obj) == -ENOMSG) continue; } else { if (!tmp_obj->disabled) continue; } starting_key = iter.index; uapi_remove_method( uapi, iter.index & (UVERBS_API_OBJ_KEY_MASK | UVERBS_API_METHOD_KEY_MASK)); goto again; } WARN_ON(false); } if (!scan_again) return; scan_again = false; starting_key = 0; goto again; } void uverbs_destroy_api(struct uverbs_api *uapi) { if (!uapi) return; uapi_remove_range(uapi, 0, U32_MAX); kfree(uapi->write_methods); kfree(uapi); } static const struct uapi_definition uverbs_core_api[] = { UAPI_DEF_CHAIN(uverbs_def_obj_async_fd), UAPI_DEF_CHAIN(uverbs_def_obj_counters), UAPI_DEF_CHAIN(uverbs_def_obj_cq), UAPI_DEF_CHAIN(uverbs_def_obj_device), UAPI_DEF_CHAIN(uverbs_def_obj_dm), UAPI_DEF_CHAIN(uverbs_def_obj_flow_action), UAPI_DEF_CHAIN(uverbs_def_obj_intf), UAPI_DEF_CHAIN(uverbs_def_obj_mr), UAPI_DEF_CHAIN(uverbs_def_obj_qp), UAPI_DEF_CHAIN(uverbs_def_obj_srq), UAPI_DEF_CHAIN(uverbs_def_obj_wq), UAPI_DEF_CHAIN(uverbs_def_write_intf), {}, }; struct uverbs_api *uverbs_alloc_api(struct ib_device *ibdev) { struct uverbs_api *uapi; int rc; uapi = kzalloc(sizeof(*uapi), GFP_KERNEL); if (!uapi) return ERR_PTR(-ENOMEM); INIT_RADIX_TREE(&uapi->radix, GFP_KERNEL); uapi->driver_id = ibdev->ops.driver_id; rc = uapi_merge_def(uapi, ibdev, uverbs_core_api, false); if (rc) goto err; rc = uapi_merge_def(uapi, ibdev, ibdev->driver_def, true); if (rc) goto err; uapi_finalize_disable(uapi); rc = uapi_finalize(uapi); if (rc) goto err; return uapi; err: if (rc != -ENOMEM) dev_err(&ibdev->dev, "Setup of uverbs_api failed, kernel parsing tree description is not valid (%d)??\n", rc); uverbs_destroy_api(uapi); return ERR_PTR(rc); } /* * The pre version is done before destroying the HW objects, it only blocks * off method access. All methods that require the ib_dev or the module data * must test one of these assignments prior to continuing. 
*/ void uverbs_disassociate_api_pre(struct ib_uverbs_device *uverbs_dev) { struct uverbs_api *uapi = uverbs_dev->uapi; struct radix_tree_iter iter; void __rcu **slot; rcu_assign_pointer(uverbs_dev->ib_dev, NULL); radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) { if (uapi_key_is_ioctl_method(iter.index)) { struct uverbs_api_ioctl_method *method_elm = rcu_dereference_protected(*slot, true); if (method_elm->driver_method) rcu_assign_pointer(method_elm->handler, NULL); } } synchronize_srcu(&uverbs_dev->disassociate_srcu); } /* * Called when a driver disassociates from the ib_uverbs_device. The * assumption is that the driver module will unload after. Replace everything * related to the driver with NULL as a safety measure. */ void uverbs_disassociate_api(struct uverbs_api *uapi) { struct radix_tree_iter iter; void __rcu **slot; radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) { if (uapi_key_is_object(iter.index)) { struct uverbs_api_object *object_elm = rcu_dereference_protected(*slot, true); /* * Some type_attrs are in the driver module. We don't * bother to keep track of which since there should be * no use of this after disassociate. */ object_elm->type_attrs = NULL; } else if (uapi_key_is_attr(iter.index)) { struct uverbs_api_attr *elm = rcu_dereference_protected(*slot, true); if (elm->spec.type == UVERBS_ATTR_TYPE_ENUM_IN) elm->spec.u2.enum_def.ids = NULL; } } }
linux-master
drivers/infiniband/core/uverbs_uapi.c
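The uverbs_uapi.c file above folds object, method and attribute identifiers into a single radix-tree key so that a whole object or method can later be disabled and removed as one contiguous key range (see uapi_remove_object() and uapi_remove_method()). The standalone C sketch below illustrates only that packing idea; the SK_* names and bit widths are hypothetical for the demo and do not match the real UVERBS_API_* masks.

/* Minimal userspace sketch (not kernel code) of packing three ids into one
 * 32-bit key so related entries occupy a contiguous range of keys.
 * Bit layout here is an assumption, chosen only for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_OBJ_SHIFT	20u
#define SK_METH_SHIFT	10u
#define SK_ATTR_MASK	0x3ffu
#define SK_METH_MASK	(0x3ffu << SK_METH_SHIFT)

static uint32_t sk_key(uint32_t obj, uint32_t meth, uint32_t attr)
{
	return (obj << SK_OBJ_SHIFT) | (meth << SK_METH_SHIFT) | attr;
}

int main(void)
{
	uint32_t key = sk_key(3, 7, 42);

	/* Removing "object 3" means deleting every key in the inclusive
	 * range [sk_key(3, 0, 0), sk_key(3, 0, 0) | SK_METH_MASK | SK_ATTR_MASK],
	 * mirroring how uapi_remove_object() builds its range by OR-ing the
	 * method and attribute masks onto the object key.
	 */
	printf("key=0x%x first=0x%x last=0x%x\n", key, sk_key(3, 0, 0),
	       sk_key(3, 0, 0) | SK_METH_MASK | SK_ATTR_MASK);
	return 0;
}

Because an object's methods and attributes share the object's high bits, a single range deletion over the radix tree drops everything that belongs to it, which is what uapi_remove_range() is used for above.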
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Trace points for the IB Connection Manager.
 *
 * Author: Chuck Lever <chuck.lever@oracle.com>
 *
 * Copyright (c) 2020, Oracle and/or its affiliates.
 */

#include <rdma/rdma_cm.h>
#include "cma_priv.h"

#define CREATE_TRACE_POINTS
#include "cm_trace.h"
linux-master
drivers/infiniband/core/cm_trace.c
/* * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/security.h> #include <linux/completion.h> #include <linux/list.h> #include <rdma/ib_verbs.h> #include <rdma/ib_cache.h> #include "core_priv.h" #include "mad_priv.h" static LIST_HEAD(mad_agent_list); /* Lock to protect mad_agent_list */ static DEFINE_SPINLOCK(mad_agent_list_lock); static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp) { struct pkey_index_qp_list *pkey = NULL; struct pkey_index_qp_list *tmp_pkey; struct ib_device *dev = pp->sec->dev; spin_lock(&dev->port_data[pp->port_num].pkey_list_lock); list_for_each_entry (tmp_pkey, &dev->port_data[pp->port_num].pkey_list, pkey_index_list) { if (tmp_pkey->pkey_index == pp->pkey_index) { pkey = tmp_pkey; break; } } spin_unlock(&dev->port_data[pp->port_num].pkey_list_lock); return pkey; } static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp, u16 *pkey, u64 *subnet_prefix) { struct ib_device *dev = pp->sec->dev; int ret; ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey); if (ret) return ret; ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix); return ret; } static int enforce_qp_pkey_security(u16 pkey, u64 subnet_prefix, struct ib_qp_security *qp_sec) { struct ib_qp_security *shared_qp_sec; int ret; ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey); if (ret) return ret; list_for_each_entry(shared_qp_sec, &qp_sec->shared_qp_list, shared_qp_list) { ret = security_ib_pkey_access(shared_qp_sec->security, subnet_prefix, pkey); if (ret) return ret; } return 0; } /* The caller of this function must hold the QP security * mutex of the QP of the security structure in *pps. * * It takes separate ports_pkeys and security structure * because in some cases the pps will be for a new settings * or the pps will be for the real QP and security structure * will be for a shared QP. 
*/ static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps, struct ib_qp_security *sec) { u64 subnet_prefix; u16 pkey; int ret = 0; if (!pps) return 0; if (pps->main.state != IB_PORT_PKEY_NOT_VALID) { ret = get_pkey_and_subnet_prefix(&pps->main, &pkey, &subnet_prefix); if (ret) return ret; ret = enforce_qp_pkey_security(pkey, subnet_prefix, sec); if (ret) return ret; } if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) { ret = get_pkey_and_subnet_prefix(&pps->alt, &pkey, &subnet_prefix); if (ret) return ret; ret = enforce_qp_pkey_security(pkey, subnet_prefix, sec); } return ret; } /* The caller of this function must hold the QP security * mutex. */ static void qp_to_error(struct ib_qp_security *sec) { struct ib_qp_security *shared_qp_sec; struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; struct ib_event event = { .event = IB_EVENT_QP_FATAL }; /* If the QP is in the process of being destroyed * the qp pointer in the security structure is * undefined. It cannot be modified now. */ if (sec->destroying) return; ib_modify_qp(sec->qp, &attr, IB_QP_STATE); if (sec->qp->event_handler && sec->qp->qp_context) { event.element.qp = sec->qp; sec->qp->event_handler(&event, sec->qp->qp_context); } list_for_each_entry(shared_qp_sec, &sec->shared_qp_list, shared_qp_list) { struct ib_qp *qp = shared_qp_sec->qp; if (qp->event_handler && qp->qp_context) { event.element.qp = qp; event.device = qp->device; qp->event_handler(&event, qp->qp_context); } } } static inline void check_pkey_qps(struct pkey_index_qp_list *pkey, struct ib_device *device, u32 port_num, u64 subnet_prefix) { struct ib_port_pkey *pp, *tmp_pp; bool comp; LIST_HEAD(to_error_list); u16 pkey_val; if (!ib_get_cached_pkey(device, port_num, pkey->pkey_index, &pkey_val)) { spin_lock(&pkey->qp_list_lock); list_for_each_entry(pp, &pkey->qp_list, qp_list) { if (atomic_read(&pp->sec->error_list_count)) continue; if (enforce_qp_pkey_security(pkey_val, subnet_prefix, pp->sec)) { atomic_inc(&pp->sec->error_list_count); list_add(&pp->to_error_list, &to_error_list); } } spin_unlock(&pkey->qp_list_lock); } list_for_each_entry_safe(pp, tmp_pp, &to_error_list, to_error_list) { mutex_lock(&pp->sec->mutex); qp_to_error(pp->sec); list_del(&pp->to_error_list); atomic_dec(&pp->sec->error_list_count); comp = pp->sec->destroying; mutex_unlock(&pp->sec->mutex); if (comp) complete(&pp->sec->error_complete); } } /* The caller of this function must hold the QP security * mutex. */ static int port_pkey_list_insert(struct ib_port_pkey *pp) { struct pkey_index_qp_list *tmp_pkey; struct pkey_index_qp_list *pkey; struct ib_device *dev; u32 port_num = pp->port_num; int ret = 0; if (pp->state != IB_PORT_PKEY_VALID) return 0; dev = pp->sec->dev; pkey = get_pkey_idx_qp_list(pp); if (!pkey) { bool found = false; pkey = kzalloc(sizeof(*pkey), GFP_KERNEL); if (!pkey) return -ENOMEM; spin_lock(&dev->port_data[port_num].pkey_list_lock); /* Check for the PKey again. A racing process may * have created it. 
*/ list_for_each_entry(tmp_pkey, &dev->port_data[port_num].pkey_list, pkey_index_list) { if (tmp_pkey->pkey_index == pp->pkey_index) { kfree(pkey); pkey = tmp_pkey; found = true; break; } } if (!found) { pkey->pkey_index = pp->pkey_index; spin_lock_init(&pkey->qp_list_lock); INIT_LIST_HEAD(&pkey->qp_list); list_add(&pkey->pkey_index_list, &dev->port_data[port_num].pkey_list); } spin_unlock(&dev->port_data[port_num].pkey_list_lock); } spin_lock(&pkey->qp_list_lock); list_add(&pp->qp_list, &pkey->qp_list); spin_unlock(&pkey->qp_list_lock); pp->state = IB_PORT_PKEY_LISTED; return ret; } /* The caller of this function must hold the QP security * mutex. */ static void port_pkey_list_remove(struct ib_port_pkey *pp) { struct pkey_index_qp_list *pkey; if (pp->state != IB_PORT_PKEY_LISTED) return; pkey = get_pkey_idx_qp_list(pp); spin_lock(&pkey->qp_list_lock); list_del(&pp->qp_list); spin_unlock(&pkey->qp_list_lock); /* The setting may still be valid, i.e. after * a destroy has failed for example. */ pp->state = IB_PORT_PKEY_VALID; } static void destroy_qp_security(struct ib_qp_security *sec) { security_ib_free_security(sec->security); kfree(sec->ports_pkeys); kfree(sec); } /* The caller of this function must hold the QP security * mutex. */ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp, const struct ib_qp_attr *qp_attr, int qp_attr_mask) { struct ib_ports_pkeys *new_pps; struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys; new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL); if (!new_pps) return NULL; if (qp_attr_mask & IB_QP_PORT) new_pps->main.port_num = qp_attr->port_num; else if (qp_pps) new_pps->main.port_num = qp_pps->main.port_num; if (qp_attr_mask & IB_QP_PKEY_INDEX) new_pps->main.pkey_index = qp_attr->pkey_index; else if (qp_pps) new_pps->main.pkey_index = qp_pps->main.pkey_index; if (((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT)) || (qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)) new_pps->main.state = IB_PORT_PKEY_VALID; if (qp_attr_mask & IB_QP_ALT_PATH) { new_pps->alt.port_num = qp_attr->alt_port_num; new_pps->alt.pkey_index = qp_attr->alt_pkey_index; new_pps->alt.state = IB_PORT_PKEY_VALID; } else if (qp_pps) { new_pps->alt.port_num = qp_pps->alt.port_num; new_pps->alt.pkey_index = qp_pps->alt.pkey_index; if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID) new_pps->alt.state = IB_PORT_PKEY_VALID; } new_pps->main.sec = qp->qp_sec; new_pps->alt.sec = qp->qp_sec; return new_pps; } int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev) { struct ib_qp *real_qp = qp->real_qp; int ret; ret = ib_create_qp_security(qp, dev); if (ret) return ret; if (!qp->qp_sec) return 0; mutex_lock(&real_qp->qp_sec->mutex); ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys, qp->qp_sec); if (ret) goto ret; if (qp != real_qp) list_add(&qp->qp_sec->shared_qp_list, &real_qp->qp_sec->shared_qp_list); ret: mutex_unlock(&real_qp->qp_sec->mutex); if (ret) destroy_qp_security(qp->qp_sec); return ret; } void ib_close_shared_qp_security(struct ib_qp_security *sec) { struct ib_qp *real_qp = sec->qp->real_qp; mutex_lock(&real_qp->qp_sec->mutex); list_del(&sec->shared_qp_list); mutex_unlock(&real_qp->qp_sec->mutex); destroy_qp_security(sec); } int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev) { unsigned int i; bool is_ib = false; int ret; rdma_for_each_port (dev, i) { is_ib = rdma_protocol_ib(dev, i); if (is_ib) break; } /* If this isn't an IB device don't create the security context */ if (!is_ib) return 0; qp->qp_sec = 
kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL); if (!qp->qp_sec) return -ENOMEM; qp->qp_sec->qp = qp; qp->qp_sec->dev = dev; mutex_init(&qp->qp_sec->mutex); INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list); atomic_set(&qp->qp_sec->error_list_count, 0); init_completion(&qp->qp_sec->error_complete); ret = security_ib_alloc_security(&qp->qp_sec->security); if (ret) { kfree(qp->qp_sec); qp->qp_sec = NULL; } return ret; } EXPORT_SYMBOL(ib_create_qp_security); void ib_destroy_qp_security_begin(struct ib_qp_security *sec) { /* Return if not IB */ if (!sec) return; mutex_lock(&sec->mutex); /* Remove the QP from the lists so it won't get added to * a to_error_list during the destroy process. */ if (sec->ports_pkeys) { port_pkey_list_remove(&sec->ports_pkeys->main); port_pkey_list_remove(&sec->ports_pkeys->alt); } /* If the QP is already in one or more of those lists * the destroying flag will ensure the to error flow * doesn't operate on an undefined QP. */ sec->destroying = true; /* Record the error list count to know how many completions * to wait for. */ sec->error_comps_pending = atomic_read(&sec->error_list_count); mutex_unlock(&sec->mutex); } void ib_destroy_qp_security_abort(struct ib_qp_security *sec) { int ret; int i; /* Return if not IB */ if (!sec) return; /* If a concurrent cache update is in progress this * QP security could be marked for an error state * transition. Wait for this to complete. */ for (i = 0; i < sec->error_comps_pending; i++) wait_for_completion(&sec->error_complete); mutex_lock(&sec->mutex); sec->destroying = false; /* Restore the position in the lists and verify * access is still allowed in case a cache update * occurred while attempting to destroy. * * Because these setting were listed already * and removed during ib_destroy_qp_security_begin * we know the pkey_index_qp_list for the PKey * already exists so port_pkey_list_insert won't fail. */ if (sec->ports_pkeys) { port_pkey_list_insert(&sec->ports_pkeys->main); port_pkey_list_insert(&sec->ports_pkeys->alt); } ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec); if (ret) qp_to_error(sec); mutex_unlock(&sec->mutex); } void ib_destroy_qp_security_end(struct ib_qp_security *sec) { int i; /* Return if not IB */ if (!sec) return; /* If a concurrent cache update is occurring we must * wait until this QP security structure is processed * in the QP to error flow before destroying it because * the to_error_list is in use. 
*/ for (i = 0; i < sec->error_comps_pending; i++) wait_for_completion(&sec->error_complete); destroy_qp_security(sec); } void ib_security_cache_change(struct ib_device *device, u32 port_num, u64 subnet_prefix) { struct pkey_index_qp_list *pkey; list_for_each_entry (pkey, &device->port_data[port_num].pkey_list, pkey_index_list) { check_pkey_qps(pkey, device, port_num, subnet_prefix); } } void ib_security_release_port_pkey_list(struct ib_device *device) { struct pkey_index_qp_list *pkey, *tmp_pkey; unsigned int i; rdma_for_each_port (device, i) { list_for_each_entry_safe(pkey, tmp_pkey, &device->port_data[i].pkey_list, pkey_index_list) { list_del(&pkey->pkey_index_list); kfree(pkey); } } } int ib_security_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_udata *udata) { int ret = 0; struct ib_ports_pkeys *tmp_pps; struct ib_ports_pkeys *new_pps = NULL; struct ib_qp *real_qp = qp->real_qp; bool special_qp = (real_qp->qp_type == IB_QPT_SMI || real_qp->qp_type == IB_QPT_GSI || real_qp->qp_type >= IB_QPT_RESERVED1); bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) || (qp_attr_mask & IB_QP_ALT_PATH)); WARN_ONCE((qp_attr_mask & IB_QP_PORT && rdma_protocol_ib(real_qp->device, qp_attr->port_num) && !real_qp->qp_sec), "%s: QP security is not initialized for IB QP: %u\n", __func__, real_qp->qp_num); /* The port/pkey settings are maintained only for the real QP. Open * handles on the real QP will be in the shared_qp_list. When * enforcing security on the real QP all the shared QPs will be * checked as well. */ if (pps_change && !special_qp && real_qp->qp_sec) { mutex_lock(&real_qp->qp_sec->mutex); new_pps = get_new_pps(real_qp, qp_attr, qp_attr_mask); if (!new_pps) { mutex_unlock(&real_qp->qp_sec->mutex); return -ENOMEM; } /* Add this QP to the lists for the new port * and pkey settings before checking for permission * in case there is a concurrent cache update * occurring. Walking the list for a cache change * doesn't acquire the security mutex unless it's * sending the QP to error. */ ret = port_pkey_list_insert(&new_pps->main); if (!ret) ret = port_pkey_list_insert(&new_pps->alt); if (!ret) ret = check_qp_port_pkey_settings(new_pps, real_qp->qp_sec); } if (!ret) ret = real_qp->device->ops.modify_qp(real_qp, qp_attr, qp_attr_mask, udata); if (new_pps) { /* Clean up the lists and free the appropriate * ports_pkeys structure. 
*/ if (ret) { tmp_pps = new_pps; } else { tmp_pps = real_qp->qp_sec->ports_pkeys; real_qp->qp_sec->ports_pkeys = new_pps; } if (tmp_pps) { port_pkey_list_remove(&tmp_pps->main); port_pkey_list_remove(&tmp_pps->alt); } kfree(tmp_pps); mutex_unlock(&real_qp->qp_sec->mutex); } return ret; } static int ib_security_pkey_access(struct ib_device *dev, u32 port_num, u16 pkey_index, void *sec) { u64 subnet_prefix; u16 pkey; int ret; if (!rdma_protocol_ib(dev, port_num)) return 0; ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey); if (ret) return ret; ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix); return security_ib_pkey_access(sec, subnet_prefix, pkey); } void ib_mad_agent_security_change(void) { struct ib_mad_agent *ag; spin_lock(&mad_agent_list_lock); list_for_each_entry(ag, &mad_agent_list, mad_agent_sec_list) WRITE_ONCE(ag->smp_allowed, !security_ib_endport_manage_subnet(ag->security, dev_name(&ag->device->dev), ag->port_num)); spin_unlock(&mad_agent_list_lock); } int ib_mad_agent_security_setup(struct ib_mad_agent *agent, enum ib_qp_type qp_type) { int ret; if (!rdma_protocol_ib(agent->device, agent->port_num)) return 0; INIT_LIST_HEAD(&agent->mad_agent_sec_list); ret = security_ib_alloc_security(&agent->security); if (ret) return ret; if (qp_type != IB_QPT_SMI) return 0; spin_lock(&mad_agent_list_lock); ret = security_ib_endport_manage_subnet(agent->security, dev_name(&agent->device->dev), agent->port_num); if (ret) goto free_security; WRITE_ONCE(agent->smp_allowed, true); list_add(&agent->mad_agent_sec_list, &mad_agent_list); spin_unlock(&mad_agent_list_lock); return 0; free_security: spin_unlock(&mad_agent_list_lock); security_ib_free_security(agent->security); return ret; } void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) { if (!rdma_protocol_ib(agent->device, agent->port_num)) return; if (agent->qp->qp_type == IB_QPT_SMI) { spin_lock(&mad_agent_list_lock); list_del(&agent->mad_agent_sec_list); spin_unlock(&mad_agent_list_lock); } security_ib_free_security(agent->security); } int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) { if (!rdma_protocol_ib(map->agent.device, map->agent.port_num)) return 0; if (map->agent.qp->qp_type == IB_QPT_SMI) { if (!READ_ONCE(map->agent.smp_allowed)) return -EACCES; return 0; } return ib_security_pkey_access(map->agent.device, map->agent.port_num, pkey_index, map->agent.security); }
linux-master
drivers/infiniband/core/security.c
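get_new_pps() in security.c above derives a QP's prospective port/pkey settings by taking the fields named in qp_attr_mask from the new attributes and inheriting the rest from the QP's existing ports_pkeys. Below is a minimal standalone sketch of that merge pattern only; the SK_* names, mask bits and struct are hypothetical stand-ins, not the real IB_QP_* flags or ib_ports_pkeys layout.

/* Userspace sketch of a mask-driven merge: explicitly supplied fields win,
 * everything else is inherited from the current settings (if any).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SK_MASK_PORT	(1u << 0)
#define SK_MASK_PKEY	(1u << 1)

struct sk_pps {
	uint8_t port_num;
	uint16_t pkey_index;
	bool valid;
};

static struct sk_pps sk_merge(const struct sk_pps *cur, uint8_t new_port,
			      uint16_t new_pkey, uint32_t mask)
{
	struct sk_pps out = { 0 };

	out.port_num = (mask & SK_MASK_PORT) ? new_port :
		       (cur ? cur->port_num : 0);
	out.pkey_index = (mask & SK_MASK_PKEY) ? new_pkey :
			 (cur ? cur->pkey_index : 0);
	/* Considered valid once both fields have been supplied at least
	 * once, loosely mirroring how IB_PORT_PKEY_VALID is derived.
	 */
	out.valid = ((mask & (SK_MASK_PORT | SK_MASK_PKEY)) ==
		     (SK_MASK_PORT | SK_MASK_PKEY)) || (cur && cur->valid);
	return out;
}

int main(void)
{
	struct sk_pps cur = { .port_num = 1, .pkey_index = 3, .valid = true };
	struct sk_pps next = sk_merge(&cur, 2, 0, SK_MASK_PORT);

	printf("port=%u pkey=%u valid=%d\n", next.port_num, next.pkey_index,
	       next.valid);
	return 0;
}

In the real flow the merged settings are first inserted into the per-pkey lists and checked by check_qp_port_pkey_settings() before the driver's modify_qp() runs, so a concurrent cache change can still force the QP to the error state.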
/* * Copyright (c) 2017 Mellanox Technologies Inc. All rights reserved. * Copyright (c) 2010 Voltaire Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ #include <linux/export.h> #include <net/netlink.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/sock.h> #include <rdma/rdma_netlink.h> #include <linux/module.h> #include "core_priv.h" static struct { const struct rdma_nl_cbs *cb_table; /* Synchronizes between ongoing netlink commands and netlink client * unregistration. */ struct rw_semaphore sem; } rdma_nl_types[RDMA_NL_NUM_CLIENTS]; bool rdma_nl_chk_listeners(unsigned int group) { struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net); return netlink_has_listeners(rnet->nl_sock, group); } EXPORT_SYMBOL(rdma_nl_chk_listeners); static bool is_nl_msg_valid(unsigned int type, unsigned int op) { static const unsigned int max_num_ops[RDMA_NL_NUM_CLIENTS] = { [RDMA_NL_IWCM] = RDMA_NL_IWPM_NUM_OPS, [RDMA_NL_LS] = RDMA_NL_LS_NUM_OPS, [RDMA_NL_NLDEV] = RDMA_NLDEV_NUM_OPS, }; /* * This BUILD_BUG_ON is intended to catch addition of new * RDMA netlink protocol without updating the array above. */ BUILD_BUG_ON(RDMA_NL_NUM_CLIENTS != 6); if (type >= RDMA_NL_NUM_CLIENTS) return false; return op < max_num_ops[type]; } static const struct rdma_nl_cbs * get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op) { const struct rdma_nl_cbs *cb_table; /* * Currently only NLDEV client is supporting netlink commands in * non init_net net namespace. */ if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV) return NULL; cb_table = READ_ONCE(rdma_nl_types[type].cb_table); if (!cb_table) { /* * Didn't get valid reference of the table, attempt module * load once. 
*/ up_read(&rdma_nl_types[type].sem); request_module("rdma-netlink-subsys-%u", type); down_read(&rdma_nl_types[type].sem); cb_table = READ_ONCE(rdma_nl_types[type].cb_table); } if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit)) return NULL; return cb_table; } void rdma_nl_register(unsigned int index, const struct rdma_nl_cbs cb_table[]) { if (WARN_ON(!is_nl_msg_valid(index, 0)) || WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table))) return; /* Pairs with the READ_ONCE in is_nl_valid() */ smp_store_release(&rdma_nl_types[index].cb_table, cb_table); } EXPORT_SYMBOL(rdma_nl_register); void rdma_nl_unregister(unsigned int index) { down_write(&rdma_nl_types[index].sem); rdma_nl_types[index].cb_table = NULL; up_write(&rdma_nl_types[index].sem); } EXPORT_SYMBOL(rdma_nl_unregister); void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, int len, int client, int op, int flags) { *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), len, flags); if (!*nlh) return NULL; return nlmsg_data(*nlh); } EXPORT_SYMBOL(ibnl_put_msg); int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, int len, void *data, int type) { if (nla_put(skb, type, len, data)) { nlmsg_cancel(skb, nlh); return -EMSGSIZE; } return 0; } EXPORT_SYMBOL(ibnl_put_attr); static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { int type = nlh->nlmsg_type; unsigned int index = RDMA_NL_GET_CLIENT(type); unsigned int op = RDMA_NL_GET_OP(type); const struct rdma_nl_cbs *cb_table; int err = -EINVAL; if (!is_nl_msg_valid(index, op)) return -EINVAL; down_read(&rdma_nl_types[index].sem); cb_table = get_cb_table(skb, index, op); if (!cb_table) goto done; if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) && !netlink_capable(skb, CAP_NET_ADMIN)) { err = -EPERM; goto done; } /* * LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't * mistakenly call the .dump() function. */ if (index == RDMA_NL_LS) { if (cb_table[op].doit) err = cb_table[op].doit(skb, nlh, extack); goto done; } /* FIXME: Convert IWCM to properly handle doit callbacks */ if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) { struct netlink_dump_control c = { .dump = cb_table[op].dump, }; if (c.dump) err = netlink_dump_start(skb->sk, skb, nlh, &c); goto done; } if (cb_table[op].doit) err = cb_table[op].doit(skb, nlh, extack); done: up_read(&rdma_nl_types[index].sem); return err; } /* * This function is similar to netlink_rcv_skb with one exception: * It calls to the callback for the netlink messages without NLM_F_REQUEST * flag. These messages are intended for RDMA_NL_LS consumer, so it is allowed * for that consumer only. */ static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *)) { struct netlink_ext_ack extack = {}; struct nlmsghdr *nlh; int err; while (skb->len >= nlmsg_total_size(0)) { int msglen; nlh = nlmsg_hdr(skb); err = 0; if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len) return 0; /* * Generally speaking, the only requests are handled * by the kernel, but RDMA_NL_LS is different, because it * runs backward netlink scheme. Kernel initiates messages * and waits for reply with data to keep pathrecord cache * in sync. 
*/ if (!(nlh->nlmsg_flags & NLM_F_REQUEST) && (RDMA_NL_GET_CLIENT(nlh->nlmsg_type) != RDMA_NL_LS)) goto ack; /* Skip control messages */ if (nlh->nlmsg_type < NLMSG_MIN_TYPE) goto ack; err = cb(skb, nlh, &extack); if (err == -EINTR) goto skip; ack: if (nlh->nlmsg_flags & NLM_F_ACK || err) netlink_ack(skb, nlh, err, &extack); skip: msglen = NLMSG_ALIGN(nlh->nlmsg_len); if (msglen > skb->len) msglen = skb->len; skb_pull(skb, msglen); } return 0; } static void rdma_nl_rcv(struct sk_buff *skb) { rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg); } int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid) { struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); int err; err = netlink_unicast(rnet->nl_sock, skb, pid, MSG_DONTWAIT); return (err < 0) ? err : 0; } EXPORT_SYMBOL(rdma_nl_unicast); int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid) { struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); int err; err = netlink_unicast(rnet->nl_sock, skb, pid, 0); return (err < 0) ? err : 0; } EXPORT_SYMBOL(rdma_nl_unicast_wait); int rdma_nl_multicast(struct net *net, struct sk_buff *skb, unsigned int group, gfp_t flags) { struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); return nlmsg_multicast(rnet->nl_sock, skb, 0, group, flags); } EXPORT_SYMBOL(rdma_nl_multicast); void rdma_nl_init(void) { int idx; for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++) init_rwsem(&rdma_nl_types[idx].sem); } void rdma_nl_exit(void) { int idx; for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++) WARN(rdma_nl_types[idx].cb_table, "Netlink client %d wasn't released prior to unloading %s\n", idx, KBUILD_MODNAME); } int rdma_nl_net_init(struct rdma_dev_net *rnet) { struct net *net = read_pnet(&rnet->net); struct netlink_kernel_cfg cfg = { .input = rdma_nl_rcv, }; struct sock *nls; nls = netlink_kernel_create(net, NETLINK_RDMA, &cfg); if (!nls) return -ENOMEM; nls->sk_sndtimeo = 10 * HZ; rnet->nl_sock = nls; return 0; } void rdma_nl_net_exit(struct rdma_dev_net *rnet) { netlink_kernel_release(rnet->nl_sock); } MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);
linux-master
drivers/infiniband/core/netlink.c
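rdma_nl_rcv_msg() in netlink.c above splits nlmsg_type into a client index and an op index (RDMA_NL_GET_CLIENT()/RDMA_NL_GET_OP()) before indexing the per-client callback table. The standalone sketch below shows that kind of type encoding in isolation; the 10-bit split and sk_* names are assumptions for illustration, not the uapi definition.

/* Userspace sketch of encoding (client, op) into one message type value and
 * decoding it again, as the RDMA netlink dispatch relies on.
 */
#include <stdio.h>

#define SK_OP_BITS	10u

static unsigned int sk_get_type(unsigned int client, unsigned int op)
{
	return (client << SK_OP_BITS) | op;
}

static unsigned int sk_get_client(unsigned int type)
{
	return type >> SK_OP_BITS;
}

static unsigned int sk_get_op(unsigned int type)
{
	return type & ((1u << SK_OP_BITS) - 1);
}

int main(void)
{
	unsigned int type = sk_get_type(5, 3);

	printf("type=0x%x client=%u op=%u\n", type, sk_get_client(type),
	       sk_get_op(type));
	return 0;
}

After decoding, the real handler still validates the pair with is_nl_msg_valid(), looks up the client's callback table, and rejects privileged ops with -EPERM when RDMA_NL_ADMIN_PERM is set but netlink_capable(skb, CAP_NET_ADMIN) fails.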
/* * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2004 Infinicon Corporation. All rights reserved. * Copyright (c) 2004 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/errno.h> #include <linux/err.h> #include <linux/export.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/in.h> #include <linux/in6.h> #include <net/addrconf.h> #include <linux/security.h> #include <rdma/ib_verbs.h> #include <rdma/ib_cache.h> #include <rdma/ib_addr.h> #include <rdma/rw.h> #include <rdma/lag.h> #include "core_priv.h" #include <trace/events/rdma_core.h> static int ib_resolve_eth_dmac(struct ib_device *device, struct rdma_ah_attr *ah_attr); static const char * const ib_events[] = { [IB_EVENT_CQ_ERR] = "CQ error", [IB_EVENT_QP_FATAL] = "QP fatal error", [IB_EVENT_QP_REQ_ERR] = "QP request error", [IB_EVENT_QP_ACCESS_ERR] = "QP access error", [IB_EVENT_COMM_EST] = "communication established", [IB_EVENT_SQ_DRAINED] = "send queue drained", [IB_EVENT_PATH_MIG] = "path migration successful", [IB_EVENT_PATH_MIG_ERR] = "path migration error", [IB_EVENT_DEVICE_FATAL] = "device fatal error", [IB_EVENT_PORT_ACTIVE] = "port active", [IB_EVENT_PORT_ERR] = "port error", [IB_EVENT_LID_CHANGE] = "LID change", [IB_EVENT_PKEY_CHANGE] = "P_key change", [IB_EVENT_SM_CHANGE] = "SM change", [IB_EVENT_SRQ_ERR] = "SRQ error", [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached", [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached", [IB_EVENT_CLIENT_REREGISTER] = "client reregister", [IB_EVENT_GID_CHANGE] = "GID changed", }; const char *__attribute_const__ ib_event_msg(enum ib_event_type event) { size_t index = event; return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ? 
ib_events[index] : "unrecognized event"; } EXPORT_SYMBOL(ib_event_msg); static const char * const wc_statuses[] = { [IB_WC_SUCCESS] = "success", [IB_WC_LOC_LEN_ERR] = "local length error", [IB_WC_LOC_QP_OP_ERR] = "local QP operation error", [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error", [IB_WC_LOC_PROT_ERR] = "local protection error", [IB_WC_WR_FLUSH_ERR] = "WR flushed", [IB_WC_MW_BIND_ERR] = "memory bind operation error", [IB_WC_BAD_RESP_ERR] = "bad response error", [IB_WC_LOC_ACCESS_ERR] = "local access error", [IB_WC_REM_INV_REQ_ERR] = "remote invalid request error", [IB_WC_REM_ACCESS_ERR] = "remote access error", [IB_WC_REM_OP_ERR] = "remote operation error", [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded", [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded", [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error", [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request", [IB_WC_REM_ABORT_ERR] = "operation aborted", [IB_WC_INV_EECN_ERR] = "invalid EE context number", [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state", [IB_WC_FATAL_ERR] = "fatal error", [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error", [IB_WC_GENERAL_ERR] = "general error", }; const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status) { size_t index = status; return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ? wc_statuses[index] : "unrecognized status"; } EXPORT_SYMBOL(ib_wc_status_msg); __attribute_const__ int ib_rate_to_mult(enum ib_rate rate) { switch (rate) { case IB_RATE_2_5_GBPS: return 1; case IB_RATE_5_GBPS: return 2; case IB_RATE_10_GBPS: return 4; case IB_RATE_20_GBPS: return 8; case IB_RATE_30_GBPS: return 12; case IB_RATE_40_GBPS: return 16; case IB_RATE_60_GBPS: return 24; case IB_RATE_80_GBPS: return 32; case IB_RATE_120_GBPS: return 48; case IB_RATE_14_GBPS: return 6; case IB_RATE_56_GBPS: return 22; case IB_RATE_112_GBPS: return 45; case IB_RATE_168_GBPS: return 67; case IB_RATE_25_GBPS: return 10; case IB_RATE_100_GBPS: return 40; case IB_RATE_200_GBPS: return 80; case IB_RATE_300_GBPS: return 120; case IB_RATE_28_GBPS: return 11; case IB_RATE_50_GBPS: return 20; case IB_RATE_400_GBPS: return 160; case IB_RATE_600_GBPS: return 240; default: return -1; } } EXPORT_SYMBOL(ib_rate_to_mult); __attribute_const__ enum ib_rate mult_to_ib_rate(int mult) { switch (mult) { case 1: return IB_RATE_2_5_GBPS; case 2: return IB_RATE_5_GBPS; case 4: return IB_RATE_10_GBPS; case 8: return IB_RATE_20_GBPS; case 12: return IB_RATE_30_GBPS; case 16: return IB_RATE_40_GBPS; case 24: return IB_RATE_60_GBPS; case 32: return IB_RATE_80_GBPS; case 48: return IB_RATE_120_GBPS; case 6: return IB_RATE_14_GBPS; case 22: return IB_RATE_56_GBPS; case 45: return IB_RATE_112_GBPS; case 67: return IB_RATE_168_GBPS; case 10: return IB_RATE_25_GBPS; case 40: return IB_RATE_100_GBPS; case 80: return IB_RATE_200_GBPS; case 120: return IB_RATE_300_GBPS; case 11: return IB_RATE_28_GBPS; case 20: return IB_RATE_50_GBPS; case 160: return IB_RATE_400_GBPS; case 240: return IB_RATE_600_GBPS; default: return IB_RATE_PORT_CURRENT; } } EXPORT_SYMBOL(mult_to_ib_rate); __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate) { switch (rate) { case IB_RATE_2_5_GBPS: return 2500; case IB_RATE_5_GBPS: return 5000; case IB_RATE_10_GBPS: return 10000; case IB_RATE_20_GBPS: return 20000; case IB_RATE_30_GBPS: return 30000; case IB_RATE_40_GBPS: return 40000; case IB_RATE_60_GBPS: return 60000; case IB_RATE_80_GBPS: return 80000; case IB_RATE_120_GBPS: return 120000; case IB_RATE_14_GBPS: 
return 14062; case IB_RATE_56_GBPS: return 56250; case IB_RATE_112_GBPS: return 112500; case IB_RATE_168_GBPS: return 168750; case IB_RATE_25_GBPS: return 25781; case IB_RATE_100_GBPS: return 103125; case IB_RATE_200_GBPS: return 206250; case IB_RATE_300_GBPS: return 309375; case IB_RATE_28_GBPS: return 28125; case IB_RATE_50_GBPS: return 53125; case IB_RATE_400_GBPS: return 425000; case IB_RATE_600_GBPS: return 637500; default: return -1; } } EXPORT_SYMBOL(ib_rate_to_mbps); __attribute_const__ enum rdma_transport_type rdma_node_get_transport(unsigned int node_type) { if (node_type == RDMA_NODE_USNIC) return RDMA_TRANSPORT_USNIC; if (node_type == RDMA_NODE_USNIC_UDP) return RDMA_TRANSPORT_USNIC_UDP; if (node_type == RDMA_NODE_RNIC) return RDMA_TRANSPORT_IWARP; if (node_type == RDMA_NODE_UNSPECIFIED) return RDMA_TRANSPORT_UNSPECIFIED; return RDMA_TRANSPORT_IB; } EXPORT_SYMBOL(rdma_node_get_transport); enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u32 port_num) { enum rdma_transport_type lt; if (device->ops.get_link_layer) return device->ops.get_link_layer(device, port_num); lt = rdma_node_get_transport(device->node_type); if (lt == RDMA_TRANSPORT_IB) return IB_LINK_LAYER_INFINIBAND; return IB_LINK_LAYER_ETHERNET; } EXPORT_SYMBOL(rdma_port_get_link_layer); /* Protection domains */ /** * __ib_alloc_pd - Allocates an unused protection domain. * @device: The device on which to allocate the protection domain. * @flags: protection domain flags * @caller: caller's build-time module name * * A protection domain object provides an association between QPs, shared * receive queues, address handles, memory regions, and memory windows. * * Every PD has a local_dma_lkey which can be used as the lkey value for local * memory operations. */ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, const char *caller) { struct ib_pd *pd; int mr_access_flags = 0; int ret; pd = rdma_zalloc_drv_obj(device, ib_pd); if (!pd) return ERR_PTR(-ENOMEM); pd->device = device; pd->flags = flags; rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD); rdma_restrack_set_name(&pd->res, caller); ret = device->ops.alloc_pd(pd, NULL); if (ret) { rdma_restrack_put(&pd->res); kfree(pd); return ERR_PTR(ret); } rdma_restrack_add(&pd->res); if (device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY) pd->local_dma_lkey = device->local_dma_lkey; else mr_access_flags |= IB_ACCESS_LOCAL_WRITE; if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) { pr_warn("%s: enabling unsafe global rkey\n", caller); mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE; } if (mr_access_flags) { struct ib_mr *mr; mr = pd->device->ops.get_dma_mr(pd, mr_access_flags); if (IS_ERR(mr)) { ib_dealloc_pd(pd); return ERR_CAST(mr); } mr->device = pd->device; mr->pd = pd; mr->type = IB_MR_TYPE_DMA; mr->uobject = NULL; mr->need_inval = false; pd->__internal_mr = mr; if (!(device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY)) pd->local_dma_lkey = pd->__internal_mr->lkey; if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) pd->unsafe_global_rkey = pd->__internal_mr->rkey; } return pd; } EXPORT_SYMBOL(__ib_alloc_pd); /** * ib_dealloc_pd_user - Deallocates a protection domain. * @pd: The protection domain to deallocate. * @udata: Valid user data or NULL for kernel object * * It is an error to call this function while any resources in the pd still * exist. The caller is responsible to synchronously destroy them and * guarantee no new allocations will happen. 
*/ int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata) { int ret; if (pd->__internal_mr) { ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL); WARN_ON(ret); pd->__internal_mr = NULL; } ret = pd->device->ops.dealloc_pd(pd, udata); if (ret) return ret; rdma_restrack_del(&pd->res); kfree(pd); return ret; } EXPORT_SYMBOL(ib_dealloc_pd_user); /* Address handles */ /** * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination. * @dest: Pointer to destination ah_attr. Contents of the destination * pointer is assumed to be invalid and attribute are overwritten. * @src: Pointer to source ah_attr. */ void rdma_copy_ah_attr(struct rdma_ah_attr *dest, const struct rdma_ah_attr *src) { *dest = *src; if (dest->grh.sgid_attr) rdma_hold_gid_attr(dest->grh.sgid_attr); } EXPORT_SYMBOL(rdma_copy_ah_attr); /** * rdma_replace_ah_attr - Replace valid ah_attr with new new one. * @old: Pointer to existing ah_attr which needs to be replaced. * old is assumed to be valid or zero'd * @new: Pointer to the new ah_attr. * * rdma_replace_ah_attr() first releases any reference in the old ah_attr if * old the ah_attr is valid; after that it copies the new attribute and holds * the reference to the replaced ah_attr. */ void rdma_replace_ah_attr(struct rdma_ah_attr *old, const struct rdma_ah_attr *new) { rdma_destroy_ah_attr(old); *old = *new; if (old->grh.sgid_attr) rdma_hold_gid_attr(old->grh.sgid_attr); } EXPORT_SYMBOL(rdma_replace_ah_attr); /** * rdma_move_ah_attr - Move ah_attr pointed by source to destination. * @dest: Pointer to destination ah_attr to copy to. * dest is assumed to be valid or zero'd * @src: Pointer to the new ah_attr. * * rdma_move_ah_attr() first releases any reference in the destination ah_attr * if it is valid. This also transfers ownership of internal references from * src to dest, making src invalid in the process. No new reference of the src * ah_attr is taken. */ void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src) { rdma_destroy_ah_attr(dest); *dest = *src; src->grh.sgid_attr = NULL; } EXPORT_SYMBOL(rdma_move_ah_attr); /* * Validate that the rdma_ah_attr is valid for the device before passing it * off to the driver. */ static int rdma_check_ah_attr(struct ib_device *device, struct rdma_ah_attr *ah_attr) { if (!rdma_is_port_valid(device, ah_attr->port_num)) return -EINVAL; if ((rdma_is_grh_required(device, ah_attr->port_num) || ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) && !(ah_attr->ah_flags & IB_AH_GRH)) return -EINVAL; if (ah_attr->grh.sgid_attr) { /* * Make sure the passed sgid_attr is consistent with the * parameters */ if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index || ah_attr->grh.sgid_attr->port_num != ah_attr->port_num) return -EINVAL; } return 0; } /* * If the ah requires a GRH then ensure that sgid_attr pointer is filled in. * On success the caller is responsible to call rdma_unfill_sgid_attr(). 
*/ static int rdma_fill_sgid_attr(struct ib_device *device, struct rdma_ah_attr *ah_attr, const struct ib_gid_attr **old_sgid_attr) { const struct ib_gid_attr *sgid_attr; struct ib_global_route *grh; int ret; *old_sgid_attr = ah_attr->grh.sgid_attr; ret = rdma_check_ah_attr(device, ah_attr); if (ret) return ret; if (!(ah_attr->ah_flags & IB_AH_GRH)) return 0; grh = rdma_ah_retrieve_grh(ah_attr); if (grh->sgid_attr) return 0; sgid_attr = rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index); if (IS_ERR(sgid_attr)) return PTR_ERR(sgid_attr); /* Move ownerhip of the kref into the ah_attr */ grh->sgid_attr = sgid_attr; return 0; } static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr, const struct ib_gid_attr *old_sgid_attr) { /* * Fill didn't change anything, the caller retains ownership of * whatever it passed */ if (ah_attr->grh.sgid_attr == old_sgid_attr) return; /* * Otherwise, we need to undo what rdma_fill_sgid_attr so the caller * doesn't see any change in the rdma_ah_attr. If we get here * old_sgid_attr is NULL. */ rdma_destroy_ah_attr(ah_attr); } static const struct ib_gid_attr * rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr, const struct ib_gid_attr *old_attr) { if (old_attr) rdma_put_gid_attr(old_attr); if (ah_attr->ah_flags & IB_AH_GRH) { rdma_hold_gid_attr(ah_attr->grh.sgid_attr); return ah_attr->grh.sgid_attr; } return NULL; } static struct ib_ah *_rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, u32 flags, struct ib_udata *udata, struct net_device *xmit_slave) { struct rdma_ah_init_attr init_attr = {}; struct ib_device *device = pd->device; struct ib_ah *ah; int ret; might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE); if (!udata && !device->ops.create_ah) return ERR_PTR(-EOPNOTSUPP); ah = rdma_zalloc_drv_obj_gfp( device, ib_ah, (flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC); if (!ah) return ERR_PTR(-ENOMEM); ah->device = device; ah->pd = pd; ah->type = ah_attr->type; ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL); init_attr.ah_attr = ah_attr; init_attr.flags = flags; init_attr.xmit_slave = xmit_slave; if (udata) ret = device->ops.create_user_ah(ah, &init_attr, udata); else ret = device->ops.create_ah(ah, &init_attr, NULL); if (ret) { if (ah->sgid_attr) rdma_put_gid_attr(ah->sgid_attr); kfree(ah); return ERR_PTR(ret); } atomic_inc(&pd->usecnt); return ah; } /** * rdma_create_ah - Creates an address handle for the * given address vector. * @pd: The protection domain associated with the address handle. * @ah_attr: The attributes of the address vector. * @flags: Create address handle flags (see enum rdma_create_ah_flags). * * It returns 0 on success and returns appropriate error code on error. * The address handle is used to reference a local or global destination * in all UD QP post sends. */ struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, u32 flags) { const struct ib_gid_attr *old_sgid_attr; struct net_device *slave; struct ib_ah *ah; int ret; ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr); if (ret) return ERR_PTR(ret); slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr, (flags & RDMA_CREATE_AH_SLEEPABLE) ? 
GFP_KERNEL : GFP_ATOMIC); if (IS_ERR(slave)) { rdma_unfill_sgid_attr(ah_attr, old_sgid_attr); return (void *)slave; } ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave); rdma_lag_put_ah_roce_slave(slave); rdma_unfill_sgid_attr(ah_attr, old_sgid_attr); return ah; } EXPORT_SYMBOL(rdma_create_ah); /** * rdma_create_user_ah - Creates an address handle for the * given address vector. * It resolves destination mac address for ah attribute of RoCE type. * @pd: The protection domain associated with the address handle. * @ah_attr: The attributes of the address vector. * @udata: pointer to user's input output buffer information need by * provider driver. * * It returns 0 on success and returns appropriate error code on error. * The address handle is used to reference a local or global destination * in all UD QP post sends. */ struct ib_ah *rdma_create_user_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, struct ib_udata *udata) { const struct ib_gid_attr *old_sgid_attr; struct ib_ah *ah; int err; err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr); if (err) return ERR_PTR(err); if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { err = ib_resolve_eth_dmac(pd->device, ah_attr); if (err) { ah = ERR_PTR(err); goto out; } } ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE, udata, NULL); out: rdma_unfill_sgid_attr(ah_attr, old_sgid_attr); return ah; } EXPORT_SYMBOL(rdma_create_user_ah); int ib_get_rdma_header_version(const union rdma_network_hdr *hdr) { const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh; struct iphdr ip4h_checked; const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh; /* If it's IPv6, the version must be 6, otherwise, the first * 20 bytes (before the IPv4 header) are garbled. */ if (ip6h->version != 6) return (ip4h->version == 4) ? 4 : 0; /* version may be 6 or 4 because the first 20 bytes could be garbled */ /* RoCE v2 requires no options, thus header length * must be 5 words */ if (ip4h->ihl != 5) return 6; /* Verify checksum. * We can't write on scattered buffers so we need to copy to * temp buffer. 
*/ memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked)); ip4h_checked.check = 0; ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5); /* if IPv4 header checksum is OK, believe it */ if (ip4h->check == ip4h_checked.check) return 4; return 6; } EXPORT_SYMBOL(ib_get_rdma_header_version); static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device, u32 port_num, const struct ib_grh *grh) { int grh_version; if (rdma_protocol_ib(device, port_num)) return RDMA_NETWORK_IB; grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh); if (grh_version == 4) return RDMA_NETWORK_IPV4; if (grh->next_hdr == IPPROTO_UDP) return RDMA_NETWORK_IPV6; return RDMA_NETWORK_ROCE_V1; } struct find_gid_index_context { u16 vlan_id; enum ib_gid_type gid_type; }; static bool find_gid_index(const union ib_gid *gid, const struct ib_gid_attr *gid_attr, void *context) { struct find_gid_index_context *ctx = context; u16 vlan_id = 0xffff; int ret; if (ctx->gid_type != gid_attr->gid_type) return false; ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL); if (ret) return false; return ctx->vlan_id == vlan_id; } static const struct ib_gid_attr * get_sgid_attr_from_eth(struct ib_device *device, u32 port_num, u16 vlan_id, const union ib_gid *sgid, enum ib_gid_type gid_type) { struct find_gid_index_context context = {.vlan_id = vlan_id, .gid_type = gid_type}; return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index, &context); } int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, enum rdma_network_type net_type, union ib_gid *sgid, union ib_gid *dgid) { struct sockaddr_in src_in; struct sockaddr_in dst_in; __be32 src_saddr, dst_saddr; if (!sgid || !dgid) return -EINVAL; if (net_type == RDMA_NETWORK_IPV4) { memcpy(&src_in.sin_addr.s_addr, &hdr->roce4grh.saddr, 4); memcpy(&dst_in.sin_addr.s_addr, &hdr->roce4grh.daddr, 4); src_saddr = src_in.sin_addr.s_addr; dst_saddr = dst_in.sin_addr.s_addr; ipv6_addr_set_v4mapped(src_saddr, (struct in6_addr *)sgid); ipv6_addr_set_v4mapped(dst_saddr, (struct in6_addr *)dgid); return 0; } else if (net_type == RDMA_NETWORK_IPV6 || net_type == RDMA_NETWORK_IB || RDMA_NETWORK_ROCE_V1) { *dgid = hdr->ibgrh.dgid; *sgid = hdr->ibgrh.sgid; return 0; } else { return -EINVAL; } } EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr); /* Resolve destination mac address and hop limit for unicast destination * GID entry, considering the source GID entry as well. * ah_attribute must have have valid port_num, sgid_index. */ static int ib_resolve_unicast_gid_dmac(struct ib_device *device, struct rdma_ah_attr *ah_attr) { struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr); const struct ib_gid_attr *sgid_attr = grh->sgid_attr; int hop_limit = 0xff; int ret = 0; /* If destination is link local and source GID is RoCEv1, * IP stack is not used. */ if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) && sgid_attr->gid_type == IB_GID_TYPE_ROCE) { rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw, ah_attr->roce.dmac); return ret; } ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid, ah_attr->roce.dmac, sgid_attr, &hop_limit); grh->hop_limit = hop_limit; return ret; } /* * This function initializes address handle attributes from the incoming packet. * Incoming packet has dgid of the receiver node on which this code is * getting executed and, sgid contains the GID of the sender. * * When resolving mac address of destination, the arrived dgid is used * as sgid and, sgid is used as dgid because sgid contains destinations * GID whom to respond to. 
* * On success the caller is responsible to call rdma_destroy_ah_attr on the * attr. */ int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num, const struct ib_wc *wc, const struct ib_grh *grh, struct rdma_ah_attr *ah_attr) { u32 flow_class; int ret; enum rdma_network_type net_type = RDMA_NETWORK_IB; enum ib_gid_type gid_type = IB_GID_TYPE_IB; const struct ib_gid_attr *sgid_attr; int hoplimit = 0xff; union ib_gid dgid; union ib_gid sgid; might_sleep(); memset(ah_attr, 0, sizeof *ah_attr); ah_attr->type = rdma_ah_find_type(device, port_num); if (rdma_cap_eth_ah(device, port_num)) { if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE) net_type = wc->network_hdr_type; else net_type = ib_get_net_type_by_grh(device, port_num, grh); gid_type = ib_network_to_gid_type(net_type); } ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type, &sgid, &dgid); if (ret) return ret; rdma_ah_set_sl(ah_attr, wc->sl); rdma_ah_set_port_num(ah_attr, port_num); if (rdma_protocol_roce(device, port_num)) { u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ? wc->vlan_id : 0xffff; if (!(wc->wc_flags & IB_WC_GRH)) return -EPROTOTYPE; sgid_attr = get_sgid_attr_from_eth(device, port_num, vlan_id, &dgid, gid_type); if (IS_ERR(sgid_attr)) return PTR_ERR(sgid_attr); flow_class = be32_to_cpu(grh->version_tclass_flow); rdma_move_grh_sgid_attr(ah_attr, &sgid, flow_class & 0xFFFFF, hoplimit, (flow_class >> 20) & 0xFF, sgid_attr); ret = ib_resolve_unicast_gid_dmac(device, ah_attr); if (ret) rdma_destroy_ah_attr(ah_attr); return ret; } else { rdma_ah_set_dlid(ah_attr, wc->slid); rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits); if ((wc->wc_flags & IB_WC_GRH) == 0) return 0; if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) { sgid_attr = rdma_find_gid_by_port( device, &dgid, IB_GID_TYPE_IB, port_num, NULL); } else sgid_attr = rdma_get_gid_attr(device, port_num, 0); if (IS_ERR(sgid_attr)) return PTR_ERR(sgid_attr); flow_class = be32_to_cpu(grh->version_tclass_flow); rdma_move_grh_sgid_attr(ah_attr, &sgid, flow_class & 0xFFFFF, hoplimit, (flow_class >> 20) & 0xFF, sgid_attr); return 0; } } EXPORT_SYMBOL(ib_init_ah_attr_from_wc); /** * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership * of the reference * * @attr: Pointer to AH attribute structure * @dgid: Destination GID * @flow_label: Flow label * @hop_limit: Hop limit * @traffic_class: traffic class * @sgid_attr: Pointer to SGID attribute * * This takes ownership of the sgid_attr reference. The caller must ensure * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after * calling this function. */ void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid, u32 flow_label, u8 hop_limit, u8 traffic_class, const struct ib_gid_attr *sgid_attr) { rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit, traffic_class); attr->grh.sgid_attr = sgid_attr; } EXPORT_SYMBOL(rdma_move_grh_sgid_attr); /** * rdma_destroy_ah_attr - Release reference to SGID attribute of * ah attribute. * @ah_attr: Pointer to ah attribute * * Release reference to the SGID attribute of the ah attribute if it is * non NULL. It is safe to call this multiple times, and safe to call it on * a zero initialized ah_attr. 
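 *
 * For example (illustrative only), the following sequence is well defined:
 *
 *	struct rdma_ah_attr attr = {};
 *
 *	rdma_destroy_ah_attr(&attr);	// no-op, grh.sgid_attr is NULL
 *	rdma_destroy_ah_attr(&attr);	// still safe, already cleared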
*/ void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr) { if (ah_attr->grh.sgid_attr) { rdma_put_gid_attr(ah_attr->grh.sgid_attr); ah_attr->grh.sgid_attr = NULL; } } EXPORT_SYMBOL(rdma_destroy_ah_attr); struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, const struct ib_grh *grh, u32 port_num) { struct rdma_ah_attr ah_attr; struct ib_ah *ah; int ret; ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr); if (ret) return ERR_PTR(ret); ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE); rdma_destroy_ah_attr(&ah_attr); return ah; } EXPORT_SYMBOL(ib_create_ah_from_wc); int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr) { const struct ib_gid_attr *old_sgid_attr; int ret; if (ah->type != ah_attr->type) return -EINVAL; ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr); if (ret) return ret; ret = ah->device->ops.modify_ah ? ah->device->ops.modify_ah(ah, ah_attr) : -EOPNOTSUPP; ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr); rdma_unfill_sgid_attr(ah_attr, old_sgid_attr); return ret; } EXPORT_SYMBOL(rdma_modify_ah); int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr) { ah_attr->grh.sgid_attr = NULL; return ah->device->ops.query_ah ? ah->device->ops.query_ah(ah, ah_attr) : -EOPNOTSUPP; } EXPORT_SYMBOL(rdma_query_ah); int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata) { const struct ib_gid_attr *sgid_attr = ah->sgid_attr; struct ib_pd *pd; int ret; might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE); pd = ah->pd; ret = ah->device->ops.destroy_ah(ah, flags); if (ret) return ret; atomic_dec(&pd->usecnt); if (sgid_attr) rdma_put_gid_attr(sgid_attr); kfree(ah); return ret; } EXPORT_SYMBOL(rdma_destroy_ah_user); /* Shared receive queues */ /** * ib_create_srq_user - Creates a SRQ associated with the specified protection * domain. * @pd: The protection domain associated with the SRQ. * @srq_init_attr: A list of initial attributes required to create the * SRQ. If SRQ creation succeeds, then the attributes are updated to * the actual capabilities of the created SRQ. * @uobject: uobject pointer if this is not a kernel SRQ * @udata: udata pointer if this is not a kernel SRQ * * srq_attr->max_wr and srq_attr->max_sge are read the determine the * requested size of the SRQ, and set to the actual values allocated * on return. If ib_create_srq() succeeds, then max_wr and max_sge * will always be at least as large as the requested values. 
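 *
 * Illustrative sketch (kernel callers normally use the ib_create_srq()
 * wrapper; the values below are placeholders):
 *
 *	struct ib_srq_init_attr init = {
 *		.attr		= { .max_wr = 256, .max_sge = 1 },
 *		.srq_type	= IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &init);
 *
 *	if (!IS_ERR(srq))
 *		pr_debug("allocated %u WRs\n", init.attr.max_wr);	// >= 256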
*/ struct ib_srq *ib_create_srq_user(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr, struct ib_usrq_object *uobject, struct ib_udata *udata) { struct ib_srq *srq; int ret; srq = rdma_zalloc_drv_obj(pd->device, ib_srq); if (!srq) return ERR_PTR(-ENOMEM); srq->device = pd->device; srq->pd = pd; srq->event_handler = srq_init_attr->event_handler; srq->srq_context = srq_init_attr->srq_context; srq->srq_type = srq_init_attr->srq_type; srq->uobject = uobject; if (ib_srq_has_cq(srq->srq_type)) { srq->ext.cq = srq_init_attr->ext.cq; atomic_inc(&srq->ext.cq->usecnt); } if (srq->srq_type == IB_SRQT_XRC) { srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd; if (srq->ext.xrc.xrcd) atomic_inc(&srq->ext.xrc.xrcd->usecnt); } atomic_inc(&pd->usecnt); rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ); rdma_restrack_parent_name(&srq->res, &pd->res); ret = pd->device->ops.create_srq(srq, srq_init_attr, udata); if (ret) { rdma_restrack_put(&srq->res); atomic_dec(&pd->usecnt); if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd) atomic_dec(&srq->ext.xrc.xrcd->usecnt); if (ib_srq_has_cq(srq->srq_type)) atomic_dec(&srq->ext.cq->usecnt); kfree(srq); return ERR_PTR(ret); } rdma_restrack_add(&srq->res); return srq; } EXPORT_SYMBOL(ib_create_srq_user); int ib_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr, enum ib_srq_attr_mask srq_attr_mask) { return srq->device->ops.modify_srq ? srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask, NULL) : -EOPNOTSUPP; } EXPORT_SYMBOL(ib_modify_srq); int ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr) { return srq->device->ops.query_srq ? srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP; } EXPORT_SYMBOL(ib_query_srq); int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata) { int ret; if (atomic_read(&srq->usecnt)) return -EBUSY; ret = srq->device->ops.destroy_srq(srq, udata); if (ret) return ret; atomic_dec(&srq->pd->usecnt); if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd) atomic_dec(&srq->ext.xrc.xrcd->usecnt); if (ib_srq_has_cq(srq->srq_type)) atomic_dec(&srq->ext.cq->usecnt); rdma_restrack_del(&srq->res); kfree(srq); return ret; } EXPORT_SYMBOL(ib_destroy_srq_user); /* Queue pairs */ static void __ib_shared_qp_event_handler(struct ib_event *event, void *context) { struct ib_qp *qp = context; unsigned long flags; spin_lock_irqsave(&qp->device->qp_open_list_lock, flags); list_for_each_entry(event->element.qp, &qp->open_list, open_list) if (event->element.qp->event_handler) event->element.qp->event_handler(event, event->element.qp->qp_context); spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags); } static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, void (*event_handler)(struct ib_event *, void *), void *qp_context) { struct ib_qp *qp; unsigned long flags; int err; qp = kzalloc(sizeof *qp, GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); qp->real_qp = real_qp; err = ib_open_shared_qp_security(qp, real_qp->device); if (err) { kfree(qp); return ERR_PTR(err); } qp->real_qp = real_qp; atomic_inc(&real_qp->usecnt); qp->device = real_qp->device; qp->event_handler = event_handler; qp->qp_context = qp_context; qp->qp_num = real_qp->qp_num; qp->qp_type = real_qp->qp_type; spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags); list_add(&qp->open_list, &real_qp->open_list); spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags); return qp; } struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, struct ib_qp_open_attr *qp_open_attr) { struct ib_qp *qp, *real_qp; if (qp_open_attr->qp_type != 
IB_QPT_XRC_TGT) return ERR_PTR(-EINVAL); down_read(&xrcd->tgt_qps_rwsem); real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num); if (!real_qp) { up_read(&xrcd->tgt_qps_rwsem); return ERR_PTR(-EINVAL); } qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, qp_open_attr->qp_context); up_read(&xrcd->tgt_qps_rwsem); return qp; } EXPORT_SYMBOL(ib_open_qp); static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr) { struct ib_qp *real_qp = qp; int err; qp->event_handler = __ib_shared_qp_event_handler; qp->qp_context = qp; qp->pd = NULL; qp->send_cq = qp->recv_cq = NULL; qp->srq = NULL; qp->xrcd = qp_init_attr->xrcd; atomic_inc(&qp_init_attr->xrcd->usecnt); INIT_LIST_HEAD(&qp->open_list); qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, qp_init_attr->qp_context); if (IS_ERR(qp)) return qp; err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num, real_qp, GFP_KERNEL)); if (err) { ib_close_qp(qp); return ERR_PTR(err); } return qp; } static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd, struct ib_qp_init_attr *attr, struct ib_udata *udata, struct ib_uqp_object *uobj, const char *caller) { struct ib_udata dummy = {}; struct ib_qp *qp; int ret; if (!dev->ops.create_qp) return ERR_PTR(-EOPNOTSUPP); qp = rdma_zalloc_drv_obj_numa(dev, ib_qp); if (!qp) return ERR_PTR(-ENOMEM); qp->device = dev; qp->pd = pd; qp->uobject = uobj; qp->real_qp = qp; qp->qp_type = attr->qp_type; qp->rwq_ind_tbl = attr->rwq_ind_tbl; qp->srq = attr->srq; qp->event_handler = attr->event_handler; qp->port = attr->port_num; qp->qp_context = attr->qp_context; spin_lock_init(&qp->mr_lock); INIT_LIST_HEAD(&qp->rdma_mrs); INIT_LIST_HEAD(&qp->sig_mrs); qp->send_cq = attr->send_cq; qp->recv_cq = attr->recv_cq; rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP); WARN_ONCE(!udata && !caller, "Missing kernel QP owner"); rdma_restrack_set_name(&qp->res, udata ? NULL : caller); ret = dev->ops.create_qp(qp, attr, udata); if (ret) goto err_create; /* * TODO: The mlx4 internally overwrites send_cq and recv_cq. * Unfortunately, it is not an easy task to fix that driver. */ qp->send_cq = attr->send_cq; qp->recv_cq = attr->recv_cq; ret = ib_create_qp_security(qp, dev); if (ret) goto err_security; rdma_restrack_add(&qp->res); return qp; err_security: qp->device->ops.destroy_qp(qp, udata ? &dummy : NULL); err_create: rdma_restrack_put(&qp->res); kfree(qp); return ERR_PTR(ret); } /** * ib_create_qp_user - Creates a QP associated with the specified protection * domain. * @dev: IB device * @pd: The protection domain associated with the QP. * @attr: A list of initial attributes required to create the * QP. If QP creation succeeds, then the attributes are updated to * the actual capabilities of the created QP. 
* @udata: User data * @uobj: uverbs obect * @caller: caller's build-time module name */ struct ib_qp *ib_create_qp_user(struct ib_device *dev, struct ib_pd *pd, struct ib_qp_init_attr *attr, struct ib_udata *udata, struct ib_uqp_object *uobj, const char *caller) { struct ib_qp *qp, *xrc_qp; if (attr->qp_type == IB_QPT_XRC_TGT) qp = create_qp(dev, pd, attr, NULL, NULL, caller); else qp = create_qp(dev, pd, attr, udata, uobj, NULL); if (attr->qp_type != IB_QPT_XRC_TGT || IS_ERR(qp)) return qp; xrc_qp = create_xrc_qp_user(qp, attr); if (IS_ERR(xrc_qp)) { ib_destroy_qp(qp); return xrc_qp; } xrc_qp->uobject = uobj; return xrc_qp; } EXPORT_SYMBOL(ib_create_qp_user); void ib_qp_usecnt_inc(struct ib_qp *qp) { if (qp->pd) atomic_inc(&qp->pd->usecnt); if (qp->send_cq) atomic_inc(&qp->send_cq->usecnt); if (qp->recv_cq) atomic_inc(&qp->recv_cq->usecnt); if (qp->srq) atomic_inc(&qp->srq->usecnt); if (qp->rwq_ind_tbl) atomic_inc(&qp->rwq_ind_tbl->usecnt); } EXPORT_SYMBOL(ib_qp_usecnt_inc); void ib_qp_usecnt_dec(struct ib_qp *qp) { if (qp->rwq_ind_tbl) atomic_dec(&qp->rwq_ind_tbl->usecnt); if (qp->srq) atomic_dec(&qp->srq->usecnt); if (qp->recv_cq) atomic_dec(&qp->recv_cq->usecnt); if (qp->send_cq) atomic_dec(&qp->send_cq->usecnt); if (qp->pd) atomic_dec(&qp->pd->usecnt); } EXPORT_SYMBOL(ib_qp_usecnt_dec); struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd, struct ib_qp_init_attr *qp_init_attr, const char *caller) { struct ib_device *device = pd->device; struct ib_qp *qp; int ret; /* * If the callers is using the RDMA API calculate the resources * needed for the RDMA READ/WRITE operations. * * Note that these callers need to pass in a port number. */ if (qp_init_attr->cap.max_rdma_ctxs) rdma_rw_init_qp(device, qp_init_attr); qp = create_qp(device, pd, qp_init_attr, NULL, NULL, caller); if (IS_ERR(qp)) return qp; ib_qp_usecnt_inc(qp); if (qp_init_attr->cap.max_rdma_ctxs) { ret = rdma_rw_init_mrs(qp, qp_init_attr); if (ret) goto err; } /* * Note: all hw drivers guarantee that max_send_sge is lower than * the device RDMA WRITE SGE limit but not all hw drivers ensure that * max_send_sge <= max_sge_rd. 
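 * For example (illustrative numbers only): with cap.max_send_sge == 32 and
 * device attrs.max_sge_rd == 16, the assignments below yield
 * max_write_sge == 32 and max_read_sge == 16.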
*/ qp->max_write_sge = qp_init_attr->cap.max_send_sge; qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge, device->attrs.max_sge_rd); if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) qp->integrity_en = true; return qp; err: ib_destroy_qp(qp); return ERR_PTR(ret); } EXPORT_SYMBOL(ib_create_qp_kernel); static const struct { int valid; enum ib_qp_attr_mask req_param[IB_QPT_MAX]; enum ib_qp_attr_mask opt_param[IB_QPT_MAX]; } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { [IB_QPS_RESET] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_INIT] = { .valid = 1, .req_param = { [IB_QPT_UD] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY), [IB_QPT_RAW_PACKET] = IB_QP_PORT, [IB_QPT_UC] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS), [IB_QPT_RC] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS), [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS), [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS), [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), } }, }, [IB_QPS_INIT] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_INIT] = { .valid = 1, .opt_param = { [IB_QPT_UD] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS), [IB_QPT_RC] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS), [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS), [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS), [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), } }, [IB_QPS_RTR] = { .valid = 1, .req_param = { [IB_QPT_UC] = (IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN), [IB_QPT_RC] = (IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER), [IB_QPT_XRC_INI] = (IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN), [IB_QPT_XRC_TGT] = (IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER), }, .opt_param = { [IB_QPT_UD] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX), [IB_QPT_RC] = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX), [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX), [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX), [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), }, }, }, [IB_QPS_RTR] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_RTS] = { .valid = 1, .req_param = { [IB_QPT_UD] = IB_QP_SQ_PSN, [IB_QPT_UC] = IB_QP_SQ_PSN, [IB_QPT_RC] = (IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC), [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC), [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT | IB_QP_SQ_PSN), [IB_QPT_SMI] = IB_QP_SQ_PSN, [IB_QPT_GSI] = IB_QP_SQ_PSN, }, .opt_param = { [IB_QPT_UD] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PATH_MIG_STATE), [IB_QPT_RC] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_MIN_RNR_TIMER | IB_QP_PATH_MIG_STATE), [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PATH_MIG_STATE), [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_MIN_RNR_TIMER | 
IB_QP_PATH_MIG_STATE), [IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT, } } }, [IB_QPS_RTS] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_RTS] = { .valid = 1, .opt_param = { [IB_QPT_UD] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS | IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE), [IB_QPT_RC] = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS | IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE | IB_QP_MIN_RNR_TIMER), [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS | IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE), [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS | IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE | IB_QP_MIN_RNR_TIMER), [IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT, } }, [IB_QPS_SQD] = { .valid = 1, .opt_param = { [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY, [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */ [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY } }, }, [IB_QPS_SQD] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_RTS] = { .valid = 1, .opt_param = { [IB_QPT_UD] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PATH_MIG_STATE), [IB_QPT_RC] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_MIN_RNR_TIMER | IB_QP_PATH_MIG_STATE), [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PATH_MIG_STATE), [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_MIN_RNR_TIMER | IB_QP_PATH_MIG_STATE), [IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY), } }, [IB_QPS_SQD] = { .valid = 1, .opt_param = { [IB_QPT_UD] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_AV | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PATH_MIG_STATE), [IB_QPT_RC] = (IB_QP_PORT | IB_QP_AV | IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_MIN_RNR_TIMER | IB_QP_PATH_MIG_STATE), [IB_QPT_XRC_INI] = (IB_QP_PORT | IB_QP_AV | IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PATH_MIG_STATE), [IB_QPT_XRC_TGT] = (IB_QP_PORT | IB_QP_AV | IB_QP_TIMEOUT | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_MIN_RNR_TIMER | IB_QP_PATH_MIG_STATE), [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), } } }, [IB_QPS_SQE] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_RTS] = { .valid = 1, .opt_param = { [IB_QPT_UD] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS), [IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY), } } }, [IB_QPS_ERR] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 } } }; bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, enum ib_qp_type type, enum ib_qp_attr_mask mask) { enum ib_qp_attr_mask req_param, opt_param; if (mask & IB_QP_CUR_STATE && cur_state != IB_QPS_RTR && cur_state != 
IB_QPS_RTS && cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE) return false; if (!qp_state_table[cur_state][next_state].valid) return false; req_param = qp_state_table[cur_state][next_state].req_param[type]; opt_param = qp_state_table[cur_state][next_state].opt_param[type]; if ((mask & req_param) != req_param) return false; if (mask & ~(req_param | opt_param | IB_QP_STATE)) return false; return true; } EXPORT_SYMBOL(ib_modify_qp_is_ok); /** * ib_resolve_eth_dmac - Resolve destination mac address * @device: Device to consider * @ah_attr: address handle attribute which describes the * source and destination parameters * ib_resolve_eth_dmac() resolves destination mac address and L3 hop limit It * returns 0 on success or appropriate error code. It initializes the * necessary ah_attr fields when call is successful. */ static int ib_resolve_eth_dmac(struct ib_device *device, struct rdma_ah_attr *ah_attr) { int ret = 0; if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) { if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) { __be32 addr = 0; memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4); ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac); } else { ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw, (char *)ah_attr->roce.dmac); } } else { ret = ib_resolve_unicast_gid_dmac(device, ah_attr); } return ret; } static bool is_qp_type_connected(const struct ib_qp *qp) { return (qp->qp_type == IB_QPT_UC || qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT); } /* * IB core internal function to perform QP attributes modification. */ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; const struct ib_gid_attr *old_sgid_attr_av; const struct ib_gid_attr *old_sgid_attr_alt_av; int ret; attr->xmit_slave = NULL; if (attr_mask & IB_QP_AV) { ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr, &old_sgid_attr_av); if (ret) return ret; if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE && is_qp_type_connected(qp)) { struct net_device *slave; /* * If the user provided the qp_attr then we have to * resolve it. Kerne users have to provide already * resolved rdma_ah_attr's. */ if (udata) { ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr); if (ret) goto out_av; } slave = rdma_lag_get_ah_roce_slave(qp->device, &attr->ah_attr, GFP_KERNEL); if (IS_ERR(slave)) { ret = PTR_ERR(slave); goto out_av; } attr->xmit_slave = slave; } } if (attr_mask & IB_QP_ALT_PATH) { /* * FIXME: This does not track the migration state, so if the * user loads a new alternate path after the HW has migrated * from primary->alternate we will keep the wrong * references. This is OK for IB because the reference * counting does not serve any functional purpose. */ ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr, &old_sgid_attr_alt_av); if (ret) goto out_av; /* * Today the core code can only handle alternate paths and APM * for IB. Ban them in roce mode. 
	 */
	if (!(rdma_protocol_ib(qp->device,
			       attr->alt_ah_attr.port_num) &&
	      rdma_protocol_ib(qp->device, port))) {
		ret = -EINVAL;
		goto out;
	}
	}

	if (rdma_ib_or_roce(qp->device, port)) {
		if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
			dev_warn(&qp->device->dev,
				 "%s rq_psn overflow, masking to 24 bits\n",
				 __func__);
			attr->rq_psn &= 0xffffff;
		}

		if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
			dev_warn(&qp->device->dev,
				 "%s sq_psn overflow, masking to 24 bits\n",
				 __func__);
			attr->sq_psn &= 0xffffff;
		}
	}

	/*
	 * Bind this qp to a counter automatically based on the rdma counter
	 * rules. This is only done in the RST2INIT transition when a port is
	 * specified.
	 */
	if (!qp->counter && (attr_mask & IB_QP_PORT) &&
	    ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
		rdma_counter_bind_qp_auto(qp, attr->port_num);

	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
	if (ret)
		goto out;

	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_AV)
		qp->av_sgid_attr =
			rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_path_sgid_attr = rdma_update_sgid_attr(
			&attr->alt_ah_attr, qp->alt_path_sgid_attr);

out:
	if (attr_mask & IB_QP_ALT_PATH)
		rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
out_av:
	if (attr_mask & IB_QP_AV) {
		rdma_lag_put_ah_roce_slave(attr->xmit_slave);
		rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
	}
	return ret;
}

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @ib_qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to the user's input/output buffer information, needed
 *   by the provider driver.
 *
 * It returns 0 on success and an appropriate error code on error.
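 *
 * Illustrative sketch (values are placeholders; the kernel-side
 * ib_modify_qp() wrapper is used for brevity): a typical RESET -> INIT
 * transition for an RC QP looks like
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 *			   IB_QP_ACCESS_FLAGS);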
*/ int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata); } EXPORT_SYMBOL(ib_modify_qp_with_udata); static void ib_get_width_and_speed(u32 netdev_speed, u32 lanes, u16 *speed, u8 *width) { if (!lanes) { if (netdev_speed <= SPEED_1000) { *width = IB_WIDTH_1X; *speed = IB_SPEED_SDR; } else if (netdev_speed <= SPEED_10000) { *width = IB_WIDTH_1X; *speed = IB_SPEED_FDR10; } else if (netdev_speed <= SPEED_20000) { *width = IB_WIDTH_4X; *speed = IB_SPEED_DDR; } else if (netdev_speed <= SPEED_25000) { *width = IB_WIDTH_1X; *speed = IB_SPEED_EDR; } else if (netdev_speed <= SPEED_40000) { *width = IB_WIDTH_4X; *speed = IB_SPEED_FDR10; } else if (netdev_speed <= SPEED_50000) { *width = IB_WIDTH_2X; *speed = IB_SPEED_EDR; } else if (netdev_speed <= SPEED_100000) { *width = IB_WIDTH_4X; *speed = IB_SPEED_EDR; } else if (netdev_speed <= SPEED_200000) { *width = IB_WIDTH_4X; *speed = IB_SPEED_HDR; } else { *width = IB_WIDTH_4X; *speed = IB_SPEED_NDR; } return; } switch (lanes) { case 1: *width = IB_WIDTH_1X; break; case 2: *width = IB_WIDTH_2X; break; case 4: *width = IB_WIDTH_4X; break; case 8: *width = IB_WIDTH_8X; break; case 12: *width = IB_WIDTH_12X; break; default: *width = IB_WIDTH_1X; } switch (netdev_speed / lanes) { case SPEED_2500: *speed = IB_SPEED_SDR; break; case SPEED_5000: *speed = IB_SPEED_DDR; break; case SPEED_10000: *speed = IB_SPEED_FDR10; break; case SPEED_14000: *speed = IB_SPEED_FDR; break; case SPEED_25000: *speed = IB_SPEED_EDR; break; case SPEED_50000: *speed = IB_SPEED_HDR; break; case SPEED_100000: *speed = IB_SPEED_NDR; break; default: *speed = IB_SPEED_SDR; } } int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width) { int rc; u32 netdev_speed; struct net_device *netdev; struct ethtool_link_ksettings lksettings; if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET) return -EINVAL; netdev = ib_device_get_netdev(dev, port_num); if (!netdev) return -ENODEV; rtnl_lock(); rc = __ethtool_get_link_ksettings(netdev, &lksettings); rtnl_unlock(); dev_put(netdev); if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) { netdev_speed = lksettings.base.speed; } else { netdev_speed = SPEED_1000; if (rc) pr_warn("%s speed is unknown, defaulting to %u\n", netdev->name, netdev_speed); } ib_get_width_and_speed(netdev_speed, lksettings.lanes, speed, width); return 0; } EXPORT_SYMBOL(ib_get_eth_speed); int ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask) { return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); } EXPORT_SYMBOL(ib_modify_qp); int ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { qp_attr->ah_attr.grh.sgid_attr = NULL; qp_attr->alt_ah_attr.grh.sgid_attr = NULL; return qp->device->ops.query_qp ? 
qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) : -EOPNOTSUPP; } EXPORT_SYMBOL(ib_query_qp); int ib_close_qp(struct ib_qp *qp) { struct ib_qp *real_qp; unsigned long flags; real_qp = qp->real_qp; if (real_qp == qp) return -EINVAL; spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags); list_del(&qp->open_list); spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags); atomic_dec(&real_qp->usecnt); if (qp->qp_sec) ib_close_shared_qp_security(qp->qp_sec); kfree(qp); return 0; } EXPORT_SYMBOL(ib_close_qp); static int __ib_destroy_shared_qp(struct ib_qp *qp) { struct ib_xrcd *xrcd; struct ib_qp *real_qp; int ret; real_qp = qp->real_qp; xrcd = real_qp->xrcd; down_write(&xrcd->tgt_qps_rwsem); ib_close_qp(qp); if (atomic_read(&real_qp->usecnt) == 0) xa_erase(&xrcd->tgt_qps, real_qp->qp_num); else real_qp = NULL; up_write(&xrcd->tgt_qps_rwsem); if (real_qp) { ret = ib_destroy_qp(real_qp); if (!ret) atomic_dec(&xrcd->usecnt); } return 0; } int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata) { const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr; const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr; struct ib_qp_security *sec; int ret; WARN_ON_ONCE(qp->mrs_used > 0); if (atomic_read(&qp->usecnt)) return -EBUSY; if (qp->real_qp != qp) return __ib_destroy_shared_qp(qp); sec = qp->qp_sec; if (sec) ib_destroy_qp_security_begin(sec); if (!qp->uobject) rdma_rw_cleanup_mrs(qp); rdma_counter_unbind_qp(qp, true); ret = qp->device->ops.destroy_qp(qp, udata); if (ret) { if (sec) ib_destroy_qp_security_abort(sec); return ret; } if (alt_path_sgid_attr) rdma_put_gid_attr(alt_path_sgid_attr); if (av_sgid_attr) rdma_put_gid_attr(av_sgid_attr); ib_qp_usecnt_dec(qp); if (sec) ib_destroy_qp_security_end(sec); rdma_restrack_del(&qp->res); kfree(qp); return ret; } EXPORT_SYMBOL(ib_destroy_qp_user); /* Completion queues */ struct ib_cq *__ib_create_cq(struct ib_device *device, ib_comp_handler comp_handler, void (*event_handler)(struct ib_event *, void *), void *cq_context, const struct ib_cq_init_attr *cq_attr, const char *caller) { struct ib_cq *cq; int ret; cq = rdma_zalloc_drv_obj(device, ib_cq); if (!cq) return ERR_PTR(-ENOMEM); cq->device = device; cq->uobject = NULL; cq->comp_handler = comp_handler; cq->event_handler = event_handler; cq->cq_context = cq_context; atomic_set(&cq->usecnt, 0); rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); rdma_restrack_set_name(&cq->res, caller); ret = device->ops.create_cq(cq, cq_attr, NULL); if (ret) { rdma_restrack_put(&cq->res); kfree(cq); return ERR_PTR(ret); } rdma_restrack_add(&cq->res); return cq; } EXPORT_SYMBOL(__ib_create_cq); int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period) { if (cq->shared) return -EOPNOTSUPP; return cq->device->ops.modify_cq ? cq->device->ops.modify_cq(cq, cq_count, cq_period) : -EOPNOTSUPP; } EXPORT_SYMBOL(rdma_set_cq_moderation); int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata) { int ret; if (WARN_ON_ONCE(cq->shared)) return -EOPNOTSUPP; if (atomic_read(&cq->usecnt)) return -EBUSY; ret = cq->device->ops.destroy_cq(cq, udata); if (ret) return ret; rdma_restrack_del(&cq->res); kfree(cq); return ret; } EXPORT_SYMBOL(ib_destroy_cq_user); int ib_resize_cq(struct ib_cq *cq, int cqe) { if (cq->shared) return -EOPNOTSUPP; return cq->device->ops.resize_cq ? 
cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP; } EXPORT_SYMBOL(ib_resize_cq); /* Memory regions */ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags) { struct ib_mr *mr; if (access_flags & IB_ACCESS_ON_DEMAND) { if (!(pd->device->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING)) { pr_debug("ODP support not available\n"); return ERR_PTR(-EINVAL); } } mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr, access_flags, NULL); if (IS_ERR(mr)) return mr; mr->device = pd->device; mr->type = IB_MR_TYPE_USER; mr->pd = pd; mr->dm = NULL; atomic_inc(&pd->usecnt); mr->iova = virt_addr; mr->length = length; rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); rdma_restrack_parent_name(&mr->res, &pd->res); rdma_restrack_add(&mr->res); return mr; } EXPORT_SYMBOL(ib_reg_user_mr); int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, u32 flags, struct ib_sge *sg_list, u32 num_sge) { if (!pd->device->ops.advise_mr) return -EOPNOTSUPP; if (!num_sge) return 0; return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge, NULL); } EXPORT_SYMBOL(ib_advise_mr); int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata) { struct ib_pd *pd = mr->pd; struct ib_dm *dm = mr->dm; struct ib_sig_attrs *sig_attrs = mr->sig_attrs; int ret; trace_mr_dereg(mr); rdma_restrack_del(&mr->res); ret = mr->device->ops.dereg_mr(mr, udata); if (!ret) { atomic_dec(&pd->usecnt); if (dm) atomic_dec(&dm->usecnt); kfree(sig_attrs); } return ret; } EXPORT_SYMBOL(ib_dereg_mr_user); /** * ib_alloc_mr() - Allocates a memory region * @pd: protection domain associated with the region * @mr_type: memory region type * @max_num_sg: maximum sg entries available for registration. * * Notes: * Memory registeration page/sg lists must not exceed max_num_sg. * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed * max_num_sg * used_page_size. * */ struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) { struct ib_mr *mr; if (!pd->device->ops.alloc_mr) { mr = ERR_PTR(-EOPNOTSUPP); goto out; } if (mr_type == IB_MR_TYPE_INTEGRITY) { WARN_ON_ONCE(1); mr = ERR_PTR(-EINVAL); goto out; } mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg); if (IS_ERR(mr)) goto out; mr->device = pd->device; mr->pd = pd; mr->dm = NULL; mr->uobject = NULL; atomic_inc(&pd->usecnt); mr->need_inval = false; mr->type = mr_type; mr->sig_attrs = NULL; rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); rdma_restrack_parent_name(&mr->res, &pd->res); rdma_restrack_add(&mr->res); out: trace_mr_alloc(pd, mr_type, max_num_sg, mr); return mr; } EXPORT_SYMBOL(ib_alloc_mr); /** * ib_alloc_mr_integrity() - Allocates an integrity memory region * @pd: protection domain associated with the region * @max_num_data_sg: maximum data sg entries available for registration * @max_num_meta_sg: maximum metadata sg entries available for * registration * * Notes: * Memory registration page/sg lists must not exceed max_num_sg, * also the integrity page/sg lists must not exceed max_num_meta_sg. 
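 *
 * Illustrative sketch (names are placeholders; the scatterlists are
 * assumed to be DMA mapped already):
 *
 *	mr = ib_alloc_mr_integrity(pd, 128, 128);
 *	if (!IS_ERR(mr))
 *		n = ib_map_mr_sg_pi(mr, data_sg, data_nents, NULL,
 *				    meta_sg, meta_nents, NULL, PAGE_SIZE);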
* */ struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd, u32 max_num_data_sg, u32 max_num_meta_sg) { struct ib_mr *mr; struct ib_sig_attrs *sig_attrs; if (!pd->device->ops.alloc_mr_integrity || !pd->device->ops.map_mr_sg_pi) { mr = ERR_PTR(-EOPNOTSUPP); goto out; } if (!max_num_meta_sg) { mr = ERR_PTR(-EINVAL); goto out; } sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL); if (!sig_attrs) { mr = ERR_PTR(-ENOMEM); goto out; } mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg, max_num_meta_sg); if (IS_ERR(mr)) { kfree(sig_attrs); goto out; } mr->device = pd->device; mr->pd = pd; mr->dm = NULL; mr->uobject = NULL; atomic_inc(&pd->usecnt); mr->need_inval = false; mr->type = IB_MR_TYPE_INTEGRITY; mr->sig_attrs = sig_attrs; rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); rdma_restrack_parent_name(&mr->res, &pd->res); rdma_restrack_add(&mr->res); out: trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr); return mr; } EXPORT_SYMBOL(ib_alloc_mr_integrity); /* Multicast groups */ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) { struct ib_qp_init_attr init_attr = {}; struct ib_qp_attr attr = {}; int num_eth_ports = 0; unsigned int port; /* If QP state >= init, it is assigned to a port and we can check this * port only. */ if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) { if (attr.qp_state >= IB_QPS_INIT) { if (rdma_port_get_link_layer(qp->device, attr.port_num) != IB_LINK_LAYER_INFINIBAND) return true; goto lid_check; } } /* Can't get a quick answer, iterate over all ports */ rdma_for_each_port(qp->device, port) if (rdma_port_get_link_layer(qp->device, port) != IB_LINK_LAYER_INFINIBAND) num_eth_ports++; /* If we have at lease one Ethernet port, RoCE annex declares that * multicast LID should be ignored. We can't tell at this step if the * QP belongs to an IB or Ethernet port. */ if (num_eth_ports) return true; /* If all the ports are IB, we can check according to IB spec. */ lid_check: return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) || lid == be16_to_cpu(IB_LID_PERMISSIVE)); } int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) { int ret; if (!qp->device->ops.attach_mcast) return -EOPNOTSUPP; if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) return -EINVAL; ret = qp->device->ops.attach_mcast(qp, gid, lid); if (!ret) atomic_inc(&qp->usecnt); return ret; } EXPORT_SYMBOL(ib_attach_mcast); int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) { int ret; if (!qp->device->ops.detach_mcast) return -EOPNOTSUPP; if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) return -EINVAL; ret = qp->device->ops.detach_mcast(qp, gid, lid); if (!ret) atomic_dec(&qp->usecnt); return ret; } EXPORT_SYMBOL(ib_detach_mcast); /** * ib_alloc_xrcd_user - Allocates an XRC domain. * @device: The device on which to allocate the XRC domain. 
* @inode: inode to connect XRCD * @udata: Valid user data or NULL for kernel object */ struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device, struct inode *inode, struct ib_udata *udata) { struct ib_xrcd *xrcd; int ret; if (!device->ops.alloc_xrcd) return ERR_PTR(-EOPNOTSUPP); xrcd = rdma_zalloc_drv_obj(device, ib_xrcd); if (!xrcd) return ERR_PTR(-ENOMEM); xrcd->device = device; xrcd->inode = inode; atomic_set(&xrcd->usecnt, 0); init_rwsem(&xrcd->tgt_qps_rwsem); xa_init(&xrcd->tgt_qps); ret = device->ops.alloc_xrcd(xrcd, udata); if (ret) goto err; return xrcd; err: kfree(xrcd); return ERR_PTR(ret); } EXPORT_SYMBOL(ib_alloc_xrcd_user); /** * ib_dealloc_xrcd_user - Deallocates an XRC domain. * @xrcd: The XRC domain to deallocate. * @udata: Valid user data or NULL for kernel object */ int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata) { int ret; if (atomic_read(&xrcd->usecnt)) return -EBUSY; WARN_ON(!xa_empty(&xrcd->tgt_qps)); ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata); if (ret) return ret; kfree(xrcd); return ret; } EXPORT_SYMBOL(ib_dealloc_xrcd_user); /** * ib_create_wq - Creates a WQ associated with the specified protection * domain. * @pd: The protection domain associated with the WQ. * @wq_attr: A list of initial attributes required to create the * WQ. If WQ creation succeeds, then the attributes are updated to * the actual capabilities of the created WQ. * * wq_attr->max_wr and wq_attr->max_sge determine * the requested size of the WQ, and set to the actual values allocated * on return. * If ib_create_wq() succeeds, then max_wr and max_sge will always be * at least as large as the requested values. */ struct ib_wq *ib_create_wq(struct ib_pd *pd, struct ib_wq_init_attr *wq_attr) { struct ib_wq *wq; if (!pd->device->ops.create_wq) return ERR_PTR(-EOPNOTSUPP); wq = pd->device->ops.create_wq(pd, wq_attr, NULL); if (!IS_ERR(wq)) { wq->event_handler = wq_attr->event_handler; wq->wq_context = wq_attr->wq_context; wq->wq_type = wq_attr->wq_type; wq->cq = wq_attr->cq; wq->device = pd->device; wq->pd = pd; wq->uobject = NULL; atomic_inc(&pd->usecnt); atomic_inc(&wq_attr->cq->usecnt); atomic_set(&wq->usecnt, 0); } return wq; } EXPORT_SYMBOL(ib_create_wq); /** * ib_destroy_wq_user - Destroys the specified user WQ. * @wq: The WQ to destroy. 
* @udata: Valid user data */ int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata) { struct ib_cq *cq = wq->cq; struct ib_pd *pd = wq->pd; int ret; if (atomic_read(&wq->usecnt)) return -EBUSY; ret = wq->device->ops.destroy_wq(wq, udata); if (ret) return ret; atomic_dec(&pd->usecnt); atomic_dec(&cq->usecnt); return ret; } EXPORT_SYMBOL(ib_destroy_wq_user); int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, struct ib_mr_status *mr_status) { if (!mr->device->ops.check_mr_status) return -EOPNOTSUPP; return mr->device->ops.check_mr_status(mr, check_mask, mr_status); } EXPORT_SYMBOL(ib_check_mr_status); int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port, int state) { if (!device->ops.set_vf_link_state) return -EOPNOTSUPP; return device->ops.set_vf_link_state(device, vf, port, state); } EXPORT_SYMBOL(ib_set_vf_link_state); int ib_get_vf_config(struct ib_device *device, int vf, u32 port, struct ifla_vf_info *info) { if (!device->ops.get_vf_config) return -EOPNOTSUPP; return device->ops.get_vf_config(device, vf, port, info); } EXPORT_SYMBOL(ib_get_vf_config); int ib_get_vf_stats(struct ib_device *device, int vf, u32 port, struct ifla_vf_stats *stats) { if (!device->ops.get_vf_stats) return -EOPNOTSUPP; return device->ops.get_vf_stats(device, vf, port, stats); } EXPORT_SYMBOL(ib_get_vf_stats); int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid, int type) { if (!device->ops.set_vf_guid) return -EOPNOTSUPP; return device->ops.set_vf_guid(device, vf, port, guid, type); } EXPORT_SYMBOL(ib_set_vf_guid); int ib_get_vf_guid(struct ib_device *device, int vf, u32 port, struct ifla_vf_guid *node_guid, struct ifla_vf_guid *port_guid) { if (!device->ops.get_vf_guid) return -EOPNOTSUPP; return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid); } EXPORT_SYMBOL(ib_get_vf_guid); /** * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection * information) and set an appropriate memory region for registration. * @mr: memory region * @data_sg: dma mapped scatterlist for data * @data_sg_nents: number of entries in data_sg * @data_sg_offset: offset in bytes into data_sg * @meta_sg: dma mapped scatterlist for metadata * @meta_sg_nents: number of entries in meta_sg * @meta_sg_offset: offset in bytes into meta_sg * @page_size: page vector desired page size * * Constraints: * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY. * * Return: 0 on success. * * After this completes successfully, the memory region * is ready for registration. */ int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg, int data_sg_nents, unsigned int *data_sg_offset, struct scatterlist *meta_sg, int meta_sg_nents, unsigned int *meta_sg_offset, unsigned int page_size) { if (unlikely(!mr->device->ops.map_mr_sg_pi || WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY))) return -EOPNOTSUPP; mr->page_size = page_size; return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents, data_sg_offset, meta_sg, meta_sg_nents, meta_sg_offset); } EXPORT_SYMBOL(ib_map_mr_sg_pi); /** * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list * and set it the memory region. * @mr: memory region * @sg: dma mapped scatterlist * @sg_nents: number of entries in sg * @sg_offset: offset in bytes into sg * @page_size: page vector desired page size * * Constraints: * * - The first sg element is allowed to have an offset. * - Each sg element must either be aligned to page_size or virtually * contiguous to the previous element. 
In case an sg element has a * non-contiguous offset, the mapping prefix will not include it. * - The last sg element is allowed to have length less than page_size. * - If sg_nents total byte length exceeds the mr max_num_sge * page_size * then only max_num_sg entries will be mapped. * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these * constraints holds and the page_size argument is ignored. * * Returns the number of sg elements that were mapped to the memory region. * * After this completes successfully, the memory region * is ready for registration. */ int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset, unsigned int page_size) { if (unlikely(!mr->device->ops.map_mr_sg)) return -EOPNOTSUPP; mr->page_size = page_size; return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset); } EXPORT_SYMBOL(ib_map_mr_sg); /** * ib_sg_to_pages() - Convert the largest prefix of a sg list * to a page vector * @mr: memory region * @sgl: dma mapped scatterlist * @sg_nents: number of entries in sg * @sg_offset_p: ==== ======================================================= * IN start offset in bytes into sg * OUT offset in bytes for element n of the sg of the first * byte that has not been processed where n is the return * value of this function. * ==== ======================================================= * @set_page: driver page assignment function pointer * * Core service helper for drivers to convert the largest * prefix of given sg list to a page vector. The sg list * prefix converted is the prefix that meet the requirements * of ib_map_mr_sg. * * Returns the number of sg elements that were assigned to * a page vector. */ int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64)) { struct scatterlist *sg; u64 last_end_dma_addr = 0; unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; unsigned int last_page_off = 0; u64 page_mask = ~((u64)mr->page_size - 1); int i, ret; if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0]))) return -EINVAL; mr->iova = sg_dma_address(&sgl[0]) + sg_offset; mr->length = 0; for_each_sg(sgl, sg, sg_nents, i) { u64 dma_addr = sg_dma_address(sg) + sg_offset; u64 prev_addr = dma_addr; unsigned int dma_len = sg_dma_len(sg) - sg_offset; u64 end_dma_addr = dma_addr + dma_len; u64 page_addr = dma_addr & page_mask; /* * For the second and later elements, check whether either the * end of element i-1 or the start of element i is not aligned * on a page boundary. */ if (i && (last_page_off != 0 || page_addr != dma_addr)) { /* Stop mapping if there is a gap. */ if (last_end_dma_addr != dma_addr) break; /* * Coalesce this element with the last. If it is small * enough just update mr->length. Otherwise start * mapping from the next page. */ goto next_page; } do { ret = set_page(mr, page_addr); if (unlikely(ret < 0)) { sg_offset = prev_addr - sg_dma_address(sg); mr->length += prev_addr - dma_addr; if (sg_offset_p) *sg_offset_p = sg_offset; return i || sg_offset ? 
i : ret; } prev_addr = page_addr; next_page: page_addr += mr->page_size; } while (page_addr < end_dma_addr); mr->length += dma_len; last_end_dma_addr = end_dma_addr; last_page_off = end_dma_addr & ~page_mask; sg_offset = 0; } if (sg_offset_p) *sg_offset_p = 0; return i; } EXPORT_SYMBOL(ib_sg_to_pages); struct ib_drain_cqe { struct ib_cqe cqe; struct completion done; }; static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) { struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe, cqe); complete(&cqe->done); } /* * Post a WR and block until its completion is reaped for the SQ. */ static void __ib_drain_sq(struct ib_qp *qp) { struct ib_cq *cq = qp->send_cq; struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; struct ib_drain_cqe sdrain; struct ib_rdma_wr swr = { .wr = { .next = NULL, { .wr_cqe = &sdrain.cqe, }, .opcode = IB_WR_RDMA_WRITE, }, }; int ret; ret = ib_modify_qp(qp, &attr, IB_QP_STATE); if (ret) { WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); return; } sdrain.cqe.done = ib_drain_qp_done; init_completion(&sdrain.done); ret = ib_post_send(qp, &swr.wr, NULL); if (ret) { WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); return; } if (cq->poll_ctx == IB_POLL_DIRECT) while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0) ib_process_cq_direct(cq, -1); else wait_for_completion(&sdrain.done); } /* * Post a WR and block until its completion is reaped for the RQ. */ static void __ib_drain_rq(struct ib_qp *qp) { struct ib_cq *cq = qp->recv_cq; struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; struct ib_drain_cqe rdrain; struct ib_recv_wr rwr = {}; int ret; ret = ib_modify_qp(qp, &attr, IB_QP_STATE); if (ret) { WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); return; } rwr.wr_cqe = &rdrain.cqe; rdrain.cqe.done = ib_drain_qp_done; init_completion(&rdrain.done); ret = ib_post_recv(qp, &rwr, NULL); if (ret) { WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); return; } if (cq->poll_ctx == IB_POLL_DIRECT) while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0) ib_process_cq_direct(cq, -1); else wait_for_completion(&rdrain.done); } /** * ib_drain_sq() - Block until all SQ CQEs have been consumed by the * application. * @qp: queue pair to drain * * If the device has a provider-specific drain function, then * call that. Otherwise call the generic drain function * __ib_drain_sq(). * * The caller must: * * ensure there is room in the CQ and SQ for the drain work request and * completion. * * allocate the CQ using ib_alloc_cq(). * * ensure that there are no other contexts that are posting WRs concurrently. * Otherwise the drain is not guaranteed. */ void ib_drain_sq(struct ib_qp *qp) { if (qp->device->ops.drain_sq) qp->device->ops.drain_sq(qp); else __ib_drain_sq(qp); trace_cq_drain_complete(qp->send_cq); } EXPORT_SYMBOL(ib_drain_sq); /** * ib_drain_rq() - Block until all RQ CQEs have been consumed by the * application. * @qp: queue pair to drain * * If the device has a provider-specific drain function, then * call that. Otherwise call the generic drain function * __ib_drain_rq(). * * The caller must: * * ensure there is room in the CQ and RQ for the drain work request and * completion. * * allocate the CQ using ib_alloc_cq(). * * ensure that there are no other contexts that are posting WRs concurrently. * Otherwise the drain is not guaranteed. 
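 *
 * Illustrative teardown sketch (names are placeholders): ULPs normally
 * combine both drains before destroying the QP:
 *
 *	ib_drain_qp(qp);	// drains the SQ, and the RQ unless an SRQ is used
 *	ib_destroy_qp(qp);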
*/ void ib_drain_rq(struct ib_qp *qp) { if (qp->device->ops.drain_rq) qp->device->ops.drain_rq(qp); else __ib_drain_rq(qp); trace_cq_drain_complete(qp->recv_cq); } EXPORT_SYMBOL(ib_drain_rq); /** * ib_drain_qp() - Block until all CQEs have been consumed by the * application on both the RQ and SQ. * @qp: queue pair to drain * * The caller must: * * ensure there is room in the CQ(s), SQ, and RQ for drain work requests * and completions. * * allocate the CQs using ib_alloc_cq(). * * ensure that there are no other contexts that are posting WRs concurrently. * Otherwise the drain is not guaranteed. */ void ib_drain_qp(struct ib_qp *qp) { ib_drain_sq(qp); if (!qp->srq) ib_drain_rq(qp); } EXPORT_SYMBOL(ib_drain_qp); struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num, enum rdma_netdev_t type, const char *name, unsigned char name_assign_type, void (*setup)(struct net_device *)) { struct rdma_netdev_alloc_params params; struct net_device *netdev; int rc; if (!device->ops.rdma_netdev_get_params) return ERR_PTR(-EOPNOTSUPP); rc = device->ops.rdma_netdev_get_params(device, port_num, type, &params); if (rc) return ERR_PTR(rc); netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type, setup, params.txqs, params.rxqs); if (!netdev) return ERR_PTR(-ENOMEM); return netdev; } EXPORT_SYMBOL(rdma_alloc_netdev); int rdma_init_netdev(struct ib_device *device, u32 port_num, enum rdma_netdev_t type, const char *name, unsigned char name_assign_type, void (*setup)(struct net_device *), struct net_device *netdev) { struct rdma_netdev_alloc_params params; int rc; if (!device->ops.rdma_netdev_get_params) return -EOPNOTSUPP; rc = device->ops.rdma_netdev_get_params(device, port_num, type, &params); if (rc) return rc; return params.initialize_rdma_netdev(device, port_num, netdev, params.param); } EXPORT_SYMBOL(rdma_init_netdev); void __rdma_block_iter_start(struct ib_block_iter *biter, struct scatterlist *sglist, unsigned int nents, unsigned long pgsz) { memset(biter, 0, sizeof(struct ib_block_iter)); biter->__sg = sglist; biter->__sg_nents = nents; /* Driver provides best block size to use */ biter->__pg_bit = __fls(pgsz); } EXPORT_SYMBOL(__rdma_block_iter_start); bool __rdma_block_iter_next(struct ib_block_iter *biter) { unsigned int block_offset; unsigned int sg_delta; if (!biter->__sg_nents || !biter->__sg) return false; biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance; block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); sg_delta = BIT_ULL(biter->__pg_bit) - block_offset; if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) { biter->__sg_advance += sg_delta; } else { biter->__sg_advance = 0; biter->__sg = sg_next(biter->__sg); biter->__sg_nents--; } return true; } EXPORT_SYMBOL(__rdma_block_iter_next); /** * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct * for the drivers. 
* @descs: array of static descriptors * @num_counters: number of elements in array * @lifespan: milliseconds between updates */ struct rdma_hw_stats *rdma_alloc_hw_stats_struct( const struct rdma_stat_desc *descs, int num_counters, unsigned long lifespan) { struct rdma_hw_stats *stats; stats = kzalloc(struct_size(stats, value, num_counters), GFP_KERNEL); if (!stats) return NULL; stats->is_disabled = kcalloc(BITS_TO_LONGS(num_counters), sizeof(*stats->is_disabled), GFP_KERNEL); if (!stats->is_disabled) goto err; stats->descs = descs; stats->num_counters = num_counters; stats->lifespan = msecs_to_jiffies(lifespan); mutex_init(&stats->lock); return stats; err: kfree(stats); return NULL; } EXPORT_SYMBOL(rdma_alloc_hw_stats_struct); /** * rdma_free_hw_stats_struct - Helper function to release rdma_hw_stats * @stats: statistics to release */ void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats) { if (!stats) return; kfree(stats->is_disabled); kfree(stats); } EXPORT_SYMBOL(rdma_free_hw_stats_struct);
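/*
 * Illustrative usage sketch: a minimal kernel ULP bring-up/teardown over the
 * verbs above, relying only on the headers already included by this file.
 * This is a documentation-only example under #if 0; the "example_" names are
 * hypothetical, the sizes are arbitrary, and error handling is abbreviated.
 */
#if 0	/* documentation-only sketch */
static int example_qp_bringup(struct ib_device *dev, u32 port_num)
{
	struct ib_qp_init_attr qp_attr = {};
	struct ib_qp_attr attr = {};
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	int ret;

	pd = ib_alloc_pd(dev, 0);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	cq = ib_alloc_cq(dev, NULL, 16, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_pd;
	}

	qp_attr.send_cq = cq;
	qp_attr.recv_cq = cq;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.cap.max_send_wr = 16;
	qp_attr.cap.max_recv_wr = 16;
	qp_attr.cap.max_send_sge = 1;
	qp_attr.cap.max_recv_sge = 1;

	qp = ib_create_qp(pd, &qp_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_cq;
	}

	/* RESET -> INIT, matching qp_state_table above */
	attr.qp_state = IB_QPS_INIT;
	attr.pkey_index = 0;
	attr.port_num = port_num;
	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
	if (ret)
		goto err_qp;

	/* INIT -> RTR -> RTS would follow here, normally driven by the CM */

	ib_drain_qp(qp);
err_qp:
	ib_destroy_qp(qp);
err_cq:
	ib_free_cq(cq);
err_pd:
	ib_dealloc_pd(pd);
	return ret;
}
#endif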
linux-master
drivers/infiniband/core/verbs.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2004-2007 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved. */ #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/module.h> #include <linux/err.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/random.h> #include <linux/rbtree.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/workqueue.h> #include <linux/kdev_t.h> #include <linux/etherdevice.h> #include <rdma/ib_cache.h> #include <rdma/ib_cm.h> #include <rdma/ib_sysfs.h> #include "cm_msgs.h" #include "core_priv.h" #include "cm_trace.h" MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("InfiniBand CM"); MODULE_LICENSE("Dual BSD/GPL"); static const char * const ibcm_rej_reason_strs[] = { [IB_CM_REJ_NO_QP] = "no QP", [IB_CM_REJ_NO_EEC] = "no EEC", [IB_CM_REJ_NO_RESOURCES] = "no resources", [IB_CM_REJ_TIMEOUT] = "timeout", [IB_CM_REJ_UNSUPPORTED] = "unsupported", [IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID", [IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance", [IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID", [IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type", [IB_CM_REJ_STALE_CONN] = "stale conn", [IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist", [IB_CM_REJ_INVALID_GID] = "invalid GID", [IB_CM_REJ_INVALID_LID] = "invalid LID", [IB_CM_REJ_INVALID_SL] = "invalid SL", [IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class", [IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit", [IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate", [IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID", [IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID", [IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL", [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class", [IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit", [IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate", [IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect", [IB_CM_REJ_PORT_REDIRECT] = "port redirect", [IB_CM_REJ_INVALID_MTU] = "invalid MTU", [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources", [IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined", [IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry", [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID", [IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version", [IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label", [IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label", [IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] = "vendor option is not supported", }; const char *__attribute_const__ ibcm_reject_msg(int reason) { size_t index = reason; if (index < ARRAY_SIZE(ibcm_rej_reason_strs) && ibcm_rej_reason_strs[index]) return ibcm_rej_reason_strs[index]; else return "unrecognized reason"; } EXPORT_SYMBOL(ibcm_reject_msg); struct cm_id_private; struct cm_work; static int cm_add_one(struct ib_device *device); static void cm_remove_one(struct ib_device *device, void *client_data); static void cm_process_work(struct cm_id_private *cm_id_priv, struct cm_work *work); static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv, struct ib_cm_sidr_rep_param *param); static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv, const void 
*private_data, u8 private_data_len); static int cm_send_drep_locked(struct cm_id_private *cm_id_priv, void *private_data, u8 private_data_len); static int cm_send_rej_locked(struct cm_id_private *cm_id_priv, enum ib_cm_rej_reason reason, void *ari, u8 ari_length, const void *private_data, u8 private_data_len); static struct ib_client cm_client = { .name = "cm", .add = cm_add_one, .remove = cm_remove_one }; static struct ib_cm { spinlock_t lock; struct list_head device_list; rwlock_t device_lock; struct rb_root listen_service_table; u64 listen_service_id; /* struct rb_root peer_service_table; todo: fix peer to peer */ struct rb_root remote_qp_table; struct rb_root remote_id_table; struct rb_root remote_sidr_table; struct xarray local_id_table; u32 local_id_next; __be32 random_id_operand; struct list_head timewait_list; struct workqueue_struct *wq; } cm; /* Counter indexes ordered by attribute ID */ enum { CM_REQ_COUNTER, CM_MRA_COUNTER, CM_REJ_COUNTER, CM_REP_COUNTER, CM_RTU_COUNTER, CM_DREQ_COUNTER, CM_DREP_COUNTER, CM_SIDR_REQ_COUNTER, CM_SIDR_REP_COUNTER, CM_LAP_COUNTER, CM_APR_COUNTER, CM_ATTR_COUNT, CM_ATTR_ID_OFFSET = 0x0010, }; enum { CM_XMIT, CM_XMIT_RETRIES, CM_RECV, CM_RECV_DUPLICATES, CM_COUNTER_GROUPS }; struct cm_counter_attribute { struct ib_port_attribute attr; unsigned short group; unsigned short index; }; struct cm_port { struct cm_device *cm_dev; struct ib_mad_agent *mad_agent; u32 port_num; atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT]; }; struct cm_device { struct kref kref; struct list_head list; spinlock_t mad_agent_lock; struct ib_device *ib_device; u8 ack_delay; int going_down; struct cm_port *port[]; }; struct cm_av { struct cm_port *port; struct rdma_ah_attr ah_attr; u16 dlid_datapath; u16 pkey_index; u8 timeout; }; struct cm_work { struct delayed_work work; struct list_head list; struct cm_port *port; struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ __be32 local_id; /* Established / timewait */ __be32 remote_id; struct ib_cm_event cm_event; struct sa_path_rec path[]; }; struct cm_timewait_info { struct cm_work work; struct list_head list; struct rb_node remote_qp_node; struct rb_node remote_id_node; __be64 remote_ca_guid; __be32 remote_qpn; u8 inserted_remote_qp; u8 inserted_remote_id; }; struct cm_id_private { struct ib_cm_id id; struct rb_node service_node; struct rb_node sidr_id_node; u32 sidr_slid; spinlock_t lock; /* Do not acquire inside cm.lock */ struct completion comp; refcount_t refcount; /* Number of clients sharing this ib_cm_id. Only valid for listeners. * Protected by the cm.lock spinlock. 
*/ int listen_sharecount; struct rcu_head rcu; struct ib_mad_send_buf *msg; struct cm_timewait_info *timewait_info; /* todo: use alternate port on send failure */ struct cm_av av; struct cm_av alt_av; void *private_data; __be64 tid; __be32 local_qpn; __be32 remote_qpn; enum ib_qp_type qp_type; __be32 sq_psn; __be32 rq_psn; int timeout_ms; enum ib_mtu path_mtu; __be16 pkey; u8 private_data_len; u8 max_cm_retries; u8 responder_resources; u8 initiator_depth; u8 retry_count; u8 rnr_retry_count; u8 service_timeout; u8 target_ack_delay; struct list_head work_list; atomic_t work_count; struct rdma_ucm_ece ece; }; static void cm_dev_release(struct kref *kref) { struct cm_device *cm_dev = container_of(kref, struct cm_device, kref); u32 i; rdma_for_each_port(cm_dev->ib_device, i) kfree(cm_dev->port[i - 1]); kfree(cm_dev); } static void cm_device_put(struct cm_device *cm_dev) { kref_put(&cm_dev->kref, cm_dev_release); } static void cm_work_handler(struct work_struct *work); static inline void cm_deref_id(struct cm_id_private *cm_id_priv) { if (refcount_dec_and_test(&cm_id_priv->refcount)) complete(&cm_id_priv->comp); } static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv) { struct ib_mad_agent *mad_agent; struct ib_mad_send_buf *m; struct ib_ah *ah; lockdep_assert_held(&cm_id_priv->lock); if (!cm_id_priv->av.port) return ERR_PTR(-EINVAL); spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); mad_agent = cm_id_priv->av.port->mad_agent; if (!mad_agent) { m = ERR_PTR(-EINVAL); goto out; } ah = rdma_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr, 0); if (IS_ERR(ah)) { m = ERR_CAST(ah); goto out; } m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, cm_id_priv->av.pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC, IB_MGMT_BASE_VERSION); if (IS_ERR(m)) { rdma_destroy_ah(ah, 0); goto out; } /* Timeout set by caller if response is expected. 
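* The buffer also carries the cm_id_priv in m->context[0] together with the reference taken just below; cm_free_msg()/cm_free_priv_msg() drop that reference again when the MAD is released.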
*/ m->ah = ah; m->retries = cm_id_priv->max_cm_retries; refcount_inc(&cm_id_priv->refcount); m->context[0] = cm_id_priv; out: spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); return m; } static void cm_free_msg(struct ib_mad_send_buf *msg) { struct cm_id_private *cm_id_priv = msg->context[0]; if (msg->ah) rdma_destroy_ah(msg->ah, 0); cm_deref_id(cm_id_priv); ib_free_send_mad(msg); } static struct ib_mad_send_buf * cm_alloc_priv_msg(struct cm_id_private *cm_id_priv) { struct ib_mad_send_buf *msg; lockdep_assert_held(&cm_id_priv->lock); msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) return msg; cm_id_priv->msg = msg; return msg; } static void cm_free_priv_msg(struct ib_mad_send_buf *msg) { struct cm_id_private *cm_id_priv = msg->context[0]; lockdep_assert_held(&cm_id_priv->lock); if (!WARN_ON(cm_id_priv->msg != msg)) cm_id_priv->msg = NULL; if (msg->ah) rdma_destroy_ah(msg->ah, 0); cm_deref_id(cm_id_priv); ib_free_send_mad(msg); } static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc) { return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC, IB_MGMT_BASE_VERSION); } static int cm_create_response_msg_ah(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, struct ib_mad_send_buf *msg) { struct ib_ah *ah; ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc, mad_recv_wc->recv_buf.grh, port->port_num); if (IS_ERR(ah)) return PTR_ERR(ah); msg->ah = ah; return 0; } static int cm_alloc_response_msg(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, struct ib_mad_send_buf **msg) { struct ib_mad_send_buf *m; int ret; m = cm_alloc_response_msg_no_ah(port, mad_recv_wc); if (IS_ERR(m)) return PTR_ERR(m); ret = cm_create_response_msg_ah(port, mad_recv_wc, m); if (ret) { ib_free_send_mad(m); return ret; } *msg = m; return 0; } static void cm_free_response_msg(struct ib_mad_send_buf *msg) { if (msg->ah) rdma_destroy_ah(msg->ah, 0); ib_free_send_mad(msg); } static void *cm_copy_private_data(const void *private_data, u8 private_data_len) { void *data; if (!private_data || !private_data_len) return NULL; data = kmemdup(private_data, private_data_len, GFP_KERNEL); if (!data) return ERR_PTR(-ENOMEM); return data; } static void cm_set_private_data(struct cm_id_private *cm_id_priv, void *private_data, u8 private_data_len) { if (cm_id_priv->private_data && cm_id_priv->private_data_len) kfree(cm_id_priv->private_data); cm_id_priv->private_data = private_data; cm_id_priv->private_data_len = private_data_len; } static void cm_set_av_port(struct cm_av *av, struct cm_port *port) { struct cm_port *old_port = av->port; if (old_port == port) return; av->port = port; if (old_port) cm_device_put(old_port->cm_dev); if (port) kref_get(&port->cm_dev->kref); } static void cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc, struct rdma_ah_attr *ah_attr, struct cm_av *av) { cm_set_av_port(av, port); av->pkey_index = wc->pkey_index; rdma_move_ah_attr(&av->ah_attr, ah_attr); } static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, struct ib_grh *grh, struct cm_av *av) { cm_set_av_port(av, port); av->pkey_index = wc->pkey_index; return ib_init_ah_attr_from_wc(port->cm_dev->ib_device, port->port_num, wc, grh, &av->ah_attr); } static struct cm_port * get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr) { struct cm_device *cm_dev; struct cm_port *port = NULL; unsigned long flags; if (attr) { 
read_lock_irqsave(&cm.device_lock, flags); list_for_each_entry(cm_dev, &cm.device_list, list) { if (cm_dev->ib_device == attr->device) { port = cm_dev->port[attr->port_num - 1]; break; } } read_unlock_irqrestore(&cm.device_lock, flags); } else { /* SGID attribute can be NULL in following * conditions. * (a) Alternative path * (b) IB link layer without GRH * (c) LAP send messages */ read_lock_irqsave(&cm.device_lock, flags); list_for_each_entry(cm_dev, &cm.device_list, list) { attr = rdma_find_gid(cm_dev->ib_device, &path->sgid, sa_conv_pathrec_to_gid_type(path), NULL); if (!IS_ERR(attr)) { port = cm_dev->port[attr->port_num - 1]; break; } } read_unlock_irqrestore(&cm.device_lock, flags); if (port) rdma_put_gid_attr(attr); } return port; } static int cm_init_av_by_path(struct sa_path_rec *path, const struct ib_gid_attr *sgid_attr, struct cm_av *av) { struct rdma_ah_attr new_ah_attr; struct cm_device *cm_dev; struct cm_port *port; int ret; port = get_cm_port_from_path(path, sgid_attr); if (!port) return -EINVAL; cm_dev = port->cm_dev; ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num, be16_to_cpu(path->pkey), &av->pkey_index); if (ret) return ret; cm_set_av_port(av, port); /* * av->ah_attr might be initialized based on wc or during * request processing time which might have reference to sgid_attr. * So initialize a new ah_attr on stack. * If initialization fails, old ah_attr is used for sending any * responses. If initialization is successful, than new ah_attr * is used by overwriting the old one. So that right ah_attr * can be used to return an error response. */ ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path, &new_ah_attr, sgid_attr); if (ret) return ret; av->timeout = path->packet_life_time + 1; rdma_move_ah_attr(&av->ah_attr, &new_ah_attr); return 0; } /* Move av created by cm_init_av_by_path(), so av.dgid is not moved */ static void cm_move_av_from_path(struct cm_av *dest, struct cm_av *src) { cm_set_av_port(dest, src->port); cm_set_av_port(src, NULL); dest->pkey_index = src->pkey_index; rdma_move_ah_attr(&dest->ah_attr, &src->ah_attr); dest->timeout = src->timeout; } static void cm_destroy_av(struct cm_av *av) { rdma_destroy_ah_attr(&av->ah_attr); cm_set_av_port(av, NULL); } static u32 cm_local_id(__be32 local_id) { return (__force u32) (local_id ^ cm.random_id_operand); } static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id) { struct cm_id_private *cm_id_priv; rcu_read_lock(); cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id)); if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id || !refcount_inc_not_zero(&cm_id_priv->refcount)) cm_id_priv = NULL; rcu_read_unlock(); return cm_id_priv; } /* * Trivial helpers to strip endian annotation and compare; the * endianness doesn't actually matter since we just need a stable * order for the RB tree. */ static int be32_lt(__be32 a, __be32 b) { return (__force u32) a < (__force u32) b; } static int be32_gt(__be32 a, __be32 b) { return (__force u32) a > (__force u32) b; } static int be64_lt(__be64 a, __be64 b) { return (__force u64) a < (__force u64) b; } static int be64_gt(__be64 a, __be64 b) { return (__force u64) a > (__force u64) b; } /* * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv * if the new ID was inserted, NULL if it could not be inserted due to a * collision, or the existing cm_id_priv ready for shared usage. 
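* Callers thus see three outcomes: the same pointer back (newly inserted), a different pointer (an existing compatible listener, returned with its refcount and listen_sharecount already bumped), or NULL (an incompatible listener already owns this device/service_id pair).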
*/ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv, ib_cm_handler shared_handler) { struct rb_node **link = &cm.listen_service_table.rb_node; struct rb_node *parent = NULL; struct cm_id_private *cur_cm_id_priv; __be64 service_id = cm_id_priv->id.service_id; unsigned long flags; spin_lock_irqsave(&cm.lock, flags); while (*link) { parent = *link; cur_cm_id_priv = rb_entry(parent, struct cm_id_private, service_node); if (cm_id_priv->id.device < cur_cm_id_priv->id.device) link = &(*link)->rb_left; else if (cm_id_priv->id.device > cur_cm_id_priv->id.device) link = &(*link)->rb_right; else if (be64_lt(service_id, cur_cm_id_priv->id.service_id)) link = &(*link)->rb_left; else if (be64_gt(service_id, cur_cm_id_priv->id.service_id)) link = &(*link)->rb_right; else { /* * Sharing an ib_cm_id with different handlers is not * supported */ if (cur_cm_id_priv->id.cm_handler != shared_handler || cur_cm_id_priv->id.context || WARN_ON(!cur_cm_id_priv->id.cm_handler)) { spin_unlock_irqrestore(&cm.lock, flags); return NULL; } refcount_inc(&cur_cm_id_priv->refcount); cur_cm_id_priv->listen_sharecount++; spin_unlock_irqrestore(&cm.lock, flags); return cur_cm_id_priv; } } cm_id_priv->listen_sharecount++; rb_link_node(&cm_id_priv->service_node, parent, link); rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table); spin_unlock_irqrestore(&cm.lock, flags); return cm_id_priv; } static struct cm_id_private *cm_find_listen(struct ib_device *device, __be64 service_id) { struct rb_node *node = cm.listen_service_table.rb_node; struct cm_id_private *cm_id_priv; while (node) { cm_id_priv = rb_entry(node, struct cm_id_private, service_node); if (device < cm_id_priv->id.device) node = node->rb_left; else if (device > cm_id_priv->id.device) node = node->rb_right; else if (be64_lt(service_id, cm_id_priv->id.service_id)) node = node->rb_left; else if (be64_gt(service_id, cm_id_priv->id.service_id)) node = node->rb_right; else { refcount_inc(&cm_id_priv->refcount); return cm_id_priv; } } return NULL; } static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info *timewait_info) { struct rb_node **link = &cm.remote_id_table.rb_node; struct rb_node *parent = NULL; struct cm_timewait_info *cur_timewait_info; __be64 remote_ca_guid = timewait_info->remote_ca_guid; __be32 remote_id = timewait_info->work.remote_id; while (*link) { parent = *link; cur_timewait_info = rb_entry(parent, struct cm_timewait_info, remote_id_node); if (be32_lt(remote_id, cur_timewait_info->work.remote_id)) link = &(*link)->rb_left; else if (be32_gt(remote_id, cur_timewait_info->work.remote_id)) link = &(*link)->rb_right; else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_left; else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_right; else return cur_timewait_info; } timewait_info->inserted_remote_id = 1; rb_link_node(&timewait_info->remote_id_node, parent, link); rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table); return NULL; } static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid, __be32 remote_id) { struct rb_node *node = cm.remote_id_table.rb_node; struct cm_timewait_info *timewait_info; struct cm_id_private *res = NULL; spin_lock_irq(&cm.lock); while (node) { timewait_info = rb_entry(node, struct cm_timewait_info, remote_id_node); if (be32_lt(remote_id, timewait_info->work.remote_id)) node = node->rb_left; else if (be32_gt(remote_id, timewait_info->work.remote_id)) node = node->rb_right; 
else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid)) node = node->rb_left; else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid)) node = node->rb_right; else { res = cm_acquire_id(timewait_info->work.local_id, timewait_info->work.remote_id); break; } } spin_unlock_irq(&cm.lock); return res; } static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info *timewait_info) { struct rb_node **link = &cm.remote_qp_table.rb_node; struct rb_node *parent = NULL; struct cm_timewait_info *cur_timewait_info; __be64 remote_ca_guid = timewait_info->remote_ca_guid; __be32 remote_qpn = timewait_info->remote_qpn; while (*link) { parent = *link; cur_timewait_info = rb_entry(parent, struct cm_timewait_info, remote_qp_node); if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn)) link = &(*link)->rb_left; else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn)) link = &(*link)->rb_right; else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_left; else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_right; else return cur_timewait_info; } timewait_info->inserted_remote_qp = 1; rb_link_node(&timewait_info->remote_qp_node, parent, link); rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table); return NULL; } static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private *cm_id_priv) { struct rb_node **link = &cm.remote_sidr_table.rb_node; struct rb_node *parent = NULL; struct cm_id_private *cur_cm_id_priv; __be32 remote_id = cm_id_priv->id.remote_id; while (*link) { parent = *link; cur_cm_id_priv = rb_entry(parent, struct cm_id_private, sidr_id_node); if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id)) link = &(*link)->rb_left; else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id)) link = &(*link)->rb_right; else { if (cur_cm_id_priv->sidr_slid < cm_id_priv->sidr_slid) link = &(*link)->rb_left; else if (cur_cm_id_priv->sidr_slid > cm_id_priv->sidr_slid) link = &(*link)->rb_right; else return cur_cm_id_priv; } } rb_link_node(&cm_id_priv->sidr_id_node, parent, link); rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); return NULL; } static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device, ib_cm_handler cm_handler, void *context) { struct cm_id_private *cm_id_priv; u32 id; int ret; cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL); if (!cm_id_priv) return ERR_PTR(-ENOMEM); cm_id_priv->id.state = IB_CM_IDLE; cm_id_priv->id.device = device; cm_id_priv->id.cm_handler = cm_handler; cm_id_priv->id.context = context; cm_id_priv->id.remote_cm_qpn = 1; RB_CLEAR_NODE(&cm_id_priv->service_node); RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); spin_lock_init(&cm_id_priv->lock); init_completion(&cm_id_priv->comp); INIT_LIST_HEAD(&cm_id_priv->work_list); atomic_set(&cm_id_priv->work_count, -1); refcount_set(&cm_id_priv->refcount, 1); ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b, &cm.local_id_next, GFP_KERNEL); if (ret < 0) goto error; cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; return cm_id_priv; error: kfree(cm_id_priv); return ERR_PTR(ret); } /* * Make the ID visible to the MAD handlers and other threads that use the * xarray. 
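* cm_alloc_id_priv() only reserved the id with a NULL entry via xa_alloc_cyclic(), so cm_acquire_id() cannot return it until this xa_store() publishes the pointer.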
*/ static void cm_finalize_id(struct cm_id_private *cm_id_priv) { xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id), cm_id_priv, GFP_ATOMIC); } struct ib_cm_id *ib_create_cm_id(struct ib_device *device, ib_cm_handler cm_handler, void *context) { struct cm_id_private *cm_id_priv; cm_id_priv = cm_alloc_id_priv(device, cm_handler, context); if (IS_ERR(cm_id_priv)) return ERR_CAST(cm_id_priv); cm_finalize_id(cm_id_priv); return &cm_id_priv->id; } EXPORT_SYMBOL(ib_create_cm_id); static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv) { struct cm_work *work; if (list_empty(&cm_id_priv->work_list)) return NULL; work = list_entry(cm_id_priv->work_list.next, struct cm_work, list); list_del(&work->list); return work; } static void cm_free_work(struct cm_work *work) { if (work->mad_recv_wc) ib_free_recv_mad(work->mad_recv_wc); kfree(work); } static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv, struct cm_work *work) __releases(&cm_id_priv->lock) { bool immediate; /* * To deliver the event to the user callback we have the drop the * spinlock, however, we need to ensure that the user callback is single * threaded and receives events in the temporal order. If there are * already events being processed then thread new events onto a list, * the thread currently processing will pick them up. */ immediate = atomic_inc_and_test(&cm_id_priv->work_count); if (!immediate) { list_add_tail(&work->list, &cm_id_priv->work_list); /* * This routine always consumes incoming reference. Once queued * to the work_list then a reference is held by the thread * currently running cm_process_work() and this reference is not * needed. */ cm_deref_id(cm_id_priv); } spin_unlock_irq(&cm_id_priv->lock); if (immediate) cm_process_work(cm_id_priv, work); } static inline int cm_convert_to_ms(int iba_time) { /* approximate conversion to ms from 4.096us x 2^iba_time */ return 1 << max(iba_time - 8, 0); } /* * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time * Because of how ack_timeout is stored, adding one doubles the timeout. * To avoid large timeouts, select the max(ack_delay, life_time + 1), and * increment it (round up) only if the other is within 50%. 
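* For example, ca_ack_delay = 15 and packet_life_time = 14 gives 16 (the two terms are within one step of each other), while ca_ack_delay = 20 and packet_life_time = 10 stays at 20; the result is capped at 31, the largest encodable exponent.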
*/ static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time) { int ack_timeout = packet_life_time + 1; if (ack_timeout >= ca_ack_delay) ack_timeout += (ca_ack_delay >= (ack_timeout - 1)); else ack_timeout = ca_ack_delay + (ack_timeout >= (ca_ack_delay - 1)); return min(31, ack_timeout); } static void cm_remove_remote(struct cm_id_private *cm_id_priv) { struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info; if (timewait_info->inserted_remote_id) { rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table); timewait_info->inserted_remote_id = 0; } if (timewait_info->inserted_remote_qp) { rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table); timewait_info->inserted_remote_qp = 0; } } static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id) { struct cm_timewait_info *timewait_info; timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL); if (!timewait_info) return ERR_PTR(-ENOMEM); timewait_info->work.local_id = local_id; INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler); timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; return timewait_info; } static void cm_enter_timewait(struct cm_id_private *cm_id_priv) { int wait_time; unsigned long flags; struct cm_device *cm_dev; lockdep_assert_held(&cm_id_priv->lock); cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client); if (!cm_dev) return; spin_lock_irqsave(&cm.lock, flags); cm_remove_remote(cm_id_priv); list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list); spin_unlock_irqrestore(&cm.lock, flags); /* * The cm_id could be destroyed by the user before we exit timewait. * To protect against this, we search for the cm_id after exiting * timewait before notifying the user that we've exited timewait. */ cm_id_priv->id.state = IB_CM_TIMEWAIT; wait_time = cm_convert_to_ms(cm_id_priv->av.timeout); /* Check if the device started its remove_one */ spin_lock_irqsave(&cm.lock, flags); if (!cm_dev->going_down) queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, msecs_to_jiffies(wait_time)); spin_unlock_irqrestore(&cm.lock, flags); /* * The timewait_info is converted into a work and gets freed during * cm_free_work() in cm_timewait_handler(). */ BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0); cm_id_priv->timewait_info = NULL; } static void cm_reset_to_idle(struct cm_id_private *cm_id_priv) { unsigned long flags; lockdep_assert_held(&cm_id_priv->lock); cm_id_priv->id.state = IB_CM_IDLE; if (cm_id_priv->timewait_info) { spin_lock_irqsave(&cm.lock, flags); cm_remove_remote(cm_id_priv); spin_unlock_irqrestore(&cm.lock, flags); kfree(cm_id_priv->timewait_info); cm_id_priv->timewait_info = NULL; } } static void cm_destroy_id(struct ib_cm_id *cm_id, int err) { struct cm_id_private *cm_id_priv; struct cm_work *work; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irq(&cm_id_priv->lock); retest: switch (cm_id->state) { case IB_CM_LISTEN: spin_lock(&cm.lock); if (--cm_id_priv->listen_sharecount > 0) { /* The id is still shared. 
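* Each sharer returned by cm_insert_listen() holds one listen_sharecount and one refcount, so this path only drops that pair and leaves the rb-tree node in place for the remaining sharers.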
*/ WARN_ON(refcount_read(&cm_id_priv->refcount) == 1); spin_unlock(&cm.lock); spin_unlock_irq(&cm_id_priv->lock); cm_deref_id(cm_id_priv); return; } cm_id->state = IB_CM_IDLE; rb_erase(&cm_id_priv->service_node, &cm.listen_service_table); RB_CLEAR_NODE(&cm_id_priv->service_node); spin_unlock(&cm.lock); break; case IB_CM_SIDR_REQ_SENT: cm_id->state = IB_CM_IDLE; ib_cancel_mad(cm_id_priv->msg); break; case IB_CM_SIDR_REQ_RCVD: cm_send_sidr_rep_locked(cm_id_priv, &(struct ib_cm_sidr_rep_param){ .status = IB_SIDR_REJECT }); /* cm_send_sidr_rep_locked will not move to IDLE if it fails */ cm_id->state = IB_CM_IDLE; break; case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: ib_cancel_mad(cm_id_priv->msg); cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT, &cm_id_priv->id.device->node_guid, sizeof(cm_id_priv->id.device->node_guid), NULL, 0); break; case IB_CM_REQ_RCVD: if (err == -ENOMEM) { /* Do not reject to allow future retries. */ cm_reset_to_idle(cm_id_priv); } else { cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); } break; case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->msg); cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); goto retest; case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); break; case IB_CM_ESTABLISHED: if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) { cm_id->state = IB_CM_IDLE; break; } cm_send_dreq_locked(cm_id_priv, NULL, 0); goto retest; case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->msg); cm_enter_timewait(cm_id_priv); goto retest; case IB_CM_DREQ_RCVD: cm_send_drep_locked(cm_id_priv, NULL, 0); WARN_ON(cm_id->state != IB_CM_TIMEWAIT); goto retest; case IB_CM_TIMEWAIT: /* * The cm_acquire_id in cm_timewait_handler will stop working * once we do xa_erase below, so just move to idle here for * consistency. */ cm_id->state = IB_CM_IDLE; break; case IB_CM_IDLE: break; } WARN_ON(cm_id->state != IB_CM_IDLE); spin_lock(&cm.lock); /* Required for cleanup paths related cm_req_handler() */ if (cm_id_priv->timewait_info) { cm_remove_remote(cm_id_priv); kfree(cm_id_priv->timewait_info); cm_id_priv->timewait_info = NULL; } WARN_ON(cm_id_priv->listen_sharecount); WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node)); if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); spin_unlock(&cm.lock); spin_unlock_irq(&cm_id_priv->lock); xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id)); cm_deref_id(cm_id_priv); wait_for_completion(&cm_id_priv->comp); while ((work = cm_dequeue_work(cm_id_priv)) != NULL) cm_free_work(work); cm_destroy_av(&cm_id_priv->av); cm_destroy_av(&cm_id_priv->alt_av); kfree(cm_id_priv->private_data); kfree_rcu(cm_id_priv, rcu); } void ib_destroy_cm_id(struct ib_cm_id *cm_id) { cm_destroy_id(cm_id, 0); } EXPORT_SYMBOL(ib_destroy_cm_id); static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id) { if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID && (service_id != IB_CM_ASSIGN_SERVICE_ID)) return -EINVAL; if (service_id == IB_CM_ASSIGN_SERVICE_ID) cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++); else cm_id_priv->id.service_id = service_id; return 0; } /** * ib_cm_listen - Initiates listening on the specified service ID for * connection and service ID resolution requests. * @cm_id: Connection identifier associated with the listen request. 
* @service_id: Service identifier matched against incoming connection * and service ID resolution requests. The service ID should be specified * network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will * assign a service ID to the caller. */ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id) { struct cm_id_private *cm_id_priv = container_of(cm_id, struct cm_id_private, id); unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id_priv->id.state != IB_CM_IDLE) { ret = -EINVAL; goto out; } ret = cm_init_listen(cm_id_priv, service_id); if (ret) goto out; if (!cm_insert_listen(cm_id_priv, NULL)) { ret = -EBUSY; goto out; } cm_id_priv->id.state = IB_CM_LISTEN; ret = 0; out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_cm_listen); /** * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on * the given service ID. * * If there's an existing ID listening on that same device and service ID, * return it. * * @device: Device associated with the cm_id. All related communication will * be associated with the specified device. * @cm_handler: Callback invoked to notify the user of CM events. * @service_id: Service identifier matched against incoming connection * and service ID resolution requests. The service ID should be specified * network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will * assign a service ID to the caller. * * Callers should call ib_destroy_cm_id when done with the listener ID. */ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device, ib_cm_handler cm_handler, __be64 service_id) { struct cm_id_private *listen_id_priv; struct cm_id_private *cm_id_priv; int err = 0; /* Create an ID in advance, since the creation may sleep */ cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL); if (IS_ERR(cm_id_priv)) return ERR_CAST(cm_id_priv); err = cm_init_listen(cm_id_priv, service_id); if (err) { ib_destroy_cm_id(&cm_id_priv->id); return ERR_PTR(err); } spin_lock_irq(&cm_id_priv->lock); listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler); if (listen_id_priv != cm_id_priv) { spin_unlock_irq(&cm_id_priv->lock); ib_destroy_cm_id(&cm_id_priv->id); if (!listen_id_priv) return ERR_PTR(-EINVAL); return &listen_id_priv->id; } cm_id_priv->id.state = IB_CM_LISTEN; spin_unlock_irq(&cm_id_priv->lock); /* * A listen ID does not need to be in the xarray since it does not * receive mads, is not placed in the remote_id or remote_qpn rbtree, * and does not enter timewait. 
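* Whether the ID returned is new or pre-existing, the caller holds its own listen share and drops it with ib_destroy_cm_id().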
*/ return &cm_id_priv->id; } EXPORT_SYMBOL(ib_cm_insert_listen); static __be64 cm_form_tid(struct cm_id_private *cm_id_priv) { u64 hi_tid = 0, low_tid; lockdep_assert_held(&cm_id_priv->lock); low_tid = (u64)cm_id_priv->id.local_id; if (!cm_id_priv->av.port) return cpu_to_be64(low_tid); spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); if (cm_id_priv->av.port->mad_agent) hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32; spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); return cpu_to_be64(hi_tid | low_tid); } static void cm_format_mad_hdr(struct ib_mad_hdr *hdr, __be16 attr_id, __be64 tid) { hdr->base_version = IB_MGMT_BASE_VERSION; hdr->mgmt_class = IB_MGMT_CLASS_CM; hdr->class_version = IB_CM_CLASS_VERSION; hdr->method = IB_MGMT_METHOD_SEND; hdr->attr_id = attr_id; hdr->tid = tid; } static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id, __be64 tid, u32 attr_mod) { cm_format_mad_hdr(hdr, attr_id, tid); hdr->attr_mod = cpu_to_be32(attr_mod); } static void cm_format_req(struct cm_req_msg *req_msg, struct cm_id_private *cm_id_priv, struct ib_cm_req_param *param) { struct sa_path_rec *pri_path = param->primary_path; struct sa_path_rec *alt_path = param->alternate_path; bool pri_ext = false; __be16 lid; if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA) pri_ext = opa_is_extended_lid(pri_path->opa.dlid, pri_path->opa.slid); cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID, cm_form_tid(cm_id_priv), param->ece.attr_mod); IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id)); IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg, be64_to_cpu(cm_id_priv->id.device->node_guid)); IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num); IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth); IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg, param->remote_cm_response_timeout); cm_req_set_qp_type(req_msg, param->qp_type); IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control); IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn); IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg, param->local_cm_response_timeout); IBA_SET(CM_REQ_PARTITION_KEY, req_msg, be16_to_cpu(param->primary_path->pkey)); IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg, param->primary_path->mtu); IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries); if (param->qp_type != IB_QPT_XRC_INI) { IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg, param->responder_resources); IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count); IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg, param->rnr_retry_count); IBA_SET(CM_REQ_SRQ, req_msg, param->srq); } *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) = pri_path->sgid; *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) = pri_path->dgid; if (pri_ext) { IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) ->global.interface_id = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid)); IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) ->global.interface_id = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid)); } if (pri_path->hop_limit <= 1) { IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg, be16_to_cpu(pri_ext ? 0 : htons(ntohl(sa_path_get_slid( pri_path))))); IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg, be16_to_cpu(pri_ext ? 
0 : htons(ntohl(sa_path_get_dlid( pri_path))))); } else { if (param->primary_path_inbound) { lid = param->primary_path_inbound->ib.dlid; IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg, be16_to_cpu(lid)); } else IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg, be16_to_cpu(IB_LID_PERMISSIVE)); /* Work-around until there's a way to obtain remote LID info */ IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg, be16_to_cpu(IB_LID_PERMISSIVE)); } IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg, be32_to_cpu(pri_path->flow_label)); IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate); IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class); IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit); IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl); IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg, (pri_path->hop_limit <= 1)); IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg, cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, pri_path->packet_life_time)); if (alt_path) { bool alt_ext = false; if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA) alt_ext = opa_is_extended_lid(alt_path->opa.dlid, alt_path->opa.slid); *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) = alt_path->sgid; *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) = alt_path->dgid; if (alt_ext) { IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) ->global.interface_id = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid)); IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) ->global.interface_id = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid)); } if (alt_path->hop_limit <= 1) { IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg, be16_to_cpu( alt_ext ? 0 : htons(ntohl(sa_path_get_slid( alt_path))))); IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg, be16_to_cpu( alt_ext ? 
0 : htons(ntohl(sa_path_get_dlid( alt_path))))); } else { IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg, be16_to_cpu(IB_LID_PERMISSIVE)); IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg, be16_to_cpu(IB_LID_PERMISSIVE)); } IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg, be32_to_cpu(alt_path->flow_label)); IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate); IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg, alt_path->traffic_class); IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg, alt_path->hop_limit); IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl); IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg, (alt_path->hop_limit <= 1)); IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg, cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, alt_path->packet_life_time)); } IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id); if (param->private_data && param->private_data_len) IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data, param->private_data_len); } static int cm_validate_req_param(struct ib_cm_req_param *param) { if (!param->primary_path) return -EINVAL; if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC && param->qp_type != IB_QPT_XRC_INI) return -EINVAL; if (param->private_data && param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE) return -EINVAL; if (param->alternate_path && (param->alternate_path->pkey != param->primary_path->pkey || param->alternate_path->mtu != param->primary_path->mtu)) return -EINVAL; return 0; } int ib_send_cm_req(struct ib_cm_id *cm_id, struct ib_cm_req_param *param) { struct cm_av av = {}, alt_av = {}; struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; struct cm_req_msg *req_msg; unsigned long flags; int ret; ret = cm_validate_req_param(param); if (ret) return ret; /* Verify that we're not in timewait. 
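* A REQ is only legal from IB_CM_IDLE with no timewait_info still attached; that check is made under cm_id_priv->lock below, before a fresh timewait_info is created for this connection attempt.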
*/ cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); return -EINVAL; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> id.local_id); if (IS_ERR(cm_id_priv->timewait_info)) { ret = PTR_ERR(cm_id_priv->timewait_info); cm_id_priv->timewait_info = NULL; return ret; } ret = cm_init_av_by_path(param->primary_path, param->ppath_sgid_attr, &av); if (ret) return ret; if (param->alternate_path) { ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av); if (ret) { cm_destroy_av(&av); return ret; } } cm_id->service_id = param->service_id; cm_id_priv->timeout_ms = cm_convert_to_ms( param->primary_path->packet_life_time) * 2 + cm_convert_to_ms( param->remote_cm_response_timeout); cm_id_priv->max_cm_retries = param->max_cm_retries; cm_id_priv->initiator_depth = param->initiator_depth; cm_id_priv->responder_resources = param->responder_resources; cm_id_priv->retry_count = param->retry_count; cm_id_priv->path_mtu = param->primary_path->mtu; cm_id_priv->pkey = param->primary_path->pkey; cm_id_priv->qp_type = param->qp_type; spin_lock_irqsave(&cm_id_priv->lock, flags); cm_move_av_from_path(&cm_id_priv->av, &av); if (param->primary_path_outbound) cm_id_priv->av.dlid_datapath = be16_to_cpu(param->primary_path_outbound->ib.dlid); if (param->alternate_path) cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av); msg = cm_alloc_priv_msg(cm_id_priv); if (IS_ERR(msg)) { ret = PTR_ERR(msg); goto out_unlock; } req_msg = (struct cm_req_msg *)msg->mad; cm_format_req(req_msg, cm_id_priv, param); cm_id_priv->tid = req_msg->hdr.tid; msg->timeout_ms = cm_id_priv->timeout_ms; msg->context[1] = (void *)(unsigned long)IB_CM_REQ_SENT; cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg)); cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg)); trace_icm_send_req(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); if (ret) goto out_free; BUG_ON(cm_id->state != IB_CM_IDLE); cm_id->state = IB_CM_REQ_SENT; spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; out_free: cm_free_priv_msg(msg); out_unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_req); static int cm_issue_rej(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, enum ib_cm_rej_reason reason, enum cm_msg_response msg_rejected, void *ari, u8 ari_length) { struct ib_mad_send_buf *msg = NULL; struct cm_rej_msg *rej_msg, *rcv_msg; int ret; ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); if (ret) return ret; /* We just need common CM header information. Cast to any message. 
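* The comm IDs are mirrored below (the peer's local ID becomes this REJ's remote ID and vice versa), which lets the reject go out without looking up any cm_id_private.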
*/ rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad; rej_msg = (struct cm_rej_msg *) msg->mad; cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid); IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg, IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg)); IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg)); IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected); IBA_SET(CM_REJ_REASON, rej_msg, reason); if (ari && ari_length) { IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length); IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length); } trace_icm_issue_rej( IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg), IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg)); ret = ib_post_send_mad(msg, NULL); if (ret) cm_free_response_msg(msg); return ret; } static bool cm_req_has_alt_path(struct cm_req_msg *req_msg) { return ((cpu_to_be16( IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) || (ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg)))); } static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num, struct sa_path_rec *path, union ib_gid *gid) { if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num)) path->rec_type = SA_PATH_REC_TYPE_OPA; else path->rec_type = SA_PATH_REC_TYPE_IB; } static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg, struct sa_path_rec *primary_path, struct sa_path_rec *alt_path, struct ib_wc *wc) { u32 lid; if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) { sa_path_set_dlid(primary_path, wc->slid); sa_path_set_slid(primary_path, IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg)); } else { lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)); sa_path_set_dlid(primary_path, lid); lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)); sa_path_set_slid(primary_path, lid); } if (!cm_req_has_alt_path(req_msg)) return; if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) { sa_path_set_dlid(alt_path, IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg)); sa_path_set_slid(alt_path, IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg)); } else { lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg)); sa_path_set_dlid(alt_path, lid); lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg)); sa_path_set_slid(alt_path, lid); } } static void cm_format_paths_from_req(struct cm_req_msg *req_msg, struct sa_path_rec *primary_path, struct sa_path_rec *alt_path, struct ib_wc *wc) { primary_path->dgid = *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg); primary_path->sgid = *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg); primary_path->flow_label = cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg)); primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg); primary_path->traffic_class = IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg); primary_path->reversible = 1; primary_path->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg)); primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg); primary_path->mtu_selector = IB_SA_EQ; primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg); primary_path->rate_selector = IB_SA_EQ; primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg); primary_path->packet_life_time_selector = IB_SA_EQ; primary_path->packet_life_time = IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg); primary_path->packet_life_time -= (primary_path->packet_life_time > 0); primary_path->service_id = cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)); 
if (sa_path_is_roce(primary_path)) primary_path->roce.route_resolved = false; if (cm_req_has_alt_path(req_msg)) { alt_path->dgid = *IBA_GET_MEM_PTR( CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg); alt_path->sgid = *IBA_GET_MEM_PTR( CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg); alt_path->flow_label = cpu_to_be32( IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg)); alt_path->hop_limit = IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg); alt_path->traffic_class = IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg); alt_path->reversible = 1; alt_path->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg)); alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg); alt_path->mtu_selector = IB_SA_EQ; alt_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg); alt_path->rate_selector = IB_SA_EQ; alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg); alt_path->packet_life_time_selector = IB_SA_EQ; alt_path->packet_life_time = IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg); alt_path->packet_life_time -= (alt_path->packet_life_time > 0); alt_path->service_id = cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)); if (sa_path_is_roce(alt_path)) alt_path->roce.route_resolved = false; } cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc); } static u16 cm_get_bth_pkey(struct cm_work *work) { struct ib_device *ib_dev = work->port->cm_dev->ib_device; u32 port_num = work->port->port_num; u16 pkey_index = work->mad_recv_wc->wc->pkey_index; u16 pkey; int ret; ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey); if (ret) { dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %u, pkey index %u). %d\n", port_num, pkey_index, ret); return 0; } return pkey; } /** * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID * ULPs (such as IPoIB) do not understand OPA GIDs and will * reject them as the local_gid will not match the sgid. Therefore, * change the pathrec's SGID to an IB SGID. 
* * @work: Work completion * @path: Path record */ static void cm_opa_to_ib_sgid(struct cm_work *work, struct sa_path_rec *path) { struct ib_device *dev = work->port->cm_dev->ib_device; u32 port_num = work->port->port_num; if (rdma_cap_opa_ah(dev, port_num) && (ib_is_opa_gid(&path->sgid))) { union ib_gid sgid; if (rdma_query_gid(dev, port_num, 0, &sgid)) { dev_warn(&dev->dev, "Error updating sgid in CM request\n"); return; } path->sgid = sgid; } } static void cm_format_req_event(struct cm_work *work, struct cm_id_private *cm_id_priv, struct ib_cm_id *listen_id) { struct cm_req_msg *req_msg; struct ib_cm_req_event_param *param; req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.req_rcvd; param->listen_id = listen_id; param->bth_pkey = cm_get_bth_pkey(work); param->port = cm_id_priv->av.port->port_num; param->primary_path = &work->path[0]; cm_opa_to_ib_sgid(work, param->primary_path); if (cm_req_has_alt_path(req_msg)) { param->alternate_path = &work->path[1]; cm_opa_to_ib_sgid(work, param->alternate_path); } else { param->alternate_path = NULL; } param->remote_ca_guid = cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg)); param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg); param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg); param->qp_type = cm_req_get_qp_type(req_msg); param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg); param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg); param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg); param->local_cm_response_timeout = IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg); param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg); param->remote_cm_response_timeout = IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg); param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg); param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg); param->srq = IBA_GET(CM_REQ_SRQ, req_msg); param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr; param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg); param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod); work->cm_event.private_data = IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg); } static void cm_process_work(struct cm_id_private *cm_id_priv, struct cm_work *work) { int ret; /* We will typically only have the current event to report. 
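* work_count starts at -1, so cm_queue_work_unlock() dispatches only the first event directly; anything queued meanwhile is drained by the loop below until atomic_add_negative(-1, ...) takes the count negative again.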
*/ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); cm_free_work(work); while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) { spin_lock_irq(&cm_id_priv->lock); work = cm_dequeue_work(cm_id_priv); spin_unlock_irq(&cm_id_priv->lock); if (!work) return; ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); cm_free_work(work); } cm_deref_id(cm_id_priv); if (ret) cm_destroy_id(&cm_id_priv->id, ret); } static void cm_format_mra(struct cm_mra_msg *mra_msg, struct cm_id_private *cm_id_priv, enum cm_msg_response msg_mraed, u8 service_timeout, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid); IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed); IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg, be32_to_cpu(cm_id_priv->id.remote_id)); IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout); if (private_data && private_data_len) IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data, private_data_len); } static void cm_format_rej(struct cm_rej_msg *rej_msg, struct cm_id_private *cm_id_priv, enum ib_cm_rej_reason reason, void *ari, u8 ari_length, const void *private_data, u8 private_data_len, enum ib_cm_state state) { lockdep_assert_held(&cm_id_priv->lock); cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid); IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg, be32_to_cpu(cm_id_priv->id.remote_id)); switch (state) { case IB_CM_REQ_RCVD: IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0)); IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ); break; case IB_CM_MRA_REQ_SENT: IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ); break; case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP); break; default: IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_OTHER); break; } IBA_SET(CM_REJ_REASON, rej_msg, reason); if (ari && ari_length) { IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length); IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length); } if (private_data && private_data_len) IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data, private_data_len); } static void cm_dup_req_handler(struct cm_work *work, struct cm_id_private *cm_id_priv) { struct ib_mad_send_buf *msg = NULL; int ret; atomic_long_inc( &work->port->counters[CM_RECV_DUPLICATES][CM_REQ_COUNTER]); /* Quick state check to discard duplicate REQs. 
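* While the original REQ is still in IB_CM_REQ_RCVD the duplicate is silently dropped; the switch below then resends an MRA (MRA_REQ_SENT) or a stale-connection REJ (TIMEWAIT) and ignores the duplicate in any other state.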
*/ spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state == IB_CM_REQ_RCVD) { spin_unlock_irq(&cm_id_priv->lock); return; } spin_unlock_irq(&cm_id_priv->lock); ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); if (ret) return; spin_lock_irq(&cm_id_priv->lock); switch (cm_id_priv->id.state) { case IB_CM_MRA_REQ_SENT: cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout, cm_id_priv->private_data, cm_id_priv->private_data_len); break; case IB_CM_TIMEWAIT: cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0, IB_CM_TIMEWAIT); break; default: goto unlock; } spin_unlock_irq(&cm_id_priv->lock); trace_icm_send_dup_req(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); if (ret) goto free; return; unlock: spin_unlock_irq(&cm_id_priv->lock); free: cm_free_response_msg(msg); } static struct cm_id_private *cm_match_req(struct cm_work *work, struct cm_id_private *cm_id_priv) { struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv; struct cm_timewait_info *timewait_info; struct cm_req_msg *req_msg; req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; /* Check for possible duplicate REQ. */ spin_lock_irq(&cm.lock); timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info); if (timewait_info) { cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id, timewait_info->work.remote_id); spin_unlock_irq(&cm.lock); if (cur_cm_id_priv) { cm_dup_req_handler(work, cur_cm_id_priv); cm_deref_id(cur_cm_id_priv); } return NULL; } /* Check for stale connections. */ timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info); if (timewait_info) { cm_remove_remote(cm_id_priv); cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id, timewait_info->work.remote_id); spin_unlock_irq(&cm.lock); cm_issue_rej(work->port, work->mad_recv_wc, IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ, NULL, 0); if (cur_cm_id_priv) { ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0); cm_deref_id(cur_cm_id_priv); } return NULL; } /* Find matching listen request. */ listen_cm_id_priv = cm_find_listen( cm_id_priv->id.device, cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg))); if (!listen_cm_id_priv) { cm_remove_remote(cm_id_priv); spin_unlock_irq(&cm.lock); cm_issue_rej(work->port, work->mad_recv_wc, IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ, NULL, 0); return NULL; } spin_unlock_irq(&cm.lock); return listen_cm_id_priv; } /* * Work-around for inter-subnet connections. If the LIDs are permissive, * we need to override the LID/SL data in the REQ with the LID information * in the work completion. 
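* Concretely: a permissive primary/alternate local LID is overwritten with wc->slid (and the SL with wc->sl), and a permissive remote LID with wc->dlid_path_bits.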
*/ static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc) { if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) { if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg)) == IB_LID_PERMISSIVE) { IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg, be16_to_cpu(ib_lid_be16(wc->slid))); IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl); } if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg)) == IB_LID_PERMISSIVE) IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg, wc->dlid_path_bits); } if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) { if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg)) == IB_LID_PERMISSIVE) { IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg, be16_to_cpu(ib_lid_be16(wc->slid))); IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl); } if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg)) == IB_LID_PERMISSIVE) IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg, wc->dlid_path_bits); } } static int cm_req_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv, *listen_cm_id_priv; struct cm_req_msg *req_msg; const struct ib_global_route *grh; const struct ib_gid_attr *gid_attr; int ret; req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL); if (IS_ERR(cm_id_priv)) return PTR_ERR(cm_id_priv); cm_id_priv->id.remote_id = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg)); cm_id_priv->id.service_id = cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)); cm_id_priv->tid = req_msg->hdr.tid; cm_id_priv->timeout_ms = cm_convert_to_ms( IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg)); cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg); cm_id_priv->remote_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg)); cm_id_priv->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg); cm_id_priv->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg); cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg); cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg)); cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg)); cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg); cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg); cm_id_priv->qp_type = cm_req_get_qp_type(req_msg); ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc, work->mad_recv_wc->recv_buf.grh, &cm_id_priv->av); if (ret) goto destroy; cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> id.local_id); if (IS_ERR(cm_id_priv->timewait_info)) { ret = PTR_ERR(cm_id_priv->timewait_info); cm_id_priv->timewait_info = NULL; goto destroy; } cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id; cm_id_priv->timewait_info->remote_ca_guid = cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg)); cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn; /* * Note that the ID pointer is not in the xarray at this point, * so this set is only visible to the local thread. 
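* cm_finalize_id() only runs further down, after the av, paths and handler/context have been copied from the listener, so other MAD handlers never observe a half-initialized REQ_RCVD id.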
*/ cm_id_priv->id.state = IB_CM_REQ_RCVD; listen_cm_id_priv = cm_match_req(work, cm_id_priv); if (!listen_cm_id_priv) { trace_icm_no_listener_err(&cm_id_priv->id); cm_id_priv->id.state = IB_CM_IDLE; ret = -EINVAL; goto destroy; } memset(&work->path[0], 0, sizeof(work->path[0])); if (cm_req_has_alt_path(req_msg)) memset(&work->path[1], 0, sizeof(work->path[1])); grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr); gid_attr = grh->sgid_attr; if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) { work->path[0].rec_type = sa_conv_gid_to_pathrec_type(gid_attr->gid_type); } else { cm_process_routed_req(req_msg, work->mad_recv_wc->wc); cm_path_set_rec_type( work->port->cm_dev->ib_device, work->port->port_num, &work->path[0], IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)); } if (cm_req_has_alt_path(req_msg)) work->path[1].rec_type = work->path[0].rec_type; cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1], work->mad_recv_wc->wc); if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) sa_path_set_dmac(&work->path[0], cm_id_priv->av.ah_attr.roce.dmac); work->path[0].hop_limit = grh->hop_limit; /* This destroy call is needed to pair with cm_init_av_for_response */ cm_destroy_av(&cm_id_priv->av); ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av); if (ret) { int err; err = rdma_query_gid(work->port->cm_dev->ib_device, work->port->port_num, 0, &work->path[0].sgid); if (err) ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID, NULL, 0, NULL, 0); else ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID, &work->path[0].sgid, sizeof(work->path[0].sgid), NULL, 0); goto rejected; } if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_IB) cm_id_priv->av.dlid_datapath = IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg); if (cm_req_has_alt_path(req_msg)) { ret = cm_init_av_by_path(&work->path[1], NULL, &cm_id_priv->alt_av); if (ret) { ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_ALT_GID, &work->path[0].sgid, sizeof(work->path[0].sgid), NULL, 0); goto rejected; } } cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; cm_id_priv->id.context = listen_cm_id_priv->id.context; cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id); /* Now MAD handlers can see the new ID */ spin_lock_irq(&cm_id_priv->lock); cm_finalize_id(cm_id_priv); /* Refcount belongs to the event, pairs with cm_process_work() */ refcount_inc(&cm_id_priv->refcount); cm_queue_work_unlock(cm_id_priv, work); /* * Since this ID was just created and was not made visible to other MAD * handlers until the cm_finalize_id() above we know that the * cm_process_work() will deliver the event and the listen_cm_id * embedded in the event can be derefed here. 
*/ cm_deref_id(listen_cm_id_priv); return 0; rejected: cm_deref_id(listen_cm_id_priv); destroy: ib_destroy_cm_id(&cm_id_priv->id); return ret; } static void cm_format_rep(struct cm_rep_msg *rep_msg, struct cm_id_private *cm_id_priv, struct ib_cm_rep_param *param) { cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid, param->ece.attr_mod); IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg, be32_to_cpu(cm_id_priv->id.remote_id)); IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn); IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg, param->responder_resources); IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg, cm_id_priv->av.port->cm_dev->ack_delay); IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted); IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count); IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg, be64_to_cpu(cm_id_priv->id.device->node_guid)); if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) { IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg, param->initiator_depth); IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg, param->flow_control); IBA_SET(CM_REP_SRQ, rep_msg, param->srq); IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num); } else { IBA_SET(CM_REP_SRQ, rep_msg, 1); IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num); } IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id); IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8); IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16); if (param->private_data && param->private_data_len) IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data, param->private_data_len); } int ib_send_cm_rep(struct ib_cm_id *cm_id, struct ib_cm_rep_param *param) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; struct cm_rep_msg *rep_msg; unsigned long flags; int ret; if (param->private_data && param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_REQ_RCVD && cm_id->state != IB_CM_MRA_REQ_SENT) { trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state); ret = -EINVAL; goto out; } msg = cm_alloc_priv_msg(cm_id_priv); if (IS_ERR(msg)) { ret = PTR_ERR(msg); goto out; } rep_msg = (struct cm_rep_msg *) msg->mad; cm_format_rep(rep_msg, cm_id_priv, param); msg->timeout_ms = cm_id_priv->timeout_ms; msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT; trace_icm_send_rep(cm_id); ret = ib_post_send_mad(msg, NULL); if (ret) goto out_free; cm_id->state = IB_CM_REP_SENT; cm_id_priv->initiator_depth = param->initiator_depth; cm_id_priv->responder_resources = param->responder_resources; cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg)); WARN_ONCE(param->qp_num & 0xFF000000, "IBTA declares QPN to be 24 bits, but it is 0x%X\n", param->qp_num); cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; out_free: cm_free_priv_msg(msg); out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_rep); static void cm_format_rtu(struct cm_rtu_msg *rtu_msg, struct cm_id_private *cm_id_priv, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid); IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg, be32_to_cpu(cm_id_priv->id.remote_id)); if 
(private_data && private_data_len) IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data, private_data_len); } int ib_send_cm_rtu(struct ib_cm_id *cm_id, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; void *data; int ret; if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE) return -EINVAL; data = cm_copy_private_data(private_data, private_data_len); if (IS_ERR(data)) return PTR_ERR(data); cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_REP_RCVD && cm_id->state != IB_CM_MRA_REP_SENT) { trace_icm_send_cm_rtu_err(cm_id); ret = -EINVAL; goto error; } msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) { ret = PTR_ERR(msg); goto error; } cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, private_data, private_data_len); trace_icm_send_rtu(cm_id); ret = ib_post_send_mad(msg, NULL); if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); kfree(data); return ret; } cm_id->state = IB_CM_ESTABLISHED; cm_set_private_data(cm_id_priv, data, private_data_len); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); kfree(data); return ret; } EXPORT_SYMBOL(ib_send_cm_rtu); static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type) { struct cm_rep_msg *rep_msg; struct ib_cm_rep_event_param *param; rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.rep_rcvd; param->remote_ca_guid = cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg)); param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg); param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type)); param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg); param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg); param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg); param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg); param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg); param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg); param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg); param->srq = IBA_GET(CM_REP_SRQ, rep_msg); param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16; param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8; param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg); param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod); work->cm_event.private_data = IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg); } static void cm_dup_rep_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_rep_msg *rep_msg; struct ib_mad_send_buf *msg = NULL; int ret; rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg))); if (!cm_id_priv) return; atomic_long_inc( &work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]); ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); if (ret) goto deref; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state == IB_CM_ESTABLISHED) cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, cm_id_priv->private_data, cm_id_priv->private_data_len); else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT) cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, CM_MSG_RESPONSE_REP, 
cm_id_priv->service_timeout, cm_id_priv->private_data, cm_id_priv->private_data_len); else goto unlock; spin_unlock_irq(&cm_id_priv->lock); trace_icm_send_dup_rep(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); if (ret) goto free; goto deref; unlock: spin_unlock_irq(&cm_id_priv->lock); free: cm_free_response_msg(msg); deref: cm_deref_id(cm_id_priv); } static int cm_rep_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_rep_msg *rep_msg; int ret; struct cm_id_private *cur_cm_id_priv; struct cm_timewait_info *timewait_info; rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0); if (!cm_id_priv) { cm_dup_rep_handler(work); trace_icm_remote_no_priv_err( IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)); return -EINVAL; } cm_format_rep_event(work, cm_id_priv->qp_type); spin_lock_irq(&cm_id_priv->lock); switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: break; default: ret = -EINVAL; trace_icm_rep_unknown_err( IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg), IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg), cm_id_priv->id.state); spin_unlock_irq(&cm_id_priv->lock); goto error; } cm_id_priv->timewait_info->work.remote_id = cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)); cm_id_priv->timewait_info->remote_ca_guid = cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg)); cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type); spin_lock(&cm.lock); /* Check for duplicate REP. */ if (cm_insert_remote_id(cm_id_priv->timewait_info)) { spin_unlock(&cm.lock); spin_unlock_irq(&cm_id_priv->lock); ret = -EINVAL; trace_icm_insert_failed_err( IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)); goto error; } /* Check for a stale connection. */ timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info); if (timewait_info) { cm_remove_remote(cm_id_priv); cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id, timewait_info->work.remote_id); spin_unlock(&cm.lock); spin_unlock_irq(&cm_id_priv->lock); cm_issue_rej(work->port, work->mad_recv_wc, IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP, NULL, 0); ret = -EINVAL; trace_icm_staleconn_err( IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg), IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)); if (cur_cm_id_priv) { ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0); cm_deref_id(cur_cm_id_priv); } goto error; } spin_unlock(&cm.lock); cm_id_priv->id.state = IB_CM_REP_RCVD; cm_id_priv->id.remote_id = cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)); cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type); cm_id_priv->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg); cm_id_priv->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg); cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg)); cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg); cm_id_priv->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg); cm_id_priv->av.timeout = cm_ack_timeout(cm_id_priv->target_ack_delay, cm_id_priv->av.timeout - 1); cm_id_priv->alt_av.timeout = cm_ack_timeout(cm_id_priv->target_ack_delay, cm_id_priv->alt_av.timeout - 1); ib_cancel_mad(cm_id_priv->msg); cm_queue_work_unlock(cm_id_priv, work); return 0; error: cm_deref_id(cm_id_priv); return ret; } static int cm_establish_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; /* See comment in cm_establish about lookup. 
*/ cm_id_priv = cm_acquire_id(work->local_id, work->remote_id); if (!cm_id_priv) return -EINVAL; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { spin_unlock_irq(&cm_id_priv->lock); goto out; } ib_cancel_mad(cm_id_priv->msg); cm_queue_work_unlock(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static int cm_rtu_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_rtu_msg *rtu_msg; rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)), cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg))); if (!cm_id_priv) return -EINVAL; work->cm_event.private_data = IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg); spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_REP_SENT && cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { spin_unlock_irq(&cm_id_priv->lock); atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_RTU_COUNTER]); goto out; } cm_id_priv->id.state = IB_CM_ESTABLISHED; ib_cancel_mad(cm_id_priv->msg); cm_queue_work_unlock(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_format_dreq(struct cm_dreq_msg *dreq_msg, struct cm_id_private *cm_id_priv, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID, cm_form_tid(cm_id_priv)); IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg, be32_to_cpu(cm_id_priv->id.remote_id)); IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg, be32_to_cpu(cm_id_priv->remote_qpn)); if (private_data && private_data_len) IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data, private_data_len); } static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv, const void *private_data, u8 private_data_len) { struct ib_mad_send_buf *msg; int ret; lockdep_assert_held(&cm_id_priv->lock); if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE) return -EINVAL; if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { trace_icm_dreq_skipped(&cm_id_priv->id); return -EINVAL; } if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT || cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) ib_cancel_mad(cm_id_priv->msg); msg = cm_alloc_priv_msg(cm_id_priv); if (IS_ERR(msg)) { cm_enter_timewait(cm_id_priv); return PTR_ERR(msg); } cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, private_data, private_data_len); msg->timeout_ms = cm_id_priv->timeout_ms; msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT; trace_icm_send_dreq(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); if (ret) { cm_enter_timewait(cm_id_priv); cm_free_priv_msg(msg); return ret; } cm_id_priv->id.state = IB_CM_DREQ_SENT; return 0; } int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv = container_of(cm_id, struct cm_id_private, id); unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_dreq); static void cm_format_drep(struct cm_drep_msg *drep_msg, struct cm_id_private *cm_id_priv, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid); IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg, 
be32_to_cpu(cm_id_priv->id.remote_id)); if (private_data && private_data_len) IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data, private_data_len); } static int cm_send_drep_locked(struct cm_id_private *cm_id_priv, void *private_data, u8 private_data_len) { struct ib_mad_send_buf *msg; int ret; lockdep_assert_held(&cm_id_priv->lock); if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE) return -EINVAL; if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) { trace_icm_send_drep_err(&cm_id_priv->id); kfree(private_data); return -EINVAL; } cm_set_private_data(cm_id_priv, private_data, private_data_len); cm_enter_timewait(cm_id_priv); msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) return PTR_ERR(msg); cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, private_data, private_data_len); trace_icm_send_drep(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); if (ret) { cm_free_msg(msg); return ret; } return 0; } int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv = container_of(cm_id, struct cm_id_private, id); unsigned long flags; void *data; int ret; data = cm_copy_private_data(private_data, private_data_len); if (IS_ERR(data)) return PTR_ERR(data); spin_lock_irqsave(&cm_id_priv->lock, flags); ret = cm_send_drep_locked(cm_id_priv, data, private_data_len); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_drep); static int cm_issue_drep(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_mad_send_buf *msg = NULL; struct cm_dreq_msg *dreq_msg; struct cm_drep_msg *drep_msg; int ret; ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); if (ret) return ret; dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad; drep_msg = (struct cm_drep_msg *) msg->mad; cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid); IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg, IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)); IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg, IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)); trace_icm_issue_drep( IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg), IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)); ret = ib_post_send_mad(msg, NULL); if (ret) cm_free_response_msg(msg); return ret; } static int cm_dreq_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_dreq_msg *dreq_msg; struct ib_mad_send_buf *msg = NULL; dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)), cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg))); if (!cm_id_priv) { atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_DREQ_COUNTER]); cm_issue_drep(work->port, work->mad_recv_wc); trace_icm_no_priv_err( IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg), IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)); return -EINVAL; } work->cm_event.private_data = IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg); spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->local_qpn != cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg))) goto unlock; switch (cm_id_priv->id.state) { case IB_CM_REP_SENT: case IB_CM_DREQ_SENT: case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->msg); break; case IB_CM_ESTABLISHED: if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT || cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) ib_cancel_mad(cm_id_priv->msg); break; case IB_CM_TIMEWAIT: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_DREQ_COUNTER]); msg = cm_alloc_response_msg_no_ah(work->port, 
work->mad_recv_wc); if (IS_ERR(msg)) goto unlock; cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, cm_id_priv->private_data, cm_id_priv->private_data_len); spin_unlock_irq(&cm_id_priv->lock); if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) || ib_post_send_mad(msg, NULL)) cm_free_response_msg(msg); goto deref; case IB_CM_DREQ_RCVD: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_DREQ_COUNTER]); goto unlock; default: trace_icm_dreq_unknown_err(&cm_id_priv->id); goto unlock; } cm_id_priv->id.state = IB_CM_DREQ_RCVD; cm_id_priv->tid = dreq_msg->hdr.tid; cm_queue_work_unlock(cm_id_priv, work); return 0; unlock: spin_unlock_irq(&cm_id_priv->lock); deref: cm_deref_id(cm_id_priv); return -EINVAL; } static int cm_drep_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_drep_msg *drep_msg; drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)), cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg))); if (!cm_id_priv) return -EINVAL; work->cm_event.private_data = IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg); spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_DREQ_SENT && cm_id_priv->id.state != IB_CM_DREQ_RCVD) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_enter_timewait(cm_id_priv); ib_cancel_mad(cm_id_priv->msg); cm_queue_work_unlock(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static int cm_send_rej_locked(struct cm_id_private *cm_id_priv, enum ib_cm_rej_reason reason, void *ari, u8 ari_length, const void *private_data, u8 private_data_len) { enum ib_cm_state state = cm_id_priv->id.state; struct ib_mad_send_buf *msg; int ret; lockdep_assert_held(&cm_id_priv->lock); if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) || (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) return -EINVAL; trace_icm_send_rej(&cm_id_priv->id, reason); switch (state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: cm_reset_to_idle(cm_id_priv); msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) return PTR_ERR(msg); cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason, ari, ari_length, private_data, private_data_len, state); break; case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: cm_enter_timewait(cm_id_priv); msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) return PTR_ERR(msg); cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason, ari, ari_length, private_data, private_data_len, state); break; default: trace_icm_send_unknown_rej_err(&cm_id_priv->id); return -EINVAL; } ret = ib_post_send_mad(msg, NULL); if (ret) { cm_free_msg(msg); return ret; } return 0; } int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason, void *ari, u8 ari_length, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv = container_of(cm_id, struct cm_id_private, id); unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length, private_data, private_data_len); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_rej); static void cm_format_rej_event(struct cm_work *work) { struct cm_rej_msg *rej_msg; struct ib_cm_rej_event_param *param; rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.rej_rcvd; param->ari = 
IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg); param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg); param->reason = IBA_GET(CM_REJ_REASON, rej_msg); work->cm_event.private_data = IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg); } static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) { struct cm_id_private *cm_id_priv; __be32 remote_id; remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg)); if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) { cm_id_priv = cm_find_remote_id( *((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)), remote_id); } else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) == CM_MSG_RESPONSE_REQ) cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)), 0); else cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)), remote_id); return cm_id_priv; } static int cm_rej_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_rej_msg *rej_msg; rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_rejected_id(rej_msg); if (!cm_id_priv) return -EINVAL; cm_format_rej_event(work); spin_lock_irq(&cm_id_priv->lock); switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->msg); fallthrough; case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN) cm_enter_timewait(cm_id_priv); else cm_reset_to_idle(cm_id_priv); break; case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->msg); fallthrough; case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: cm_enter_timewait(cm_id_priv); break; case IB_CM_ESTABLISHED: if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT || cm_id_priv->id.lap_state == IB_CM_LAP_SENT) { if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT) ib_cancel_mad(cm_id_priv->msg); cm_enter_timewait(cm_id_priv); break; } fallthrough; default: trace_icm_rej_unknown_err(&cm_id_priv->id); spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_queue_work_unlock(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } int ib_send_cm_mra(struct ib_cm_id *cm_id, u8 service_timeout, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; enum ib_cm_state cm_state; enum ib_cm_lap_state lap_state; enum cm_msg_response msg_response; void *data; unsigned long flags; int ret; if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE) return -EINVAL; data = cm_copy_private_data(private_data, private_data_len); if (IS_ERR(data)) return PTR_ERR(data); cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->id.state) { case IB_CM_REQ_RCVD: cm_state = IB_CM_MRA_REQ_SENT; lap_state = cm_id->lap_state; msg_response = CM_MSG_RESPONSE_REQ; break; case IB_CM_REP_RCVD: cm_state = IB_CM_MRA_REP_SENT; lap_state = cm_id->lap_state; msg_response = CM_MSG_RESPONSE_REP; break; case IB_CM_ESTABLISHED: if (cm_id->lap_state == IB_CM_LAP_RCVD) { cm_state = cm_id->state; lap_state = IB_CM_MRA_LAP_SENT; msg_response = CM_MSG_RESPONSE_OTHER; break; } fallthrough; default: trace_icm_send_mra_unknown_err(&cm_id_priv->id); ret = -EINVAL; goto error_unlock; } if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) { msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) { ret = PTR_ERR(msg); goto error_unlock; } cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, msg_response, service_timeout, private_data, 
private_data_len); trace_icm_send_mra(cm_id); ret = ib_post_send_mad(msg, NULL); if (ret) goto error_free_msg; } cm_id->state = cm_state; cm_id->lap_state = lap_state; cm_id_priv->service_timeout = service_timeout; cm_set_private_data(cm_id_priv, data, private_data_len); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; error_free_msg: cm_free_msg(msg); error_unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); kfree(data); return ret; } EXPORT_SYMBOL(ib_send_cm_mra); static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg) { switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) { case CM_MSG_RESPONSE_REQ: return cm_acquire_id( cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)), 0); case CM_MSG_RESPONSE_REP: case CM_MSG_RESPONSE_OTHER: return cm_acquire_id( cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)), cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg))); default: return NULL; } } static int cm_mra_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_mra_msg *mra_msg; int timeout; mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_mraed_id(mra_msg); if (!cm_id_priv) return -EINVAL; work->cm_event.private_data = IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg); work->cm_event.param.mra_rcvd.service_timeout = IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg); timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) + cm_convert_to_ms(cm_id_priv->av.timeout); spin_lock_irq(&cm_id_priv->lock); switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) != CM_MSG_RESPONSE_REQ || ib_modify_mad(cm_id_priv->msg, timeout)) goto out; cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD; break; case IB_CM_REP_SENT: if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) != CM_MSG_RESPONSE_REP || ib_modify_mad(cm_id_priv->msg, timeout)) goto out; cm_id_priv->id.state = IB_CM_MRA_REP_RCVD; break; case IB_CM_ESTABLISHED: if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) != CM_MSG_RESPONSE_OTHER || cm_id_priv->id.lap_state != IB_CM_LAP_SENT || ib_modify_mad(cm_id_priv->msg, timeout)) { if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) atomic_long_inc( &work->port->counters[CM_RECV_DUPLICATES] [CM_MRA_COUNTER]); goto out; } cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD; break; case IB_CM_MRA_REQ_RCVD: case IB_CM_MRA_REP_RCVD: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_MRA_COUNTER]); fallthrough; default: trace_icm_mra_unknown_err(&cm_id_priv->id); goto out; } cm_id_priv->msg->context[1] = (void *) (unsigned long) cm_id_priv->id.state; cm_queue_work_unlock(cm_id_priv, work); return 0; out: spin_unlock_irq(&cm_id_priv->lock); cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg, struct sa_path_rec *path) { u32 lid; if (path->rec_type != SA_PATH_REC_TYPE_OPA) { sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID, lap_msg)); sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID, lap_msg)); } else { lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg)); sa_path_set_dlid(path, lid); lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg)); sa_path_set_slid(path, lid); } } static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv, struct sa_path_rec *path, struct cm_lap_msg *lap_msg) { path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg); path->sgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg); 
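	/*
	 * Note the swap above: the LAP describes the alternate path from the
	 * remote sender's point of view, so its "local" port GID becomes our
	 * dgid and its "remote" port GID becomes our sgid.
	 * cm_format_path_lid_from_lap(), called at the end of this function,
	 * applies the same swap to the LIDs.
	 */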
path->flow_label = cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg)); path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg); path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg); path->reversible = 1; path->pkey = cm_id_priv->pkey; path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg); path->mtu_selector = IB_SA_EQ; path->mtu = cm_id_priv->path_mtu; path->rate_selector = IB_SA_EQ; path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg); path->packet_life_time_selector = IB_SA_EQ; path->packet_life_time = IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg); path->packet_life_time -= (path->packet_life_time > 0); cm_format_path_lid_from_lap(lap_msg, path); } static int cm_lap_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_lap_msg *lap_msg; struct ib_cm_lap_event_param *param; struct ib_mad_send_buf *msg = NULL; struct rdma_ah_attr ah_attr; struct cm_av alt_av = {}; int ret; /* Currently Alternate path messages are not supported for * RoCE link layer. */ if (rdma_protocol_roce(work->port->cm_dev->ib_device, work->port->port_num)) return -EINVAL; /* todo: verify LAP request and send reject APR if invalid. */ lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)), cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg))); if (!cm_id_priv) return -EINVAL; param = &work->cm_event.param.lap_rcvd; memset(&work->path[0], 0, sizeof(work->path[1])); cm_path_set_rec_type(work->port->cm_dev->ib_device, work->port->port_num, &work->path[0], IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg)); param->alternate_path = &work->path[0]; cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg); work->cm_event.private_data = IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg); ret = ib_init_ah_attr_from_wc(work->port->cm_dev->ib_device, work->port->port_num, work->mad_recv_wc->wc, work->mad_recv_wc->recv_buf.grh, &ah_attr); if (ret) goto deref; ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av); if (ret) { rdma_destroy_ah_attr(&ah_attr); goto deref; } spin_lock_irq(&cm_id_priv->lock); cm_init_av_for_lap(work->port, work->mad_recv_wc->wc, &ah_attr, &cm_id_priv->av); cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av); if (cm_id_priv->id.state != IB_CM_ESTABLISHED) goto unlock; switch (cm_id_priv->id.lap_state) { case IB_CM_LAP_UNINIT: case IB_CM_LAP_IDLE: break; case IB_CM_MRA_LAP_SENT: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_LAP_COUNTER]); msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc); if (IS_ERR(msg)) goto unlock; cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, CM_MSG_RESPONSE_OTHER, cm_id_priv->service_timeout, cm_id_priv->private_data, cm_id_priv->private_data_len); spin_unlock_irq(&cm_id_priv->lock); if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) || ib_post_send_mad(msg, NULL)) cm_free_response_msg(msg); goto deref; case IB_CM_LAP_RCVD: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_LAP_COUNTER]); goto unlock; default: goto unlock; } cm_id_priv->id.lap_state = IB_CM_LAP_RCVD; cm_id_priv->tid = lap_msg->hdr.tid; cm_queue_work_unlock(cm_id_priv, work); return 0; unlock: spin_unlock_irq(&cm_id_priv->lock); deref: cm_deref_id(cm_id_priv); return -EINVAL; } static int cm_apr_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_apr_msg *apr_msg; /* Currently Alternate path messages are not supported for * RoCE link layer. 
*/ if (rdma_protocol_roce(work->port->cm_dev->ib_device, work->port->port_num)) return -EINVAL; apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)), cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg))); if (!cm_id_priv) return -EINVAL; /* Unmatched reply. */ work->cm_event.param.apr_rcvd.ap_status = IBA_GET(CM_APR_AR_STATUS, apr_msg); work->cm_event.param.apr_rcvd.apr_info = IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg); work->cm_event.param.apr_rcvd.info_len = IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg); work->cm_event.private_data = IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg); spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_ESTABLISHED || (cm_id_priv->id.lap_state != IB_CM_LAP_SENT && cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; ib_cancel_mad(cm_id_priv->msg); cm_queue_work_unlock(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static int cm_timewait_handler(struct cm_work *work) { struct cm_timewait_info *timewait_info; struct cm_id_private *cm_id_priv; timewait_info = container_of(work, struct cm_timewait_info, work); spin_lock_irq(&cm.lock); list_del(&timewait_info->list); spin_unlock_irq(&cm.lock); cm_id_priv = cm_acquire_id(timewait_info->work.local_id, timewait_info->work.remote_id); if (!cm_id_priv) return -EINVAL; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_TIMEWAIT || cm_id_priv->remote_qpn != timewait_info->remote_qpn) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_id_priv->id.state = IB_CM_IDLE; cm_queue_work_unlock(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg, struct cm_id_private *cm_id_priv, struct ib_cm_sidr_req_param *param) { cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, cm_form_tid(cm_id_priv)); IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg, be16_to_cpu(param->path->pkey)); IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg, be64_to_cpu(param->service_id)); if (param->private_data && param->private_data_len) IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg, param->private_data, param->private_data_len); } int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, struct ib_cm_sidr_req_param *param) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; struct cm_av av = {}; unsigned long flags; int ret; if (!param->path || (param->private_data && param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE)) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); ret = cm_init_av_by_path(param->path, param->sgid_attr, &av); if (ret) return ret; spin_lock_irqsave(&cm_id_priv->lock, flags); cm_move_av_from_path(&cm_id_priv->av, &av); cm_id->service_id = param->service_id; cm_id_priv->timeout_ms = param->timeout_ms; cm_id_priv->max_cm_retries = param->max_cm_retries; if (cm_id->state != IB_CM_IDLE) { ret = -EINVAL; goto out_unlock; } msg = cm_alloc_priv_msg(cm_id_priv); if (IS_ERR(msg)) { ret = PTR_ERR(msg); goto out_unlock; } cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv, param); msg->timeout_ms = cm_id_priv->timeout_ms; msg->context[1] = (void *)(unsigned long)IB_CM_SIDR_REQ_SENT; trace_icm_send_sidr_req(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); 
if (ret) goto out_free; cm_id->state = IB_CM_SIDR_REQ_SENT; spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; out_free: cm_free_priv_msg(msg); out_unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_sidr_req); static void cm_format_sidr_req_event(struct cm_work *work, const struct cm_id_private *rx_cm_id, struct ib_cm_id *listen_id) { struct cm_sidr_req_msg *sidr_req_msg; struct ib_cm_sidr_req_event_param *param; sidr_req_msg = (struct cm_sidr_req_msg *) work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.sidr_req_rcvd; param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg); param->listen_id = listen_id; param->service_id = cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg)); param->bth_pkey = cm_get_bth_pkey(work); param->port = work->port->port_num; param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr; work->cm_event.private_data = IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg); } static int cm_sidr_req_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv, *listen_cm_id_priv; struct cm_sidr_req_msg *sidr_req_msg; struct ib_wc *wc; int ret; cm_id_priv = cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL); if (IS_ERR(cm_id_priv)) return PTR_ERR(cm_id_priv); /* Record SGID/SLID and request ID for lookup. */ sidr_req_msg = (struct cm_sidr_req_msg *) work->mad_recv_wc->recv_buf.mad; cm_id_priv->id.remote_id = cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg)); cm_id_priv->id.service_id = cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg)); cm_id_priv->tid = sidr_req_msg->hdr.tid; wc = work->mad_recv_wc->wc; cm_id_priv->sidr_slid = wc->slid; ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc, work->mad_recv_wc->recv_buf.grh, &cm_id_priv->av); if (ret) goto out; spin_lock_irq(&cm.lock); listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); if (listen_cm_id_priv) { spin_unlock_irq(&cm.lock); atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_SIDR_REQ_COUNTER]); goto out; /* Duplicate message. */ } cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD; listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device, cm_id_priv->id.service_id); if (!listen_cm_id_priv) { spin_unlock_irq(&cm.lock); ib_send_cm_sidr_rep(&cm_id_priv->id, &(struct ib_cm_sidr_rep_param){ .status = IB_SIDR_UNSUPPORTED }); goto out; /* No match. */ } spin_unlock_irq(&cm.lock); cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; cm_id_priv->id.context = listen_cm_id_priv->id.context; /* * A SIDR ID does not need to be in the xarray since it does not receive * mads, is not placed in the remote_id or remote_qpn rbtree, and does * not enter timewait. */ cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id); ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); cm_free_work(work); /* * A pointer to the listen_cm_id is held in the event, so this deref * must be after the event is delivered above. 
*/ cm_deref_id(listen_cm_id_priv); if (ret) cm_destroy_id(&cm_id_priv->id, ret); return 0; out: ib_destroy_cm_id(&cm_id_priv->id); return -EINVAL; } static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg, struct cm_id_private *cm_id_priv, struct ib_cm_sidr_rep_param *param) { cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID, cm_id_priv->tid, param->ece.attr_mod); IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg, be32_to_cpu(cm_id_priv->id.remote_id)); IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status); IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num); IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg, be64_to_cpu(cm_id_priv->id.service_id)); IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey); IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg, param->ece.vendor_id & 0xFF); IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg, (param->ece.vendor_id >> 8) & 0xFF); if (param->info && param->info_length) IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg, param->info, param->info_length); if (param->private_data && param->private_data_len) IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg, param->private_data, param->private_data_len); } static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv, struct ib_cm_sidr_rep_param *param) { struct ib_mad_send_buf *msg; unsigned long flags; int ret; lockdep_assert_held(&cm_id_priv->lock); if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) || (param->private_data && param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)) return -EINVAL; if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD) return -EINVAL; msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) return PTR_ERR(msg); cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, param); trace_icm_send_sidr_rep(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); if (ret) { cm_free_msg(msg); return ret; } cm_id_priv->id.state = IB_CM_IDLE; spin_lock_irqsave(&cm.lock, flags); if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); } spin_unlock_irqrestore(&cm.lock, flags); return 0; } int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, struct ib_cm_sidr_rep_param *param) { struct cm_id_private *cm_id_priv = container_of(cm_id, struct cm_id_private, id); unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); ret = cm_send_sidr_rep_locked(cm_id_priv, param); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_sidr_rep); static void cm_format_sidr_rep_event(struct cm_work *work, const struct cm_id_private *cm_id_priv) { struct cm_sidr_rep_msg *sidr_rep_msg; struct ib_cm_sidr_rep_event_param *param; sidr_rep_msg = (struct cm_sidr_rep_msg *) work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.sidr_rep_rcvd; param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg); param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg); param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg); param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg); param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH, sidr_rep_msg); param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr; work->cm_event.private_data = IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg); } static int cm_sidr_rep_handler(struct cm_work *work) { struct cm_sidr_rep_msg *sidr_rep_msg; struct cm_id_private *cm_id_priv; sidr_rep_msg = (struct cm_sidr_rep_msg *) work->mad_recv_wc->recv_buf.mad; cm_id_priv = 
cm_acquire_id( cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0); if (!cm_id_priv) return -EINVAL; /* Unmatched reply. */ spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_id_priv->id.state = IB_CM_IDLE; ib_cancel_mad(cm_id_priv->msg); spin_unlock_irq(&cm_id_priv->lock); cm_format_sidr_rep_event(work, cm_id_priv); cm_process_work(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_process_send_error(struct cm_id_private *cm_id_priv, struct ib_mad_send_buf *msg, enum ib_cm_state state, enum ib_wc_status wc_status) { struct ib_cm_event cm_event = {}; int ret; /* Discard old sends or ones without a response. */ spin_lock_irq(&cm_id_priv->lock); if (msg != cm_id_priv->msg) { spin_unlock_irq(&cm_id_priv->lock); cm_free_msg(msg); return; } cm_free_priv_msg(msg); if (state != cm_id_priv->id.state || wc_status == IB_WC_SUCCESS || wc_status == IB_WC_WR_FLUSH_ERR) goto out_unlock; trace_icm_mad_send_err(state, wc_status); switch (state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: cm_reset_to_idle(cm_id_priv); cm_event.event = IB_CM_REQ_ERROR; break; case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: cm_reset_to_idle(cm_id_priv); cm_event.event = IB_CM_REP_ERROR; break; case IB_CM_DREQ_SENT: cm_enter_timewait(cm_id_priv); cm_event.event = IB_CM_DREQ_ERROR; break; case IB_CM_SIDR_REQ_SENT: cm_id_priv->id.state = IB_CM_IDLE; cm_event.event = IB_CM_SIDR_REQ_ERROR; break; default: goto out_unlock; } spin_unlock_irq(&cm_id_priv->lock); cm_event.param.send_status = wc_status; /* No other events can occur on the cm_id at this point. */ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event); if (ret) ib_destroy_cm_id(&cm_id_priv->id); return; out_unlock: spin_unlock_irq(&cm_id_priv->lock); } static void cm_send_handler(struct ib_mad_agent *mad_agent, struct ib_mad_send_wc *mad_send_wc) { struct ib_mad_send_buf *msg = mad_send_wc->send_buf; struct cm_id_private *cm_id_priv = msg->context[0]; enum ib_cm_state state = (enum ib_cm_state)(unsigned long)msg->context[1]; struct cm_port *port; u16 attr_index; port = mad_agent->context; attr_index = be16_to_cpu(((struct ib_mad_hdr *) msg->mad)->attr_id) - CM_ATTR_ID_OFFSET; /* * If the send was in response to a received message (context[0] is not * set to a cm_id), and is not a REJ, then it is a send that was * manually retried. 
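 * Count it as a single retry below so it is reflected in the
 * cm_tx_retries counters.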
*/ if (!cm_id_priv && (attr_index != CM_REJ_COUNTER)) msg->retries = 1; atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]); if (msg->retries) atomic_long_add(msg->retries, &port->counters[CM_XMIT_RETRIES][attr_index]); if (cm_id_priv) cm_process_send_error(cm_id_priv, msg, state, mad_send_wc->status); else cm_free_response_msg(msg); } static void cm_work_handler(struct work_struct *_work) { struct cm_work *work = container_of(_work, struct cm_work, work.work); int ret; switch (work->cm_event.event) { case IB_CM_REQ_RECEIVED: ret = cm_req_handler(work); break; case IB_CM_MRA_RECEIVED: ret = cm_mra_handler(work); break; case IB_CM_REJ_RECEIVED: ret = cm_rej_handler(work); break; case IB_CM_REP_RECEIVED: ret = cm_rep_handler(work); break; case IB_CM_RTU_RECEIVED: ret = cm_rtu_handler(work); break; case IB_CM_USER_ESTABLISHED: ret = cm_establish_handler(work); break; case IB_CM_DREQ_RECEIVED: ret = cm_dreq_handler(work); break; case IB_CM_DREP_RECEIVED: ret = cm_drep_handler(work); break; case IB_CM_SIDR_REQ_RECEIVED: ret = cm_sidr_req_handler(work); break; case IB_CM_SIDR_REP_RECEIVED: ret = cm_sidr_rep_handler(work); break; case IB_CM_LAP_RECEIVED: ret = cm_lap_handler(work); break; case IB_CM_APR_RECEIVED: ret = cm_apr_handler(work); break; case IB_CM_TIMEWAIT_EXIT: ret = cm_timewait_handler(work); break; default: trace_icm_handler_err(work->cm_event.event); ret = -EINVAL; break; } if (ret) cm_free_work(work); } static int cm_establish(struct ib_cm_id *cm_id) { struct cm_id_private *cm_id_priv; struct cm_work *work; unsigned long flags; int ret = 0; struct cm_device *cm_dev; cm_dev = ib_get_client_data(cm_id->device, &cm_client); if (!cm_dev) return -ENODEV; work = kmalloc(sizeof *work, GFP_ATOMIC); if (!work) return -ENOMEM; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id->state) { case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: cm_id->state = IB_CM_ESTABLISHED; break; case IB_CM_ESTABLISHED: ret = -EISCONN; break; default: trace_icm_establish_err(cm_id); ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); if (ret) { kfree(work); goto out; } /* * The CM worker thread may try to destroy the cm_id before it * can execute this work item. To prevent potential deadlock, * we need to find the cm_id once we're in the context of the * worker thread, rather than holding a reference on it. 
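 * Hence the work item below carries only the local/remote IDs, not a
 * reference; cm_establish_handler() looks the cm_id up again with
 * cm_acquire_id() when the work actually runs.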
*/ INIT_DELAYED_WORK(&work->work, cm_work_handler); work->local_id = cm_id->local_id; work->remote_id = cm_id->remote_id; work->mad_recv_wc = NULL; work->cm_event.event = IB_CM_USER_ESTABLISHED; /* Check if the device started its remove_one */ spin_lock_irqsave(&cm.lock, flags); if (!cm_dev->going_down) { queue_delayed_work(cm.wq, &work->work, 0); } else { kfree(work); ret = -ENODEV; } spin_unlock_irqrestore(&cm.lock, flags); out: return ret; } static int cm_migrate(struct ib_cm_id *cm_id) { struct cm_id_private *cm_id_priv; unsigned long flags; int ret = 0; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state == IB_CM_ESTABLISHED && (cm_id->lap_state == IB_CM_LAP_UNINIT || cm_id->lap_state == IB_CM_LAP_IDLE)) { cm_id->lap_state = IB_CM_LAP_IDLE; cm_id_priv->av = cm_id_priv->alt_av; } else ret = -EINVAL; spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event) { int ret; switch (event) { case IB_EVENT_COMM_EST: ret = cm_establish(cm_id); break; case IB_EVENT_PATH_MIG: ret = cm_migrate(cm_id); break; default: ret = -EINVAL; } return ret; } EXPORT_SYMBOL(ib_cm_notify); static void cm_recv_handler(struct ib_mad_agent *mad_agent, struct ib_mad_send_buf *send_buf, struct ib_mad_recv_wc *mad_recv_wc) { struct cm_port *port = mad_agent->context; struct cm_work *work; enum ib_cm_event_type event; bool alt_path = false; u16 attr_id; int paths = 0; int going_down = 0; switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { case CM_REQ_ATTR_ID: alt_path = cm_req_has_alt_path((struct cm_req_msg *) mad_recv_wc->recv_buf.mad); paths = 1 + (alt_path != 0); event = IB_CM_REQ_RECEIVED; break; case CM_MRA_ATTR_ID: event = IB_CM_MRA_RECEIVED; break; case CM_REJ_ATTR_ID: event = IB_CM_REJ_RECEIVED; break; case CM_REP_ATTR_ID: event = IB_CM_REP_RECEIVED; break; case CM_RTU_ATTR_ID: event = IB_CM_RTU_RECEIVED; break; case CM_DREQ_ATTR_ID: event = IB_CM_DREQ_RECEIVED; break; case CM_DREP_ATTR_ID: event = IB_CM_DREP_RECEIVED; break; case CM_SIDR_REQ_ATTR_ID: event = IB_CM_SIDR_REQ_RECEIVED; break; case CM_SIDR_REP_ATTR_ID: event = IB_CM_SIDR_REP_RECEIVED; break; case CM_LAP_ATTR_ID: paths = 1; event = IB_CM_LAP_RECEIVED; break; case CM_APR_ATTR_ID: event = IB_CM_APR_RECEIVED; break; default: ib_free_recv_mad(mad_recv_wc); return; } attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]); work = kmalloc(struct_size(work, path, paths), GFP_KERNEL); if (!work) { ib_free_recv_mad(mad_recv_wc); return; } INIT_DELAYED_WORK(&work->work, cm_work_handler); work->cm_event.event = event; work->mad_recv_wc = mad_recv_wc; work->port = port; /* Check if the device started its remove_one */ spin_lock_irq(&cm.lock); if (!port->cm_dev->going_down) queue_delayed_work(cm.wq, &work->work, 0); else going_down = 1; spin_unlock_irq(&cm.lock); if (going_down) { kfree(work); ib_free_recv_mad(mad_recv_wc); } } static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: case IB_CM_ESTABLISHED: *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; 
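		/*
		 * Remote write access is always granted; remote read/atomic
		 * (plus the FLUSH access bits when the device advertises
		 * them) are only enabled below if the peer negotiated
		 * responder resources.
		 */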
qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE; if (cm_id_priv->responder_resources) { struct ib_device *ib_dev = cm_id_priv->id.device; u64 support_flush = ib_dev->attrs.device_cap_flags & (IB_DEVICE_FLUSH_GLOBAL | IB_DEVICE_FLUSH_PERSISTENT); u32 flushable = support_flush ? (IB_ACCESS_FLUSH_GLOBAL | IB_ACCESS_FLUSH_PERSISTENT) : 0; qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_ATOMIC | flushable; } qp_attr->pkey_index = cm_id_priv->av.pkey_index; if (cm_id_priv->av.port) qp_attr->port_num = cm_id_priv->av.port->port_num; ret = 0; break; default: trace_icm_qp_init_err(&cm_id_priv->id); ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->id.state) { case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: case IB_CM_ESTABLISHED: *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN; qp_attr->ah_attr = cm_id_priv->av.ah_attr; if ((qp_attr->ah_attr.type == RDMA_AH_ATTR_TYPE_IB) && cm_id_priv->av.dlid_datapath && (cm_id_priv->av.dlid_datapath != 0xffff)) qp_attr->ah_attr.ib.dlid = cm_id_priv->av.dlid_datapath; qp_attr->path_mtu = cm_id_priv->path_mtu; qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn); qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn); if (cm_id_priv->qp_type == IB_QPT_RC || cm_id_priv->qp_type == IB_QPT_XRC_TGT) { *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER; qp_attr->max_dest_rd_atomic = cm_id_priv->responder_resources; qp_attr->min_rnr_timer = 0; } if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr) && cm_id_priv->alt_av.port) { *qp_attr_mask |= IB_QP_ALT_PATH; qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; qp_attr->alt_timeout = cm_id_priv->alt_av.timeout; qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; } ret = 0; break; default: trace_icm_qp_rtr_err(&cm_id_priv->id); ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->id.state) { /* Allow transition to RTS before sending REP */ case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: case IB_CM_ESTABLISHED: if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) { *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN; qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn); switch (cm_id_priv->qp_type) { case IB_QPT_RC: case IB_QPT_XRC_INI: *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC; qp_attr->retry_cnt = cm_id_priv->retry_count; qp_attr->rnr_retry = cm_id_priv->rnr_retry_count; qp_attr->max_rd_atomic = cm_id_priv->initiator_depth; fallthrough; case IB_QPT_XRC_TGT: *qp_attr_mask |= IB_QP_TIMEOUT; qp_attr->timeout = cm_id_priv->av.timeout; break; default: break; } if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) { *qp_attr_mask |= IB_QP_PATH_MIG_STATE; qp_attr->path_mig_state = IB_MIG_REARM; } } else { *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE; if (cm_id_priv->alt_av.port) qp_attr->alt_port_num = 
cm_id_priv->alt_av.port->port_num; qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; qp_attr->alt_timeout = cm_id_priv->alt_av.timeout; qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; qp_attr->path_mig_state = IB_MIG_REARM; } ret = 0; break; default: trace_icm_qp_rts_err(&cm_id_priv->id); ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } int ib_cm_init_qp_attr(struct ib_cm_id *cm_id, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { struct cm_id_private *cm_id_priv; int ret; cm_id_priv = container_of(cm_id, struct cm_id_private, id); switch (qp_attr->qp_state) { case IB_QPS_INIT: ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask); break; case IB_QPS_RTR: ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask); break; case IB_QPS_RTS: ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask); break; default: ret = -EINVAL; break; } return ret; } EXPORT_SYMBOL(ib_cm_init_qp_attr); static ssize_t cm_show_counter(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, char *buf) { struct cm_counter_attribute *cm_attr = container_of(attr, struct cm_counter_attribute, attr); struct cm_device *cm_dev = ib_get_client_data(ibdev, &cm_client); if (WARN_ON(!cm_dev)) return -EINVAL; return sysfs_emit( buf, "%ld\n", atomic_long_read( &cm_dev->port[port_num - 1] ->counters[cm_attr->group][cm_attr->index])); } #define CM_COUNTER_ATTR(_name, _group, _index) \ { \ .attr = __ATTR(_name, 0444, cm_show_counter, NULL), \ .group = _group, .index = _index \ } #define CM_COUNTER_GROUP(_group, _name) \ static struct cm_counter_attribute cm_counter_attr_##_group[] = { \ CM_COUNTER_ATTR(req, _group, CM_REQ_COUNTER), \ CM_COUNTER_ATTR(mra, _group, CM_MRA_COUNTER), \ CM_COUNTER_ATTR(rej, _group, CM_REJ_COUNTER), \ CM_COUNTER_ATTR(rep, _group, CM_REP_COUNTER), \ CM_COUNTER_ATTR(rtu, _group, CM_RTU_COUNTER), \ CM_COUNTER_ATTR(dreq, _group, CM_DREQ_COUNTER), \ CM_COUNTER_ATTR(drep, _group, CM_DREP_COUNTER), \ CM_COUNTER_ATTR(sidr_req, _group, CM_SIDR_REQ_COUNTER), \ CM_COUNTER_ATTR(sidr_rep, _group, CM_SIDR_REP_COUNTER), \ CM_COUNTER_ATTR(lap, _group, CM_LAP_COUNTER), \ CM_COUNTER_ATTR(apr, _group, CM_APR_COUNTER), \ }; \ static struct attribute *cm_counter_attrs_##_group[] = { \ &cm_counter_attr_##_group[0].attr.attr, \ &cm_counter_attr_##_group[1].attr.attr, \ &cm_counter_attr_##_group[2].attr.attr, \ &cm_counter_attr_##_group[3].attr.attr, \ &cm_counter_attr_##_group[4].attr.attr, \ &cm_counter_attr_##_group[5].attr.attr, \ &cm_counter_attr_##_group[6].attr.attr, \ &cm_counter_attr_##_group[7].attr.attr, \ &cm_counter_attr_##_group[8].attr.attr, \ &cm_counter_attr_##_group[9].attr.attr, \ &cm_counter_attr_##_group[10].attr.attr, \ NULL, \ }; \ static const struct attribute_group cm_counter_group_##_group = { \ .name = _name, \ .attrs = cm_counter_attrs_##_group, \ }; CM_COUNTER_GROUP(CM_XMIT, "cm_tx_msgs") CM_COUNTER_GROUP(CM_XMIT_RETRIES, "cm_tx_retries") CM_COUNTER_GROUP(CM_RECV, "cm_rx_msgs") CM_COUNTER_GROUP(CM_RECV_DUPLICATES, "cm_rx_duplicates") static const struct attribute_group *cm_counter_groups[] = { &cm_counter_group_CM_XMIT, &cm_counter_group_CM_XMIT_RETRIES, &cm_counter_group_CM_RECV, &cm_counter_group_CM_RECV_DUPLICATES, NULL, }; static int cm_add_one(struct ib_device *ib_device) { struct cm_device *cm_dev; struct cm_port *port; struct ib_mad_reg_req reg_req = { .mgmt_class = IB_MGMT_CLASS_CM, .mgmt_class_version = IB_CM_CLASS_VERSION, }; struct ib_port_modify port_modify = { .set_port_cap_mask = IB_PORT_CM_SUP }; unsigned 
long flags; int ret; int count = 0; u32 i; cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt), GFP_KERNEL); if (!cm_dev) return -ENOMEM; kref_init(&cm_dev->kref); spin_lock_init(&cm_dev->mad_agent_lock); cm_dev->ib_device = ib_device; cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay; cm_dev->going_down = 0; ib_set_client_data(ib_device, &cm_client, cm_dev); set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); rdma_for_each_port (ib_device, i) { if (!rdma_cap_ib_cm(ib_device, i)) continue; port = kzalloc(sizeof *port, GFP_KERNEL); if (!port) { ret = -ENOMEM; goto error1; } cm_dev->port[i-1] = port; port->cm_dev = cm_dev; port->port_num = i; ret = ib_port_register_client_groups(ib_device, i, cm_counter_groups); if (ret) goto error1; port->mad_agent = ib_register_mad_agent(ib_device, i, IB_QPT_GSI, &reg_req, 0, cm_send_handler, cm_recv_handler, port, 0); if (IS_ERR(port->mad_agent)) { ret = PTR_ERR(port->mad_agent); goto error2; } ret = ib_modify_port(ib_device, i, 0, &port_modify); if (ret) goto error3; count++; } if (!count) { ret = -EOPNOTSUPP; goto free; } write_lock_irqsave(&cm.device_lock, flags); list_add_tail(&cm_dev->list, &cm.device_list); write_unlock_irqrestore(&cm.device_lock, flags); return 0; error3: ib_unregister_mad_agent(port->mad_agent); error2: ib_port_unregister_client_groups(ib_device, i, cm_counter_groups); error1: port_modify.set_port_cap_mask = 0; port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; while (--i) { if (!rdma_cap_ib_cm(ib_device, i)) continue; port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); ib_unregister_mad_agent(port->mad_agent); ib_port_unregister_client_groups(ib_device, i, cm_counter_groups); } free: cm_device_put(cm_dev); return ret; } static void cm_remove_one(struct ib_device *ib_device, void *client_data) { struct cm_device *cm_dev = client_data; struct cm_port *port; struct ib_port_modify port_modify = { .clr_port_cap_mask = IB_PORT_CM_SUP }; unsigned long flags; u32 i; write_lock_irqsave(&cm.device_lock, flags); list_del(&cm_dev->list); write_unlock_irqrestore(&cm.device_lock, flags); spin_lock_irq(&cm.lock); cm_dev->going_down = 1; spin_unlock_irq(&cm.lock); rdma_for_each_port (ib_device, i) { struct ib_mad_agent *mad_agent; if (!rdma_cap_ib_cm(ib_device, i)) continue; port = cm_dev->port[i-1]; mad_agent = port->mad_agent; ib_modify_port(ib_device, port->port_num, 0, &port_modify); /* * We flush the queue here after the going_down set, this * verify that no new works will be queued in the recv handler, * after that we can call the unregister_mad_agent */ flush_workqueue(cm.wq); /* * The above ensures no call paths from the work are running, * the remaining paths all take the mad_agent_lock. 
*/ spin_lock(&cm_dev->mad_agent_lock); port->mad_agent = NULL; spin_unlock(&cm_dev->mad_agent_lock); ib_unregister_mad_agent(mad_agent); ib_port_unregister_client_groups(ib_device, i, cm_counter_groups); } cm_device_put(cm_dev); } static int __init ib_cm_init(void) { int ret; INIT_LIST_HEAD(&cm.device_list); rwlock_init(&cm.device_lock); spin_lock_init(&cm.lock); cm.listen_service_table = RB_ROOT; cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); cm.remote_id_table = RB_ROOT; cm.remote_qp_table = RB_ROOT; cm.remote_sidr_table = RB_ROOT; xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC); get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand); INIT_LIST_HEAD(&cm.timewait_list); cm.wq = alloc_workqueue("ib_cm", 0, 1); if (!cm.wq) { ret = -ENOMEM; goto error2; } ret = ib_register_client(&cm_client); if (ret) goto error3; return 0; error3: destroy_workqueue(cm.wq); error2: return ret; } static void __exit ib_cm_cleanup(void) { struct cm_timewait_info *timewait_info, *tmp; spin_lock_irq(&cm.lock); list_for_each_entry(timewait_info, &cm.timewait_list, list) cancel_delayed_work(&timewait_info->work.work); spin_unlock_irq(&cm.lock); ib_unregister_client(&cm_client); destroy_workqueue(cm.wq); list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) { list_del(&timewait_info->list); kfree(timewait_info); } WARN_ON(!xa_empty(&cm.local_id_table)); } module_init(ib_cm_init); module_exit(ib_cm_cleanup);
linux-master
drivers/infiniband/core/cm.c
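The CM code above is consumed by ULPs through ib_cm_init_qp_attr(): the caller asks the CM for the attributes of each QP state transition and then applies them itself with ib_modify_qp(). A minimal sketch of that pattern follows; example_cm_modify_qp() is a hypothetical caller (not part of cm.c) that already holds a connected cm_id and its QP.

#include <linux/kernel.h>
#include <linux/string.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: drive a connected QP through INIT -> RTR -> RTS
 * using the attributes the CM computed for this cm_id. */
static int example_cm_modify_qp(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	static const enum ib_qp_state states[] = {
		IB_QPS_INIT, IB_QPS_RTR, IB_QPS_RTS
	};
	struct ib_qp_attr attr;
	int mask, i, ret;

	for (i = 0; i < ARRAY_SIZE(states); i++) {
		memset(&attr, 0, sizeof(attr));
		/* qp_state selects the branch inside ib_cm_init_qp_attr() */
		attr.qp_state = states[i];
		ret = ib_cm_init_qp_attr(cm_id, &attr, &mask);
		if (ret)
			return ret;
		ret = ib_modify_qp(qp, &attr, mask);
		if (ret)
			return ret;
	}
	return 0;
}

Each call only succeeds when the cm_id is in a state that allows that transition, which is exactly what the switch statements in cm_init_qp_*_attr() enforce.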
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2016 HGST, a Western Digital Company. */ #include <linux/memremap.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/pci-p2pdma.h> #include <rdma/mr_pool.h> #include <rdma/rw.h> enum { RDMA_RW_SINGLE_WR, RDMA_RW_MULTI_WR, RDMA_RW_MR, RDMA_RW_SIG_MR, }; static bool rdma_rw_force_mr; module_param_named(force_mr, rdma_rw_force_mr, bool, 0); MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations"); /* * Report whether memory registration should be used. Memory registration must * be used for iWarp devices because of iWARP-specific limitations. Memory * registration is also enabled if registering memory might yield better * performance than using multiple SGE entries, see rdma_rw_io_needs_mr() */ static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u32 port_num) { if (rdma_protocol_iwarp(dev, port_num)) return true; if (dev->attrs.max_sgl_rd) return true; if (unlikely(rdma_rw_force_mr)) return true; return false; } /* * Check if the device will use memory registration for this RW operation. * For RDMA READs we must use MRs on iWarp and can optionally use them as an * optimization otherwise. Additionally we have a debug option to force usage * of MRs to help testing this code path. */ static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u32 port_num, enum dma_data_direction dir, int dma_nents) { if (dir == DMA_FROM_DEVICE) { if (rdma_protocol_iwarp(dev, port_num)) return true; if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd) return true; } if (unlikely(rdma_rw_force_mr)) return true; return false; } static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev, bool pi_support) { u32 max_pages; if (pi_support) max_pages = dev->attrs.max_pi_fast_reg_page_list_len; else max_pages = dev->attrs.max_fast_reg_page_list_len; /* arbitrary limit to avoid allocating gigantic resources */ return min_t(u32, max_pages, 256); } static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg) { int count = 0; if (reg->mr->need_inval) { reg->inv_wr.opcode = IB_WR_LOCAL_INV; reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey; reg->inv_wr.next = &reg->reg_wr.wr; count++; } else { reg->inv_wr.next = NULL; } return count; } /* Caller must have zero-initialized *reg. 
*/ static int rdma_rw_init_one_mr(struct ib_qp *qp, u32 port_num, struct rdma_rw_reg_ctx *reg, struct scatterlist *sg, u32 sg_cnt, u32 offset) { u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device, qp->integrity_en); u32 nents = min(sg_cnt, pages_per_mr); int count = 0, ret; reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs); if (!reg->mr) return -EAGAIN; count += rdma_rw_inv_key(reg); ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE); if (ret < 0 || ret < nents) { ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr); return -EINVAL; } reg->reg_wr.wr.opcode = IB_WR_REG_MR; reg->reg_wr.mr = reg->mr; reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE; if (rdma_protocol_iwarp(qp->device, port_num)) reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE; count++; reg->sge.addr = reg->mr->iova; reg->sge.length = reg->mr->length; return count; } static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset, u64 remote_addr, u32 rkey, enum dma_data_direction dir) { struct rdma_rw_reg_ctx *prev = NULL; u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device, qp->integrity_en); int i, j, ret = 0, count = 0; ctx->nr_ops = DIV_ROUND_UP(sg_cnt, pages_per_mr); ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL); if (!ctx->reg) { ret = -ENOMEM; goto out; } for (i = 0; i < ctx->nr_ops; i++) { struct rdma_rw_reg_ctx *reg = &ctx->reg[i]; u32 nents = min(sg_cnt, pages_per_mr); ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt, offset); if (ret < 0) goto out_free; count += ret; if (prev) { if (reg->mr->need_inval) prev->wr.wr.next = &reg->inv_wr; else prev->wr.wr.next = &reg->reg_wr.wr; } reg->reg_wr.wr.next = &reg->wr.wr; reg->wr.wr.sg_list = &reg->sge; reg->wr.wr.num_sge = 1; reg->wr.remote_addr = remote_addr; reg->wr.rkey = rkey; if (dir == DMA_TO_DEVICE) { reg->wr.wr.opcode = IB_WR_RDMA_WRITE; } else if (!rdma_cap_read_inv(qp->device, port_num)) { reg->wr.wr.opcode = IB_WR_RDMA_READ; } else { reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV; reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey; } count++; remote_addr += reg->sge.length; sg_cnt -= nents; for (j = 0; j < nents; j++) sg = sg_next(sg); prev = reg; offset = 0; } if (prev) prev->wr.wr.next = NULL; ctx->type = RDMA_RW_MR; return count; out_free: while (--i >= 0) ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr); kfree(ctx->reg); out: return ret; } static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, struct scatterlist *sg, u32 sg_cnt, u32 offset, u64 remote_addr, u32 rkey, enum dma_data_direction dir) { u32 max_sge = dir == DMA_TO_DEVICE ? 
qp->max_write_sge : qp->max_read_sge; struct ib_sge *sge; u32 total_len = 0, i, j; ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge); ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL); if (!ctx->map.sges) goto out; ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL); if (!ctx->map.wrs) goto out_free_sges; for (i = 0; i < ctx->nr_ops; i++) { struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i]; u32 nr_sge = min(sg_cnt, max_sge); if (dir == DMA_TO_DEVICE) rdma_wr->wr.opcode = IB_WR_RDMA_WRITE; else rdma_wr->wr.opcode = IB_WR_RDMA_READ; rdma_wr->remote_addr = remote_addr + total_len; rdma_wr->rkey = rkey; rdma_wr->wr.num_sge = nr_sge; rdma_wr->wr.sg_list = sge; for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) { sge->addr = sg_dma_address(sg) + offset; sge->length = sg_dma_len(sg) - offset; sge->lkey = qp->pd->local_dma_lkey; total_len += sge->length; sge++; sg_cnt--; offset = 0; } rdma_wr->wr.next = i + 1 < ctx->nr_ops ? &ctx->map.wrs[i + 1].wr : NULL; } ctx->type = RDMA_RW_MULTI_WR; return ctx->nr_ops; out_free_sges: kfree(ctx->map.sges); out: return -ENOMEM; } static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp, struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey, enum dma_data_direction dir) { struct ib_rdma_wr *rdma_wr = &ctx->single.wr; ctx->nr_ops = 1; ctx->single.sge.lkey = qp->pd->local_dma_lkey; ctx->single.sge.addr = sg_dma_address(sg) + offset; ctx->single.sge.length = sg_dma_len(sg) - offset; memset(rdma_wr, 0, sizeof(*rdma_wr)); if (dir == DMA_TO_DEVICE) rdma_wr->wr.opcode = IB_WR_RDMA_WRITE; else rdma_wr->wr.opcode = IB_WR_RDMA_READ; rdma_wr->wr.sg_list = &ctx->single.sge; rdma_wr->wr.num_sge = 1; rdma_wr->remote_addr = remote_addr; rdma_wr->rkey = rkey; ctx->type = RDMA_RW_SINGLE_WR; return 1; } /** * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context * @ctx: context to initialize * @qp: queue pair to operate on * @port_num: port num to which the connection is bound * @sg: scatterlist to READ/WRITE from/to * @sg_cnt: number of entries in @sg * @sg_offset: current byte offset into @sg * @remote_addr:remote address to read/write (relative to @rkey) * @rkey: remote key to operate on * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ * * Returns the number of WQEs that will be needed on the workqueue if * successful, or a negative error code. 
*/ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 sg_offset, u64 remote_addr, u32 rkey, enum dma_data_direction dir) { struct ib_device *dev = qp->pd->device; struct sg_table sgt = { .sgl = sg, .orig_nents = sg_cnt, }; int ret; ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0); if (ret) return ret; sg_cnt = sgt.nents; /* * Skip to the S/G entry that sg_offset falls into: */ for (;;) { u32 len = sg_dma_len(sg); if (sg_offset < len) break; sg = sg_next(sg); sg_offset -= len; sg_cnt--; } ret = -EIO; if (WARN_ON_ONCE(sg_cnt == 0)) goto out_unmap_sg; if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) { ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt, sg_offset, remote_addr, rkey, dir); } else if (sg_cnt > 1) { ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset, remote_addr, rkey, dir); } else { ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset, remote_addr, rkey, dir); } if (ret < 0) goto out_unmap_sg; return ret; out_unmap_sg: ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0); return ret; } EXPORT_SYMBOL(rdma_rw_ctx_init); /** * rdma_rw_ctx_signature_init - initialize a RW context with signature offload * @ctx: context to initialize * @qp: queue pair to operate on * @port_num: port num to which the connection is bound * @sg: scatterlist to READ/WRITE from/to * @sg_cnt: number of entries in @sg * @prot_sg: scatterlist to READ/WRITE protection information from/to * @prot_sg_cnt: number of entries in @prot_sg * @sig_attrs: signature offloading algorithms * @remote_addr:remote address to read/write (relative to @rkey) * @rkey: remote key to operate on * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ * * Returns the number of WQEs that will be needed on the workqueue if * successful, or a negative error code. 
*/ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct scatterlist *sg, u32 sg_cnt, struct scatterlist *prot_sg, u32 prot_sg_cnt, struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey, enum dma_data_direction dir) { struct ib_device *dev = qp->pd->device; u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device, qp->integrity_en); struct sg_table sgt = { .sgl = sg, .orig_nents = sg_cnt, }; struct sg_table prot_sgt = { .sgl = prot_sg, .orig_nents = prot_sg_cnt, }; struct ib_rdma_wr *rdma_wr; int count = 0, ret; if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) { pr_err("SG count too large: sg_cnt=%u, prot_sg_cnt=%u, pages_per_mr=%u\n", sg_cnt, prot_sg_cnt, pages_per_mr); return -EINVAL; } ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0); if (ret) return ret; if (prot_sg_cnt) { ret = ib_dma_map_sgtable_attrs(dev, &prot_sgt, dir, 0); if (ret) goto out_unmap_sg; } ctx->type = RDMA_RW_SIG_MR; ctx->nr_ops = 1; ctx->reg = kzalloc(sizeof(*ctx->reg), GFP_KERNEL); if (!ctx->reg) { ret = -ENOMEM; goto out_unmap_prot_sg; } ctx->reg->mr = ib_mr_pool_get(qp, &qp->sig_mrs); if (!ctx->reg->mr) { ret = -EAGAIN; goto out_free_ctx; } count += rdma_rw_inv_key(ctx->reg); memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs)); ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sgt.nents, NULL, prot_sg, prot_sgt.nents, NULL, SZ_4K); if (unlikely(ret)) { pr_err("failed to map PI sg (%u)\n", sgt.nents + prot_sgt.nents); goto out_destroy_sig_mr; } ctx->reg->reg_wr.wr.opcode = IB_WR_REG_MR_INTEGRITY; ctx->reg->reg_wr.wr.wr_cqe = NULL; ctx->reg->reg_wr.wr.num_sge = 0; ctx->reg->reg_wr.wr.send_flags = 0; ctx->reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE; if (rdma_protocol_iwarp(qp->device, port_num)) ctx->reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE; ctx->reg->reg_wr.mr = ctx->reg->mr; ctx->reg->reg_wr.key = ctx->reg->mr->lkey; count++; ctx->reg->sge.addr = ctx->reg->mr->iova; ctx->reg->sge.length = ctx->reg->mr->length; if (sig_attrs->wire.sig_type == IB_SIG_TYPE_NONE) ctx->reg->sge.length -= ctx->reg->mr->sig_attrs->meta_length; rdma_wr = &ctx->reg->wr; rdma_wr->wr.sg_list = &ctx->reg->sge; rdma_wr->wr.num_sge = 1; rdma_wr->remote_addr = remote_addr; rdma_wr->rkey = rkey; if (dir == DMA_TO_DEVICE) rdma_wr->wr.opcode = IB_WR_RDMA_WRITE; else rdma_wr->wr.opcode = IB_WR_RDMA_READ; ctx->reg->reg_wr.wr.next = &rdma_wr->wr; count++; return count; out_destroy_sig_mr: ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr); out_free_ctx: kfree(ctx->reg); out_unmap_prot_sg: if (prot_sgt.nents) ib_dma_unmap_sgtable_attrs(dev, &prot_sgt, dir, 0); out_unmap_sg: ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0); return ret; } EXPORT_SYMBOL(rdma_rw_ctx_signature_init); /* * Now that we are going to post the WRs we can update the lkey and need_inval * state on the MRs. If we were doing this at init time, we would get double * or missing invalidations if a context was initialized but not actually * posted. 
*/ static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval) { reg->mr->need_inval = need_inval; ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey)); reg->reg_wr.key = reg->mr->lkey; reg->sge.lkey = reg->mr->lkey; } /** * rdma_rw_ctx_wrs - return chain of WRs for a RDMA READ or WRITE operation * @ctx: context to operate on * @qp: queue pair to operate on * @port_num: port num to which the connection is bound * @cqe: completion queue entry for the last WR * @chain_wr: WR to append to the posted chain * * Return the WR chain for the set of RDMA READ/WRITE operations described by * @ctx, as well as any memory registration operations needed. If @chain_wr * is non-NULL the WR it points to will be appended to the chain of WRs posted. * If @chain_wr is not set @cqe must be set so that the caller gets a * completion notification. */ struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr) { struct ib_send_wr *first_wr, *last_wr; int i; switch (ctx->type) { case RDMA_RW_SIG_MR: case RDMA_RW_MR: for (i = 0; i < ctx->nr_ops; i++) { rdma_rw_update_lkey(&ctx->reg[i], ctx->reg[i].wr.wr.opcode != IB_WR_RDMA_READ_WITH_INV); } if (ctx->reg[0].inv_wr.next) first_wr = &ctx->reg[0].inv_wr; else first_wr = &ctx->reg[0].reg_wr.wr; last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr; break; case RDMA_RW_MULTI_WR: first_wr = &ctx->map.wrs[0].wr; last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr; break; case RDMA_RW_SINGLE_WR: first_wr = &ctx->single.wr.wr; last_wr = &ctx->single.wr.wr; break; default: BUG(); } if (chain_wr) { last_wr->next = chain_wr; } else { last_wr->wr_cqe = cqe; last_wr->send_flags |= IB_SEND_SIGNALED; } return first_wr; } EXPORT_SYMBOL(rdma_rw_ctx_wrs); /** * rdma_rw_ctx_post - post a RDMA READ or RDMA WRITE operation * @ctx: context to operate on * @qp: queue pair to operate on * @port_num: port num to which the connection is bound * @cqe: completion queue entry for the last WR * @chain_wr: WR to append to the posted chain * * Post the set of RDMA READ/WRITE operations described by @ctx, as well as * any memory registration operations needed. If @chain_wr is non-NULL the * WR it points to will be appended to the chain of WRs posted. If @chain_wr * is not set @cqe must be set so that the caller gets a completion * notification. 
*/ int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr) { struct ib_send_wr *first_wr; first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr); return ib_post_send(qp, first_wr, NULL); } EXPORT_SYMBOL(rdma_rw_ctx_post); /** * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init * @ctx: context to release * @qp: queue pair to operate on * @port_num: port num to which the connection is bound * @sg: scatterlist that was used for the READ/WRITE * @sg_cnt: number of entries in @sg * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ */ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir) { int i; switch (ctx->type) { case RDMA_RW_MR: for (i = 0; i < ctx->nr_ops; i++) ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr); kfree(ctx->reg); break; case RDMA_RW_MULTI_WR: kfree(ctx->map.wrs); kfree(ctx->map.sges); break; case RDMA_RW_SINGLE_WR: break; default: BUG(); break; } ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir); } EXPORT_SYMBOL(rdma_rw_ctx_destroy); /** * rdma_rw_ctx_destroy_signature - release all resources allocated by * rdma_rw_ctx_signature_init * @ctx: context to release * @qp: queue pair to operate on * @port_num: port num to which the connection is bound * @sg: scatterlist that was used for the READ/WRITE * @sg_cnt: number of entries in @sg * @prot_sg: scatterlist that was used for the READ/WRITE of the PI * @prot_sg_cnt: number of entries in @prot_sg * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ */ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct scatterlist *sg, u32 sg_cnt, struct scatterlist *prot_sg, u32 prot_sg_cnt, enum dma_data_direction dir) { if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR)) return; ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr); kfree(ctx->reg); if (prot_sg_cnt) ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir); ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir); } EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature); /** * rdma_rw_mr_factor - return number of MRs required for a payload * @device: device handling the connection * @port_num: port num to which the connection is bound * @maxpages: maximum payload pages per rdma_rw_ctx * * Returns the number of MRs the device requires to move @maxpayload * bytes. The returned value is used during transport creation to * compute max_rdma_ctxts and the size of the transport's Send and * Send Completion Queues. */ unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num, unsigned int maxpages) { unsigned int mr_pages; if (rdma_rw_can_use_mr(device, port_num)) mr_pages = rdma_rw_fr_page_list_len(device, false); else mr_pages = device->attrs.max_sge_rd; return DIV_ROUND_UP(maxpages, mr_pages); } EXPORT_SYMBOL(rdma_rw_mr_factor); void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr) { u32 factor; WARN_ON_ONCE(attr->port_num == 0); /* * Each context needs at least one RDMA READ or WRITE WR. * * For some hardware we might need more, eventually we should ask the * HCA driver for a multiplier here. */ factor = 1; /* * If the devices needs MRs to perform RDMA READ or WRITE operations, * we'll need two additional MRs for the registrations and the * invalidation. 
*/ if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN || rdma_rw_can_use_mr(dev, attr->port_num)) factor += 2; /* inv + reg */ attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs; /* * But maybe we were just too high in the sky and the device doesn't * even support all we need, and we'll have to live with what we get.. */ attr->cap.max_send_wr = min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr); } int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr) { struct ib_device *dev = qp->pd->device; u32 nr_mrs = 0, nr_sig_mrs = 0, max_num_sg = 0; int ret = 0; if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) { nr_sig_mrs = attr->cap.max_rdma_ctxs; nr_mrs = attr->cap.max_rdma_ctxs; max_num_sg = rdma_rw_fr_page_list_len(dev, true); } else if (rdma_rw_can_use_mr(dev, attr->port_num)) { nr_mrs = attr->cap.max_rdma_ctxs; max_num_sg = rdma_rw_fr_page_list_len(dev, false); } if (nr_mrs) { ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs, IB_MR_TYPE_MEM_REG, max_num_sg, 0); if (ret) { pr_err("%s: failed to allocated %u MRs\n", __func__, nr_mrs); return ret; } } if (nr_sig_mrs) { ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs, IB_MR_TYPE_INTEGRITY, max_num_sg, max_num_sg); if (ret) { pr_err("%s: failed to allocated %u SIG MRs\n", __func__, nr_sig_mrs); goto out_free_rdma_mrs; } } return 0; out_free_rdma_mrs: ib_mr_pool_destroy(qp, &qp->rdma_mrs); return ret; } void rdma_rw_cleanup_mrs(struct ib_qp *qp) { ib_mr_pool_destroy(qp, &qp->sig_mrs); ib_mr_pool_destroy(qp, &qp->rdma_mrs); }
linux-master
drivers/infiniband/core/rw.c
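rdma_rw_ctx_init()/_post()/_destroy() above are the public surface of rw.c. Below is a minimal caller sketch with hypothetical names (struct example_io, example_rdma_write); it issues a target-side RDMA WRITE that yields a single completion, and only indicates the destroy-on-completion path in comments.

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rw.h>

struct example_io {
	struct rdma_rw_ctx ctx;
	struct ib_cqe cqe;	/* becomes wr_cqe of the last, signaled WR */
};

static void example_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct example_io *io = container_of(wc->wr_cqe, struct example_io, cqe);

	/* The whole WR chain built by rdma_rw_ctx_wrs() has completed;
	 * rdma_rw_ctx_destroy() would normally be called from here once
	 * the transferred data has been consumed. */
	(void)io;
}

static int example_rdma_write(struct example_io *io, struct ib_qp *qp,
			      u32 port_num, struct scatterlist *sg, u32 sg_cnt,
			      u64 remote_addr, u32 rkey)
{
	int ret;

	/* Maps @sg for DMA and picks single-WR, multi-WR or MR mode. */
	ret = rdma_rw_ctx_init(&io->ctx, qp, port_num, sg, sg_cnt, 0,
			       remote_addr, rkey, DMA_TO_DEVICE);
	if (ret < 0)
		return ret;

	io->cqe.done = example_write_done;
	ret = rdma_rw_ctx_post(&io->ctx, qp, port_num, &io->cqe, NULL);
	if (ret)
		rdma_rw_ctx_destroy(&io->ctx, qp, port_num, sg, sg_cnt,
				    DMA_TO_DEVICE);
	return ret;
}

Passing a NULL chain_wr and a cqe is the simple case; a storage target would typically chain a SEND carrying the response instead.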
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com>
 */

#include "core_priv.h"

/**
 * ib_device_register_rdmacg - register with rdma cgroup.
 * @device: device to register to participate in resource
 *     accounting by rdma cgroup.
 *
 * Register with the rdma cgroup. Should be called before
 * exposing rdma device to user space applications to avoid
 * resource accounting leak.
 */
void ib_device_register_rdmacg(struct ib_device *device)
{
	device->cg_device.name = device->name;
	rdmacg_register_device(&device->cg_device);
}

/**
 * ib_device_unregister_rdmacg - unregister with rdma cgroup.
 * @device: device to unregister.
 *
 * Unregister with the rdma cgroup. Should be called after
 * all the resources are deallocated, and after a stage when any
 * other resource allocation by user application cannot be done
 * for this device to avoid any leak in accounting.
 */
void ib_device_unregister_rdmacg(struct ib_device *device)
{
	rdmacg_unregister_device(&device->cg_device);
}

int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
			 struct ib_device *device,
			 enum rdmacg_resource_type resource_index)
{
	return rdmacg_try_charge(&cg_obj->cg, &device->cg_device,
				 resource_index);
}
EXPORT_SYMBOL(ib_rdmacg_try_charge);

void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
			struct ib_device *device,
			enum rdmacg_resource_type resource_index)
{
	rdmacg_uncharge(cg_obj->cg, &device->cg_device, resource_index);
}
EXPORT_SYMBOL(ib_rdmacg_uncharge);
linux-master
drivers/infiniband/core/cgroup.c
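A sketch of how an object-creation path can use the two exported charge/uncharge helpers above. The example_* wrappers are hypothetical; the declarations and the RDMACG_RESOURCE_HCA_OBJECT resource type are assumed to come from rdma/ib_verbs.h and linux/cgroup_rdma.h.

#include <linux/cgroup_rdma.h>
#include <rdma/ib_verbs.h>

/* Hypothetical: account one HW object against the caller's rdma cgroup
 * before creating it. */
static int example_charge_object(struct ib_rdmacg_object *cg_obj,
				 struct ib_device *device)
{
	int ret;

	ret = ib_rdmacg_try_charge(cg_obj, device, RDMACG_RESOURCE_HCA_OBJECT);
	if (ret)
		return ret;	/* the owning rdma cgroup is over its limit */

	/* ... create the underlying HW object here; on failure, call
	 * ib_rdmacg_uncharge() to undo the accounting ... */
	return 0;
}

static void example_release_object(struct ib_rdmacg_object *cg_obj,
				   struct ib_device *device)
{
	ib_rdmacg_uncharge(cg_obj, device, RDMACG_RESOURCE_HCA_OBJECT);
}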
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* * Copyright (c) 2020 Intel Corporation. All rights reserved. */ #include <linux/dma-buf.h> #include <linux/dma-resv.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include "uverbs.h" MODULE_IMPORT_NS(DMA_BUF); int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf) { struct sg_table *sgt; struct scatterlist *sg; unsigned long start, end, cur = 0; unsigned int nmap = 0; long ret; int i; dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv); if (umem_dmabuf->sgt) goto wait_fence; sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL); if (IS_ERR(sgt)) return PTR_ERR(sgt); /* modify the sg list in-place to match umem address and length */ start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE); end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length, PAGE_SIZE); for_each_sgtable_dma_sg(sgt, sg, i) { if (start < cur + sg_dma_len(sg) && cur < end) nmap++; if (cur <= start && start < cur + sg_dma_len(sg)) { unsigned long offset = start - cur; umem_dmabuf->first_sg = sg; umem_dmabuf->first_sg_offset = offset; sg_dma_address(sg) += offset; sg_dma_len(sg) -= offset; cur += offset; } if (cur < end && end <= cur + sg_dma_len(sg)) { unsigned long trim = cur + sg_dma_len(sg) - end; umem_dmabuf->last_sg = sg; umem_dmabuf->last_sg_trim = trim; sg_dma_len(sg) -= trim; break; } cur += sg_dma_len(sg); } umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg; umem_dmabuf->umem.sgt_append.sgt.nents = nmap; umem_dmabuf->sgt = sgt; wait_fence: /* * Although the sg list is valid now, the content of the pages * may be not up-to-date. Wait for the exporter to finish * the migration. */ ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, DMA_RESV_USAGE_KERNEL, false, MAX_SCHEDULE_TIMEOUT); if (ret < 0) return ret; if (ret == 0) return -ETIMEDOUT; return 0; } EXPORT_SYMBOL(ib_umem_dmabuf_map_pages); void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv); if (!umem_dmabuf->sgt) return; /* retore the original sg list */ if (umem_dmabuf->first_sg) { sg_dma_address(umem_dmabuf->first_sg) -= umem_dmabuf->first_sg_offset; sg_dma_len(umem_dmabuf->first_sg) += umem_dmabuf->first_sg_offset; umem_dmabuf->first_sg = NULL; umem_dmabuf->first_sg_offset = 0; } if (umem_dmabuf->last_sg) { sg_dma_len(umem_dmabuf->last_sg) += umem_dmabuf->last_sg_trim; umem_dmabuf->last_sg = NULL; umem_dmabuf->last_sg_trim = 0; } dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt, DMA_BIDIRECTIONAL); umem_dmabuf->sgt = NULL; } EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages); struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device, unsigned long offset, size_t size, int fd, int access, const struct dma_buf_attach_ops *ops) { struct dma_buf *dmabuf; struct ib_umem_dmabuf *umem_dmabuf; struct ib_umem *umem; unsigned long end; struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL); if (check_add_overflow(offset, (unsigned long)size, &end)) return ret; if (unlikely(!ops || !ops->move_notify)) return ret; dmabuf = dma_buf_get(fd); if (IS_ERR(dmabuf)) return ERR_CAST(dmabuf); if (dmabuf->size < end) goto out_release_dmabuf; umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL); if (!umem_dmabuf) { ret = ERR_PTR(-ENOMEM); goto out_release_dmabuf; } umem = &umem_dmabuf->umem; umem->ibdev = device; umem->length = size; umem->address = offset; umem->writable = ib_access_writable(access); umem->is_dmabuf = 1; if (!ib_umem_num_pages(umem)) goto out_free_umem; 
umem_dmabuf->attach = dma_buf_dynamic_attach( dmabuf, device->dma_device, ops, umem_dmabuf); if (IS_ERR(umem_dmabuf->attach)) { ret = ERR_CAST(umem_dmabuf->attach); goto out_free_umem; } return umem_dmabuf; out_free_umem: kfree(umem_dmabuf); out_release_dmabuf: dma_buf_put(dmabuf); return ret; } EXPORT_SYMBOL(ib_umem_dmabuf_get); static void ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach) { struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv; ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev, "Invalidate callback should not be called when memory is pinned\n"); } static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = { .allow_peer2peer = true, .move_notify = ib_umem_dmabuf_unsupported_move_notify, }; struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset, size_t size, int fd, int access) { struct ib_umem_dmabuf *umem_dmabuf; int err; umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access, &ib_umem_dmabuf_attach_pinned_ops); if (IS_ERR(umem_dmabuf)) return umem_dmabuf; dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL); err = dma_buf_pin(umem_dmabuf->attach); if (err) goto err_release; umem_dmabuf->pinned = 1; err = ib_umem_dmabuf_map_pages(umem_dmabuf); if (err) goto err_unpin; dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv); return umem_dmabuf; err_unpin: dma_buf_unpin(umem_dmabuf->attach); err_release: dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv); ib_umem_release(&umem_dmabuf->umem); return ERR_PTR(err); } EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned); void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf; dma_resv_lock(dmabuf->resv, NULL); ib_umem_dmabuf_unmap_pages(umem_dmabuf); if (umem_dmabuf->pinned) dma_buf_unpin(umem_dmabuf->attach); dma_resv_unlock(dmabuf->resv); dma_buf_detach(dmabuf, umem_dmabuf->attach); dma_buf_put(dmabuf); kfree(umem_dmabuf); }
linux-master
drivers/infiniband/core/umem_dmabuf.c
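A driver-side sketch of the pinned import path implemented above. The example_* helpers are hypothetical; the release goes through ib_umem_release(), which is the usual driver entry point for dropping a dma-buf umem.

#include <linux/err.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Hypothetical reg_user_mr_dmabuf() helper for a driver that cannot
 * handle move_notify() invalidations and therefore needs the pages
 * pinned and mapped up front. */
static struct ib_umem_dmabuf *example_import_pinned(struct ib_device *device,
						    unsigned long offset,
						    size_t size, int dmabuf_fd,
						    int access)
{
	struct ib_umem_dmabuf *umem_dmabuf;

	umem_dmabuf = ib_umem_dmabuf_get_pinned(device, offset, size,
						dmabuf_fd, access);
	if (IS_ERR(umem_dmabuf))
		return umem_dmabuf;

	/* umem_dmabuf->umem.sgt_append.sgt now describes the DMA-mapped
	 * region; program the HW MR from it and keep the umem around until
	 * the MR is destroyed. */
	return umem_dmabuf;
}

static void example_release_pinned(struct ib_umem_dmabuf *umem_dmabuf)
{
	ib_umem_release(&umem_dmabuf->umem);
}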
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/inetdevice.h> #include <net/addrconf.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/byteorder.h> #include <rdma/iw_cm.h> #include <rdma/ib_verbs.h> #include <rdma/ib_smi.h> #include <rdma/ib_umem.h> #include <rdma/ib_user_verbs.h> #include "iw_cxgb4.h" static int fastreg_support = 1; module_param(fastreg_support, int, 0644); MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)"); static void c4iw_dealloc_ucontext(struct ib_ucontext *context) { struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); struct c4iw_dev *rhp; struct c4iw_mm_entry *mm, *tmp; pr_debug("context %p\n", context); rhp = to_c4iw_dev(ucontext->ibucontext.device); list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry) kfree(mm); c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); } static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext, struct ib_udata *udata) { struct ib_device *ibdev = ucontext->device; struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext); struct c4iw_dev *rhp = to_c4iw_dev(ibdev); struct c4iw_alloc_ucontext_resp uresp; int ret = 0; struct c4iw_mm_entry *mm = NULL; pr_debug("ibdev %p\n", ibdev); c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); INIT_LIST_HEAD(&context->mmaps); spin_lock_init(&context->mmap_lock); if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) { pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n"); rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED; } else { mm = kmalloc(sizeof(*mm), GFP_KERNEL); if (!mm) { ret = -ENOMEM; goto err; } uresp.status_page_size = PAGE_SIZE; spin_lock(&context->mmap_lock); uresp.status_page_key = context->key; context->key += PAGE_SIZE; spin_unlock(&context->mmap_lock); ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp) - sizeof(uresp.reserved)); if (ret) goto err_mm; mm->key = uresp.status_page_key; mm->addr = 
virt_to_phys(rhp->rdev.status_page); mm->len = PAGE_SIZE; insert_mmap(context, mm); } return 0; err_mm: kfree(mm); err: return ret; } static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { int len = vma->vm_end - vma->vm_start; u32 key = vma->vm_pgoff << PAGE_SHIFT; struct c4iw_rdev *rdev; int ret = 0; struct c4iw_mm_entry *mm; struct c4iw_ucontext *ucontext; u64 addr; pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff, key, len); if (vma->vm_start & (PAGE_SIZE-1)) return -EINVAL; rdev = &(to_c4iw_dev(context->device)->rdev); ucontext = to_c4iw_ucontext(context); mm = remove_mmap(ucontext, key, len); if (!mm) return -EINVAL; addr = mm->addr; kfree(mm); if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) && (addr < (pci_resource_start(rdev->lldi.pdev, 0) + pci_resource_len(rdev->lldi.pdev, 0)))) { /* * MA_SYNC register... */ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ret = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, len, vma->vm_page_prot); } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) && (addr < (pci_resource_start(rdev->lldi.pdev, 2) + pci_resource_len(rdev->lldi.pdev, 2)))) { /* * Map user DB or OCQP memory... */ if (addr >= rdev->oc_mw_pa) vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot); else { if (!is_t4(rdev->lldi.adapter_type)) vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot); else vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); } ret = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, len, vma->vm_page_prot); } else { /* * Map WQ or CQ contig dma memory... */ ret = remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, len, vma->vm_page_prot); } return ret; } static int c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata) { struct c4iw_dev *rhp; struct c4iw_pd *php; php = to_c4iw_pd(pd); rhp = php->rhp; pr_debug("ibpd %p pdid 0x%x\n", pd, php->pdid); c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid); mutex_lock(&rhp->rdev.stats.lock); rhp->rdev.stats.pd.cur--; mutex_unlock(&rhp->rdev.stats.lock); return 0; } static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata) { struct c4iw_pd *php = to_c4iw_pd(pd); struct ib_device *ibdev = pd->device; u32 pdid; struct c4iw_dev *rhp; pr_debug("ibdev %p\n", ibdev); rhp = (struct c4iw_dev *) ibdev; pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table); if (!pdid) return -EINVAL; php->pdid = pdid; php->rhp = rhp; if (udata) { struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid}; if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { c4iw_deallocate_pd(&php->ibpd, udata); return -EFAULT; } } mutex_lock(&rhp->rdev.stats.lock); rhp->rdev.stats.pd.cur++; if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max) rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur; mutex_unlock(&rhp->rdev.stats.lock); pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php); return 0; } static int c4iw_query_gid(struct ib_device *ibdev, u32 port, int index, union ib_gid *gid) { struct c4iw_dev *dev; pr_debug("ibdev %p, port %u, index %d, gid %p\n", ibdev, port, index, gid); if (!port) return -EINVAL; dev = to_c4iw_dev(ibdev); memset(&(gid->raw[0]), 0, sizeof(gid->raw)); memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6); return 0; } static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *uhw) { struct c4iw_dev *dev; pr_debug("ibdev %p\n", ibdev); if (uhw->inlen || uhw->outlen) return -EINVAL; dev = to_c4iw_dev(ibdev); addrconf_addr_eui48((u8 *)&props->sys_image_guid, 
dev->rdev.lldi.ports[0]->dev_addr); props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type); props->fw_ver = dev->rdev.lldi.fw_vers; props->device_cap_flags = IB_DEVICE_MEM_WINDOW; props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY; if (fastreg_support) props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; props->page_size_cap = T4_PAGESIZE_MASK; props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor; props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device; props->max_mr_size = T4_MAX_MR_SIZE; props->max_qp = dev->rdev.lldi.vr->qp.size / 2; props->max_srq = dev->rdev.lldi.vr->srq.size; props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth; props->max_srq_wr = dev->rdev.hw_queue.t4_max_qp_depth; props->max_send_sge = min(T4_MAX_SEND_SGE, T4_MAX_WRITE_SGE); props->max_recv_sge = T4_MAX_RECV_SGE; props->max_srq_sge = T4_MAX_RECV_SGE; props->max_sge_rd = 1; props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter; props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth); props->max_qp_init_rd_atom = props->max_qp_rd_atom; props->max_cq = dev->rdev.lldi.vr->qp.size; props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth; props->max_mr = c4iw_num_stags(&dev->rdev); props->max_pd = T4_MAX_NUM_PD; props->local_ca_ack_delay = 0; props->max_fast_reg_page_list_len = t4_max_fr_depth(dev->rdev.lldi.ulptx_memwrite_dsgl && use_dsgl); return 0; } static int c4iw_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props) { int ret = 0; pr_debug("ibdev %p\n", ibdev); ret = ib_get_eth_speed(ibdev, port, &props->active_speed, &props->active_width); props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_REINIT_SUP | IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; props->gid_tbl_len = 1; props->max_msg_sz = -1; return ret; } static ssize_t hw_rev_show(struct device *dev, struct device_attribute *attr, char *buf) { struct c4iw_dev *c4iw_dev = rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev); pr_debug("dev 0x%p\n", dev); return sysfs_emit( buf, "%d\n", CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type)); } static DEVICE_ATTR_RO(hw_rev); static ssize_t hca_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct c4iw_dev *c4iw_dev = rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev); struct ethtool_drvinfo info; struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0]; pr_debug("dev 0x%p\n", dev); lldev->ethtool_ops->get_drvinfo(lldev, &info); return sysfs_emit(buf, "%s\n", info.driver); } static DEVICE_ATTR_RO(hca_type); static ssize_t board_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct c4iw_dev *c4iw_dev = rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev); pr_debug("dev 0x%p\n", dev); return sysfs_emit(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor, c4iw_dev->rdev.lldi.pdev->device); } static DEVICE_ATTR_RO(board_id); enum counters { IP4INSEGS, IP4OUTSEGS, IP4RETRANSSEGS, IP4OUTRSTS, IP6INSEGS, IP6OUTSEGS, IP6RETRANSSEGS, IP6OUTRSTS, NR_COUNTERS }; static const struct rdma_stat_desc cxgb4_descs[] = { [IP4INSEGS].name = "ip4InSegs", [IP4OUTSEGS].name = "ip4OutSegs", [IP4RETRANSSEGS].name = "ip4RetransSegs", [IP4OUTRSTS].name = "ip4OutRsts", [IP6INSEGS].name = "ip6InSegs", [IP6OUTSEGS].name = "ip6OutSegs", [IP6RETRANSSEGS].name = "ip6RetransSegs", [IP6OUTRSTS].name = "ip6OutRsts" }; static struct rdma_hw_stats *c4iw_alloc_device_stats(struct ib_device *ibdev) { BUILD_BUG_ON(ARRAY_SIZE(cxgb4_descs) != NR_COUNTERS); /* FIXME: these look like port stats */ 
return rdma_alloc_hw_stats_struct(cxgb4_descs, NR_COUNTERS, RDMA_HW_STATS_DEFAULT_LIFESPAN); } static int c4iw_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats, u32 port, int index) { struct tp_tcp_stats v4, v6; struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev); cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6); stats->value[IP4INSEGS] = v4.tcp_in_segs; stats->value[IP4OUTSEGS] = v4.tcp_out_segs; stats->value[IP4RETRANSSEGS] = v4.tcp_retrans_segs; stats->value[IP4OUTRSTS] = v4.tcp_out_rsts; stats->value[IP6INSEGS] = v6.tcp_in_segs; stats->value[IP6OUTSEGS] = v6.tcp_out_segs; stats->value[IP6RETRANSSEGS] = v6.tcp_retrans_segs; stats->value[IP6OUTRSTS] = v6.tcp_out_rsts; return stats->num_counters; } static struct attribute *c4iw_class_attributes[] = { &dev_attr_hw_rev.attr, &dev_attr_hca_type.attr, &dev_attr_board_id.attr, NULL }; static const struct attribute_group c4iw_attr_group = { .attrs = c4iw_class_attributes, }; static int c4iw_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { struct ib_port_attr attr; int err; immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; err = ib_query_port(ibdev, port_num, &attr); if (err) return err; immutable->gid_tbl_len = attr.gid_tbl_len; return 0; } static void get_dev_fw_str(struct ib_device *dev, char *str) { struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev, ibdev); pr_debug("dev 0x%p\n", dev); snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u", FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers), FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers), FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers), FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers)); } static const struct ib_device_ops c4iw_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_CXGB4, .uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION, .alloc_hw_device_stats = c4iw_alloc_device_stats, .alloc_mr = c4iw_alloc_mr, .alloc_pd = c4iw_allocate_pd, .alloc_ucontext = c4iw_alloc_ucontext, .create_cq = c4iw_create_cq, .create_qp = c4iw_create_qp, .create_srq = c4iw_create_srq, .dealloc_pd = c4iw_deallocate_pd, .dealloc_ucontext = c4iw_dealloc_ucontext, .dereg_mr = c4iw_dereg_mr, .destroy_cq = c4iw_destroy_cq, .destroy_qp = c4iw_destroy_qp, .destroy_srq = c4iw_destroy_srq, .device_group = &c4iw_attr_group, .fill_res_cq_entry = c4iw_fill_res_cq_entry, .fill_res_cm_id_entry = c4iw_fill_res_cm_id_entry, .fill_res_mr_entry = c4iw_fill_res_mr_entry, .get_dev_fw_str = get_dev_fw_str, .get_dma_mr = c4iw_get_dma_mr, .get_hw_stats = c4iw_get_mib, .get_port_immutable = c4iw_port_immutable, .iw_accept = c4iw_accept_cr, .iw_add_ref = c4iw_qp_add_ref, .iw_connect = c4iw_connect, .iw_create_listen = c4iw_create_listen, .iw_destroy_listen = c4iw_destroy_listen, .iw_get_qp = c4iw_get_qp, .iw_reject = c4iw_reject_cr, .iw_rem_ref = c4iw_qp_rem_ref, .map_mr_sg = c4iw_map_mr_sg, .mmap = c4iw_mmap, .modify_qp = c4iw_ib_modify_qp, .modify_srq = c4iw_modify_srq, .poll_cq = c4iw_poll_cq, .post_recv = c4iw_post_receive, .post_send = c4iw_post_send, .post_srq_recv = c4iw_post_srq_recv, .query_device = c4iw_query_device, .query_gid = c4iw_query_gid, .query_port = c4iw_query_port, .query_qp = c4iw_ib_query_qp, .reg_user_mr = c4iw_reg_user_mr, .req_notify_cq = c4iw_arm_cq, INIT_RDMA_OBJ_SIZE(ib_cq, c4iw_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_mw, c4iw_mw, ibmw), INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_qp, c4iw_qp, ibqp), INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq), INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext), }; static int 
set_netdevs(struct ib_device *ib_dev, struct c4iw_rdev *rdev) { int ret; int i; for (i = 0; i < rdev->lldi.nports; i++) { ret = ib_device_set_netdev(ib_dev, rdev->lldi.ports[i], i + 1); if (ret) return ret; } return 0; } void c4iw_register_device(struct work_struct *work) { int ret; struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work); struct c4iw_dev *dev = ctx->dev; pr_debug("c4iw_dev %p\n", dev); addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr); dev->ibdev.local_dma_lkey = 0; dev->ibdev.node_type = RDMA_NODE_RNIC; BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX); memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC)); dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports; dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq; dev->ibdev.dev.parent = &dev->rdev.lldi.pdev->dev; memcpy(dev->ibdev.iw_ifname, dev->rdev.lldi.ports[0]->name, sizeof(dev->ibdev.iw_ifname)); ib_set_device_ops(&dev->ibdev, &c4iw_dev_ops); ret = set_netdevs(&dev->ibdev, &dev->rdev); if (ret) goto err_dealloc_ctx; dma_set_max_seg_size(&dev->rdev.lldi.pdev->dev, UINT_MAX); ret = ib_register_device(&dev->ibdev, "cxgb4_%d", &dev->rdev.lldi.pdev->dev); if (ret) goto err_dealloc_ctx; return; err_dealloc_ctx: pr_err("%s - Failed registering iwarp device: %d\n", pci_name(ctx->lldi.pdev), ret); c4iw_dealloc(ctx); return; } void c4iw_unregister_device(struct c4iw_dev *dev) { pr_debug("c4iw_dev %p\n", dev); ib_unregister_device(&dev->ibdev); return; }
linux-master
drivers/infiniband/hw/cxgb4/provider.c
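c4iw_alloc_ucontext() and c4iw_mmap() above rely on insert_mmap()/remove_mmap() from iw_cxgb4.h, which are not shown in this file. The sketch below reconstructs their assumed shape purely from the fields used above (a key/addr/len record list on the ucontext, protected by mmap_lock); the example_* names are hypothetical and not the real helpers.

#include <linux/list.h>
#include <linux/spinlock.h>
#include "iw_cxgb4.h"

/* Assumed shape: alloc paths store a (key, addr, len) record and hand the
 * key to userspace; c4iw_mmap() later removes the record by key. */
static void example_insert_mmap(struct c4iw_ucontext *ucontext,
				struct c4iw_mm_entry *mm)
{
	spin_lock(&ucontext->mmap_lock);
	list_add_tail(&mm->entry, &ucontext->mmaps);
	spin_unlock(&ucontext->mmap_lock);
}

static struct c4iw_mm_entry *example_remove_mmap(struct c4iw_ucontext *ucontext,
						 u32 key, unsigned int len)
{
	struct c4iw_mm_entry *mm;

	spin_lock(&ucontext->mmap_lock);
	list_for_each_entry(mm, &ucontext->mmaps, entry) {
		if (mm->key == key && mm->len == len) {
			list_del(&mm->entry);
			spin_unlock(&ucontext->mmap_lock);
			return mm;
		}
	}
	spin_unlock(&ucontext->mmap_lock);
	return NULL;
}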
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Crude resource management */ #include <linux/spinlock.h> #include <linux/genalloc.h> #include <linux/ratelimit.h> #include "iw_cxgb4.h" static int c4iw_init_qid_table(struct c4iw_rdev *rdev) { u32 i; if (c4iw_id_table_alloc(&rdev->resource.qid_table, rdev->lldi.vr->qp.start, rdev->lldi.vr->qp.size, rdev->lldi.vr->qp.size, 0)) return -ENOMEM; for (i = rdev->lldi.vr->qp.start; i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++) if (!(i & rdev->qpmask)) c4iw_id_free(&rdev->resource.qid_table, i); return 0; } /* nr_* must be power of 2 */ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid, u32 nr_srqt) { int err = 0; err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1, C4IW_ID_TABLE_F_RANDOM); if (err) goto tpt_err; err = c4iw_init_qid_table(rdev); if (err) goto qid_err; err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0, nr_pdid, 1, 0); if (err) goto pdid_err; if (!nr_srqt) err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0, 1, 1, 0); else err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0, nr_srqt, 0, 0); if (err) goto srq_err; return 0; srq_err: c4iw_id_table_free(&rdev->resource.pdid_table); pdid_err: c4iw_id_table_free(&rdev->resource.qid_table); qid_err: c4iw_id_table_free(&rdev->resource.tpt_table); tpt_err: return -ENOMEM; } /* * returns 0 if no resource available */ u32 c4iw_get_resource(struct c4iw_id_table *id_table) { u32 entry; entry = c4iw_id_alloc(id_table); if (entry == (u32)(-1)) return 0; return entry; } void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry) { pr_debug("entry 0x%x\n", entry); c4iw_id_free(id_table, entry); } u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) { struct c4iw_qid_list *entry; u32 qid; int i; mutex_lock(&uctx->lock); if (!list_empty(&uctx->cqids)) { entry = list_entry(uctx->cqids.next, struct c4iw_qid_list, entry); list_del(&entry->entry); qid = entry->qid; kfree(entry); } else { qid = c4iw_get_resource(&rdev->resource.qid_table); if (!qid) goto out; mutex_lock(&rdev->stats.lock); rdev->stats.qid.cur += rdev->qpmask + 1; mutex_unlock(&rdev->stats.lock); for (i = qid+1; i & rdev->qpmask; i++) { entry = kmalloc(sizeof(*entry), GFP_KERNEL); if 
(!entry) goto out; entry->qid = i; list_add_tail(&entry->entry, &uctx->cqids); } /* * now put the same ids on the qp list since they all * map to the same db/gts page. */ entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) goto out; entry->qid = qid; list_add_tail(&entry->entry, &uctx->qpids); for (i = qid+1; i & rdev->qpmask; i++) { entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) goto out; entry->qid = i; list_add_tail(&entry->entry, &uctx->qpids); } } out: mutex_unlock(&uctx->lock); pr_debug("qid 0x%x\n", qid); mutex_lock(&rdev->stats.lock); if (rdev->stats.qid.cur > rdev->stats.qid.max) rdev->stats.qid.max = rdev->stats.qid.cur; mutex_unlock(&rdev->stats.lock); return qid; } void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, struct c4iw_dev_ucontext *uctx) { struct c4iw_qid_list *entry; entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return; pr_debug("qid 0x%x\n", qid); entry->qid = qid; mutex_lock(&uctx->lock); list_add_tail(&entry->entry, &uctx->cqids); mutex_unlock(&uctx->lock); } u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) { struct c4iw_qid_list *entry; u32 qid; int i; mutex_lock(&uctx->lock); if (!list_empty(&uctx->qpids)) { entry = list_entry(uctx->qpids.next, struct c4iw_qid_list, entry); list_del(&entry->entry); qid = entry->qid; kfree(entry); } else { qid = c4iw_get_resource(&rdev->resource.qid_table); if (!qid) { mutex_lock(&rdev->stats.lock); rdev->stats.qid.fail++; mutex_unlock(&rdev->stats.lock); goto out; } mutex_lock(&rdev->stats.lock); rdev->stats.qid.cur += rdev->qpmask + 1; mutex_unlock(&rdev->stats.lock); for (i = qid+1; i & rdev->qpmask; i++) { entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) goto out; entry->qid = i; list_add_tail(&entry->entry, &uctx->qpids); } /* * now put the same ids on the cq list since they all * map to the same db/gts page. */ entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) goto out; entry->qid = qid; list_add_tail(&entry->entry, &uctx->cqids); for (i = qid + 1; i & rdev->qpmask; i++) { entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) goto out; entry->qid = i; list_add_tail(&entry->entry, &uctx->cqids); } } out: mutex_unlock(&uctx->lock); pr_debug("qid 0x%x\n", qid); mutex_lock(&rdev->stats.lock); if (rdev->stats.qid.cur > rdev->stats.qid.max) rdev->stats.qid.max = rdev->stats.qid.cur; mutex_unlock(&rdev->stats.lock); return qid; } void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid, struct c4iw_dev_ucontext *uctx) { struct c4iw_qid_list *entry; entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return; pr_debug("qid 0x%x\n", qid); entry->qid = qid; mutex_lock(&uctx->lock); list_add_tail(&entry->entry, &uctx->qpids); mutex_unlock(&uctx->lock); } void c4iw_destroy_resource(struct c4iw_resource *rscp) { c4iw_id_table_free(&rscp->tpt_table); c4iw_id_table_free(&rscp->qid_table); c4iw_id_table_free(&rscp->pdid_table); } /* * PBL Memory Manager. Uses Linux generic allocator. 
*/ #define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) { unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size); pr_debug("addr 0x%x size %d\n", (u32)addr, size); mutex_lock(&rdev->stats.lock); if (addr) { rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT); if (rdev->stats.pbl.cur > rdev->stats.pbl.max) rdev->stats.pbl.max = rdev->stats.pbl.cur; kref_get(&rdev->pbl_kref); } else rdev->stats.pbl.fail++; mutex_unlock(&rdev->stats.lock); return (u32)addr; } static void destroy_pblpool(struct kref *kref) { struct c4iw_rdev *rdev; rdev = container_of(kref, struct c4iw_rdev, pbl_kref); gen_pool_destroy(rdev->pbl_pool); complete(&rdev->pbl_compl); } void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) { pr_debug("addr 0x%x size %d\n", addr, size); mutex_lock(&rdev->stats.lock); rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size); kref_put(&rdev->pbl_kref, destroy_pblpool); } int c4iw_pblpool_create(struct c4iw_rdev *rdev) { unsigned pbl_start, pbl_chunk, pbl_top; rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1); if (!rdev->pbl_pool) return -ENOMEM; pbl_start = rdev->lldi.vr->pbl.start; pbl_chunk = rdev->lldi.vr->pbl.size; pbl_top = pbl_start + pbl_chunk; while (pbl_start < pbl_top) { pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk); if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) { pr_debug("failed to add PBL chunk (%x/%x)\n", pbl_start, pbl_chunk); if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) { pr_warn("Failed to add all PBL chunks (%x/%x)\n", pbl_start, pbl_top - pbl_start); return 0; } pbl_chunk >>= 1; } else { pr_debug("added PBL chunk (%x/%x)\n", pbl_start, pbl_chunk); pbl_start += pbl_chunk; } } return 0; } void c4iw_pblpool_destroy(struct c4iw_rdev *rdev) { kref_put(&rdev->pbl_kref, destroy_pblpool); } /* * RQT Memory Manager. Uses Linux generic allocator. */ #define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) { unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6); if (!addr) pr_warn_ratelimited("%s: Out of RQT memory\n", pci_name(rdev->lldi.pdev)); mutex_lock(&rdev->stats.lock); if (addr) { rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT); if (rdev->stats.rqt.cur > rdev->stats.rqt.max) rdev->stats.rqt.max = rdev->stats.rqt.cur; kref_get(&rdev->rqt_kref); } else rdev->stats.rqt.fail++; mutex_unlock(&rdev->stats.lock); return (u32)addr; } static void destroy_rqtpool(struct kref *kref) { struct c4iw_rdev *rdev; rdev = container_of(kref, struct c4iw_rdev, rqt_kref); gen_pool_destroy(rdev->rqt_pool); complete(&rdev->rqt_compl); } void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) { pr_debug("addr 0x%x size %d\n", addr, size << 6); mutex_lock(&rdev->stats.lock); rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6); kref_put(&rdev->rqt_kref, destroy_rqtpool); } int c4iw_rqtpool_create(struct c4iw_rdev *rdev) { unsigned rqt_start, rqt_chunk, rqt_top; int skip = 0; rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1); if (!rdev->rqt_pool) return -ENOMEM; /* * If SRQs are supported, then never use the first RQE from * the RQT region. This is because HW uses RQT index 0 as NULL. 
*/ if (rdev->lldi.vr->srq.size) skip = T4_RQT_ENTRY_SIZE; rqt_start = rdev->lldi.vr->rq.start + skip; rqt_chunk = rdev->lldi.vr->rq.size - skip; rqt_top = rqt_start + rqt_chunk; while (rqt_start < rqt_top) { rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk); if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) { pr_debug("failed to add RQT chunk (%x/%x)\n", rqt_start, rqt_chunk); if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) { pr_warn("Failed to add all RQT chunks (%x/%x)\n", rqt_start, rqt_top - rqt_start); return 0; } rqt_chunk >>= 1; } else { pr_debug("added RQT chunk (%x/%x)\n", rqt_start, rqt_chunk); rqt_start += rqt_chunk; } } return 0; } void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev) { kref_put(&rdev->rqt_kref, destroy_rqtpool); } int c4iw_alloc_srq_idx(struct c4iw_rdev *rdev) { int idx; idx = c4iw_id_alloc(&rdev->resource.srq_table); mutex_lock(&rdev->stats.lock); if (idx == -1) { rdev->stats.srqt.fail++; mutex_unlock(&rdev->stats.lock); return -ENOMEM; } rdev->stats.srqt.cur++; if (rdev->stats.srqt.cur > rdev->stats.srqt.max) rdev->stats.srqt.max = rdev->stats.srqt.cur; mutex_unlock(&rdev->stats.lock); return idx; } void c4iw_free_srq_idx(struct c4iw_rdev *rdev, int idx) { c4iw_id_free(&rdev->resource.srq_table, idx); mutex_lock(&rdev->stats.lock); rdev->stats.srqt.cur--; mutex_unlock(&rdev->stats.lock); } /* * On-Chip QP Memory. */ #define MIN_OCQP_SHIFT 12 /* 4KB == min ocqp size */ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size) { unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size); pr_debug("addr 0x%x size %d\n", (u32)addr, size); if (addr) { mutex_lock(&rdev->stats.lock); rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT); if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max) rdev->stats.ocqp.max = rdev->stats.ocqp.cur; mutex_unlock(&rdev->stats.lock); } return (u32)addr; } void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size) { pr_debug("addr 0x%x size %d\n", addr, size); mutex_lock(&rdev->stats.lock); rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size); } int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev) { unsigned start, chunk, top; rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1); if (!rdev->ocqp_pool) return -ENOMEM; start = rdev->lldi.vr->ocq.start; chunk = rdev->lldi.vr->ocq.size; top = start + chunk; while (start < top) { chunk = min(top - start + 1, chunk); if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) { pr_debug("failed to add OCQP chunk (%x/%x)\n", start, chunk); if (chunk <= 1024 << MIN_OCQP_SHIFT) { pr_warn("Failed to add all OCQP chunks (%x/%x)\n", start, top - start); return 0; } chunk >>= 1; } else { pr_debug("added OCQP chunk (%x/%x)\n", start, chunk); start += chunk; } } return 0; } void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev) { gen_pool_destroy(rdev->ocqp_pool); }
linux-master
drivers/infiniband/hw/cxgb4/resource.c
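c4iw_pblpool_create(), c4iw_rqtpool_create() and c4iw_ocqp_pool_create() above all seed their gen_pool with the same back-off loop: try to add the remaining region as one chunk, and on failure halve the chunk until the add succeeds or the chunk drops below a give-up threshold, at which point the driver settles for a partially populated pool. The user-space sketch below only illustrates that back-off pattern; add_region() is a made-up stand-in for gen_pool_add() and its failure condition is simulated, so treat it as an illustration rather than driver code.

#include <stdbool.h>
#include <stdio.h>

#define MIN_SHIFT 8                         /* mirrors MIN_PBL_SHIFT (256B granularity) */
#define GIVE_UP_CHUNK (1024u << MIN_SHIFT)  /* same give-up point as the driver loops */

/* Hypothetical stand-in for gen_pool_add(): pretend the allocator only
 * accepts chunks of 512KB or less so the halving path is exercised. */
static bool add_region(unsigned int start, unsigned int len)
{
	return len <= (512u << 10);
}

static void seed_pool(unsigned int start, unsigned int size)
{
	unsigned int top = start + size;
	unsigned int chunk = size;

	while (start < top) {
		if (chunk > top - start)
			chunk = top - start;
		if (!add_region(start, chunk)) {
			printf("failed to add chunk (%#x/%#x)\n", start, chunk);
			if (chunk <= GIVE_UP_CHUNK) {
				printf("giving up, %#x left unadded\n", top - start);
				return;         /* partial pool, like the driver */
			}
			chunk >>= 1;            /* back off and retry with a smaller chunk */
		} else {
			printf("added chunk (%#x/%#x)\n", start, chunk);
			start += chunk;
		}
	}
}

int main(void)
{
	seed_pool(0x100000, 2u << 20);          /* 2MB region starting at 1MB */
	return 0;
}

Note the driver-specific wrinkle visible in the real code above: a partial failure is reported with pr_warn() but the create routine still returns 0, so the adapter comes up with whatever fraction of the region it managed to register.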
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <rdma/ib_umem.h> #include <linux/atomic.h> #include <rdma/ib_user_verbs.h> #include "iw_cxgb4.h" int use_dsgl = 1; module_param(use_dsgl, int, 0644); MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1) (DEPRECATED)"); #define T4_ULPTX_MIN_IO 32 #define C4IW_MAX_INLINE_SIZE 96 #define T4_ULPTX_MAX_DMA 1024 #define C4IW_INLINE_THRESHOLD 128 static int inline_threshold = C4IW_INLINE_THRESHOLD; module_param(inline_threshold, int, 0644); MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)"); static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length) { return (is_t4(dev->rdev.lldi.adapter_type) || is_t5(dev->rdev.lldi.adapter_type)) && length >= 8*1024*1024*1024ULL; } static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, u32 len, dma_addr_t data, struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp) { struct ulp_mem_io *req; struct ulptx_sgl *sgl; u8 wr_len; int ret = 0; addr &= 0x7FFFFFF; if (wr_waitp) c4iw_init_wr_wait(wr_waitp); wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16); if (!skb) { skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); if (!skb) return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); req = __skb_put_zero(skb, wr_len); INIT_ULPTX_WR(req, wr_len, 0, 0); req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | (wr_waitp ? FW_WR_COMPL_F : 0)); req->wr.wr_lo = wr_waitp ? 
(__force __be64)(unsigned long)wr_waitp : 0L; req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16))); req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | T5_ULP_MEMIO_ORDER_V(1) | T5_ULP_MEMIO_FID_V(rdev->lldi.rxq_ids[0])); req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5)); req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16)); req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr)); sgl = (struct ulptx_sgl *)(req + 1); sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE_V(1)); sgl->len0 = cpu_to_be32(len); sgl->addr0 = cpu_to_be64(data); if (wr_waitp) ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__); else ret = c4iw_ofld_send(rdev, skb); return ret; } static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data, struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp) { struct ulp_mem_io *req; struct ulptx_idata *sc; u8 wr_len, *to_dp, *from_dp; int copy_len, num_wqe, i, ret = 0; __be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE)); if (is_t4(rdev->lldi.adapter_type)) cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F); else cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F); addr &= 0x7FFFFFF; pr_debug("addr 0x%x len %u\n", addr, len); num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE); c4iw_init_wr_wait(wr_waitp); for (i = 0; i < num_wqe; i++) { copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE : len; wr_len = roundup(sizeof(*req) + sizeof(*sc) + roundup(copy_len, T4_ULPTX_MIN_IO), 16); if (!skb) { skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); if (!skb) return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); req = __skb_put_zero(skb, wr_len); INIT_ULPTX_WR(req, wr_len, 0, 0); if (i == (num_wqe-1)) { req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | FW_WR_COMPL_F); req->wr.wr_lo = (__force __be64)(unsigned long)wr_waitp; } else req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR)); req->wr.wr_mid = cpu_to_be32( FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16))); req->cmd = cmd; req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V( DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO))); req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16)); req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3)); sc = (struct ulptx_idata *)(req + 1); sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM)); sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO)); to_dp = (u8 *)(sc + 1); from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE; if (data) memcpy(to_dp, from_dp, copy_len); else memset(to_dp, 0, copy_len); if (copy_len % T4_ULPTX_MIN_IO) memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO - (copy_len % T4_ULPTX_MIN_IO)); if (i == (num_wqe-1)) ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__); else ret = c4iw_ofld_send(rdev, skb); if (ret) break; skb = NULL; len -= C4IW_MAX_INLINE_SIZE; } return ret; } static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data, struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp) { u32 remain = len; u32 dmalen; int ret = 0; dma_addr_t daddr; dma_addr_t save; daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE); if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr)) return -1; save = daddr; while (remain > inline_threshold) { if (remain < T4_ULPTX_MAX_DMA) { if (remain & ~T4_ULPTX_MIN_IO) dmalen = remain & ~(T4_ULPTX_MIN_IO-1); else dmalen = remain; } else dmalen = T4_ULPTX_MAX_DMA; remain -= dmalen; ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr, skb, remain ? 
NULL : wr_waitp); if (ret) goto out; addr += dmalen >> 5; data += dmalen; daddr += dmalen; } if (remain) ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb, wr_waitp); out: dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE); return ret; } /* * write len bytes of data into addr (32B aligned address) * If data is NULL, clear len byte of memory to zero. */ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data, struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp) { int ret; if (!rdev->lldi.ulptx_memwrite_dsgl || !use_dsgl) { ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb, wr_waitp); goto out; } if (len <= inline_threshold) { ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb, wr_waitp); goto out; } ret = _c4iw_write_mem_dma(rdev, addr, len, data, skb, wr_waitp); if (ret) { pr_warn_ratelimited("%s: dma map failure (non fatal)\n", pci_name(rdev->lldi.pdev)); ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb, wr_waitp); } out: return ret; } /* * Build and write a TPT entry. * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size, * pbl_size and pbl_addr * OUT: stag index */ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, u32 *stag, u8 stag_state, u32 pdid, enum fw_ri_stag_type type, enum fw_ri_mem_perms perm, int bind_enabled, u32 zbva, u64 to, u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr, struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp) { int err; struct fw_ri_tpte *tpt; u32 stag_idx; static atomic_t key; if (c4iw_fatal_error(rdev)) return -EIO; tpt = kmalloc(sizeof(*tpt), GFP_KERNEL); if (!tpt) return -ENOMEM; stag_state = stag_state > 0; stag_idx = (*stag) >> 8; if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) { stag_idx = c4iw_get_resource(&rdev->resource.tpt_table); if (!stag_idx) { mutex_lock(&rdev->stats.lock); rdev->stats.stag.fail++; mutex_unlock(&rdev->stats.lock); kfree(tpt); return -ENOMEM; } mutex_lock(&rdev->stats.lock); rdev->stats.stag.cur += 32; if (rdev->stats.stag.cur > rdev->stats.stag.max) rdev->stats.stag.max = rdev->stats.stag.cur; mutex_unlock(&rdev->stats.lock); *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff); } pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n", stag_state, type, pdid, stag_idx); /* write TPT entry */ if (reset_tpt_entry) memset(tpt, 0, sizeof(*tpt)); else { tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F | FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) | FW_RI_TPTE_STAGSTATE_V(stag_state) | FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid)); tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) | (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) | FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO : FW_RI_VA_BASED_TO))| FW_RI_TPTE_PS_V(page_size)); tpt->nosnoop_pbladdr = !pbl_size ? 
0 : cpu_to_be32( FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3)); tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL)); tpt->va_hi = cpu_to_be32((u32)(to >> 32)); tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL)); tpt->dca_mwbcnt_pstag = cpu_to_be32(0); tpt->len_hi = cpu_to_be32((u32)(len >> 32)); } err = write_adapter_mem(rdev, stag_idx + (rdev->lldi.vr->stag.start >> 5), sizeof(*tpt), tpt, skb, wr_waitp); if (reset_tpt_entry) { c4iw_put_resource(&rdev->resource.tpt_table, stag_idx); mutex_lock(&rdev->stats.lock); rdev->stats.stag.cur -= 32; mutex_unlock(&rdev->stats.lock); } kfree(tpt); return err; } static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl, u32 pbl_addr, u32 pbl_size, struct c4iw_wr_wait *wr_waitp) { int err; pr_debug("*pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n", pbl_addr, rdev->lldi.vr->pbl.start, pbl_size); err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL, wr_waitp); return err; } static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size, u32 pbl_addr, struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp) { return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, pbl_size, pbl_addr, skb, wr_waitp); } static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr, struct c4iw_wr_wait *wr_waitp) { *stag = T4_STAG_UNSET; return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0, 0UL, 0, 0, pbl_size, pbl_addr, NULL, wr_waitp); } static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag) { u32 mmid; mhp->attr.state = 1; mhp->attr.stag = stag; mmid = stag >> 8; mhp->ibmr.rkey = mhp->ibmr.lkey = stag; mhp->ibmr.length = mhp->attr.len; mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12); pr_debug("mmid 0x%x mhp %p\n", mmid, mhp); return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL); } static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, struct c4iw_mr *mhp, int shift) { u32 stag = T4_STAG_UNSET; int ret; ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid, FW_RI_STAG_NSMR, mhp->attr.len ? mhp->attr.perms : 0, mhp->attr.mw_bind_enable, mhp->attr.zbva, mhp->attr.va_fbo, mhp->attr.len ? 
mhp->attr.len : -1, shift - 12, mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL, mhp->wr_waitp); if (ret) return ret; ret = finish_mem_reg(mhp, stag); if (ret) { dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp); mhp->dereg_skb = NULL; } return ret; } static int alloc_pbl(struct c4iw_mr *mhp, int npages) { mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev, npages << 3); if (!mhp->attr.pbl_addr) return -ENOMEM; mhp->attr.pbl_size = npages; return 0; } struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc) { struct c4iw_dev *rhp; struct c4iw_pd *php; struct c4iw_mr *mhp; int ret; u32 stag = T4_STAG_UNSET; pr_debug("ib_pd %p\n", pd); php = to_c4iw_pd(pd); rhp = php->rhp; mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) return ERR_PTR(-ENOMEM); mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); if (!mhp->wr_waitp) { ret = -ENOMEM; goto err_free_mhp; } c4iw_init_wr_wait(mhp->wr_waitp); mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL); if (!mhp->dereg_skb) { ret = -ENOMEM; goto err_free_wr_wait; } mhp->rhp = rhp; mhp->attr.pdid = php->pdid; mhp->attr.perms = c4iw_ib_to_tpt_access(acc); mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND; mhp->attr.zbva = 0; mhp->attr.va_fbo = 0; mhp->attr.page_size = 0; mhp->attr.len = ~0ULL; mhp->attr.pbl_size = 0; ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, FW_RI_STAG_NSMR, mhp->attr.perms, mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0, NULL, mhp->wr_waitp); if (ret) goto err_free_skb; ret = finish_mem_reg(mhp, stag); if (ret) goto err_dereg_mem; return &mhp->ibmr; err_dereg_mem: dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp); err_free_skb: kfree_skb(mhp->dereg_skb); err_free_wr_wait: c4iw_put_wr_wait(mhp->wr_waitp); err_free_mhp: kfree(mhp); return ERR_PTR(ret); } struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt, int acc, struct ib_udata *udata) { __be64 *pages; int shift, n, i; int err = -ENOMEM; struct ib_block_iter biter; struct c4iw_dev *rhp; struct c4iw_pd *php; struct c4iw_mr *mhp; pr_debug("ib_pd %p\n", pd); if (length == ~0ULL) return ERR_PTR(-EINVAL); if ((length + start) < start) return ERR_PTR(-EINVAL); php = to_c4iw_pd(pd); rhp = php->rhp; if (mr_exceeds_hw_limits(rhp, length)) return ERR_PTR(-EINVAL); mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) return ERR_PTR(-ENOMEM); mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); if (!mhp->wr_waitp) goto err_free_mhp; mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL); if (!mhp->dereg_skb) goto err_free_wr_wait; mhp->rhp = rhp; mhp->umem = ib_umem_get(pd->device, start, length, acc); if (IS_ERR(mhp->umem)) goto err_free_skb; shift = PAGE_SHIFT; n = ib_umem_num_dma_blocks(mhp->umem, 1 << shift); err = alloc_pbl(mhp, n); if (err) goto err_umem_release; pages = (__be64 *) __get_free_page(GFP_KERNEL); if (!pages) { err = -ENOMEM; goto err_pbl_free; } i = n = 0; rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) { pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter)); if (i == PAGE_SIZE / sizeof(*pages)) { err = write_pbl(&mhp->rhp->rdev, pages, mhp->attr.pbl_addr + (n << 3), i, mhp->wr_waitp); if (err) goto pbl_done; n += i; i = 0; } } if (i) err = write_pbl(&mhp->rhp->rdev, pages, mhp->attr.pbl_addr + (n << 3), i, mhp->wr_waitp); pbl_done: free_page((unsigned long) pages); if (err) goto err_pbl_free; mhp->attr.pdid = php->pdid; mhp->attr.zbva = 0; mhp->attr.perms = 
c4iw_ib_to_tpt_access(acc); mhp->attr.va_fbo = virt; mhp->attr.page_size = shift - 12; mhp->attr.len = length; err = register_mem(rhp, php, mhp, shift); if (err) goto err_pbl_free; return &mhp->ibmr; err_pbl_free: c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, mhp->attr.pbl_size << 3); err_umem_release: ib_umem_release(mhp->umem); err_free_skb: kfree_skb(mhp->dereg_skb); err_free_wr_wait: c4iw_put_wr_wait(mhp->wr_waitp); err_free_mhp: kfree(mhp); return ERR_PTR(err); } struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) { struct c4iw_dev *rhp; struct c4iw_pd *php; struct c4iw_mr *mhp; u32 mmid; u32 stag = 0; int ret = 0; int length = roundup(max_num_sg * sizeof(u64), 32); php = to_c4iw_pd(pd); rhp = php->rhp; if (mr_type != IB_MR_TYPE_MEM_REG || max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl && use_dsgl)) return ERR_PTR(-EINVAL); mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) { ret = -ENOMEM; goto err; } mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); if (!mhp->wr_waitp) { ret = -ENOMEM; goto err_free_mhp; } c4iw_init_wr_wait(mhp->wr_waitp); mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev, length, &mhp->mpl_addr, GFP_KERNEL); if (!mhp->mpl) { ret = -ENOMEM; goto err_free_wr_wait; } mhp->max_mpl_len = length; mhp->rhp = rhp; ret = alloc_pbl(mhp, max_num_sg); if (ret) goto err_free_dma; mhp->attr.pbl_size = max_num_sg; ret = allocate_stag(&rhp->rdev, &stag, php->pdid, mhp->attr.pbl_size, mhp->attr.pbl_addr, mhp->wr_waitp); if (ret) goto err_free_pbl; mhp->attr.pdid = php->pdid; mhp->attr.type = FW_RI_STAG_NSMR; mhp->attr.stag = stag; mhp->attr.state = 0; mmid = (stag) >> 8; mhp->ibmr.rkey = mhp->ibmr.lkey = stag; if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) { ret = -ENOMEM; goto err_dereg; } pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag); return &(mhp->ibmr); err_dereg: dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size, mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp); err_free_pbl: c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, mhp->attr.pbl_size << 3); err_free_dma: dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev, mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr); err_free_wr_wait: c4iw_put_wr_wait(mhp->wr_waitp); err_free_mhp: kfree(mhp); err: return ERR_PTR(ret); } static int c4iw_set_page(struct ib_mr *ibmr, u64 addr) { struct c4iw_mr *mhp = to_c4iw_mr(ibmr); if (unlikely(mhp->mpl_len == mhp->attr.pbl_size)) return -ENOMEM; mhp->mpl[mhp->mpl_len++] = addr; return 0; } int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset) { struct c4iw_mr *mhp = to_c4iw_mr(ibmr); mhp->mpl_len = 0; return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page); } int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) { struct c4iw_dev *rhp; struct c4iw_mr *mhp; u32 mmid; pr_debug("ib_mr %p\n", ib_mr); mhp = to_c4iw_mr(ib_mr); rhp = mhp->rhp; mmid = mhp->attr.stag >> 8; xa_erase_irq(&rhp->mrs, mmid); if (mhp->mpl) dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev, mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr); dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp); if (mhp->attr.pbl_size) c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, mhp->attr.pbl_size << 3); if (mhp->kva) kfree((void *) (unsigned long) mhp->kva); ib_umem_release(mhp->umem); pr_debug("mmid 0x%x ptr %p\n", mmid, mhp); c4iw_put_wr_wait(mhp->wr_waitp); kfree(mhp); return 0; } void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey) { 
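/* Look up the MR by its mmid (rkey >> 8) under the xarray lock and clear
 * attr.state so the stag is treated as invalid until it is re-registered.
 */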
	struct c4iw_mr *mhp;
	unsigned long flags;

	xa_lock_irqsave(&rhp->mrs, flags);
	mhp = xa_load(&rhp->mrs, rkey >> 8);
	if (mhp)
		mhp->attr.state = 0;
	xa_unlock_irqrestore(&rhp->mrs, flags);
}
linux-master
drivers/infiniband/hw/cxgb4/mem.c
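One detail that ties write_tpt_entry(), finish_mem_reg() and c4iw_invalidate_mr() together is the stag layout: the upper 24 bits carry the TPT index and the low byte carries a rolling key, so every lookup in the mrs xarray uses stag >> 8 (the "mmid") and reset accounting works purely on the index. The standalone sketch below just re-derives that packing; the helper names are invented for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a TPT index and an 8-bit rolling key, as write_tpt_entry() does with
 * "*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff)". */
static uint32_t pack_stag(uint32_t stag_idx, uint8_t key)
{
	return (stag_idx << 8) | key;
}

/* Recover the xarray index ("mmid"), as finish_mem_reg() and
 * c4iw_invalidate_mr() do with stag >> 8 / rkey >> 8. */
static uint32_t stag_to_mmid(uint32_t stag)
{
	return stag >> 8;
}

int main(void)
{
	uint32_t stag = pack_stag(0x1234, 0xab);

	assert(stag == 0x1234abu);
	assert(stag_to_mmid(stag) == 0x1234);
	printf("stag %#x -> mmid %#x, key %#x\n", stag, stag_to_mmid(stag), stag & 0xff);
	return 0;
}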
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <rdma/uverbs_ioctl.h> #include "iw_cxgb4.h" static int db_delay_usecs = 1; module_param(db_delay_usecs, int, 0644); MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain"); static int ocqp_support = 1; module_param(ocqp_support, int, 0644); MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)"); int db_fc_threshold = 1000; module_param(db_fc_threshold, int, 0644); MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers" " automatic db flow control mode (default = 1000)"); int db_coalescing_threshold; module_param(db_coalescing_threshold, int, 0644); MODULE_PARM_DESC(db_coalescing_threshold, "QP count/threshold that triggers" " disabling db coalescing (default = 0)"); static int max_fr_immd = T4_MAX_FR_IMMD; module_param(max_fr_immd, int, 0644); MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate"); static int alloc_ird(struct c4iw_dev *dev, u32 ird) { int ret = 0; xa_lock_irq(&dev->qps); if (ird <= dev->avail_ird) dev->avail_ird -= ird; else ret = -ENOMEM; xa_unlock_irq(&dev->qps); if (ret) dev_warn(&dev->rdev.lldi.pdev->dev, "device IRD resources exhausted\n"); return ret; } static void free_ird(struct c4iw_dev *dev, int ird) { xa_lock_irq(&dev->qps); dev->avail_ird += ird; xa_unlock_irq(&dev->qps); } static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state) { unsigned long flag; spin_lock_irqsave(&qhp->lock, flag); qhp->attr.state = state; spin_unlock_irqrestore(&qhp->lock, flag); } static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) { c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize); } static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) { dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue, dma_unmap_addr(sq, mapping)); } static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) { if (t4_sq_onchip(sq)) dealloc_oc_sq(rdev, sq); else dealloc_host_sq(rdev, sq); } static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) { if (!ocqp_support || !ocqp_supported(&rdev->lldi)) return -ENOSYS; sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize); if (!sq->dma_addr) return -ENOMEM; 
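/* dma_addr is an offset-style address inside the on-chip queue region;
 * translate it into the memory-window physical address and the kernel
 * virtual mapping before flagging the SQ as on-chip. */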
sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr - rdev->lldi.vr->ocq.start; sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr - rdev->lldi.vr->ocq.start); sq->flags |= T4_SQ_ONCHIP; return 0; } static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) { sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize, &(sq->dma_addr), GFP_KERNEL); if (!sq->queue) return -ENOMEM; sq->phys_addr = virt_to_phys(sq->queue); dma_unmap_addr_set(sq, mapping, sq->dma_addr); return 0; } static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user) { int ret = -ENOSYS; if (user) ret = alloc_oc_sq(rdev, sq); if (ret) ret = alloc_host_sq(rdev, sq); return ret; } static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct c4iw_dev_ucontext *uctx, int has_rq) { /* * uP clears EQ contexts when the connection exits rdma mode, * so no need to post a RESET WR for these EQs. */ dealloc_sq(rdev, &wq->sq); kfree(wq->sq.sw_sq); c4iw_put_qpid(rdev, wq->sq.qid, uctx); if (has_rq) { dma_free_coherent(&rdev->lldi.pdev->dev, wq->rq.memsize, wq->rq.queue, dma_unmap_addr(&wq->rq, mapping)); c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); kfree(wq->rq.sw_rq); c4iw_put_qpid(rdev, wq->rq.qid, uctx); } return 0; } /* * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL, * then this is a user mapping so compute the page-aligned physical address * for mapping. */ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid, enum cxgb4_bar2_qtype qtype, unsigned int *pbar2_qid, u64 *pbar2_pa) { u64 bar2_qoffset; int ret; ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype, pbar2_pa ? 1 : 0, &bar2_qoffset, pbar2_qid); if (ret) return NULL; if (pbar2_pa) *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK; if (is_t4(rdev->lldi.adapter_type)) return NULL; return rdev->bar2_kva + bar2_qoffset; } static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct t4_cq *rcq, struct t4_cq *scq, struct c4iw_dev_ucontext *uctx, struct c4iw_wr_wait *wr_waitp, int need_rq) { int user = (uctx != &rdev->uctx); struct fw_ri_res_wr *res_wr; struct fw_ri_res *res; int wr_len; struct sk_buff *skb; int ret = 0; int eqsize; wq->sq.qid = c4iw_get_qpid(rdev, uctx); if (!wq->sq.qid) return -ENOMEM; if (need_rq) { wq->rq.qid = c4iw_get_qpid(rdev, uctx); if (!wq->rq.qid) { ret = -ENOMEM; goto free_sq_qid; } } if (!user) { wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq), GFP_KERNEL); if (!wq->sq.sw_sq) { ret = -ENOMEM; goto free_rq_qid;//FIXME } if (need_rq) { wq->rq.sw_rq = kcalloc(wq->rq.size, sizeof(*wq->rq.sw_rq), GFP_KERNEL); if (!wq->rq.sw_rq) { ret = -ENOMEM; goto free_sw_sq; } } } if (need_rq) { /* * RQT must be a power of 2 and at least 16 deep. 
*/ wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16)); wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size); if (!wq->rq.rqt_hwaddr) { ret = -ENOMEM; goto free_sw_rq; } } ret = alloc_sq(rdev, &wq->sq, user); if (ret) goto free_hwaddr; memset(wq->sq.queue, 0, wq->sq.memsize); dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); if (need_rq) { wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->rq.memsize, &wq->rq.dma_addr, GFP_KERNEL); if (!wq->rq.queue) { ret = -ENOMEM; goto free_sq; } pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n", wq->sq.queue, (unsigned long long)virt_to_phys(wq->sq.queue), wq->rq.queue, (unsigned long long)virt_to_phys(wq->rq.queue)); dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr); } wq->db = rdev->lldi.db_reg; wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, CXGB4_BAR2_QTYPE_EGRESS, &wq->sq.bar2_qid, user ? &wq->sq.bar2_pa : NULL); if (need_rq) wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, CXGB4_BAR2_QTYPE_EGRESS, &wq->rq.bar2_qid, user ? &wq->rq.bar2_pa : NULL); /* * User mode must have bar2 access. */ if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) { pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n", pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); ret = -EINVAL; goto free_dma; } wq->rdev = rdev; wq->rq.msn = 1; /* build fw_ri_res_wr */ wr_len = sizeof(*res_wr) + 2 * sizeof(*res); if (need_rq) wr_len += sizeof(*res); skb = alloc_skb(wr_len, GFP_KERNEL); if (!skb) { ret = -ENOMEM; goto free_dma; } set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); res_wr = __skb_put_zero(skb, wr_len); res_wr->op_nres = cpu_to_be32( FW_WR_OP_V(FW_RI_RES_WR) | FW_RI_RES_WR_NRES_V(need_rq ? 2 : 1) | FW_WR_COMPL_F); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); res_wr->cookie = (uintptr_t)wr_waitp; res = res_wr->res; res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; res->u.sqrq.op = FW_RI_RES_OP_WRITE; /* * eqsize is the number of 64B entries plus the status page size. */ eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + rdev->hw_queue.t4_eq_status_entries; res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */ FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */ FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */ (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) | FW_RI_RES_WR_IQID_V(scq->cqid)); res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( FW_RI_RES_WR_DCAEN_V(0) | FW_RI_RES_WR_DCACPU_V(0) | FW_RI_RES_WR_FBMIN_V(2) | (t4_sq_onchip(&wq->sq) ? 
FW_RI_RES_WR_FBMAX_V(2) : FW_RI_RES_WR_FBMAX_V(3)) | FW_RI_RES_WR_CIDXFTHRESHO_V(0) | FW_RI_RES_WR_CIDXFTHRESH_V(0) | FW_RI_RES_WR_EQSIZE_V(eqsize)); res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid); res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr); if (need_rq) { res++; res->u.sqrq.restype = FW_RI_RES_TYPE_RQ; res->u.sqrq.op = FW_RI_RES_OP_WRITE; /* * eqsize is the number of 64B entries plus the status page size */ eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + rdev->hw_queue.t4_eq_status_entries; res->u.sqrq.fetchszm_to_iqid = /* no host cidx updates */ cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) | /* don't keep in chip cache */ FW_RI_RES_WR_CPRIO_V(0) | /* set by uP at ri_init time */ FW_RI_RES_WR_PCIECHN_V(0) | FW_RI_RES_WR_IQID_V(rcq->cqid)); res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) | FW_RI_RES_WR_DCACPU_V(0) | FW_RI_RES_WR_FBMIN_V(2) | FW_RI_RES_WR_FBMAX_V(3) | FW_RI_RES_WR_CIDXFTHRESHO_V(0) | FW_RI_RES_WR_CIDXFTHRESH_V(0) | FW_RI_RES_WR_EQSIZE_V(eqsize)); res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid); res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr); } c4iw_init_wr_wait(wr_waitp); ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__); if (ret) goto free_dma; pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n", wq->sq.qid, wq->rq.qid, wq->db, wq->sq.bar2_va, wq->rq.bar2_va); return 0; free_dma: if (need_rq) dma_free_coherent(&rdev->lldi.pdev->dev, wq->rq.memsize, wq->rq.queue, dma_unmap_addr(&wq->rq, mapping)); free_sq: dealloc_sq(rdev, &wq->sq); free_hwaddr: if (need_rq) c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); free_sw_rq: if (need_rq) kfree(wq->rq.sw_rq); free_sw_sq: kfree(wq->sq.sw_sq); free_rq_qid: if (need_rq) c4iw_put_qpid(rdev, wq->rq.qid, uctx); free_sq_qid: c4iw_put_qpid(rdev, wq->sq.qid, uctx); return ret; } static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp, const struct ib_send_wr *wr, int max, u32 *plenp) { u8 *dstp, *srcp; u32 plen = 0; int i; int rem, len; dstp = (u8 *)immdp->data; for (i = 0; i < wr->num_sge; i++) { if ((plen + wr->sg_list[i].length) > max) return -EMSGSIZE; srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; plen += wr->sg_list[i].length; rem = wr->sg_list[i].length; while (rem) { if (dstp == (u8 *)&sq->queue[sq->size]) dstp = (u8 *)sq->queue; if (rem <= (u8 *)&sq->queue[sq->size] - dstp) len = rem; else len = (u8 *)&sq->queue[sq->size] - dstp; memcpy(dstp, srcp, len); dstp += len; srcp += len; rem -= len; } } len = roundup(plen + sizeof(*immdp), 16) - (plen + sizeof(*immdp)); if (len) memset(dstp, 0, len); immdp->op = FW_RI_DATA_IMMD; immdp->r1 = 0; immdp->r2 = 0; immdp->immdlen = cpu_to_be32(plen); *plenp = plen; return 0; } static int build_isgl(__be64 *queue_start, __be64 *queue_end, struct fw_ri_isgl *isglp, struct ib_sge *sg_list, int num_sge, u32 *plenp) { int i; u32 plen = 0; __be64 *flitp; if ((__be64 *)isglp == queue_end) isglp = (struct fw_ri_isgl *)queue_start; flitp = (__be64 *)isglp->sge; for (i = 0; i < num_sge; i++) { if ((plen + sg_list[i].length) < plen) return -EMSGSIZE; plen += sg_list[i].length; *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) | sg_list[i].length); if (++flitp == queue_end) flitp = queue_start; *flitp = cpu_to_be64(sg_list[i].addr); if (++flitp == queue_end) flitp = queue_start; } *flitp = (__force __be64)0; isglp->op = FW_RI_DATA_ISGL; isglp->r1 = 0; isglp->nsge = cpu_to_be16(num_sge); isglp->r2 = 0; if (plenp) *plenp = plen; return 0; } static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, const struct ib_send_wr *wr, u8 
*len16) { u32 plen; int size; int ret; if (wr->num_sge > T4_MAX_SEND_SGE) return -EINVAL; switch (wr->opcode) { case IB_WR_SEND: if (wr->send_flags & IB_SEND_SOLICITED) wqe->send.sendop_pkd = cpu_to_be32( FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE)); else wqe->send.sendop_pkd = cpu_to_be32( FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND)); wqe->send.stag_inv = 0; break; case IB_WR_SEND_WITH_INV: if (wr->send_flags & IB_SEND_SOLICITED) wqe->send.sendop_pkd = cpu_to_be32( FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV)); else wqe->send.sendop_pkd = cpu_to_be32( FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV)); wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); break; default: return -EINVAL; } wqe->send.r3 = 0; wqe->send.r4 = 0; plen = 0; if (wr->num_sge) { if (wr->send_flags & IB_SEND_INLINE) { ret = build_immd(sq, wqe->send.u.immd_src, wr, T4_MAX_SEND_INLINE, &plen); if (ret) return ret; size = sizeof(wqe->send) + sizeof(struct fw_ri_immd) + plen; } else { ret = build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size], wqe->send.u.isgl_src, wr->sg_list, wr->num_sge, &plen); if (ret) return ret; size = sizeof(wqe->send) + sizeof(struct fw_ri_isgl) + wr->num_sge * sizeof(struct fw_ri_sge); } } else { wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD; wqe->send.u.immd_src[0].r1 = 0; wqe->send.u.immd_src[0].r2 = 0; wqe->send.u.immd_src[0].immdlen = 0; size = sizeof(wqe->send) + sizeof(struct fw_ri_immd); plen = 0; } *len16 = DIV_ROUND_UP(size, 16); wqe->send.plen = cpu_to_be32(plen); return 0; } static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16) { u32 plen; int size; int ret; if (wr->num_sge > T4_MAX_SEND_SGE) return -EINVAL; /* * iWARP protocol supports 64 bit immediate data but rdma api * limits it to 32bit. */ if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) wqe->write.iw_imm_data.ib_imm_data.imm_data32 = wr->ex.imm_data; else wqe->write.iw_imm_data.ib_imm_data.imm_data32 = 0; wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey); wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr); if (wr->num_sge) { if (wr->send_flags & IB_SEND_INLINE) { ret = build_immd(sq, wqe->write.u.immd_src, wr, T4_MAX_WRITE_INLINE, &plen); if (ret) return ret; size = sizeof(wqe->write) + sizeof(struct fw_ri_immd) + plen; } else { ret = build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size], wqe->write.u.isgl_src, wr->sg_list, wr->num_sge, &plen); if (ret) return ret; size = sizeof(wqe->write) + sizeof(struct fw_ri_isgl) + wr->num_sge * sizeof(struct fw_ri_sge); } } else { wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD; wqe->write.u.immd_src[0].r1 = 0; wqe->write.u.immd_src[0].r2 = 0; wqe->write.u.immd_src[0].immdlen = 0; size = sizeof(wqe->write) + sizeof(struct fw_ri_immd); plen = 0; } *len16 = DIV_ROUND_UP(size, 16); wqe->write.plen = cpu_to_be32(plen); return 0; } static void build_immd_cmpl(struct t4_sq *sq, struct fw_ri_immd_cmpl *immdp, struct ib_send_wr *wr) { memcpy((u8 *)immdp->data, (u8 *)(uintptr_t)wr->sg_list->addr, 16); memset(immdp->r1, 0, 6); immdp->op = FW_RI_DATA_IMMD; immdp->immdlen = 16; } static void build_rdma_write_cmpl(struct t4_sq *sq, struct fw_ri_rdma_write_cmpl_wr *wcwr, const struct ib_send_wr *wr, u8 *len16) { u32 plen; int size; /* * This code assumes the struct fields preceding the write isgl * fit in one 64B WR slot. This is because the WQE is built * directly in the dma queue, and wrapping is only handled * by the code buildling sgls. IE the "fixed part" of the wr * structs must all fit in 64B. 
The WQE build code should probably be * redesigned to avoid this restriction, but for now just add * the BUILD_BUG_ON() to catch if this WQE struct gets too big. */ BUILD_BUG_ON(offsetof(struct fw_ri_rdma_write_cmpl_wr, u) > 64); wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey); wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr); if (wr->next->opcode == IB_WR_SEND) wcwr->stag_inv = 0; else wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey); wcwr->r2 = 0; wcwr->r3 = 0; /* SEND_INV SGL */ if (wr->next->send_flags & IB_SEND_INLINE) build_immd_cmpl(sq, &wcwr->u_cmpl.immd_src, wr->next); else build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size], &wcwr->u_cmpl.isgl_src, wr->next->sg_list, 1, NULL); /* WRITE SGL */ build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size], wcwr->u.isgl_src, wr->sg_list, wr->num_sge, &plen); size = sizeof(*wcwr) + sizeof(struct fw_ri_isgl) + wr->num_sge * sizeof(struct fw_ri_sge); wcwr->plen = cpu_to_be32(plen); *len16 = DIV_ROUND_UP(size, 16); } static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16) { if (wr->num_sge > 1) return -EINVAL; if (wr->num_sge && wr->sg_list[0].length) { wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey); wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr >> 32)); wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr); wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey); wqe->read.plen = cpu_to_be32(wr->sg_list[0].length); wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr >> 32)); wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr)); } else { wqe->read.stag_src = cpu_to_be32(2); wqe->read.to_src_hi = 0; wqe->read.to_src_lo = 0; wqe->read.stag_sink = cpu_to_be32(2); wqe->read.plen = 0; wqe->read.to_sink_hi = 0; wqe->read.to_sink_lo = 0; } wqe->read.r2 = 0; wqe->read.r5 = 0; *len16 = DIV_ROUND_UP(sizeof(wqe->read), 16); return 0; } static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr) { bool send_signaled = (wr->next->send_flags & IB_SEND_SIGNALED) || qhp->sq_sig_all; bool write_signaled = (wr->send_flags & IB_SEND_SIGNALED) || qhp->sq_sig_all; struct t4_swsqe *swsqe; union t4_wr *wqe; u16 write_wrid; u8 len16; u16 idx; /* * The sw_sq entries still look like a WRITE and a SEND and consume * 2 slots. The FW WR, however, will be a single uber-WR. */ wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16); /* WRITE swsqe */ swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; swsqe->opcode = FW_RI_RDMA_WRITE; swsqe->idx = qhp->wq.sq.pidx; swsqe->complete = 0; swsqe->signaled = write_signaled; swsqe->flushed = 0; swsqe->wr_id = wr->wr_id; if (c4iw_wr_log) { swsqe->sge_ts = cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]); swsqe->host_time = ktime_get(); } write_wrid = qhp->wq.sq.pidx; /* just bump the sw_sq */ qhp->wq.sq.in_use++; if (++qhp->wq.sq.pidx == qhp->wq.sq.size) qhp->wq.sq.pidx = 0; /* SEND_WITH_INV swsqe */ swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; if (wr->next->opcode == IB_WR_SEND) swsqe->opcode = FW_RI_SEND; else swsqe->opcode = FW_RI_SEND_WITH_INV; swsqe->idx = qhp->wq.sq.pidx; swsqe->complete = 0; swsqe->signaled = send_signaled; swsqe->flushed = 0; swsqe->wr_id = wr->next->wr_id; if (c4iw_wr_log) { swsqe->sge_ts = cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]); swsqe->host_time = ktime_get(); } wqe->write_cmpl.flags_send = send_signaled ? 
FW_RI_COMPLETION_FLAG : 0; wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx; init_wr_hdr(wqe, write_wrid, FW_RI_RDMA_WRITE_CMPL_WR, write_signaled ? FW_RI_COMPLETION_FLAG : 0, len16); t4_sq_produce(&qhp->wq, len16); idx = DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE); t4_ring_sq_db(&qhp->wq, idx, wqe); } static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, const struct ib_recv_wr *wr, u8 *len16) { int ret; ret = build_isgl((__be64 *)qhp->wq.rq.queue, (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size], &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); if (ret) return ret; *len16 = DIV_ROUND_UP( sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16); return 0; } static int build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr, u8 *len16) { int ret; ret = build_isgl((__be64 *)wqe, (__be64 *)(wqe + 1), &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); if (ret) return ret; *len16 = DIV_ROUND_UP(sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16); return 0; } static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr, const struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16) { __be64 *p = (__be64 *)fr->pbl; fr->r2 = cpu_to_be32(0); fr->stag = cpu_to_be32(mhp->ibmr.rkey); fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F | FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) | FW_RI_TPTE_STAGSTATE_V(1) | FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) | FW_RI_TPTE_PDID_V(mhp->attr.pdid)); fr->tpte.locread_to_qpid = cpu_to_be32( FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) | FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) | FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12)); fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V( PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3)); fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0); fr->tpte.len_hi = cpu_to_be32(0); fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length); fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32); fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff); p[0] = cpu_to_be64((u64)mhp->mpl[0]); p[1] = cpu_to_be64((u64)mhp->mpl[1]); *len16 = DIV_ROUND_UP(sizeof(*fr), 16); } static int build_memreg(struct t4_sq *sq, union t4_wr *wqe, const struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16, bool dsgl_supported) { struct fw_ri_immd *imdp; __be64 *p; int i; int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32); int rem; if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl)) return -EINVAL; wqe->fr.qpbinde_to_dcacpu = 0; wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12; wqe->fr.addr_type = FW_RI_VA_BASED_TO; wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access); wqe->fr.len_hi = 0; wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length); wqe->fr.stag = cpu_to_be32(wr->key); wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32); wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff); if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) { struct fw_ri_dsgl *sglp; for (i = 0; i < mhp->mpl_len; i++) mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]); sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1); sglp->op = FW_RI_DATA_DSGL; sglp->r1 = 0; sglp->nsge = cpu_to_be16(1); sglp->addr0 = cpu_to_be64(mhp->mpl_addr); sglp->len0 = cpu_to_be32(pbllen); *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16); } else { imdp = (struct fw_ri_immd *)(&wqe->fr + 1); imdp->op = FW_RI_DATA_IMMD; imdp->r1 = 0; imdp->r2 = 0; imdp->immdlen = cpu_to_be32(pbllen); p = (__be64 *)(imdp + 1); rem = pbllen; for (i = 0; i < mhp->mpl_len; i++) { *p = cpu_to_be64((u64)mhp->mpl[i]); rem -= sizeof(*p); if (++p == 
(__be64 *)&sq->queue[sq->size]) p = (__be64 *)sq->queue; } while (rem) { *p = 0; rem -= sizeof(*p); if (++p == (__be64 *)&sq->queue[sq->size]) p = (__be64 *)sq->queue; } *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp) + pbllen, 16); } return 0; } static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16) { wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); wqe->inv.r2 = 0; *len16 = DIV_ROUND_UP(sizeof(wqe->inv), 16); return 0; } void c4iw_qp_add_ref(struct ib_qp *qp) { pr_debug("ib_qp %p\n", qp); refcount_inc(&to_c4iw_qp(qp)->qp_refcnt); } void c4iw_qp_rem_ref(struct ib_qp *qp) { pr_debug("ib_qp %p\n", qp); if (refcount_dec_and_test(&to_c4iw_qp(qp)->qp_refcnt)) complete(&to_c4iw_qp(qp)->qp_rel_comp); } static void add_to_fc_list(struct list_head *head, struct list_head *entry) { if (list_empty(entry)) list_add_tail(entry, head); } static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc) { unsigned long flags; xa_lock_irqsave(&qhp->rhp->qps, flags); spin_lock(&qhp->lock); if (qhp->rhp->db_state == NORMAL) t4_ring_sq_db(&qhp->wq, inc, NULL); else { add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); qhp->wq.sq.wq_pidx_inc += inc; } spin_unlock(&qhp->lock); xa_unlock_irqrestore(&qhp->rhp->qps, flags); return 0; } static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) { unsigned long flags; xa_lock_irqsave(&qhp->rhp->qps, flags); spin_lock(&qhp->lock); if (qhp->rhp->db_state == NORMAL) t4_ring_rq_db(&qhp->wq, inc, NULL); else { add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); qhp->wq.rq.wq_pidx_inc += inc; } spin_unlock(&qhp->lock); xa_unlock_irqrestore(&qhp->rhp->qps, flags); return 0; } static int ib_to_fw_opcode(int ib_opcode) { int opcode; switch (ib_opcode) { case IB_WR_SEND_WITH_INV: opcode = FW_RI_SEND_WITH_INV; break; case IB_WR_SEND: opcode = FW_RI_SEND; break; case IB_WR_RDMA_WRITE: opcode = FW_RI_RDMA_WRITE; break; case IB_WR_RDMA_WRITE_WITH_IMM: opcode = FW_RI_WRITE_IMMEDIATE; break; case IB_WR_RDMA_READ: case IB_WR_RDMA_READ_WITH_INV: opcode = FW_RI_READ_REQ; break; case IB_WR_REG_MR: opcode = FW_RI_FAST_REGISTER; break; case IB_WR_LOCAL_INV: opcode = FW_RI_LOCAL_INV; break; default: opcode = -EINVAL; } return opcode; } static int complete_sq_drain_wr(struct c4iw_qp *qhp, const struct ib_send_wr *wr) { struct t4_cqe cqe = {}; struct c4iw_cq *schp; unsigned long flag; struct t4_cq *cq; int opcode; schp = to_c4iw_cq(qhp->ibqp.send_cq); cq = &schp->cq; opcode = ib_to_fw_opcode(wr->opcode); if (opcode < 0) return opcode; cqe.u.drain_cookie = wr->wr_id; cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | CQE_OPCODE_V(opcode) | CQE_TYPE_V(1) | CQE_SWCQE_V(1) | CQE_DRAIN_V(1) | CQE_QPID_V(qhp->wq.sq.qid)); spin_lock_irqsave(&schp->lock, flag); cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); cq->sw_queue[cq->sw_pidx] = cqe; t4_swcq_produce(cq); spin_unlock_irqrestore(&schp->lock, flag); if (t4_clear_cq_armed(&schp->cq)) { spin_lock_irqsave(&schp->comp_handler_lock, flag); (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); spin_unlock_irqrestore(&schp->comp_handler_lock, flag); } return 0; } static int complete_sq_drain_wrs(struct c4iw_qp *qhp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { int ret = 0; while (wr) { ret = complete_sq_drain_wr(qhp, wr); if (ret) { *bad_wr = wr; break; } wr = wr->next; } return ret; } static void complete_rq_drain_wr(struct c4iw_qp *qhp, const struct ib_recv_wr *wr) { struct t4_cqe cqe = {}; struct c4iw_cq *rchp; unsigned long flag; struct t4_cq *cq; rchp 
= to_c4iw_cq(qhp->ibqp.recv_cq); cq = &rchp->cq; cqe.u.drain_cookie = wr->wr_id; cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | CQE_OPCODE_V(FW_RI_SEND) | CQE_TYPE_V(0) | CQE_SWCQE_V(1) | CQE_DRAIN_V(1) | CQE_QPID_V(qhp->wq.sq.qid)); spin_lock_irqsave(&rchp->lock, flag); cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); cq->sw_queue[cq->sw_pidx] = cqe; t4_swcq_produce(cq); spin_unlock_irqrestore(&rchp->lock, flag); if (t4_clear_cq_armed(&rchp->cq)) { spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); } } static void complete_rq_drain_wrs(struct c4iw_qp *qhp, const struct ib_recv_wr *wr) { while (wr) { complete_rq_drain_wr(qhp, wr); wr = wr->next; } } int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { int err = 0; u8 len16 = 0; enum fw_wr_opcodes fw_opcode = 0; enum fw_ri_wr_flags fw_flags; struct c4iw_qp *qhp; struct c4iw_dev *rhp; union t4_wr *wqe = NULL; u32 num_wrs; struct t4_swsqe *swsqe; unsigned long flag; u16 idx = 0; qhp = to_c4iw_qp(ibqp); rhp = qhp->rhp; spin_lock_irqsave(&qhp->lock, flag); /* * If the qp has been flushed, then just insert a special * drain cqe. */ if (qhp->wq.flushed) { spin_unlock_irqrestore(&qhp->lock, flag); err = complete_sq_drain_wrs(qhp, wr, bad_wr); return err; } num_wrs = t4_sq_avail(&qhp->wq); if (num_wrs == 0) { spin_unlock_irqrestore(&qhp->lock, flag); *bad_wr = wr; return -ENOMEM; } /* * Fastpath for NVMe-oF target WRITE + SEND_WITH_INV wr chain which is * the response for small NVMEe-oF READ requests. If the chain is * exactly a WRITE->SEND_WITH_INV or a WRITE->SEND and the sgl depths * and lengths meet the requirements of the fw_ri_write_cmpl_wr work * request, then build and post the write_cmpl WR. If any of the tests * below are not true, then we continue on with the tradtional WRITE * and SEND WRs. 
*/ if (qhp->rhp->rdev.lldi.write_cmpl_support && CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >= CHELSIO_T5 && wr && wr->next && !wr->next->next && wr->opcode == IB_WR_RDMA_WRITE && wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL && (wr->next->opcode == IB_WR_SEND || wr->next->opcode == IB_WR_SEND_WITH_INV) && wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE && wr->next->num_sge == 1 && num_wrs >= 2) { post_write_cmpl(qhp, wr); spin_unlock_irqrestore(&qhp->lock, flag); return 0; } while (wr) { if (num_wrs == 0) { err = -ENOMEM; *bad_wr = wr; break; } wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); fw_flags = 0; if (wr->send_flags & IB_SEND_SOLICITED) fw_flags |= FW_RI_SOLICITED_EVENT_FLAG; if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all) fw_flags |= FW_RI_COMPLETION_FLAG; swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; switch (wr->opcode) { case IB_WR_SEND_WITH_INV: case IB_WR_SEND: if (wr->send_flags & IB_SEND_FENCE) fw_flags |= FW_RI_READ_FENCE_FLAG; fw_opcode = FW_RI_SEND_WR; if (wr->opcode == IB_WR_SEND) swsqe->opcode = FW_RI_SEND; else swsqe->opcode = FW_RI_SEND_WITH_INV; err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); break; case IB_WR_RDMA_WRITE_WITH_IMM: if (unlikely(!rhp->rdev.lldi.write_w_imm_support)) { err = -EINVAL; break; } fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE; fallthrough; case IB_WR_RDMA_WRITE: fw_opcode = FW_RI_RDMA_WRITE_WR; swsqe->opcode = FW_RI_RDMA_WRITE; err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); break; case IB_WR_RDMA_READ: case IB_WR_RDMA_READ_WITH_INV: fw_opcode = FW_RI_RDMA_READ_WR; swsqe->opcode = FW_RI_READ_REQ; if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) { c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey); fw_flags = FW_RI_RDMA_READ_INVALIDATE; } else { fw_flags = 0; } err = build_rdma_read(wqe, wr, &len16); if (err) break; swsqe->read_len = wr->sg_list[0].length; if (!qhp->wq.sq.oldest_read) qhp->wq.sq.oldest_read = swsqe; break; case IB_WR_REG_MR: { struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr); swsqe->opcode = FW_RI_FAST_REGISTER; if (rhp->rdev.lldi.fr_nsmr_tpte_wr_support && !mhp->attr.state && mhp->mpl_len <= 2) { fw_opcode = FW_RI_FR_NSMR_TPTE_WR; build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr), mhp, &len16); } else { fw_opcode = FW_RI_FR_NSMR_WR; err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), mhp, &len16, rhp->rdev.lldi.ulptx_memwrite_dsgl); if (err) break; } mhp->attr.state = 1; break; } case IB_WR_LOCAL_INV: if (wr->send_flags & IB_SEND_FENCE) fw_flags |= FW_RI_LOCAL_FENCE_FLAG; fw_opcode = FW_RI_INV_LSTAG_WR; swsqe->opcode = FW_RI_LOCAL_INV; err = build_inv_stag(wqe, wr, &len16); c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey); break; default: pr_warn("%s post of type=%d TBD!\n", __func__, wr->opcode); err = -EINVAL; } if (err) { *bad_wr = wr; break; } swsqe->idx = qhp->wq.sq.pidx; swsqe->complete = 0; swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) || qhp->sq_sig_all; swsqe->flushed = 0; swsqe->wr_id = wr->wr_id; if (c4iw_wr_log) { swsqe->sge_ts = cxgb4_read_sge_timestamp( rhp->rdev.lldi.ports[0]); swsqe->host_time = ktime_get(); } init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n", (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, swsqe->opcode, swsqe->read_len); wr = wr->next; num_wrs--; t4_sq_produce(&qhp->wq, len16); idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); } if (!rhp->rdev.status_page->db_off) { t4_ring_sq_db(&qhp->wq, idx, wqe); spin_unlock_irqrestore(&qhp->lock, flag); } 
else { spin_unlock_irqrestore(&qhp->lock, flag); ring_kernel_sq_db(qhp, idx); } return err; } int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { int err = 0; struct c4iw_qp *qhp; union t4_recv_wr *wqe = NULL; u32 num_wrs; u8 len16 = 0; unsigned long flag; u16 idx = 0; qhp = to_c4iw_qp(ibqp); spin_lock_irqsave(&qhp->lock, flag); /* * If the qp has been flushed, then just insert a special * drain cqe. */ if (qhp->wq.flushed) { spin_unlock_irqrestore(&qhp->lock, flag); complete_rq_drain_wrs(qhp, wr); return err; } num_wrs = t4_rq_avail(&qhp->wq); if (num_wrs == 0) { spin_unlock_irqrestore(&qhp->lock, flag); *bad_wr = wr; return -ENOMEM; } while (wr) { if (wr->num_sge > T4_MAX_RECV_SGE) { err = -EINVAL; *bad_wr = wr; break; } wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue + qhp->wq.rq.wq_pidx * T4_EQ_ENTRY_SIZE); if (num_wrs) err = build_rdma_recv(qhp, wqe, wr, &len16); else err = -ENOMEM; if (err) { *bad_wr = wr; break; } qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; if (c4iw_wr_log) { qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts = cxgb4_read_sge_timestamp( qhp->rhp->rdev.lldi.ports[0]); qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time = ktime_get(); } wqe->recv.opcode = FW_RI_RECV_WR; wqe->recv.r1 = 0; wqe->recv.wrid = qhp->wq.rq.pidx; wqe->recv.r2[0] = 0; wqe->recv.r2[1] = 0; wqe->recv.r2[2] = 0; wqe->recv.len16 = len16; pr_debug("cookie 0x%llx pidx %u\n", (unsigned long long)wr->wr_id, qhp->wq.rq.pidx); t4_rq_produce(&qhp->wq, len16); idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); wr = wr->next; num_wrs--; } if (!qhp->rhp->rdev.status_page->db_off) { t4_ring_rq_db(&qhp->wq, idx, wqe); spin_unlock_irqrestore(&qhp->lock, flag); } else { spin_unlock_irqrestore(&qhp->lock, flag); ring_kernel_rq_db(qhp, idx); } return err; } static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe, u64 wr_id, u8 len16) { struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx]; pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u ooo_count %u wr_id 0x%llx pending_cidx %u pending_pidx %u pending_in_use %u\n", __func__, srq->cidx, srq->pidx, srq->wq_pidx, srq->in_use, srq->ooo_count, (unsigned long long)wr_id, srq->pending_cidx, srq->pending_pidx, srq->pending_in_use); pwr->wr_id = wr_id; pwr->len16 = len16; memcpy(&pwr->wqe, wqe, len16 * 16); t4_srq_produce_pending_wr(srq); } int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { union t4_recv_wr *wqe, lwqe; struct c4iw_srq *srq; unsigned long flag; u8 len16 = 0; u16 idx = 0; int err = 0; u32 num_wrs; srq = to_c4iw_srq(ibsrq); spin_lock_irqsave(&srq->lock, flag); num_wrs = t4_srq_avail(&srq->wq); if (num_wrs == 0) { spin_unlock_irqrestore(&srq->lock, flag); return -ENOMEM; } while (wr) { if (wr->num_sge > T4_MAX_RECV_SGE) { err = -EINVAL; *bad_wr = wr; break; } wqe = &lwqe; if (num_wrs) err = build_srq_recv(wqe, wr, &len16); else err = -ENOMEM; if (err) { *bad_wr = wr; break; } wqe->recv.opcode = FW_RI_RECV_WR; wqe->recv.r1 = 0; wqe->recv.wrid = srq->wq.pidx; wqe->recv.r2[0] = 0; wqe->recv.r2[1] = 0; wqe->recv.r2[2] = 0; wqe->recv.len16 = len16; if (srq->wq.ooo_count || srq->wq.pending_in_use || srq->wq.sw_rq[srq->wq.pidx].valid) { defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16); } else { srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id; srq->wq.sw_rq[srq->wq.pidx].valid = 1; c4iw_copy_wr_to_srq(&srq->wq, wqe, len16); pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u wr_id 0x%llx\n", __func__, srq->wq.cidx, srq->wq.pidx, 
srq->wq.wq_pidx, srq->wq.in_use, (unsigned long long)wr->wr_id); t4_srq_produce(&srq->wq, len16); idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE); } wr = wr->next; num_wrs--; } if (idx) t4_ring_srq_db(&srq->wq, idx, len16, wqe); spin_unlock_irqrestore(&srq->lock, flag); return err; } static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type, u8 *ecode) { int status; int tagged; int opcode; int rqtype; int send_inv; if (!err_cqe) { *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA; *ecode = 0; return; } status = CQE_STATUS(err_cqe); opcode = CQE_OPCODE(err_cqe); rqtype = RQ_TYPE(err_cqe); send_inv = (opcode == FW_RI_SEND_WITH_INV) || (opcode == FW_RI_SEND_WITH_SE_INV); tagged = (opcode == FW_RI_RDMA_WRITE) || (rqtype && (opcode == FW_RI_READ_RESP)); switch (status) { case T4_ERR_STAG: if (send_inv) { *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; *ecode = RDMAP_CANT_INV_STAG; } else { *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_INV_STAG; } break; case T4_ERR_PDID: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; if ((opcode == FW_RI_SEND_WITH_INV) || (opcode == FW_RI_SEND_WITH_SE_INV)) *ecode = RDMAP_CANT_INV_STAG; else *ecode = RDMAP_STAG_NOT_ASSOC; break; case T4_ERR_QPID: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_STAG_NOT_ASSOC; break; case T4_ERR_ACCESS: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_ACC_VIOL; break; case T4_ERR_WRAP: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_TO_WRAP; break; case T4_ERR_BOUND: if (tagged) { *layer_type = LAYER_DDP|DDP_TAGGED_ERR; *ecode = DDPT_BASE_BOUNDS; } else { *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_BASE_BOUNDS; } break; case T4_ERR_INVALIDATE_SHARED_MR: case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; *ecode = RDMAP_CANT_INV_STAG; break; case T4_ERR_ECC: case T4_ERR_ECC_PSTAG: case T4_ERR_INTERNAL_ERR: *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA; *ecode = 0; break; case T4_ERR_OUT_OF_RQE: *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; *ecode = DDPU_INV_MSN_NOBUF; break; case T4_ERR_PBL_ADDR_BOUND: *layer_type = LAYER_DDP|DDP_TAGGED_ERR; *ecode = DDPT_BASE_BOUNDS; break; case T4_ERR_CRC: *layer_type = LAYER_MPA|DDP_LLP; *ecode = MPA_CRC_ERR; break; case T4_ERR_MARKER: *layer_type = LAYER_MPA|DDP_LLP; *ecode = MPA_MARKER_ERR; break; case T4_ERR_PDU_LEN_ERR: *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; *ecode = DDPU_MSG_TOOBIG; break; case T4_ERR_DDP_VERSION: if (tagged) { *layer_type = LAYER_DDP|DDP_TAGGED_ERR; *ecode = DDPT_INV_VERS; } else { *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; *ecode = DDPU_INV_VERS; } break; case T4_ERR_RDMA_VERSION: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; *ecode = RDMAP_INV_VERS; break; case T4_ERR_OPCODE: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; *ecode = RDMAP_INV_OPCODE; break; case T4_ERR_DDP_QUEUE_NUM: *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; *ecode = DDPU_INV_QN; break; case T4_ERR_MSN: case T4_ERR_MSN_GAP: case T4_ERR_MSN_RANGE: case T4_ERR_IRD_OVERFLOW: *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; *ecode = DDPU_INV_MSN_RANGE; break; case T4_ERR_TBIT: *layer_type = LAYER_DDP|DDP_LOCAL_CATA; *ecode = 0; break; case T4_ERR_MO: *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; *ecode = DDPU_INV_MO; break; default: *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA; *ecode = 0; break; } } static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, gfp_t gfp) { struct fw_ri_wr *wqe; struct sk_buff *skb; struct terminate_message *term; pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, qhp->ep->hwtid); skb = 
skb_dequeue(&qhp->ep->com.ep_skb_list); if (WARN_ON(!skb)) return; set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); wqe = __skb_put_zero(skb, sizeof(*wqe)); wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR)); wqe->flowid_len16 = cpu_to_be32( FW_WR_FLOWID_V(qhp->ep->hwtid) | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); wqe->u.terminate.type = FW_RI_TYPE_TERMINATE; wqe->u.terminate.immdlen = cpu_to_be32(sizeof(*term)); term = (struct terminate_message *)wqe->u.terminate.termmsg; if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) { term->layer_etype = qhp->attr.layer_etype; term->ecode = qhp->attr.ecode; } else build_term_codes(err_cqe, &term->layer_etype, &term->ecode); c4iw_ofld_send(&qhp->rhp->rdev, skb); } /* * Assumes qhp lock is held. */ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, struct c4iw_cq *schp) { int count; int rq_flushed = 0, sq_flushed; unsigned long flag; pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp); /* locking hierarchy: cqs lock first, then qp lock. */ spin_lock_irqsave(&rchp->lock, flag); if (schp != rchp) spin_lock(&schp->lock); spin_lock(&qhp->lock); if (qhp->wq.flushed) { spin_unlock(&qhp->lock); if (schp != rchp) spin_unlock(&schp->lock); spin_unlock_irqrestore(&rchp->lock, flag); return; } qhp->wq.flushed = 1; t4_set_wq_in_error(&qhp->wq, 0); c4iw_flush_hw_cq(rchp, qhp); if (!qhp->srq) { c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); } if (schp != rchp) c4iw_flush_hw_cq(schp, qhp); sq_flushed = c4iw_flush_sq(qhp); spin_unlock(&qhp->lock); if (schp != rchp) spin_unlock(&schp->lock); spin_unlock_irqrestore(&rchp->lock, flag); if (schp == rchp) { if ((rq_flushed || sq_flushed) && t4_clear_cq_armed(&rchp->cq)) { spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); } } else { if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) { spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); } if (sq_flushed && t4_clear_cq_armed(&schp->cq)) { spin_lock_irqsave(&schp->comp_handler_lock, flag); (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); spin_unlock_irqrestore(&schp->comp_handler_lock, flag); } } } static void flush_qp(struct c4iw_qp *qhp) { struct c4iw_cq *rchp, *schp; unsigned long flag; rchp = to_c4iw_cq(qhp->ibqp.recv_cq); schp = to_c4iw_cq(qhp->ibqp.send_cq); if (qhp->ibqp.uobject) { /* for user qps, qhp->wq.flushed is protected by qhp->mutex */ if (qhp->wq.flushed) return; qhp->wq.flushed = 1; t4_set_wq_in_error(&qhp->wq, 0); t4_set_cq_in_error(&rchp->cq); spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); if (schp != rchp) { t4_set_cq_in_error(&schp->cq); spin_lock_irqsave(&schp->comp_handler_lock, flag); (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); spin_unlock_irqrestore(&schp->comp_handler_lock, flag); } return; } __flush_qp(qhp, rchp, schp); } static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep) { struct fw_ri_wr *wqe; int ret; struct sk_buff *skb; pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid); skb = skb_dequeue(&ep->com.ep_skb_list); if (WARN_ON(!skb)) return -ENOMEM; set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); wqe = __skb_put_zero(skb, 
sizeof(*wqe)); wqe->op_compl = cpu_to_be32( FW_WR_OP_V(FW_RI_INIT_WR) | FW_WR_COMPL_F); wqe->flowid_len16 = cpu_to_be32( FW_WR_FLOWID_V(ep->hwtid) | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); wqe->cookie = (uintptr_t)ep->com.wr_waitp; wqe->u.fini.type = FW_RI_TYPE_FINI; ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp, qhp->ep->hwtid, qhp->wq.sq.qid, __func__); pr_debug("ret %d\n", ret); return ret; } static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init) { pr_debug("p2p_type = %d\n", p2p_type); memset(&init->u, 0, sizeof(init->u)); switch (p2p_type) { case FW_RI_INIT_P2PTYPE_RDMA_WRITE: init->u.write.opcode = FW_RI_RDMA_WRITE_WR; init->u.write.stag_sink = cpu_to_be32(1); init->u.write.to_sink = cpu_to_be64(1); init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD; init->u.write.len16 = DIV_ROUND_UP( sizeof(init->u.write) + sizeof(struct fw_ri_immd), 16); break; case FW_RI_INIT_P2PTYPE_READ_REQ: init->u.write.opcode = FW_RI_RDMA_READ_WR; init->u.read.stag_src = cpu_to_be32(1); init->u.read.to_src_lo = cpu_to_be32(1); init->u.read.stag_sink = cpu_to_be32(1); init->u.read.to_sink_lo = cpu_to_be32(1); init->u.read.len16 = DIV_ROUND_UP(sizeof(init->u.read), 16); break; } } static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) { struct fw_ri_wr *wqe; int ret; struct sk_buff *skb; pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp, qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord); skb = alloc_skb(sizeof(*wqe), GFP_KERNEL); if (!skb) { ret = -ENOMEM; goto out; } ret = alloc_ird(rhp, qhp->attr.max_ird); if (ret) { qhp->attr.max_ird = 0; kfree_skb(skb); goto out; } set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); wqe = __skb_put_zero(skb, sizeof(*wqe)); wqe->op_compl = cpu_to_be32( FW_WR_OP_V(FW_RI_INIT_WR) | FW_WR_COMPL_F); wqe->flowid_len16 = cpu_to_be32( FW_WR_FLOWID_V(qhp->ep->hwtid) | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp; wqe->u.init.type = FW_RI_TYPE_INIT; wqe->u.init.mpareqbit_p2ptype = FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) | FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type); wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE; if (qhp->attr.mpa_attr.recv_marker_enabled) wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE; if (qhp->attr.mpa_attr.xmit_marker_enabled) wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE; if (qhp->attr.mpa_attr.crc_enabled) wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE; wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE | FW_RI_QP_RDMA_WRITE_ENABLE | FW_RI_QP_BIND_ENABLE; if (!qhp->ibqp.uobject) wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE | FW_RI_QP_STAG0_ENABLE; wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq)); wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd); wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid); wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid); if (qhp->srq) { wqe->u.init.rq_eqid = cpu_to_be32(FW_RI_INIT_RQEQID_SRQ | qhp->srq->idx); } else { wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid); wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size); wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr - rhp->rdev.lldi.vr->rq.start); } wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq); wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq); wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord); wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird); wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq); wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq); if (qhp->attr.mpa_attr.initiator) build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); ret = 
c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp, qhp->ep->hwtid, qhp->wq.sq.qid, __func__); if (!ret) goto out; free_ird(rhp, qhp->attr.max_ird); out: pr_debug("ret %d\n", ret); return ret; } int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, enum c4iw_qp_attr_mask mask, struct c4iw_qp_attributes *attrs, int internal) { int ret = 0; struct c4iw_qp_attributes newattr = qhp->attr; int disconnect = 0; int terminate = 0; int abort = 0; int free = 0; struct c4iw_ep *ep = NULL; pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1); mutex_lock(&qhp->mutex); /* Process attr changes if in IDLE */ if (mask & C4IW_QP_ATTR_VALID_MODIFY) { if (qhp->attr.state != C4IW_QP_STATE_IDLE) { ret = -EIO; goto out; } if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ) newattr.enable_rdma_read = attrs->enable_rdma_read; if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE) newattr.enable_rdma_write = attrs->enable_rdma_write; if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND) newattr.enable_bind = attrs->enable_bind; if (mask & C4IW_QP_ATTR_MAX_ORD) { if (attrs->max_ord > c4iw_max_read_depth) { ret = -EINVAL; goto out; } newattr.max_ord = attrs->max_ord; } if (mask & C4IW_QP_ATTR_MAX_IRD) { if (attrs->max_ird > cur_max_read_depth(rhp)) { ret = -EINVAL; goto out; } newattr.max_ird = attrs->max_ird; } qhp->attr = newattr; } if (mask & C4IW_QP_ATTR_SQ_DB) { ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc); goto out; } if (mask & C4IW_QP_ATTR_RQ_DB) { ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc); goto out; } if (!(mask & C4IW_QP_ATTR_NEXT_STATE)) goto out; if (qhp->attr.state == attrs->next_state) goto out; switch (qhp->attr.state) { case C4IW_QP_STATE_IDLE: switch (attrs->next_state) { case C4IW_QP_STATE_RTS: if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) { ret = -EINVAL; goto out; } if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) { ret = -EINVAL; goto out; } qhp->attr.mpa_attr = attrs->mpa_attr; qhp->attr.llp_stream_handle = attrs->llp_stream_handle; qhp->ep = qhp->attr.llp_stream_handle; set_state(qhp, C4IW_QP_STATE_RTS); /* * Ref the endpoint here and deref when we * disassociate the endpoint from the QP. This * happens in CLOSING->IDLE transition or *->ERROR * transition. */ c4iw_get_ep(&qhp->ep->com); ret = rdma_init(rhp, qhp); if (ret) goto err; break; case C4IW_QP_STATE_ERROR: set_state(qhp, C4IW_QP_STATE_ERROR); flush_qp(qhp); break; default: ret = -EINVAL; goto out; } break; case C4IW_QP_STATE_RTS: switch (attrs->next_state) { case C4IW_QP_STATE_CLOSING: t4_set_wq_in_error(&qhp->wq, 0); set_state(qhp, C4IW_QP_STATE_CLOSING); ep = qhp->ep; if (!internal) { abort = 0; disconnect = 1; c4iw_get_ep(&qhp->ep->com); } ret = rdma_fini(rhp, qhp, ep); if (ret) goto err; break; case C4IW_QP_STATE_TERMINATE: t4_set_wq_in_error(&qhp->wq, 0); set_state(qhp, C4IW_QP_STATE_TERMINATE); qhp->attr.layer_etype = attrs->layer_etype; qhp->attr.ecode = attrs->ecode; ep = qhp->ep; if (!internal) { c4iw_get_ep(&ep->com); terminate = 1; disconnect = 1; } else { terminate = qhp->attr.send_term; ret = rdma_fini(rhp, qhp, ep); if (ret) goto err; } break; case C4IW_QP_STATE_ERROR: t4_set_wq_in_error(&qhp->wq, 0); set_state(qhp, C4IW_QP_STATE_ERROR); if (!internal) { disconnect = 1; ep = qhp->ep; c4iw_get_ep(&qhp->ep->com); } goto err; break; default: ret = -EINVAL; goto out; } break; case C4IW_QP_STATE_CLOSING: /* * Allow kernel users to move to ERROR for qp draining. 
*/ if (!internal && (qhp->ibqp.uobject || attrs->next_state != C4IW_QP_STATE_ERROR)) { ret = -EINVAL; goto out; } switch (attrs->next_state) { case C4IW_QP_STATE_IDLE: flush_qp(qhp); set_state(qhp, C4IW_QP_STATE_IDLE); qhp->attr.llp_stream_handle = NULL; c4iw_put_ep(&qhp->ep->com); qhp->ep = NULL; wake_up(&qhp->wait); break; case C4IW_QP_STATE_ERROR: goto err; default: ret = -EINVAL; goto err; } break; case C4IW_QP_STATE_ERROR: if (attrs->next_state != C4IW_QP_STATE_IDLE) { ret = -EINVAL; goto out; } if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) { ret = -EINVAL; goto out; } set_state(qhp, C4IW_QP_STATE_IDLE); break; case C4IW_QP_STATE_TERMINATE: if (!internal) { ret = -EINVAL; goto out; } goto err; break; default: pr_err("%s in a bad state %d\n", __func__, qhp->attr.state); ret = -EINVAL; goto err; break; } goto out; err: pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep, qhp->wq.sq.qid); /* disassociate the LLP connection */ qhp->attr.llp_stream_handle = NULL; if (!ep) ep = qhp->ep; qhp->ep = NULL; set_state(qhp, C4IW_QP_STATE_ERROR); free = 1; abort = 1; flush_qp(qhp); wake_up(&qhp->wait); out: mutex_unlock(&qhp->mutex); if (terminate) post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL); /* * If disconnect is 1, then we need to initiate a disconnect * on the EP. This can be a normal close (RTS->CLOSING) or * an abnormal close (RTS/CLOSING->ERROR). */ if (disconnect) { c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC : GFP_KERNEL); c4iw_put_ep(&ep->com); } /* * If free is 1, then we've disassociated the EP from the QP * and we need to dereference the EP. */ if (free) c4iw_put_ep(&ep->com); pr_debug("exit state %d\n", qhp->attr.state); return ret; } int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) { struct c4iw_dev *rhp; struct c4iw_qp *qhp; struct c4iw_ucontext *ucontext; struct c4iw_qp_attributes attrs; qhp = to_c4iw_qp(ib_qp); rhp = qhp->rhp; ucontext = qhp->ucontext; attrs.next_state = C4IW_QP_STATE_ERROR; if (qhp->attr.state == C4IW_QP_STATE_TERMINATE) c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); else c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); wait_event(qhp->wait, !qhp->ep); xa_lock_irq(&rhp->qps); __xa_erase(&rhp->qps, qhp->wq.sq.qid); if (!list_empty(&qhp->db_fc_entry)) list_del_init(&qhp->db_fc_entry); xa_unlock_irq(&rhp->qps); free_ird(rhp, qhp->attr.max_ird); c4iw_qp_rem_ref(ib_qp); wait_for_completion(&qhp->qp_rel_comp); pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid); pr_debug("qhp %p ucontext %p\n", qhp, ucontext); destroy_qp(&rhp->rdev, &qhp->wq, ucontext ? 
&ucontext->uctx : &rhp->rdev.uctx, !qhp->srq); c4iw_put_wr_wait(qhp->wr_waitp); return 0; } int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs, struct ib_udata *udata) { struct ib_pd *pd = qp->pd; struct c4iw_dev *rhp; struct c4iw_qp *qhp = to_c4iw_qp(qp); struct c4iw_pd *php; struct c4iw_cq *schp; struct c4iw_cq *rchp; struct c4iw_create_qp_resp uresp; unsigned int sqsize, rqsize = 0; struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context( udata, struct c4iw_ucontext, ibucontext); int ret; struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm; struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL; if (attrs->qp_type != IB_QPT_RC || attrs->create_flags) return -EOPNOTSUPP; php = to_c4iw_pd(pd); rhp = php->rhp; schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid); rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid); if (!schp || !rchp) return -EINVAL; if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE) return -EINVAL; if (!attrs->srq) { if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size) return -E2BIG; rqsize = attrs->cap.max_recv_wr + 1; if (rqsize < 8) rqsize = 8; } if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size) return -E2BIG; sqsize = attrs->cap.max_send_wr + 1; if (sqsize < 8) sqsize = 8; qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); if (!qhp->wr_waitp) return -ENOMEM; qhp->wq.sq.size = sqsize; qhp->wq.sq.memsize = (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64); qhp->wq.sq.flush_cidx = -1; if (!attrs->srq) { qhp->wq.rq.size = rqsize; qhp->wq.rq.memsize = (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * sizeof(*qhp->wq.rq.queue); } if (ucontext) { qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); if (!attrs->srq) qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE); } ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, ucontext ? 
&ucontext->uctx : &rhp->rdev.uctx, qhp->wr_waitp, !attrs->srq); if (ret) goto err_free_wr_wait; attrs->cap.max_recv_wr = rqsize - 1; attrs->cap.max_send_wr = sqsize - 1; attrs->cap.max_inline_data = T4_MAX_SEND_INLINE; qhp->rhp = rhp; qhp->attr.pd = php->pdid; qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid; qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid; qhp->attr.sq_num_entries = attrs->cap.max_send_wr; qhp->attr.sq_max_sges = attrs->cap.max_send_sge; qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge; if (!attrs->srq) { qhp->attr.rq_num_entries = attrs->cap.max_recv_wr; qhp->attr.rq_max_sges = attrs->cap.max_recv_sge; } qhp->attr.state = C4IW_QP_STATE_IDLE; qhp->attr.next_state = C4IW_QP_STATE_IDLE; qhp->attr.enable_rdma_read = 1; qhp->attr.enable_rdma_write = 1; qhp->attr.enable_bind = 1; qhp->attr.max_ord = 0; qhp->attr.max_ird = 0; qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; spin_lock_init(&qhp->lock); mutex_init(&qhp->mutex); init_waitqueue_head(&qhp->wait); init_completion(&qhp->qp_rel_comp); refcount_set(&qhp->qp_refcnt, 1); ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL); if (ret) goto err_destroy_qp; if (udata && ucontext) { sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL); if (!sq_key_mm) { ret = -ENOMEM; goto err_remove_handle; } if (!attrs->srq) { rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL); if (!rq_key_mm) { ret = -ENOMEM; goto err_free_sq_key; } } sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL); if (!sq_db_key_mm) { ret = -ENOMEM; goto err_free_rq_key; } if (!attrs->srq) { rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL); if (!rq_db_key_mm) { ret = -ENOMEM; goto err_free_sq_db_key; } } memset(&uresp, 0, sizeof(uresp)); if (t4_sq_onchip(&qhp->wq.sq)) { ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm), GFP_KERNEL); if (!ma_sync_key_mm) { ret = -ENOMEM; goto err_free_rq_db_key; } uresp.flags = C4IW_QPF_ONCHIP; } if (rhp->rdev.lldi.write_w_imm_support) uresp.flags |= C4IW_QPF_WRITE_W_IMM; uresp.qid_mask = rhp->rdev.qpmask; uresp.sqid = qhp->wq.sq.qid; uresp.sq_size = qhp->wq.sq.size; uresp.sq_memsize = qhp->wq.sq.memsize; if (!attrs->srq) { uresp.rqid = qhp->wq.rq.qid; uresp.rq_size = qhp->wq.rq.size; uresp.rq_memsize = qhp->wq.rq.memsize; } spin_lock(&ucontext->mmap_lock); if (ma_sync_key_mm) { uresp.ma_sync_key = ucontext->key; ucontext->key += PAGE_SIZE; } uresp.sq_key = ucontext->key; ucontext->key += PAGE_SIZE; if (!attrs->srq) { uresp.rq_key = ucontext->key; ucontext->key += PAGE_SIZE; } uresp.sq_db_gts_key = ucontext->key; ucontext->key += PAGE_SIZE; if (!attrs->srq) { uresp.rq_db_gts_key = ucontext->key; ucontext->key += PAGE_SIZE; } spin_unlock(&ucontext->mmap_lock); ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (ret) goto err_free_ma_sync_key; sq_key_mm->key = uresp.sq_key; sq_key_mm->addr = qhp->wq.sq.phys_addr; sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize); insert_mmap(ucontext, sq_key_mm); if (!attrs->srq) { rq_key_mm->key = uresp.rq_key; rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue); rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize); insert_mmap(ucontext, rq_key_mm); } sq_db_key_mm->key = uresp.sq_db_gts_key; sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa; sq_db_key_mm->len = PAGE_SIZE; insert_mmap(ucontext, sq_db_key_mm); if (!attrs->srq) { rq_db_key_mm->key = uresp.rq_db_gts_key; rq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.rq.bar2_pa; rq_db_key_mm->len = PAGE_SIZE; insert_mmap(ucontext, rq_db_key_mm); } if (ma_sync_key_mm) { 
ma_sync_key_mm->key = uresp.ma_sync_key; ma_sync_key_mm->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0) + PCIE_MA_SYNC_A) & PAGE_MASK; ma_sync_key_mm->len = PAGE_SIZE; insert_mmap(ucontext, ma_sync_key_mm); } qhp->ucontext = ucontext; } if (!attrs->srq) { qhp->wq.qp_errp = &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err; } else { qhp->wq.qp_errp = &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err; qhp->wq.srqidxp = &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx; } qhp->ibqp.qp_num = qhp->wq.sq.qid; if (attrs->srq) qhp->srq = to_c4iw_srq(attrs->srq); INIT_LIST_HEAD(&qhp->db_fc_entry); pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n", qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize, attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size, qhp->wq.rq.memsize, attrs->cap.max_recv_wr); return 0; err_free_ma_sync_key: kfree(ma_sync_key_mm); err_free_rq_db_key: if (!attrs->srq) kfree(rq_db_key_mm); err_free_sq_db_key: kfree(sq_db_key_mm); err_free_rq_key: if (!attrs->srq) kfree(rq_key_mm); err_free_sq_key: kfree(sq_key_mm); err_remove_handle: xa_erase_irq(&rhp->qps, qhp->wq.sq.qid); err_destroy_qp: destroy_qp(&rhp->rdev, &qhp->wq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq); err_free_wr_wait: c4iw_put_wr_wait(qhp->wr_waitp); return ret; } int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct c4iw_dev *rhp; struct c4iw_qp *qhp; enum c4iw_qp_attr_mask mask = 0; struct c4iw_qp_attributes attrs = {}; pr_debug("ib_qp %p\n", ibqp); if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) return -EOPNOTSUPP; /* iwarp does not support the RTR state */ if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR)) attr_mask &= ~IB_QP_STATE; /* Make sure we still have something left to do */ if (!attr_mask) return 0; qhp = to_c4iw_qp(ibqp); rhp = qhp->rhp; attrs.next_state = c4iw_convert_state(attr->qp_state); attrs.enable_rdma_read = (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) ? 1 : 0; attrs.enable_rdma_write = (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0; mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0; mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ? (C4IW_QP_ATTR_ENABLE_RDMA_READ | C4IW_QP_ATTR_ENABLE_RDMA_WRITE | C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0; /* * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for * ringing the queue db when we're in DB_FULL mode. * Only allow this on T4 devices. */ attrs.sq_db_inc = attr->sq_psn; attrs.rq_db_inc = attr->rq_psn; mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0; mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0; if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) && (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB))) return -EINVAL; return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); } struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn) { pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn); return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn); } void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq) { struct ib_event event = {}; event.device = &srq->rhp->ibdev; event.element.srq = &srq->ibsrq; event.event = IB_EVENT_SRQ_LIMIT_REACHED; ib_dispatch_event(&event); } int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask srq_attr_mask, struct ib_udata *udata) { struct c4iw_srq *srq = to_c4iw_srq(ib_srq); int ret = 0; /* * XXX 0 mask == a SW interrupt for srq_limit reached... 
*/ if (udata && !srq_attr_mask) { c4iw_dispatch_srq_limit_reached_event(srq); goto out; } /* no support for this yet */ if (srq_attr_mask & IB_SRQ_MAX_WR) { ret = -EINVAL; goto out; } if (!udata && (srq_attr_mask & IB_SRQ_LIMIT)) { srq->armed = true; srq->srq_limit = attr->srq_limit; } out: return ret; } int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr) { struct c4iw_qp *qhp = to_c4iw_qp(ibqp); memset(attr, 0, sizeof(*attr)); memset(init_attr, 0, sizeof(*init_attr)); attr->qp_state = to_ib_qp_state(qhp->attr.state); attr->cur_qp_state = to_ib_qp_state(qhp->attr.state); init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges; init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; return 0; } static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx, struct c4iw_wr_wait *wr_waitp) { struct c4iw_rdev *rdev = &srq->rhp->rdev; struct sk_buff *skb = srq->destroy_skb; struct t4_srq *wq = &srq->wq; struct fw_ri_res_wr *res_wr; struct fw_ri_res *res; int wr_len; wr_len = sizeof(*res_wr) + sizeof(*res); set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); memset(res_wr, 0, wr_len); res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) | FW_RI_RES_WR_NRES_V(1) | FW_WR_COMPL_F); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); res_wr->cookie = (uintptr_t)wr_waitp; res = res_wr->res; res->u.srq.restype = FW_RI_RES_TYPE_SRQ; res->u.srq.op = FW_RI_RES_OP_RESET; res->u.srq.srqid = cpu_to_be32(srq->idx); res->u.srq.eqid = cpu_to_be32(wq->qid); c4iw_init_wr_wait(wr_waitp); c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__); dma_free_coherent(&rdev->lldi.pdev->dev, wq->memsize, wq->queue, dma_unmap_addr(wq, mapping)); c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size); kfree(wq->sw_rq); c4iw_put_qpid(rdev, wq->qid, uctx); } static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx, struct c4iw_wr_wait *wr_waitp) { struct c4iw_rdev *rdev = &srq->rhp->rdev; int user = (uctx != &rdev->uctx); struct t4_srq *wq = &srq->wq; struct fw_ri_res_wr *res_wr; struct fw_ri_res *res; struct sk_buff *skb; int wr_len; int eqsize; int ret = -ENOMEM; wq->qid = c4iw_get_qpid(rdev, uctx); if (!wq->qid) goto err; if (!user) { wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq), GFP_KERNEL); if (!wq->sw_rq) goto err_put_qpid; wq->pending_wrs = kcalloc(srq->wq.size, sizeof(*srq->wq.pending_wrs), GFP_KERNEL); if (!wq->pending_wrs) goto err_free_sw_rq; } wq->rqt_size = wq->size; wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size); if (!wq->rqt_hwaddr) goto err_free_pending_wrs; wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >> T4_RQT_ENTRY_SHIFT; wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize, &wq->dma_addr, GFP_KERNEL); if (!wq->queue) goto err_free_rqtpool; dma_unmap_addr_set(wq, mapping, wq->dma_addr); wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS, &wq->bar2_qid, user ? &wq->bar2_pa : NULL); /* * User mode must have bar2 access. 
*/ if (user && !wq->bar2_va) { pr_warn(MOD "%s: srqid %u not in BAR2 range.\n", pci_name(rdev->lldi.pdev), wq->qid); ret = -EINVAL; goto err_free_queue; } /* build fw_ri_res_wr */ wr_len = sizeof(*res_wr) + sizeof(*res); skb = alloc_skb(wr_len, GFP_KERNEL); if (!skb) goto err_free_queue; set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); memset(res_wr, 0, wr_len); res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) | FW_RI_RES_WR_NRES_V(1) | FW_WR_COMPL_F); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); res_wr->cookie = (uintptr_t)wr_waitp; res = res_wr->res; res->u.srq.restype = FW_RI_RES_TYPE_SRQ; res->u.srq.op = FW_RI_RES_OP_WRITE; /* * eqsize is the number of 64B entries plus the status page size. */ eqsize = wq->size * T4_RQ_NUM_SLOTS + rdev->hw_queue.t4_eq_status_entries; res->u.srq.eqid = cpu_to_be32(wq->qid); res->u.srq.fetchszm_to_iqid = /* no host cidx updates */ cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) | FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */ FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */ FW_RI_RES_WR_FETCHRO_V(0)); /* relaxed_ordering */ res->u.srq.dcaen_to_eqsize = cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) | FW_RI_RES_WR_DCACPU_V(0) | FW_RI_RES_WR_FBMIN_V(2) | FW_RI_RES_WR_FBMAX_V(3) | FW_RI_RES_WR_CIDXFTHRESHO_V(0) | FW_RI_RES_WR_CIDXFTHRESH_V(0) | FW_RI_RES_WR_EQSIZE_V(eqsize)); res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr); res->u.srq.srqid = cpu_to_be32(srq->idx); res->u.srq.pdid = cpu_to_be32(srq->pdid); res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size); res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr - rdev->lldi.vr->rq.start); c4iw_init_wr_wait(wr_waitp); ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__); if (ret) goto err_free_queue; pr_debug("%s srq %u eqid %u pdid %u queue va %p pa 0x%llx\n" " bar2_addr %p rqt addr 0x%x size %d\n", __func__, srq->idx, wq->qid, srq->pdid, wq->queue, (u64)virt_to_phys(wq->queue), wq->bar2_va, wq->rqt_hwaddr, wq->rqt_size); return 0; err_free_queue: dma_free_coherent(&rdev->lldi.pdev->dev, wq->memsize, wq->queue, dma_unmap_addr(wq, mapping)); err_free_rqtpool: c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size); err_free_pending_wrs: if (!user) kfree(wq->pending_wrs); err_free_sw_rq: if (!user) kfree(wq->sw_rq); err_put_qpid: c4iw_put_qpid(rdev, wq->qid, uctx); err: return ret; } void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16) { u64 *src, *dst; src = (u64 *)wqe; dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE); while (len16) { *dst++ = *src++; if (dst >= (u64 *)&srq->queue[srq->size]) dst = (u64 *)srq->queue; *dst++ = *src++; if (dst >= (u64 *)&srq->queue[srq->size]) dst = (u64 *)srq->queue; len16--; } } int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs, struct ib_udata *udata) { struct ib_pd *pd = ib_srq->pd; struct c4iw_dev *rhp; struct c4iw_srq *srq = to_c4iw_srq(ib_srq); struct c4iw_pd *php; struct c4iw_create_srq_resp uresp; struct c4iw_ucontext *ucontext; struct c4iw_mm_entry *srq_key_mm, *srq_db_key_mm; int rqsize; int ret; int wr_len; if (attrs->srq_type != IB_SRQT_BASIC) return -EOPNOTSUPP; pr_debug("%s ib_pd %p\n", __func__, pd); php = to_c4iw_pd(pd); rhp = php->rhp; if (!rhp->rdev.lldi.vr->srq.size) return -EINVAL; if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size) return -E2BIG; if (attrs->attr.max_sge > T4_MAX_RECV_SGE) return -E2BIG; /* * SRQ RQT and RQ must be a power of 2 and at least 16 deep. 
*/ rqsize = attrs->attr.max_wr + 1; rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16)); ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext, ibucontext); srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); if (!srq->wr_waitp) return -ENOMEM; srq->idx = c4iw_alloc_srq_idx(&rhp->rdev); if (srq->idx < 0) { ret = -ENOMEM; goto err_free_wr_wait; } wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res); srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL); if (!srq->destroy_skb) { ret = -ENOMEM; goto err_free_srq_idx; } srq->rhp = rhp; srq->pdid = php->pdid; srq->wq.size = rqsize; srq->wq.memsize = (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * sizeof(*srq->wq.queue); if (ucontext) srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE); ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, srq->wr_waitp); if (ret) goto err_free_skb; attrs->attr.max_wr = rqsize - 1; if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6) srq->flags = T4_SRQ_LIMIT_SUPPORT; if (udata) { srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL); if (!srq_key_mm) { ret = -ENOMEM; goto err_free_queue; } srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL); if (!srq_db_key_mm) { ret = -ENOMEM; goto err_free_srq_key_mm; } memset(&uresp, 0, sizeof(uresp)); uresp.flags = srq->flags; uresp.qid_mask = rhp->rdev.qpmask; uresp.srqid = srq->wq.qid; uresp.srq_size = srq->wq.size; uresp.srq_memsize = srq->wq.memsize; uresp.rqt_abs_idx = srq->wq.rqt_abs_idx; spin_lock(&ucontext->mmap_lock); uresp.srq_key = ucontext->key; ucontext->key += PAGE_SIZE; uresp.srq_db_gts_key = ucontext->key; ucontext->key += PAGE_SIZE; spin_unlock(&ucontext->mmap_lock); ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (ret) goto err_free_srq_db_key_mm; srq_key_mm->key = uresp.srq_key; srq_key_mm->addr = virt_to_phys(srq->wq.queue); srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize); insert_mmap(ucontext, srq_key_mm); srq_db_key_mm->key = uresp.srq_db_gts_key; srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa; srq_db_key_mm->len = PAGE_SIZE; insert_mmap(ucontext, srq_db_key_mm); } pr_debug("%s srq qid %u idx %u size %u memsize %lu num_entries %u\n", __func__, srq->wq.qid, srq->idx, srq->wq.size, (unsigned long)srq->wq.memsize, attrs->attr.max_wr); spin_lock_init(&srq->lock); return 0; err_free_srq_db_key_mm: kfree(srq_db_key_mm); err_free_srq_key_mm: kfree(srq_key_mm); err_free_queue: free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, srq->wr_waitp); err_free_skb: kfree_skb(srq->destroy_skb); err_free_srq_idx: c4iw_free_srq_idx(&rhp->rdev, srq->idx); err_free_wr_wait: c4iw_put_wr_wait(srq->wr_waitp); return ret; } int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) { struct c4iw_dev *rhp; struct c4iw_srq *srq; struct c4iw_ucontext *ucontext; srq = to_c4iw_srq(ibsrq); rhp = srq->rhp; pr_debug("%s id %d\n", __func__, srq->wq.qid); ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext, ibucontext); free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, srq->wr_waitp); c4iw_free_srq_idx(&rhp->rdev, srq->idx); c4iw_put_wr_wait(srq->wr_waitp); return 0; }
linux-master
drivers/infiniband/hw/cxgb4/qp.c
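A quick, hedged illustration of two size calculations from the qp.c code above: how many 64-byte EQ slots a posted work request consumes (the DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE) step in the SRQ post path) and how c4iw_create_srq rounds the requested SRQ depth. This is a standalone userspace sketch, not driver code; EQ_ENTRY_SIZE = 64 is an assumed stand-in for T4_EQ_ENTRY_SIZE and the helper names are illustrative.

/*
 * Illustrative-only sketch: EQ-slot accounting per work request and the
 * SRQ depth rounding (max_wr + 1, floor of 16, rounded to a power of two).
 */
#include <stdio.h>

#define EQ_ENTRY_SIZE 64                    /* assumed value of T4_EQ_ENTRY_SIZE */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Plain C stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int round_up_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int len16 = 5;        /* WR length in 16-byte units, as carried in the WQE */
	unsigned int max_wr = 100;     /* user-requested SRQ depth */
	unsigned int rqsize;

	/* One doorbell "idx" unit per 64-byte EQ entry consumed by this WR. */
	printf("len16=%u -> %u EQ entries\n",
	       len16, DIV_ROUND_UP(len16 * 16, EQ_ENTRY_SIZE));

	/* SRQ depth: max_wr + 1, at least 16, rounded up to a power of two. */
	rqsize = max_wr + 1;
	if (rqsize < 16)
		rqsize = 16;
	rqsize = round_up_pow2(rqsize);
	printf("max_wr=%u -> rqsize=%u\n", max_wr, rqsize);
	return 0;
}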
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/slab.h> #include <linux/mman.h> #include <net/sock.h> #include "iw_cxgb4.h" static void print_tpte(struct c4iw_dev *dev, u32 stag) { int ret; struct fw_ri_tpte tpte; ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag, (__be32 *)&tpte); if (ret) { dev_err(&dev->rdev.lldi.pdev->dev, "%s cxgb4_read_tpte err %d\n", __func__, ret); return; } pr_debug("stag idx 0x%x valid %d key 0x%x state %d pdid %d perm 0x%x ps %d len 0x%llx va 0x%llx\n", stag & 0xffffff00, FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)), FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)), FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)), FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)), FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)), FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)), ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo), ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo)); } static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe) { __be64 *p = (void *)err_cqe; dev_err(&dev->rdev.lldi.pdev->dev, "AE qpid %d opcode %d status 0x%x " "type %d len 0x%x wrid.hi 0x%x wrid.lo 0x%x\n", CQE_QPID(err_cqe), CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len), CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe)); pr_debug("%016llx %016llx %016llx %016llx - %016llx %016llx %016llx %016llx\n", be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]), be64_to_cpu(p[3]), be64_to_cpu(p[4]), be64_to_cpu(p[5]), be64_to_cpu(p[6]), be64_to_cpu(p[7])); /* * Ingress WRITE and READ_RESP errors provide * the offending stag, so parse and log it. 
*/ if (RQ_TYPE(err_cqe) && (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE || CQE_OPCODE(err_cqe) == FW_RI_READ_RESP)) print_tpte(dev, CQE_WRID_STAG(err_cqe)); } static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, struct c4iw_qp *qhp, struct t4_cqe *err_cqe, enum ib_event_type ib_event) { struct ib_event event; struct c4iw_qp_attributes attrs; unsigned long flag; dump_err_cqe(dev, err_cqe); if (qhp->attr.state == C4IW_QP_STATE_RTS) { attrs.next_state = C4IW_QP_STATE_TERMINATE; c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); } event.event = ib_event; event.device = chp->ibcq.device; if (ib_event == IB_EVENT_CQ_ERR) event.element.cq = &chp->ibcq; else event.element.qp = &qhp->ibqp; if (qhp->ibqp.event_handler) (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); if (t4_clear_cq_armed(&chp->cq)) { spin_lock_irqsave(&chp->comp_handler_lock, flag); (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); spin_unlock_irqrestore(&chp->comp_handler_lock, flag); } } void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) { struct c4iw_cq *chp; struct c4iw_qp *qhp; u32 cqid; xa_lock_irq(&dev->qps); qhp = xa_load(&dev->qps, CQE_QPID(err_cqe)); if (!qhp) { pr_err("BAD AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n", CQE_QPID(err_cqe), CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe)); xa_unlock_irq(&dev->qps); goto out; } if (SQ_TYPE(err_cqe)) cqid = qhp->attr.scq; else cqid = qhp->attr.rcq; chp = get_chp(dev, cqid); if (!chp) { pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n", cqid, CQE_QPID(err_cqe), CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe)); xa_unlock_irq(&dev->qps); goto out; } c4iw_qp_add_ref(&qhp->ibqp); refcount_inc(&chp->refcnt); xa_unlock_irq(&dev->qps); /* Bad incoming write */ if (RQ_TYPE(err_cqe) && (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) { post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR); goto done; } switch (CQE_STATUS(err_cqe)) { /* Completion Events */ case T4_ERR_SUCCESS: pr_err("AE with status 0!\n"); break; case T4_ERR_STAG: case T4_ERR_PDID: case T4_ERR_QPID: case T4_ERR_ACCESS: case T4_ERR_WRAP: case T4_ERR_BOUND: case T4_ERR_INVALIDATE_SHARED_MR: case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND: post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR); break; /* Device Fatal Errors */ case T4_ERR_ECC: case T4_ERR_ECC_PSTAG: case T4_ERR_INTERNAL_ERR: post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL); break; /* QP Fatal Errors */ case T4_ERR_OUT_OF_RQE: case T4_ERR_PBL_ADDR_BOUND: case T4_ERR_CRC: case T4_ERR_MARKER: case T4_ERR_PDU_LEN_ERR: case T4_ERR_DDP_VERSION: case T4_ERR_RDMA_VERSION: case T4_ERR_OPCODE: case T4_ERR_DDP_QUEUE_NUM: case T4_ERR_MSN: case T4_ERR_TBIT: case T4_ERR_MO: case T4_ERR_MSN_GAP: case T4_ERR_MSN_RANGE: case T4_ERR_RQE_ADDR_BOUND: case T4_ERR_IRD_OVERFLOW: post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL); break; default: pr_err("Unknown T4 status 0x%x QPID 0x%x\n", CQE_STATUS(err_cqe), qhp->wq.sq.qid); post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL); break; } done: c4iw_cq_rem_ref(chp); c4iw_qp_rem_ref(&qhp->ibqp); out: return; } int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) { struct c4iw_cq *chp; unsigned long flag; xa_lock_irqsave(&dev->cqs, flag); chp = xa_load(&dev->cqs, qid); if (chp) { refcount_inc(&chp->refcnt); xa_unlock_irqrestore(&dev->cqs, flag); 
		t4_clear_cq_armed(&chp->cq);
		spin_lock_irqsave(&chp->comp_handler_lock, flag);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
		c4iw_cq_rem_ref(chp);
	} else {
		pr_debug("unknown cqid 0x%x\n", qid);
		xa_unlock_irqrestore(&dev->cqs, flag);
	}
	return 0;
}
linux-master
drivers/infiniband/hw/cxgb4/ev.c
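The async-event dispatcher in ev.c above folds most CQE status codes into a few IB event classes: protection-type errors become QP access errors, ECC/internal faults are device fatal, and the remaining protocol errors are QP fatal. Below is a minimal, hedged standalone sketch of that grouping; the toy enum only mirrors a few T4_ERR_* names (the real codes and values live in the driver headers) and classify_ae() is an illustrative helper, not a driver function.

/*
 * Illustrative-only sketch of the status grouping done in c4iw_ev_dispatch().
 * The enum is a hypothetical local stand-in for the driver's T4_ERR_* codes.
 */
#include <stdio.h>

enum toy_t4_err {
	TOY_ERR_STAG,
	TOY_ERR_ACCESS,
	TOY_ERR_BOUND,
	TOY_ERR_ECC,
	TOY_ERR_INTERNAL,
	TOY_ERR_OUT_OF_RQE,
	TOY_ERR_CRC,
};

static const char *classify_ae(enum toy_t4_err status)
{
	switch (status) {
	case TOY_ERR_STAG:
	case TOY_ERR_ACCESS:
	case TOY_ERR_BOUND:
		return "IB_EVENT_QP_ACCESS_ERR";   /* protection violations */
	case TOY_ERR_ECC:
	case TOY_ERR_INTERNAL:
		return "IB_EVENT_DEVICE_FATAL";    /* adapter-level faults */
	default:
		return "IB_EVENT_QP_FATAL";        /* remaining protocol errors */
	}
}

int main(void)
{
	printf("ACCESS -> %s\n", classify_ae(TOY_ERR_ACCESS));
	printf("ECC    -> %s\n", classify_ae(TOY_ERR_ECC));
	printf("CRC    -> %s\n", classify_ae(TOY_ERR_CRC));
	return 0;
}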
/* * Copyright (c) 2018 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <rdma/rdma_cm.h> #include "iw_cxgb4.h" #include <rdma/restrack.h> #include <uapi/rdma/rdma_netlink.h> static int fill_sq(struct sk_buff *msg, struct t4_wq *wq) { /* WQ+SQ */ if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid)) goto err; if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed)) goto err; if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize)) goto err; if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx)) goto err; if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx)) goto err; if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx)) goto err; if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx)) goto err; if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use)) goto err; if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size)) goto err; if (rdma_nl_put_driver_u32_hex(msg, "flags", wq->sq.flags)) goto err; return 0; err: return -EMSGSIZE; } static int fill_rq(struct sk_buff *msg, struct t4_wq *wq) { /* RQ */ if (rdma_nl_put_driver_u32(msg, "rqid", wq->rq.qid)) goto err; if (rdma_nl_put_driver_u32(msg, "memsize", wq->rq.memsize)) goto err; if (rdma_nl_put_driver_u32(msg, "cidx", wq->rq.cidx)) goto err; if (rdma_nl_put_driver_u32(msg, "pidx", wq->rq.pidx)) goto err; if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->rq.wq_pidx)) goto err; if (rdma_nl_put_driver_u32(msg, "msn", wq->rq.msn)) goto err; if (rdma_nl_put_driver_u32_hex(msg, "rqt_hwaddr", wq->rq.rqt_hwaddr)) goto err; if (rdma_nl_put_driver_u32(msg, "rqt_size", wq->rq.rqt_size)) goto err; if (rdma_nl_put_driver_u32(msg, "in_use", wq->rq.in_use)) goto err; if (rdma_nl_put_driver_u32(msg, "size", wq->rq.size)) goto err; return 0; err: return -EMSGSIZE; } static int fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u16 idx, struct t4_swsqe *sqe) { if (rdma_nl_put_driver_u32(msg, "idx", idx)) goto err; if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode)) goto err; if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete)) goto err; if (sqe->complete && rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe))) goto err; if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled)) goto err; if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed)) goto err; return 0; err: return 
-EMSGSIZE; } /* * Dump the first and last pending sqes. */ static int fill_swsqes(struct sk_buff *msg, struct t4_sq *sq, u16 first_idx, struct t4_swsqe *first_sqe, u16 last_idx, struct t4_swsqe *last_sqe) { if (!first_sqe) goto out; if (fill_swsqe(msg, sq, first_idx, first_sqe)) goto err; if (!last_sqe) goto out; if (fill_swsqe(msg, sq, last_idx, last_sqe)) goto err; out: return 0; err: return -EMSGSIZE; } int c4iw_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp) { struct t4_swsqe *fsp = NULL, *lsp = NULL; struct c4iw_qp *qhp = to_c4iw_qp(ibqp); u16 first_sq_idx = 0, last_sq_idx = 0; struct t4_swsqe first_sqe, last_sqe; struct nlattr *table_attr; struct t4_wq wq; /* User qp state is not available, so don't dump user qps */ if (qhp->ucontext) return 0; table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER); if (!table_attr) goto err; /* Get a consistent snapshot */ spin_lock_irq(&qhp->lock); wq = qhp->wq; /* If there are any pending sqes, copy the first and last */ if (wq.sq.cidx != wq.sq.pidx) { first_sq_idx = wq.sq.cidx; first_sqe = qhp->wq.sq.sw_sq[first_sq_idx]; fsp = &first_sqe; last_sq_idx = wq.sq.pidx; if (last_sq_idx-- == 0) last_sq_idx = wq.sq.size - 1; if (last_sq_idx != first_sq_idx) { last_sqe = qhp->wq.sq.sw_sq[last_sq_idx]; lsp = &last_sqe; } } spin_unlock_irq(&qhp->lock); if (fill_sq(msg, &wq)) goto err_cancel_table; if (fill_swsqes(msg, &wq.sq, first_sq_idx, fsp, last_sq_idx, lsp)) goto err_cancel_table; if (fill_rq(msg, &wq)) goto err_cancel_table; nla_nest_end(msg, table_attr); return 0; err_cancel_table: nla_nest_cancel(msg, table_attr); err: return -EMSGSIZE; } union union_ep { struct c4iw_listen_ep lep; struct c4iw_ep ep; }; int c4iw_fill_res_cm_id_entry(struct sk_buff *msg, struct rdma_cm_id *cm_id) { struct nlattr *table_attr; struct c4iw_ep_common *epcp; struct c4iw_listen_ep *listen_ep = NULL; struct c4iw_ep *ep = NULL; struct iw_cm_id *iw_cm_id; union union_ep *uep; iw_cm_id = rdma_iw_cm_id(cm_id); if (!iw_cm_id) return 0; epcp = (struct c4iw_ep_common *)iw_cm_id->provider_data; if (!epcp) return 0; uep = kzalloc(sizeof(*uep), GFP_KERNEL); if (!uep) return 0; table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER); if (!table_attr) goto err_free_uep; /* Get a consistent snapshot */ mutex_lock(&epcp->mutex); if (epcp->state == LISTEN) { uep->lep = *(struct c4iw_listen_ep *)epcp; mutex_unlock(&epcp->mutex); listen_ep = &uep->lep; epcp = &listen_ep->com; } else { uep->ep = *(struct c4iw_ep *)epcp; mutex_unlock(&epcp->mutex); ep = &uep->ep; epcp = &ep->com; } if (rdma_nl_put_driver_u32(msg, "state", epcp->state)) goto err_cancel_table; if (rdma_nl_put_driver_u64_hex(msg, "flags", epcp->flags)) goto err_cancel_table; if (rdma_nl_put_driver_u64_hex(msg, "history", epcp->history)) goto err_cancel_table; if (listen_ep) { if (rdma_nl_put_driver_u32(msg, "stid", listen_ep->stid)) goto err_cancel_table; if (rdma_nl_put_driver_u32(msg, "backlog", listen_ep->backlog)) goto err_cancel_table; } else { if (rdma_nl_put_driver_u32(msg, "hwtid", ep->hwtid)) goto err_cancel_table; if (rdma_nl_put_driver_u32(msg, "ord", ep->ord)) goto err_cancel_table; if (rdma_nl_put_driver_u32(msg, "ird", ep->ird)) goto err_cancel_table; if (rdma_nl_put_driver_u32(msg, "emss", ep->emss)) goto err_cancel_table; if (!ep->parent_ep && rdma_nl_put_driver_u32(msg, "atid", ep->atid)) goto err_cancel_table; } nla_nest_end(msg, table_attr); kfree(uep); return 0; err_cancel_table: nla_nest_cancel(msg, table_attr); err_free_uep: kfree(uep); return -EMSGSIZE; } static int 
fill_cq(struct sk_buff *msg, struct t4_cq *cq) { if (rdma_nl_put_driver_u32(msg, "cqid", cq->cqid)) goto err; if (rdma_nl_put_driver_u32(msg, "memsize", cq->memsize)) goto err; if (rdma_nl_put_driver_u32(msg, "size", cq->size)) goto err; if (rdma_nl_put_driver_u32(msg, "cidx", cq->cidx)) goto err; if (rdma_nl_put_driver_u32(msg, "cidx_inc", cq->cidx_inc)) goto err; if (rdma_nl_put_driver_u32(msg, "sw_cidx", cq->sw_cidx)) goto err; if (rdma_nl_put_driver_u32(msg, "sw_pidx", cq->sw_pidx)) goto err; if (rdma_nl_put_driver_u32(msg, "sw_in_use", cq->sw_in_use)) goto err; if (rdma_nl_put_driver_u32(msg, "vector", cq->vector)) goto err; if (rdma_nl_put_driver_u32(msg, "gen", cq->gen)) goto err; if (rdma_nl_put_driver_u32(msg, "error", cq->error)) goto err; if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts", be64_to_cpu(cq->bits_type_ts))) goto err; if (rdma_nl_put_driver_u64_hex(msg, "flags", cq->flags)) goto err; return 0; err: return -EMSGSIZE; } static int fill_cqe(struct sk_buff *msg, struct t4_cqe *cqe, u16 idx, const char *qstr) { if (rdma_nl_put_driver_u32(msg, qstr, idx)) goto err; if (rdma_nl_put_driver_u32_hex(msg, "header", be32_to_cpu(cqe->header))) goto err; if (rdma_nl_put_driver_u32(msg, "len", be32_to_cpu(cqe->len))) goto err; if (rdma_nl_put_driver_u32_hex(msg, "wrid_hi", be32_to_cpu(cqe->u.gen.wrid_hi))) goto err; if (rdma_nl_put_driver_u32_hex(msg, "wrid_low", be32_to_cpu(cqe->u.gen.wrid_low))) goto err; if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts", be64_to_cpu(cqe->bits_type_ts))) goto err; return 0; err: return -EMSGSIZE; } static int fill_hwcqes(struct sk_buff *msg, struct t4_cq *cq, struct t4_cqe *cqes) { u16 idx; idx = (cq->cidx > 0) ? cq->cidx - 1 : cq->size - 1; if (fill_cqe(msg, cqes, idx, "hwcq_idx")) goto err; idx = cq->cidx; if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx")) goto err; return 0; err: return -EMSGSIZE; } static int fill_swcqes(struct sk_buff *msg, struct t4_cq *cq, struct t4_cqe *cqes) { u16 idx; if (!cq->sw_in_use) return 0; idx = cq->sw_cidx; if (fill_cqe(msg, cqes, idx, "swcq_idx")) goto err; if (cq->sw_in_use == 1) goto out; idx = (cq->sw_pidx > 0) ? cq->sw_pidx - 1 : cq->size - 1; if (fill_cqe(msg, cqes + 1, idx, "swcq_idx")) goto err; out: return 0; err: return -EMSGSIZE; } int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq) { struct c4iw_cq *chp = to_c4iw_cq(ibcq); struct nlattr *table_attr; struct t4_cqe hwcqes[2]; struct t4_cqe swcqes[2]; struct t4_cq cq; u16 idx; /* User cq state is not available, so don't dump user cqs */ if (ibcq->uobject) return 0; table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER); if (!table_attr) goto err; /* Get a consistent snapshot */ spin_lock_irq(&chp->lock); /* t4_cq struct */ cq = chp->cq; /* get 2 hw cqes: cidx-1, and cidx */ idx = (cq.cidx > 0) ? cq.cidx - 1 : cq.size - 1; hwcqes[0] = chp->cq.queue[idx]; idx = cq.cidx; hwcqes[1] = chp->cq.queue[idx]; /* get first and last sw cqes */ if (cq.sw_in_use) { swcqes[0] = chp->cq.sw_queue[cq.sw_cidx]; if (cq.sw_in_use > 1) { idx = (cq.sw_pidx > 0) ? 
cq.sw_pidx - 1 : cq.size - 1; swcqes[1] = chp->cq.sw_queue[idx]; } } spin_unlock_irq(&chp->lock); if (fill_cq(msg, &cq)) goto err_cancel_table; if (fill_swcqes(msg, &cq, swcqes)) goto err_cancel_table; if (fill_hwcqes(msg, &cq, hwcqes)) goto err_cancel_table; nla_nest_end(msg, table_attr); return 0; err_cancel_table: nla_nest_cancel(msg, table_attr); err: return -EMSGSIZE; } int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr) { struct c4iw_mr *mhp = to_c4iw_mr(ibmr); struct c4iw_dev *dev = mhp->rhp; u32 stag = mhp->attr.stag; struct nlattr *table_attr; struct fw_ri_tpte tpte; int ret; if (!stag) return 0; table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER); if (!table_attr) goto err; ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag, (__be32 *)&tpte); if (ret) { dev_err(&dev->rdev.lldi.pdev->dev, "%s cxgb4_read_tpte err %d\n", __func__, ret); return 0; } if (rdma_nl_put_driver_u32_hex(msg, "idx", stag >> 8)) goto err_cancel_table; if (rdma_nl_put_driver_u32(msg, "valid", FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)))) goto err_cancel_table; if (rdma_nl_put_driver_u32_hex(msg, "key", stag & 0xff)) goto err_cancel_table; if (rdma_nl_put_driver_u32(msg, "state", FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)))) goto err_cancel_table; if (rdma_nl_put_driver_u32(msg, "pdid", FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)))) goto err_cancel_table; if (rdma_nl_put_driver_u32_hex(msg, "perm", FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)))) goto err_cancel_table; if (rdma_nl_put_driver_u32(msg, "ps", FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)))) goto err_cancel_table; if (rdma_nl_put_driver_u64(msg, "len", ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo))) goto err_cancel_table; if (rdma_nl_put_driver_u32_hex(msg, "pbl_addr", FW_RI_TPTE_PBLADDR_G(ntohl(tpte.nosnoop_pbladdr)))) goto err_cancel_table; nla_nest_end(msg, table_attr); return 0; err_cancel_table: nla_nest_cancel(msg, table_attr); err: return -EMSGSIZE; }
linux-master
drivers/infiniband/hw/cxgb4/restrack.c
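restrack.c above repeatedly computes "the entry just before an index, wrapping at zero" when it snapshots queues: the two hardware CQEs at cidx - 1 and cidx, and the first/last pending software SQEs between cidx and pidx. A small, hedged standalone sketch of that ring arithmetic follows; the sizes and indices are invented inputs and ring_prev() is an illustrative helper, not part of the driver.

/*
 * Illustrative-only sketch of the circular-index arithmetic used by the
 * resource-tracking dump code when choosing which entries to snapshot.
 */
#include <stdio.h>

/* Index immediately before idx in a ring of the given size. */
static unsigned int ring_prev(unsigned int idx, unsigned int size)
{
	return idx > 0 ? idx - 1 : size - 1;
}

int main(void)
{
	unsigned int size = 16;        /* made-up queue depth */
	unsigned int cidx = 0, pidx = 5;

	/* Two hw CQEs are dumped: the one before cidx (wrapped) and cidx itself. */
	printf("hw cqes at %u and %u\n", ring_prev(cidx, size), cidx);

	/* Pending sw SQEs run from cidx up to pidx - 1 (wrapped at zero). */
	printf("last pending swsqe at %u\n", ring_prev(pidx, size));
	return 0;
}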
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/debugfs.h> #include <linux/vmalloc.h> #include <linux/math64.h> #include <rdma/ib_verbs.h> #include "iw_cxgb4.h" #define DRV_VERSION "0.1" MODULE_AUTHOR("Steve Wise"); MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver"); MODULE_LICENSE("Dual BSD/GPL"); static int allow_db_fc_on_t5; module_param(allow_db_fc_on_t5, int, 0644); MODULE_PARM_DESC(allow_db_fc_on_t5, "Allow DB Flow Control on T5 (default = 0)"); static int allow_db_coalescing_on_t5; module_param(allow_db_coalescing_on_t5, int, 0644); MODULE_PARM_DESC(allow_db_coalescing_on_t5, "Allow DB Coalescing on T5 (default = 0)"); int c4iw_wr_log = 0; module_param(c4iw_wr_log, int, 0444); MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data."); static int c4iw_wr_log_size_order = 12; module_param(c4iw_wr_log_size_order, int, 0444); MODULE_PARM_DESC(c4iw_wr_log_size_order, "Number of entries (log2) in the work request timing log."); static LIST_HEAD(uld_ctx_list); static DEFINE_MUTEX(dev_mutex); static struct workqueue_struct *reg_workq; #define DB_FC_RESUME_SIZE 64 #define DB_FC_RESUME_DELAY 1 #define DB_FC_DRAIN_THRESH 0 static struct dentry *c4iw_debugfs_root; struct c4iw_debugfs_data { struct c4iw_dev *devp; char *buf; int bufsize; int pos; }; static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct c4iw_debugfs_data *d = file->private_data; return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos); } void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe) { struct wr_log_entry le; int idx; if (!wq->rdev->wr_log) return; idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) & (wq->rdev->wr_log_size - 1); le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]); le.poll_host_time = ktime_get(); le.valid = 1; le.cqe_sge_ts = CQE_TS(cqe); if (SQ_TYPE(cqe)) { le.qid = wq->sq.qid; le.opcode = CQE_OPCODE(cqe); le.post_host_time = wq->sq.sw_sq[wq->sq.cidx].host_time; le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts; le.wr_id = CQE_WRID_SQ_IDX(cqe); } else { le.qid = wq->rq.qid; le.opcode = FW_RI_RECEIVE; le.post_host_time = wq->rq.sw_rq[wq->rq.cidx].host_time; le.post_sge_ts 
= wq->rq.sw_rq[wq->rq.cidx].sge_ts; le.wr_id = CQE_WRID_MSN(cqe); } wq->rdev->wr_log[idx] = le; } static int wr_log_show(struct seq_file *seq, void *v) { struct c4iw_dev *dev = seq->private; ktime_t prev_time; struct wr_log_entry *lep; int prev_time_set = 0; int idx, end; #define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000) idx = atomic_read(&dev->rdev.wr_log_idx) & (dev->rdev.wr_log_size - 1); end = idx - 1; if (end < 0) end = dev->rdev.wr_log_size - 1; lep = &dev->rdev.wr_log[idx]; while (idx != end) { if (lep->valid) { if (!prev_time_set) { prev_time_set = 1; prev_time = lep->poll_host_time; } seq_printf(seq, "%04u: nsec %llu qid %u opcode " "%u %s 0x%x host_wr_delta nsec %llu " "post_sge_ts 0x%llx cqe_sge_ts 0x%llx " "poll_sge_ts 0x%llx post_poll_delta_ns %llu " "cqe_poll_delta_ns %llu\n", idx, ktime_to_ns(ktime_sub(lep->poll_host_time, prev_time)), lep->qid, lep->opcode, lep->opcode == FW_RI_RECEIVE ? "msn" : "wrid", lep->wr_id, ktime_to_ns(ktime_sub(lep->poll_host_time, lep->post_host_time)), lep->post_sge_ts, lep->cqe_sge_ts, lep->poll_sge_ts, ts2ns(lep->poll_sge_ts - lep->post_sge_ts), ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts)); prev_time = lep->poll_host_time; } idx++; if (idx > (dev->rdev.wr_log_size - 1)) idx = 0; lep = &dev->rdev.wr_log[idx]; } #undef ts2ns return 0; } static int wr_log_open(struct inode *inode, struct file *file) { return single_open(file, wr_log_show, inode->i_private); } static ssize_t wr_log_clear(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private; int i; if (dev->rdev.wr_log) for (i = 0; i < dev->rdev.wr_log_size; i++) dev->rdev.wr_log[i].valid = 0; return count; } static const struct file_operations wr_log_debugfs_fops = { .owner = THIS_MODULE, .open = wr_log_open, .release = single_release, .read = seq_read, .llseek = seq_lseek, .write = wr_log_clear, }; static struct sockaddr_in zero_sin = { .sin_family = AF_INET, }; static struct sockaddr_in6 zero_sin6 = { .sin6_family = AF_INET6, }; static void set_ep_sin_addrs(struct c4iw_ep *ep, struct sockaddr_in **lsin, struct sockaddr_in **rsin, struct sockaddr_in **m_lsin, struct sockaddr_in **m_rsin) { struct iw_cm_id *id = ep->com.cm_id; *m_lsin = (struct sockaddr_in *)&ep->com.local_addr; *m_rsin = (struct sockaddr_in *)&ep->com.remote_addr; if (id) { *lsin = (struct sockaddr_in *)&id->local_addr; *rsin = (struct sockaddr_in *)&id->remote_addr; } else { *lsin = &zero_sin; *rsin = &zero_sin; } } static void set_ep_sin6_addrs(struct c4iw_ep *ep, struct sockaddr_in6 **lsin6, struct sockaddr_in6 **rsin6, struct sockaddr_in6 **m_lsin6, struct sockaddr_in6 **m_rsin6) { struct iw_cm_id *id = ep->com.cm_id; *m_lsin6 = (struct sockaddr_in6 *)&ep->com.local_addr; *m_rsin6 = (struct sockaddr_in6 *)&ep->com.remote_addr; if (id) { *lsin6 = (struct sockaddr_in6 *)&id->local_addr; *rsin6 = (struct sockaddr_in6 *)&id->remote_addr; } else { *lsin6 = &zero_sin6; *rsin6 = &zero_sin6; } } static int dump_qp(unsigned long id, struct c4iw_qp *qp, struct c4iw_debugfs_data *qpd) { int space; int cc; if (id != qp->wq.sq.qid) return 0; space = qpd->bufsize - qpd->pos - 1; if (space == 0) return 1; if (qp->ep) { struct c4iw_ep *ep = qp->ep; if (ep->com.local_addr.ss_family == AF_INET) { struct sockaddr_in *lsin; struct sockaddr_in *rsin; struct sockaddr_in *m_lsin; struct sockaddr_in *m_rsin; set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin); cc = snprintf(qpd->buf + qpd->pos, space, "rc qp sq id %u %s id %u state %u " "onchip %u 
ep tid %u state %u " "%pI4:%u/%u->%pI4:%u/%u\n", qp->wq.sq.qid, qp->srq ? "srq" : "rq", qp->srq ? qp->srq->idx : qp->wq.rq.qid, (int)qp->attr.state, qp->wq.sq.flags & T4_SQ_ONCHIP, ep->hwtid, (int)ep->com.state, &lsin->sin_addr, ntohs(lsin->sin_port), ntohs(m_lsin->sin_port), &rsin->sin_addr, ntohs(rsin->sin_port), ntohs(m_rsin->sin_port)); } else { struct sockaddr_in6 *lsin6; struct sockaddr_in6 *rsin6; struct sockaddr_in6 *m_lsin6; struct sockaddr_in6 *m_rsin6; set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6, &m_rsin6); cc = snprintf(qpd->buf + qpd->pos, space, "rc qp sq id %u rq id %u state %u " "onchip %u ep tid %u state %u " "%pI6:%u/%u->%pI6:%u/%u\n", qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state, qp->wq.sq.flags & T4_SQ_ONCHIP, ep->hwtid, (int)ep->com.state, &lsin6->sin6_addr, ntohs(lsin6->sin6_port), ntohs(m_lsin6->sin6_port), &rsin6->sin6_addr, ntohs(rsin6->sin6_port), ntohs(m_rsin6->sin6_port)); } } else cc = snprintf(qpd->buf + qpd->pos, space, "qp sq id %u rq id %u state %u onchip %u\n", qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state, qp->wq.sq.flags & T4_SQ_ONCHIP); if (cc < space) qpd->pos += cc; return 0; } static int qp_release(struct inode *inode, struct file *file) { struct c4iw_debugfs_data *qpd = file->private_data; if (!qpd) { pr_info("%s null qpd?\n", __func__); return 0; } vfree(qpd->buf); kfree(qpd); return 0; } static int qp_open(struct inode *inode, struct file *file) { struct c4iw_qp *qp; struct c4iw_debugfs_data *qpd; unsigned long index; int count = 1; qpd = kmalloc(sizeof(*qpd), GFP_KERNEL); if (!qpd) return -ENOMEM; qpd->devp = inode->i_private; qpd->pos = 0; /* * No need to lock; we drop the lock to call vmalloc so it's racy * anyway. Someone who cares should switch this over to seq_file */ xa_for_each(&qpd->devp->qps, index, qp) count++; qpd->bufsize = count * 180; qpd->buf = vmalloc(qpd->bufsize); if (!qpd->buf) { kfree(qpd); return -ENOMEM; } xa_lock_irq(&qpd->devp->qps); xa_for_each(&qpd->devp->qps, index, qp) dump_qp(index, qp, qpd); xa_unlock_irq(&qpd->devp->qps); qpd->buf[qpd->pos++] = 0; file->private_data = qpd; return 0; } static const struct file_operations qp_debugfs_fops = { .owner = THIS_MODULE, .open = qp_open, .release = qp_release, .read = debugfs_read, .llseek = default_llseek, }; static int dump_stag(unsigned long id, struct c4iw_debugfs_data *stagd) { int space; int cc; struct fw_ri_tpte tpte; int ret; space = stagd->bufsize - stagd->pos - 1; if (space == 0) return 1; ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8, (__be32 *)&tpte); if (ret) { dev_err(&stagd->devp->rdev.lldi.pdev->dev, "%s cxgb4_read_tpte err %d\n", __func__, ret); return ret; } cc = snprintf(stagd->buf + stagd->pos, space, "stag: idx 0x%x valid %d key 0x%x state %d pdid %d " "perm 0x%x ps %d len 0x%llx va 0x%llx\n", (u32)id<<8, FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)), FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)), FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)), FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)), FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)), FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)), ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo), ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo)); if (cc < space) stagd->pos += cc; return 0; } static int stag_release(struct inode *inode, struct file *file) { struct c4iw_debugfs_data *stagd = file->private_data; if (!stagd) { pr_info("%s null stagd?\n", __func__); return 0; } vfree(stagd->buf); kfree(stagd); return 0; } static int stag_open(struct inode *inode, struct file 
*file) { struct c4iw_debugfs_data *stagd; void *p; unsigned long index; int ret = 0; int count = 1; stagd = kmalloc(sizeof(*stagd), GFP_KERNEL); if (!stagd) { ret = -ENOMEM; goto out; } stagd->devp = inode->i_private; stagd->pos = 0; xa_for_each(&stagd->devp->mrs, index, p) count++; stagd->bufsize = count * 256; stagd->buf = vmalloc(stagd->bufsize); if (!stagd->buf) { ret = -ENOMEM; goto err1; } xa_lock_irq(&stagd->devp->mrs); xa_for_each(&stagd->devp->mrs, index, p) dump_stag(index, stagd); xa_unlock_irq(&stagd->devp->mrs); stagd->buf[stagd->pos++] = 0; file->private_data = stagd; goto out; err1: kfree(stagd); out: return ret; } static const struct file_operations stag_debugfs_fops = { .owner = THIS_MODULE, .open = stag_open, .release = stag_release, .read = debugfs_read, .llseek = default_llseek, }; static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"}; static int stats_show(struct seq_file *seq, void *v) { struct c4iw_dev *dev = seq->private; seq_printf(seq, " Object: %10s %10s %10s %10s\n", "Total", "Current", "Max", "Fail"); seq_printf(seq, " PDID: %10llu %10llu %10llu %10llu\n", dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur, dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail); seq_printf(seq, " QID: %10llu %10llu %10llu %10llu\n", dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur, dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail); seq_printf(seq, " SRQS: %10llu %10llu %10llu %10llu\n", dev->rdev.stats.srqt.total, dev->rdev.stats.srqt.cur, dev->rdev.stats.srqt.max, dev->rdev.stats.srqt.fail); seq_printf(seq, " TPTMEM: %10llu %10llu %10llu %10llu\n", dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur, dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail); seq_printf(seq, " PBLMEM: %10llu %10llu %10llu %10llu\n", dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur, dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail); seq_printf(seq, " RQTMEM: %10llu %10llu %10llu %10llu\n", dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur, dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail); seq_printf(seq, " OCQPMEM: %10llu %10llu %10llu %10llu\n", dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur, dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail); seq_printf(seq, " DB FULL: %10llu\n", dev->rdev.stats.db_full); seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty); seq_printf(seq, " DB DROP: %10llu\n", dev->rdev.stats.db_drop); seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n", db_state_str[dev->db_state], dev->rdev.stats.db_state_transitions, dev->rdev.stats.db_fc_interruptions); seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full); seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n", dev->rdev.stats.act_ofld_conn_fails); seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n", dev->rdev.stats.pas_ofld_conn_fails); seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv); seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird); return 0; } static int stats_open(struct inode *inode, struct file *file) { return single_open(file, stats_show, inode->i_private); } static ssize_t stats_clear(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private; mutex_lock(&dev->rdev.stats.lock); dev->rdev.stats.pd.max = 0; dev->rdev.stats.pd.fail = 0; dev->rdev.stats.qid.max = 0; dev->rdev.stats.qid.fail = 0; dev->rdev.stats.stag.max = 0; dev->rdev.stats.stag.fail = 0; dev->rdev.stats.pbl.max = 0; dev->rdev.stats.pbl.fail = 0; dev->rdev.stats.rqt.max = 0; 
dev->rdev.stats.rqt.fail = 0; dev->rdev.stats.rqt.max = 0; dev->rdev.stats.rqt.fail = 0; dev->rdev.stats.ocqp.max = 0; dev->rdev.stats.ocqp.fail = 0; dev->rdev.stats.db_full = 0; dev->rdev.stats.db_empty = 0; dev->rdev.stats.db_drop = 0; dev->rdev.stats.db_state_transitions = 0; dev->rdev.stats.tcam_full = 0; dev->rdev.stats.act_ofld_conn_fails = 0; dev->rdev.stats.pas_ofld_conn_fails = 0; mutex_unlock(&dev->rdev.stats.lock); return count; } static const struct file_operations stats_debugfs_fops = { .owner = THIS_MODULE, .open = stats_open, .release = single_release, .read = seq_read, .llseek = seq_lseek, .write = stats_clear, }; static int dump_ep(struct c4iw_ep *ep, struct c4iw_debugfs_data *epd) { int space; int cc; space = epd->bufsize - epd->pos - 1; if (space == 0) return 1; if (ep->com.local_addr.ss_family == AF_INET) { struct sockaddr_in *lsin; struct sockaddr_in *rsin; struct sockaddr_in *m_lsin; struct sockaddr_in *m_rsin; set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin); cc = snprintf(epd->buf + epd->pos, space, "ep %p cm_id %p qp %p state %d flags 0x%lx " "history 0x%lx hwtid %d atid %d " "conn_na %u abort_na %u " "%pI4:%d/%d <-> %pI4:%d/%d\n", ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state, ep->com.flags, ep->com.history, ep->hwtid, ep->atid, ep->stats.connect_neg_adv, ep->stats.abort_neg_adv, &lsin->sin_addr, ntohs(lsin->sin_port), ntohs(m_lsin->sin_port), &rsin->sin_addr, ntohs(rsin->sin_port), ntohs(m_rsin->sin_port)); } else { struct sockaddr_in6 *lsin6; struct sockaddr_in6 *rsin6; struct sockaddr_in6 *m_lsin6; struct sockaddr_in6 *m_rsin6; set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6, &m_rsin6); cc = snprintf(epd->buf + epd->pos, space, "ep %p cm_id %p qp %p state %d flags 0x%lx " "history 0x%lx hwtid %d atid %d " "conn_na %u abort_na %u " "%pI6:%d/%d <-> %pI6:%d/%d\n", ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state, ep->com.flags, ep->com.history, ep->hwtid, ep->atid, ep->stats.connect_neg_adv, ep->stats.abort_neg_adv, &lsin6->sin6_addr, ntohs(lsin6->sin6_port), ntohs(m_lsin6->sin6_port), &rsin6->sin6_addr, ntohs(rsin6->sin6_port), ntohs(m_rsin6->sin6_port)); } if (cc < space) epd->pos += cc; return 0; } static int dump_listen_ep(struct c4iw_listen_ep *ep, struct c4iw_debugfs_data *epd) { int space; int cc; space = epd->bufsize - epd->pos - 1; if (space == 0) return 1; if (ep->com.local_addr.ss_family == AF_INET) { struct sockaddr_in *lsin = (struct sockaddr_in *) &ep->com.cm_id->local_addr; struct sockaddr_in *m_lsin = (struct sockaddr_in *) &ep->com.cm_id->m_local_addr; cc = snprintf(epd->buf + epd->pos, space, "ep %p cm_id %p state %d flags 0x%lx stid %d " "backlog %d %pI4:%d/%d\n", ep, ep->com.cm_id, (int)ep->com.state, ep->com.flags, ep->stid, ep->backlog, &lsin->sin_addr, ntohs(lsin->sin_port), ntohs(m_lsin->sin_port)); } else { struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) &ep->com.cm_id->local_addr; struct sockaddr_in6 *m_lsin6 = (struct sockaddr_in6 *) &ep->com.cm_id->m_local_addr; cc = snprintf(epd->buf + epd->pos, space, "ep %p cm_id %p state %d flags 0x%lx stid %d " "backlog %d %pI6:%d/%d\n", ep, ep->com.cm_id, (int)ep->com.state, ep->com.flags, ep->stid, ep->backlog, &lsin6->sin6_addr, ntohs(lsin6->sin6_port), ntohs(m_lsin6->sin6_port)); } if (cc < space) epd->pos += cc; return 0; } static int ep_release(struct inode *inode, struct file *file) { struct c4iw_debugfs_data *epd = file->private_data; if (!epd) { pr_info("%s null qpd?\n", __func__); return 0; } vfree(epd->buf); kfree(epd); return 0; } static int ep_open(struct inode *inode, 
struct file *file) { struct c4iw_ep *ep; struct c4iw_listen_ep *lep; unsigned long index; struct c4iw_debugfs_data *epd; int ret = 0; int count = 1; epd = kmalloc(sizeof(*epd), GFP_KERNEL); if (!epd) { ret = -ENOMEM; goto out; } epd->devp = inode->i_private; epd->pos = 0; xa_for_each(&epd->devp->hwtids, index, ep) count++; xa_for_each(&epd->devp->atids, index, ep) count++; xa_for_each(&epd->devp->stids, index, lep) count++; epd->bufsize = count * 240; epd->buf = vmalloc(epd->bufsize); if (!epd->buf) { ret = -ENOMEM; goto err1; } xa_lock_irq(&epd->devp->hwtids); xa_for_each(&epd->devp->hwtids, index, ep) dump_ep(ep, epd); xa_unlock_irq(&epd->devp->hwtids); xa_lock_irq(&epd->devp->atids); xa_for_each(&epd->devp->atids, index, ep) dump_ep(ep, epd); xa_unlock_irq(&epd->devp->atids); xa_lock_irq(&epd->devp->stids); xa_for_each(&epd->devp->stids, index, lep) dump_listen_ep(lep, epd); xa_unlock_irq(&epd->devp->stids); file->private_data = epd; goto out; err1: kfree(epd); out: return ret; } static const struct file_operations ep_debugfs_fops = { .owner = THIS_MODULE, .open = ep_open, .release = ep_release, .read = debugfs_read, }; static void setup_debugfs(struct c4iw_dev *devp) { debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root, (void *)devp, &qp_debugfs_fops, 4096); debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root, (void *)devp, &stag_debugfs_fops, 4096); debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root, (void *)devp, &stats_debugfs_fops, 4096); debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root, (void *)devp, &ep_debugfs_fops, 4096); if (c4iw_wr_log) debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root, (void *)devp, &wr_log_debugfs_fops, 4096); } void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) { struct list_head *pos, *nxt; struct c4iw_qid_list *entry; mutex_lock(&uctx->lock); list_for_each_safe(pos, nxt, &uctx->qpids) { entry = list_entry(pos, struct c4iw_qid_list, entry); list_del_init(&entry->entry); if (!(entry->qid & rdev->qpmask)) { c4iw_put_resource(&rdev->resource.qid_table, entry->qid); mutex_lock(&rdev->stats.lock); rdev->stats.qid.cur -= rdev->qpmask + 1; mutex_unlock(&rdev->stats.lock); } kfree(entry); } list_for_each_safe(pos, nxt, &uctx->cqids) { entry = list_entry(pos, struct c4iw_qid_list, entry); list_del_init(&entry->entry); kfree(entry); } mutex_unlock(&uctx->lock); } void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) { INIT_LIST_HEAD(&uctx->qpids); INIT_LIST_HEAD(&uctx->cqids); mutex_init(&uctx->lock); } /* Caller takes care of locking if needed */ static int c4iw_rdev_open(struct c4iw_rdev *rdev) { int err; unsigned int factor; c4iw_init_dev_ucontext(rdev, &rdev->uctx); /* * This implementation assumes udb_density == ucq_density! Eventually * we might need to support this but for now fail the open. Also the * cqid and qpid range must match for now. 
*/ if (rdev->lldi.udb_density != rdev->lldi.ucq_density) { pr_err("%s: unsupported udb/ucq densities %u/%u\n", pci_name(rdev->lldi.pdev), rdev->lldi.udb_density, rdev->lldi.ucq_density); return -EINVAL; } if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start || rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) { pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n", pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start, rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size, rdev->lldi.vr->cq.size); return -EINVAL; } /* This implementation requires a sge_host_page_size <= PAGE_SIZE. */ if (rdev->lldi.sge_host_page_size > PAGE_SIZE) { pr_err("%s: unsupported sge host page size %u\n", pci_name(rdev->lldi.pdev), rdev->lldi.sge_host_page_size); return -EINVAL; } factor = PAGE_SIZE / rdev->lldi.sge_host_page_size; rdev->qpmask = (rdev->lldi.udb_density * factor) - 1; rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1; pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n", pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start, rdev->lldi.vr->stag.size, c4iw_num_stags(rdev), rdev->lldi.vr->pbl.start, rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start, rdev->lldi.vr->rq.size, rdev->lldi.vr->qp.start, rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start, rdev->lldi.vr->cq.size, rdev->lldi.vr->srq.size); pr_debug("udb %pR db_reg %p gts_reg %p qpmask 0x%x cqmask 0x%x\n", &rdev->lldi.pdev->resource[2], rdev->lldi.db_reg, rdev->lldi.gts_reg, rdev->qpmask, rdev->cqmask); if (c4iw_num_stags(rdev) == 0) return -EINVAL; rdev->stats.pd.total = T4_MAX_NUM_PD; rdev->stats.stag.total = rdev->lldi.vr->stag.size; rdev->stats.pbl.total = rdev->lldi.vr->pbl.size; rdev->stats.rqt.total = rdev->lldi.vr->rq.size; rdev->stats.srqt.total = rdev->lldi.vr->srq.size; rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size; rdev->stats.qid.total = rdev->lldi.vr->qp.size; err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD, rdev->lldi.vr->srq.size); if (err) { pr_err("error %d initializing resources\n", err); return err; } err = c4iw_pblpool_create(rdev); if (err) { pr_err("error %d initializing pbl pool\n", err); goto destroy_resource; } err = c4iw_rqtpool_create(rdev); if (err) { pr_err("error %d initializing rqt pool\n", err); goto destroy_pblpool; } err = c4iw_ocqp_pool_create(rdev); if (err) { pr_err("error %d initializing ocqp pool\n", err); goto destroy_rqtpool; } rdev->status_page = (struct t4_dev_status_page *) __get_free_page(GFP_KERNEL); if (!rdev->status_page) { err = -ENOMEM; goto destroy_ocqp_pool; } rdev->status_page->qp_start = rdev->lldi.vr->qp.start; rdev->status_page->qp_size = rdev->lldi.vr->qp.size; rdev->status_page->cq_start = rdev->lldi.vr->cq.start; rdev->status_page->cq_size = rdev->lldi.vr->cq.size; rdev->status_page->write_cmpl_supported = rdev->lldi.write_cmpl_support; if (c4iw_wr_log) { rdev->wr_log = kcalloc(1 << c4iw_wr_log_size_order, sizeof(*rdev->wr_log), GFP_KERNEL); if (rdev->wr_log) { rdev->wr_log_size = 1 << c4iw_wr_log_size_order; atomic_set(&rdev->wr_log_idx, 0); } } rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); if (!rdev->free_workq) { err = -ENOMEM; goto err_free_status_page_and_wr_log; } rdev->status_page->db_off = 0; init_completion(&rdev->rqt_compl); init_completion(&rdev->pbl_compl); kref_init(&rdev->rqt_kref); kref_init(&rdev->pbl_kref); return 0; err_free_status_page_and_wr_log: if (c4iw_wr_log && rdev->wr_log) 
kfree(rdev->wr_log); free_page((unsigned long)rdev->status_page); destroy_ocqp_pool: c4iw_ocqp_pool_destroy(rdev); destroy_rqtpool: c4iw_rqtpool_destroy(rdev); destroy_pblpool: c4iw_pblpool_destroy(rdev); destroy_resource: c4iw_destroy_resource(&rdev->resource); return err; } static void c4iw_rdev_close(struct c4iw_rdev *rdev) { kfree(rdev->wr_log); c4iw_release_dev_ucontext(rdev, &rdev->uctx); free_page((unsigned long)rdev->status_page); c4iw_pblpool_destroy(rdev); c4iw_rqtpool_destroy(rdev); wait_for_completion(&rdev->pbl_compl); wait_for_completion(&rdev->rqt_compl); c4iw_ocqp_pool_destroy(rdev); destroy_workqueue(rdev->free_workq); c4iw_destroy_resource(&rdev->resource); } void c4iw_dealloc(struct uld_ctx *ctx) { c4iw_rdev_close(&ctx->dev->rdev); WARN_ON(!xa_empty(&ctx->dev->cqs)); WARN_ON(!xa_empty(&ctx->dev->qps)); WARN_ON(!xa_empty(&ctx->dev->mrs)); wait_event(ctx->dev->wait, xa_empty(&ctx->dev->hwtids)); WARN_ON(!xa_empty(&ctx->dev->stids)); WARN_ON(!xa_empty(&ctx->dev->atids)); if (ctx->dev->rdev.bar2_kva) iounmap(ctx->dev->rdev.bar2_kva); if (ctx->dev->rdev.oc_mw_kva) iounmap(ctx->dev->rdev.oc_mw_kva); ib_dealloc_device(&ctx->dev->ibdev); ctx->dev = NULL; } static void c4iw_remove(struct uld_ctx *ctx) { pr_debug("c4iw_dev %p\n", ctx->dev); debugfs_remove_recursive(ctx->dev->debugfs_root); c4iw_unregister_device(ctx->dev); c4iw_dealloc(ctx); } static int rdma_supported(const struct cxgb4_lld_info *infop) { return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 && infop->vr->rq.size > 0 && infop->vr->qp.size > 0 && infop->vr->cq.size > 0; } static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) { struct c4iw_dev *devp; int ret; if (!rdma_supported(infop)) { pr_info("%s: RDMA not supported on this device\n", pci_name(infop->pdev)); return ERR_PTR(-ENOSYS); } if (!ocqp_supported(infop)) pr_info("%s: On-Chip Queues not supported on this device\n", pci_name(infop->pdev)); devp = ib_alloc_device(c4iw_dev, ibdev); if (!devp) { pr_err("Cannot allocate ib device\n"); return ERR_PTR(-ENOMEM); } devp->rdev.lldi = *infop; /* init various hw-queue params based on lld info */ pr_debug("Ing. padding boundary is %d, egrsstatuspagesize = %d\n", devp->rdev.lldi.sge_ingpadboundary, devp->rdev.lldi.sge_egrstatuspagesize); devp->rdev.hw_queue.t4_eq_status_entries = devp->rdev.lldi.sge_egrstatuspagesize / 64; devp->rdev.hw_queue.t4_max_eq_size = 65520; devp->rdev.hw_queue.t4_max_iq_size = 65520; devp->rdev.hw_queue.t4_max_rq_size = 8192 - devp->rdev.hw_queue.t4_eq_status_entries - 1; devp->rdev.hw_queue.t4_max_sq_size = devp->rdev.hw_queue.t4_max_eq_size - devp->rdev.hw_queue.t4_eq_status_entries - 1; devp->rdev.hw_queue.t4_max_qp_depth = devp->rdev.hw_queue.t4_max_rq_size; devp->rdev.hw_queue.t4_max_cq_depth = devp->rdev.hw_queue.t4_max_iq_size - 2; devp->rdev.hw_queue.t4_stat_len = devp->rdev.lldi.sge_egrstatuspagesize; /* * For T5/T6 devices, we map all of BAR2 with WC. * For T4 devices with onchip qp mem, we map only that part * of BAR2 with WC. 
*/ devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2); if (!is_t4(devp->rdev.lldi.adapter_type)) { devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa, pci_resource_len(devp->rdev.lldi.pdev, 2)); if (!devp->rdev.bar2_kva) { pr_err("Unable to ioremap BAR2\n"); ib_dealloc_device(&devp->ibdev); return ERR_PTR(-EINVAL); } } else if (ocqp_supported(infop)) { devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) + pci_resource_len(devp->rdev.lldi.pdev, 2) - roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size); devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, devp->rdev.lldi.vr->ocq.size); if (!devp->rdev.oc_mw_kva) { pr_err("Unable to ioremap onchip mem\n"); ib_dealloc_device(&devp->ibdev); return ERR_PTR(-EINVAL); } } pr_debug("ocq memory: hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size, devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva); ret = c4iw_rdev_open(&devp->rdev); if (ret) { pr_err("Unable to open CXIO rdev err %d\n", ret); ib_dealloc_device(&devp->ibdev); return ERR_PTR(ret); } xa_init_flags(&devp->cqs, XA_FLAGS_LOCK_IRQ); xa_init_flags(&devp->qps, XA_FLAGS_LOCK_IRQ); xa_init_flags(&devp->mrs, XA_FLAGS_LOCK_IRQ); xa_init_flags(&devp->hwtids, XA_FLAGS_LOCK_IRQ); xa_init_flags(&devp->atids, XA_FLAGS_LOCK_IRQ); xa_init_flags(&devp->stids, XA_FLAGS_LOCK_IRQ); mutex_init(&devp->rdev.stats.lock); mutex_init(&devp->db_mutex); INIT_LIST_HEAD(&devp->db_fc_list); init_waitqueue_head(&devp->wait); devp->avail_ird = devp->rdev.lldi.max_ird_adapter; if (c4iw_debugfs_root) { devp->debugfs_root = debugfs_create_dir( pci_name(devp->rdev.lldi.pdev), c4iw_debugfs_root); setup_debugfs(devp); } return devp; } static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) { struct uld_ctx *ctx; static int vers_printed; int i; if (!vers_printed++) pr_info("Chelsio T4/T5 RDMA Driver - version %s\n", DRV_VERSION); ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { ctx = ERR_PTR(-ENOMEM); goto out; } ctx->lldi = *infop; pr_debug("found device %s nchan %u nrxq %u ntxq %u nports %u\n", pci_name(ctx->lldi.pdev), ctx->lldi.nchan, ctx->lldi.nrxq, ctx->lldi.ntxq, ctx->lldi.nports); mutex_lock(&dev_mutex); list_add_tail(&ctx->entry, &uld_ctx_list); mutex_unlock(&dev_mutex); for (i = 0; i < ctx->lldi.nrxq; i++) pr_debug("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]); out: return ctx; } static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl, const __be64 *rsp, u32 pktshift) { struct sk_buff *skb; /* * Allocate space for cpl_pass_accept_req which will be synthesized by * driver. Once the driver synthesizes the request the skb will go * through the regular cpl_pass_accept_req processing. * The math here assumes sizeof cpl_pass_accept_req >= sizeof * cpl_rx_pkt. 
*/ skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) + sizeof(struct rss_header) - pktshift, GFP_ATOMIC); if (unlikely(!skb)) return NULL; __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) + sizeof(struct rss_header) - pktshift); /* * This skb will contain: * rss_header from the rspq descriptor (1 flit) * cpl_rx_pkt struct from the rspq descriptor (2 flits) * space for the difference between the size of an * rx_pkt and pass_accept_req cpl (1 flit) * the packet data from the gl */ skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) + sizeof(struct rss_header)); skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) + sizeof(struct cpl_pass_accept_req), gl->va + pktshift, gl->tot_len - pktshift); return skb; } static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl, const __be64 *rsp) { unsigned int opcode = *(u8 *)rsp; struct sk_buff *skb; if (opcode != CPL_RX_PKT) goto out; skb = copy_gl_to_skb_pkt(gl , rsp, dev->rdev.lldi.sge_pktshift); if (skb == NULL) goto out; if (c4iw_handlers[opcode] == NULL) { pr_info("%s no handler opcode 0x%x...\n", __func__, opcode); kfree_skb(skb); goto out; } c4iw_handlers[opcode](dev, skb); return 1; out: return 0; } static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, const struct pkt_gl *gl) { struct uld_ctx *ctx = handle; struct c4iw_dev *dev = ctx->dev; struct sk_buff *skb; u8 opcode; if (gl == NULL) { /* omit RSS and rsp_ctrl at end of descriptor */ unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8; skb = alloc_skb(256, GFP_ATOMIC); if (!skb) goto nomem; __skb_put(skb, len); skb_copy_to_linear_data(skb, &rsp[1], len); } else if (gl == CXGB4_MSG_AN) { const struct rsp_ctrl *rc = (void *)rsp; u32 qid = be32_to_cpu(rc->pldbuflen_qid); c4iw_ev_handler(dev, qid); return 0; } else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) { if (recv_rx_pkt(dev, gl, rsp)) return 0; pr_info("%s: unexpected FL contents at %p, RSS %#llx, FL %#llx, len %u\n", pci_name(ctx->lldi.pdev), gl->va, be64_to_cpu(*rsp), be64_to_cpu(*(__force __be64 *)gl->va), gl->tot_len); return 0; } else { skb = cxgb4_pktgl_to_skb(gl, 128, 128); if (unlikely(!skb)) goto nomem; } opcode = *(u8 *)rsp; if (c4iw_handlers[opcode]) { c4iw_handlers[opcode](dev, skb); } else { pr_info("%s no handler opcode 0x%x...\n", __func__, opcode); kfree_skb(skb); } return 0; nomem: return -1; } static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) { struct uld_ctx *ctx = handle; pr_debug("new_state %u\n", new_state); switch (new_state) { case CXGB4_STATE_UP: pr_info("%s: Up\n", pci_name(ctx->lldi.pdev)); if (!ctx->dev) { ctx->dev = c4iw_alloc(&ctx->lldi); if (IS_ERR(ctx->dev)) { pr_err("%s: initialization failed: %ld\n", pci_name(ctx->lldi.pdev), PTR_ERR(ctx->dev)); ctx->dev = NULL; break; } INIT_WORK(&ctx->reg_work, c4iw_register_device); queue_work(reg_workq, &ctx->reg_work); } break; case CXGB4_STATE_DOWN: pr_info("%s: Down\n", pci_name(ctx->lldi.pdev)); if (ctx->dev) c4iw_remove(ctx); break; case CXGB4_STATE_FATAL_ERROR: case CXGB4_STATE_START_RECOVERY: pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev)); if (ctx->dev) { struct ib_event event = {}; ctx->dev->rdev.flags |= T4_FATAL_ERROR; event.event = IB_EVENT_DEVICE_FATAL; event.device = &ctx->dev->ibdev; ib_dispatch_event(&event); c4iw_remove(ctx); } break; case CXGB4_STATE_DETACH: pr_info("%s: Detach\n", pci_name(ctx->lldi.pdev)); if (ctx->dev) c4iw_remove(ctx); break; } return 0; } static void stop_queues(struct uld_ctx *ctx) { struct c4iw_qp *qp; unsigned 
long index, flags; xa_lock_irqsave(&ctx->dev->qps, flags); ctx->dev->rdev.stats.db_state_transitions++; ctx->dev->db_state = STOPPED; if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) { xa_for_each(&ctx->dev->qps, index, qp) t4_disable_wq_db(&qp->wq); } else { ctx->dev->rdev.status_page->db_off = 1; } xa_unlock_irqrestore(&ctx->dev->qps, flags); } static void resume_rc_qp(struct c4iw_qp *qp) { spin_lock(&qp->lock); t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL); qp->wq.sq.wq_pidx_inc = 0; t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL); qp->wq.rq.wq_pidx_inc = 0; spin_unlock(&qp->lock); } static void resume_a_chunk(struct uld_ctx *ctx) { int i; struct c4iw_qp *qp; for (i = 0; i < DB_FC_RESUME_SIZE; i++) { qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp, db_fc_entry); list_del_init(&qp->db_fc_entry); resume_rc_qp(qp); if (list_empty(&ctx->dev->db_fc_list)) break; } } static void resume_queues(struct uld_ctx *ctx) { xa_lock_irq(&ctx->dev->qps); if (ctx->dev->db_state != STOPPED) goto out; ctx->dev->db_state = FLOW_CONTROL; while (1) { if (list_empty(&ctx->dev->db_fc_list)) { struct c4iw_qp *qp; unsigned long index; WARN_ON(ctx->dev->db_state != FLOW_CONTROL); ctx->dev->db_state = NORMAL; ctx->dev->rdev.stats.db_state_transitions++; if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) { xa_for_each(&ctx->dev->qps, index, qp) t4_enable_wq_db(&qp->wq); } else { ctx->dev->rdev.status_page->db_off = 0; } break; } else { if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) < (ctx->dev->rdev.lldi.dbfifo_int_thresh << DB_FC_DRAIN_THRESH)) { resume_a_chunk(ctx); } if (!list_empty(&ctx->dev->db_fc_list)) { xa_unlock_irq(&ctx->dev->qps); if (DB_FC_RESUME_DELAY) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(DB_FC_RESUME_DELAY); } xa_lock_irq(&ctx->dev->qps); if (ctx->dev->db_state != FLOW_CONTROL) break; } } } out: if (ctx->dev->db_state != NORMAL) ctx->dev->rdev.stats.db_fc_interruptions++; xa_unlock_irq(&ctx->dev->qps); } struct qp_list { unsigned idx; struct c4iw_qp **qps; }; static void deref_qps(struct qp_list *qp_list) { int idx; for (idx = 0; idx < qp_list->idx; idx++) c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp); } static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list) { int idx; int ret; for (idx = 0; idx < qp_list->idx; idx++) { struct c4iw_qp *qp = qp_list->qps[idx]; xa_lock_irq(&qp->rhp->qps); spin_lock(&qp->lock); ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], qp->wq.sq.qid, t4_sq_host_wq_pidx(&qp->wq), t4_sq_wq_size(&qp->wq)); if (ret) { pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n", pci_name(ctx->lldi.pdev), qp->wq.sq.qid); spin_unlock(&qp->lock); xa_unlock_irq(&qp->rhp->qps); return; } qp->wq.sq.wq_pidx_inc = 0; ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], qp->wq.rq.qid, t4_rq_host_wq_pidx(&qp->wq), t4_rq_wq_size(&qp->wq)); if (ret) { pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n", pci_name(ctx->lldi.pdev), qp->wq.rq.qid); spin_unlock(&qp->lock); xa_unlock_irq(&qp->rhp->qps); return; } qp->wq.rq.wq_pidx_inc = 0; spin_unlock(&qp->lock); xa_unlock_irq(&qp->rhp->qps); /* Wait for the dbfifo to drain */ while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(usecs_to_jiffies(10)); } } } static void recover_queues(struct uld_ctx *ctx) { struct c4iw_qp *qp; unsigned long index; int count = 0; struct qp_list qp_list; int ret; /* slow everybody down */ 
set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(usecs_to_jiffies(1000)); /* flush the SGE contexts */ ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]); if (ret) { pr_err("%s: Fatal error - DB overflow recovery failed\n", pci_name(ctx->lldi.pdev)); return; } /* Count active queues so we can build a list of queues to recover */ xa_lock_irq(&ctx->dev->qps); WARN_ON(ctx->dev->db_state != STOPPED); ctx->dev->db_state = RECOVERY; xa_for_each(&ctx->dev->qps, index, qp) count++; qp_list.qps = kcalloc(count, sizeof(*qp_list.qps), GFP_ATOMIC); if (!qp_list.qps) { xa_unlock_irq(&ctx->dev->qps); return; } qp_list.idx = 0; /* add and ref each qp so it doesn't get freed */ xa_for_each(&ctx->dev->qps, index, qp) { c4iw_qp_add_ref(&qp->ibqp); qp_list.qps[qp_list.idx++] = qp; } xa_unlock_irq(&ctx->dev->qps); /* now traverse the list in a safe context to recover the db state*/ recover_lost_dbs(ctx, &qp_list); /* we're almost done! deref the qps and clean up */ deref_qps(&qp_list); kfree(qp_list.qps); xa_lock_irq(&ctx->dev->qps); WARN_ON(ctx->dev->db_state != RECOVERY); ctx->dev->db_state = STOPPED; xa_unlock_irq(&ctx->dev->qps); } static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...) { struct uld_ctx *ctx = handle; switch (control) { case CXGB4_CONTROL_DB_FULL: stop_queues(ctx); ctx->dev->rdev.stats.db_full++; break; case CXGB4_CONTROL_DB_EMPTY: resume_queues(ctx); mutex_lock(&ctx->dev->rdev.stats.lock); ctx->dev->rdev.stats.db_empty++; mutex_unlock(&ctx->dev->rdev.stats.lock); break; case CXGB4_CONTROL_DB_DROP: recover_queues(ctx); mutex_lock(&ctx->dev->rdev.stats.lock); ctx->dev->rdev.stats.db_drop++; mutex_unlock(&ctx->dev->rdev.stats.lock); break; default: pr_warn("%s: unknown control cmd %u\n", pci_name(ctx->lldi.pdev), control); break; } return 0; } static struct cxgb4_uld_info c4iw_uld_info = { .name = DRV_NAME, .nrxq = MAX_ULD_QSETS, .ntxq = MAX_ULD_QSETS, .rxq_size = 511, .ciq = true, .lro = false, .add = c4iw_uld_add, .rx_handler = c4iw_uld_rx_handler, .state_change = c4iw_uld_state_change, .control = c4iw_uld_control, }; void _c4iw_free_wr_wait(struct kref *kref) { struct c4iw_wr_wait *wr_waitp; wr_waitp = container_of(kref, struct c4iw_wr_wait, kref); pr_debug("Free wr_wait %p\n", wr_waitp); kfree(wr_waitp); } struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp) { struct c4iw_wr_wait *wr_waitp; wr_waitp = kzalloc(sizeof(*wr_waitp), gfp); if (wr_waitp) { kref_init(&wr_waitp->kref); pr_debug("wr_wait %p\n", wr_waitp); } return wr_waitp; } static int __init c4iw_init_module(void) { int err; err = c4iw_cm_init(); if (err) return err; c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL); reg_workq = create_singlethread_workqueue("Register_iWARP_device"); if (!reg_workq) { pr_err("Failed creating workqueue to register iwarp device\n"); return -ENOMEM; } cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); return 0; } static void __exit c4iw_exit_module(void) { struct uld_ctx *ctx, *tmp; mutex_lock(&dev_mutex); list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) { if (ctx->dev) c4iw_remove(ctx); kfree(ctx); } mutex_unlock(&dev_mutex); destroy_workqueue(reg_workq); cxgb4_unregister_uld(CXGB4_ULD_RDMA); c4iw_cm_term(); debugfs_remove_recursive(c4iw_debugfs_root); } module_init(c4iw_init_module); module_exit(c4iw_exit_module);
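/*
 * The wr_log debugfs reader in device.c above starts at the oldest slot of a
 * power-of-two ring (the producer count masked by size - 1, the size being
 * 1 << c4iw_wr_log_size_order) and walks forward until it wraps back around.
 * The stand-alone sketch below reproduces only that index arithmetic with
 * hypothetical names (ring_log, ring_log_add, ring_log_dump); it is an
 * illustration of the pattern, not part of the driver.
 */
#include <stdio.h>

#define RING_ORDER 4
#define RING_SIZE  (1u << RING_ORDER)	/* must stay a power of two */

struct ring_entry { int valid; unsigned long long value; };

struct ring_log {
	unsigned int next;		/* monotonically increasing producer count */
	struct ring_entry slot[RING_SIZE];
};

static void ring_log_add(struct ring_log *log, unsigned long long value)
{
	unsigned int idx = log->next++ & (RING_SIZE - 1);	/* cheap modulo */

	log->slot[idx].value = value;
	log->slot[idx].valid = 1;
}

static void ring_log_dump(const struct ring_log *log)
{
	/* once the ring has wrapped, the oldest entry sits where the next write would land */
	unsigned int idx = log->next & (RING_SIZE - 1);
	unsigned int end = (idx + RING_SIZE - 1) & (RING_SIZE - 1);

	while (idx != end) {
		if (log->slot[idx].valid)
			printf("%04u: %llu\n", idx, log->slot[idx].value);
		idx = (idx + 1) & (RING_SIZE - 1);
	}
}

int main(void)
{
	struct ring_log log = { 0 };

	for (unsigned long long i = 0; i < 20; i++)
		ring_log_add(&log, i * 100);
	ring_log_dump(&log);
	return 0;
}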
linux-master
drivers/infiniband/hw/cxgb4/device.c
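/*
 * dump_qp(), dump_stag() and dump_ep() in device.c all append formatted
 * records into one preallocated buffer by tracking a running position and
 * advancing it only when snprintf() actually fit the record.  A minimal
 * user-space sketch of that append pattern follows; struct strbuf and
 * buf_appendf() are hypothetical names invented for the example.
 */
#include <stdarg.h>
#include <stdio.h>

struct strbuf {
	char *buf;
	size_t bufsize;
	size_t pos;
};

/* Returns 0 when a record was attempted, 1 when the buffer is already full. */
static int buf_appendf(struct strbuf *sb, const char *fmt, ...)
{
	size_t space = sb->bufsize - sb->pos - 1;	/* keep room for '\0' */
	va_list ap;
	int cc;

	if (space == 0)
		return 1;

	va_start(ap, fmt);
	cc = vsnprintf(sb->buf + sb->pos, space, fmt, ap);
	va_end(ap);

	/* advance only if the whole record fit; otherwise drop it silently */
	if (cc >= 0 && (size_t)cc < space)
		sb->pos += cc;
	return 0;
}

int main(void)
{
	char storage[128];
	struct strbuf sb = { .buf = storage, .bufsize = sizeof(storage), .pos = 0 };

	for (int qid = 1; qid <= 4; qid++)
		buf_appendf(&sb, "qp sq id %u state %u\n", qid, 3);
	storage[sb.pos] = '\0';
	fputs(storage, stdout);
	return 0;
}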
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <rdma/uverbs_ioctl.h> #include "iw_cxgb4.h" static void destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, struct c4iw_dev_ucontext *uctx, struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp) { struct fw_ri_res_wr *res_wr; struct fw_ri_res *res; int wr_len; wr_len = sizeof(*res_wr) + sizeof(*res); set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); res_wr = __skb_put_zero(skb, wr_len); res_wr->op_nres = cpu_to_be32( FW_WR_OP_V(FW_RI_RES_WR) | FW_RI_RES_WR_NRES_V(1) | FW_WR_COMPL_F); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); res_wr->cookie = (uintptr_t)wr_waitp; res = res_wr->res; res->u.cq.restype = FW_RI_RES_TYPE_CQ; res->u.cq.op = FW_RI_RES_OP_RESET; res->u.cq.iqid = cpu_to_be32(cq->cqid); c4iw_init_wr_wait(wr_waitp); c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__); kfree(cq->sw_queue); dma_free_coherent(&(rdev->lldi.pdev->dev), cq->memsize, cq->queue, dma_unmap_addr(cq, mapping)); c4iw_put_cqid(rdev, cq->cqid, uctx); } static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, struct c4iw_dev_ucontext *uctx, struct c4iw_wr_wait *wr_waitp) { struct fw_ri_res_wr *res_wr; struct fw_ri_res *res; int wr_len; int user = (uctx != &rdev->uctx); int ret; struct sk_buff *skb; struct c4iw_ucontext *ucontext = NULL; if (user) ucontext = container_of(uctx, struct c4iw_ucontext, uctx); cq->cqid = c4iw_get_cqid(rdev, uctx); if (!cq->cqid) { ret = -ENOMEM; goto err1; } if (!user) { cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL); if (!cq->sw_queue) { ret = -ENOMEM; goto err2; } } cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize, &cq->dma_addr, GFP_KERNEL); if (!cq->queue) { ret = -ENOMEM; goto err3; } dma_unmap_addr_set(cq, mapping, cq->dma_addr); if (user && ucontext->is_32b_cqe) { cq->qp_errp = &((struct t4_status_page *) ((u8 *)cq->queue + (cq->size - 1) * (sizeof(*cq->queue) / 2)))->qp_err; } else { cq->qp_errp = &((struct t4_status_page *) ((u8 *)cq->queue + (cq->size - 1) * sizeof(*cq->queue)))->qp_err; } /* build fw_ri_res_wr */ wr_len = sizeof(*res_wr) + sizeof(*res); skb = alloc_skb(wr_len, GFP_KERNEL); if (!skb) { ret = -ENOMEM; goto err4; } set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); res_wr = __skb_put_zero(skb, wr_len); 
res_wr->op_nres = cpu_to_be32( FW_WR_OP_V(FW_RI_RES_WR) | FW_RI_RES_WR_NRES_V(1) | FW_WR_COMPL_F); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); res_wr->cookie = (uintptr_t)wr_waitp; res = res_wr->res; res->u.cq.restype = FW_RI_RES_TYPE_CQ; res->u.cq.op = FW_RI_RES_OP_WRITE; res->u.cq.iqid = cpu_to_be32(cq->cqid); res->u.cq.iqandst_to_iqandstindex = cpu_to_be32( FW_RI_RES_WR_IQANUS_V(0) | FW_RI_RES_WR_IQANUD_V(1) | FW_RI_RES_WR_IQANDST_F | FW_RI_RES_WR_IQANDSTINDEX_V( rdev->lldi.ciq_ids[cq->vector])); res->u.cq.iqdroprss_to_iqesize = cpu_to_be16( FW_RI_RES_WR_IQDROPRSS_F | FW_RI_RES_WR_IQPCIECH_V(2) | FW_RI_RES_WR_IQINTCNTTHRESH_V(0) | FW_RI_RES_WR_IQO_F | ((user && ucontext->is_32b_cqe) ? FW_RI_RES_WR_IQESIZE_V(1) : FW_RI_RES_WR_IQESIZE_V(2))); res->u.cq.iqsize = cpu_to_be16(cq->size); res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr); c4iw_init_wr_wait(wr_waitp); ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__); if (ret) goto err4; cq->gen = 1; cq->gts = rdev->lldi.gts_reg; cq->rdev = rdev; cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS, &cq->bar2_qid, user ? &cq->bar2_pa : NULL); if (user && !cq->bar2_pa) { pr_warn("%s: cqid %u not in BAR2 range\n", pci_name(rdev->lldi.pdev), cq->cqid); ret = -EINVAL; goto err4; } return 0; err4: dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue, dma_unmap_addr(cq, mapping)); err3: kfree(cq->sw_queue); err2: c4iw_put_cqid(rdev, cq->cqid, uctx); err1: return ret; } static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx) { struct t4_cqe cqe; pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n", wq, cq, cq->sw_cidx, cq->sw_pidx); memset(&cqe, 0, sizeof(cqe)); cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | CQE_OPCODE_V(FW_RI_SEND) | CQE_TYPE_V(0) | CQE_SWCQE_V(1) | CQE_QPID_V(wq->sq.qid)); cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); if (srqidx) cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx); cq->sw_queue[cq->sw_pidx] = cqe; t4_swcq_produce(cq); } int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) { int flushed = 0; int in_use = wq->rq.in_use - count; pr_debug("wq %p cq %p rq.in_use %u skip count %u\n", wq, cq, wq->rq.in_use, count); while (in_use--) { insert_recv_cqe(wq, cq, 0); flushed++; } return flushed; } static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, struct t4_swsqe *swcqe) { struct t4_cqe cqe; pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n", wq, cq, cq->sw_cidx, cq->sw_pidx); memset(&cqe, 0, sizeof(cqe)); cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | CQE_OPCODE_V(swcqe->opcode) | CQE_TYPE_V(1) | CQE_SWCQE_V(1) | CQE_QPID_V(wq->sq.qid)); CQE_WRID_SQ_IDX(&cqe) = swcqe->idx; cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); cq->sw_queue[cq->sw_pidx] = cqe; t4_swcq_produce(cq); } static void advance_oldest_read(struct t4_wq *wq); int c4iw_flush_sq(struct c4iw_qp *qhp) { int flushed = 0; struct t4_wq *wq = &qhp->wq; struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq); struct t4_cq *cq = &chp->cq; int idx; struct t4_swsqe *swsqe; if (wq->sq.flush_cidx == -1) wq->sq.flush_cidx = wq->sq.cidx; idx = wq->sq.flush_cidx; while (idx != wq->sq.pidx) { swsqe = &wq->sq.sw_sq[idx]; swsqe->flushed = 1; insert_sq_cqe(wq, cq, swsqe); if (wq->sq.oldest_read == swsqe) { advance_oldest_read(wq); } flushed++; if (++idx == wq->sq.size) idx = 0; } wq->sq.flush_cidx += flushed; if (wq->sq.flush_cidx >= wq->sq.size) wq->sq.flush_cidx -= wq->sq.size; return flushed; } static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq) { 
struct t4_swsqe *swsqe; int cidx; if (wq->sq.flush_cidx == -1) wq->sq.flush_cidx = wq->sq.cidx; cidx = wq->sq.flush_cidx; while (cidx != wq->sq.pidx) { swsqe = &wq->sq.sw_sq[cidx]; if (!swsqe->signaled) { if (++cidx == wq->sq.size) cidx = 0; } else if (swsqe->complete) { /* * Insert this completed cqe into the swcq. */ pr_debug("moving cqe into swcq sq idx %u cq idx %u\n", cidx, cq->sw_pidx); swsqe->cqe.header |= htonl(CQE_SWCQE_V(1)); cq->sw_queue[cq->sw_pidx] = swsqe->cqe; t4_swcq_produce(cq); swsqe->flushed = 1; if (++cidx == wq->sq.size) cidx = 0; wq->sq.flush_cidx = cidx; } else break; } } static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe, struct t4_cqe *read_cqe) { read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx; read_cqe->len = htonl(wq->sq.oldest_read->read_len); read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) | CQE_SWCQE_V(SW_CQE(hw_cqe)) | CQE_OPCODE_V(FW_RI_READ_REQ) | CQE_TYPE_V(1)); read_cqe->bits_type_ts = hw_cqe->bits_type_ts; } static void advance_oldest_read(struct t4_wq *wq) { u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1; if (rptr == wq->sq.size) rptr = 0; while (rptr != wq->sq.pidx) { wq->sq.oldest_read = &wq->sq.sw_sq[rptr]; if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ) return; if (++rptr == wq->sq.size) rptr = 0; } wq->sq.oldest_read = NULL; } /* * Move all CQEs from the HWCQ into the SWCQ. * Deal with out-of-order and/or completions that complete * prior unsignalled WRs. */ void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp) { struct t4_cqe *hw_cqe, *swcqe, read_cqe; struct c4iw_qp *qhp; struct t4_swsqe *swsqe; int ret; pr_debug("cqid 0x%x\n", chp->cq.cqid); ret = t4_next_hw_cqe(&chp->cq, &hw_cqe); /* * This logic is similar to poll_cq(), but not quite the same * unfortunately. Need to move pertinent HW CQEs to the SW CQ but * also do any translation magic that poll_cq() normally does. */ while (!ret) { qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe)); /* * drop CQEs with no associated QP */ if (qhp == NULL) goto next_cqe; if (flush_qhp != qhp) { spin_lock(&qhp->lock); if (qhp->wq.flushed == 1) goto next_cqe; } if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) goto next_cqe; if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) { /* If we have reached here because of async * event or other error, and have egress error * then drop */ if (CQE_TYPE(hw_cqe) == 1) goto next_cqe; /* drop peer2peer RTR reads. */ if (CQE_WRID_STAG(hw_cqe) == 1) goto next_cqe; /* * Eat completions for unsignaled read WRs. */ if (!qhp->wq.sq.oldest_read->signaled) { advance_oldest_read(&qhp->wq); goto next_cqe; } /* * Don't write to the HWCQ, create a new read req CQE * in local memory and move it into the swcq. */ create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe); hw_cqe = &read_cqe; advance_oldest_read(&qhp->wq); } /* if its a SQ completion, then do the magic to move all the * unsignaled and now in-order completions into the swcq. 
*/ if (SQ_TYPE(hw_cqe)) { swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)]; swsqe->cqe = *hw_cqe; swsqe->complete = 1; flush_completed_wrs(&qhp->wq, &chp->cq); } else { swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx]; *swcqe = *hw_cqe; swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1)); t4_swcq_produce(&chp->cq); } next_cqe: t4_hwcq_consume(&chp->cq); ret = t4_next_hw_cqe(&chp->cq, &hw_cqe); if (qhp && flush_qhp != qhp) spin_unlock(&qhp->lock); } } static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) { if (DRAIN_CQE(cqe)) { WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid); return 0; } if (CQE_OPCODE(cqe) == FW_RI_TERMINATE) return 0; if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe)) return 0; if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe)) return 0; if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq)) return 0; return 1; } void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count) { struct t4_cqe *cqe; u32 ptr; *count = 0; pr_debug("count zero %d\n", *count); ptr = cq->sw_cidx; while (ptr != cq->sw_pidx) { cqe = &cq->sw_queue[ptr]; if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) && (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq)) (*count)++; if (++ptr == cq->size) ptr = 0; } pr_debug("cq %p count %d\n", cq, *count); } static void post_pending_srq_wrs(struct t4_srq *srq) { struct t4_srq_pending_wr *pwr; u16 idx = 0; while (srq->pending_in_use) { pwr = &srq->pending_wrs[srq->pending_cidx]; srq->sw_rq[srq->pidx].wr_id = pwr->wr_id; srq->sw_rq[srq->pidx].valid = 1; pr_debug("%s posting pending cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n", __func__, srq->cidx, srq->pidx, srq->wq_pidx, srq->in_use, srq->size, (unsigned long long)pwr->wr_id); c4iw_copy_wr_to_srq(srq, &pwr->wqe, pwr->len16); t4_srq_consume_pending_wr(srq); t4_srq_produce(srq, pwr->len16); idx += DIV_ROUND_UP(pwr->len16 * 16, T4_EQ_ENTRY_SIZE); } if (idx) { t4_ring_srq_db(srq, idx, pwr->len16, &pwr->wqe); srq->queue[srq->size].status.host_wq_pidx = srq->wq_pidx; } } static u64 reap_srq_cqe(struct t4_cqe *hw_cqe, struct t4_srq *srq) { int rel_idx = CQE_ABS_RQE_IDX(hw_cqe) - srq->rqt_abs_idx; u64 wr_id; srq->sw_rq[rel_idx].valid = 0; wr_id = srq->sw_rq[rel_idx].wr_id; if (rel_idx == srq->cidx) { pr_debug("%s in order cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n", __func__, rel_idx, srq->cidx, srq->pidx, srq->wq_pidx, srq->in_use, srq->size, (unsigned long long)srq->sw_rq[rel_idx].wr_id); t4_srq_consume(srq); while (srq->ooo_count && !srq->sw_rq[srq->cidx].valid) { pr_debug("%s eat ooo cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n", __func__, srq->cidx, srq->pidx, srq->wq_pidx, srq->in_use, srq->size, srq->ooo_count, (unsigned long long) srq->sw_rq[srq->cidx].wr_id); t4_srq_consume_ooo(srq); } if (srq->ooo_count == 0 && srq->pending_in_use) post_pending_srq_wrs(srq); } else { pr_debug("%s ooo cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n", __func__, rel_idx, srq->cidx, srq->pidx, srq->wq_pidx, srq->in_use, srq->size, srq->ooo_count, (unsigned long long)srq->sw_rq[rel_idx].wr_id); t4_srq_produce_ooo(srq); } return wr_id; } /* * poll_cq * * Caller must: * check the validity of the first CQE, * supply the wq assicated with the qpid. * * credit: cq credit to return to sge. * cqe_flushed: 1 iff the CQE is flushed. * cqe: copy of the polled CQE. * * return value: * 0 CQE returned ok. * -EAGAIN CQE skipped, try again. * -EOVERFLOW CQ overflow detected. 
*/ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit, struct t4_srq *srq) { int ret = 0; struct t4_cqe *hw_cqe, read_cqe; *cqe_flushed = 0; *credit = 0; ret = t4_next_cqe(cq, &hw_cqe); if (ret) return ret; pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n", CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe), CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe), CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe), CQE_WRID_LOW(hw_cqe)); /* * skip cqe's not affiliated with a QP. */ if (wq == NULL) { ret = -EAGAIN; goto skip_cqe; } /* * skip hw cqe's if the wq is flushed. */ if (wq->flushed && !SW_CQE(hw_cqe)) { ret = -EAGAIN; goto skip_cqe; } /* * skip TERMINATE cqes... */ if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) { ret = -EAGAIN; goto skip_cqe; } /* * Special cqe for drain WR completions... */ if (DRAIN_CQE(hw_cqe)) { *cookie = CQE_DRAIN_COOKIE(hw_cqe); *cqe = *hw_cqe; goto skip_cqe; } /* * Gotta tweak READ completions: * 1) the cqe doesn't contain the sq_wptr from the wr. * 2) opcode not reflected from the wr. * 3) read_len not reflected from the wr. * 4) cq_type is RQ_TYPE not SQ_TYPE. */ if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) { /* If we have reached here because of async * event or other error, and have egress error * then drop */ if (CQE_TYPE(hw_cqe) == 1) { if (CQE_STATUS(hw_cqe)) t4_set_wq_in_error(wq, 0); ret = -EAGAIN; goto skip_cqe; } /* If this is an unsolicited read response, then the read * was generated by the kernel driver as part of peer-2-peer * connection setup. So ignore the completion. */ if (CQE_WRID_STAG(hw_cqe) == 1) { if (CQE_STATUS(hw_cqe)) t4_set_wq_in_error(wq, 0); ret = -EAGAIN; goto skip_cqe; } /* * Eat completions for unsignaled read WRs. */ if (!wq->sq.oldest_read->signaled) { advance_oldest_read(wq); ret = -EAGAIN; goto skip_cqe; } /* * Don't write to the HWCQ, so create a new read req CQE * in local memory. */ create_read_req_cqe(wq, hw_cqe, &read_cqe); hw_cqe = &read_cqe; advance_oldest_read(wq); } if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) { *cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH); t4_set_wq_in_error(wq, 0); } /* * RECV completion. */ if (RQ_TYPE(hw_cqe)) { /* * HW only validates 4 bits of MSN. So we must validate that * the MSN in the SEND is the next expected MSN. If its not, * then we complete this with T4_ERR_MSN and mark the wq in * error. */ if (unlikely(!CQE_STATUS(hw_cqe) && CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) { t4_set_wq_in_error(wq, 0); hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN)); } goto proc_cqe; } /* * If we get here its a send completion. * * Handle out of order completion. These get stuffed * in the SW SQ. Then the SW SQ is walked to move any * now in-order completions into the SW CQ. This handles * 2 cases: * 1) reaping unsignaled WRs when the first subsequent * signaled WR is completed. * 2) out of order read completions. */ if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) { struct t4_swsqe *swsqe; pr_debug("out of order completion going in sw_sq at idx %u\n", CQE_WRID_SQ_IDX(hw_cqe)); swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)]; swsqe->cqe = *hw_cqe; swsqe->complete = 1; ret = -EAGAIN; goto flush_wq; } proc_cqe: *cqe = *hw_cqe; /* * Reap the associated WR(s) that are freed up with this * completion. 
*/ if (SQ_TYPE(hw_cqe)) { int idx = CQE_WRID_SQ_IDX(hw_cqe); /* * Account for any unsignaled completions completed by * this signaled completion. In this case, cidx points * to the first unsignaled one, and idx points to the * signaled one. So adjust in_use based on this delta. * if this is not completing any unsigned wrs, then the * delta will be 0. Handle wrapping also! */ if (idx < wq->sq.cidx) wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx; else wq->sq.in_use -= idx - wq->sq.cidx; wq->sq.cidx = (uint16_t)idx; pr_debug("completing sq idx %u\n", wq->sq.cidx); *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id; if (c4iw_wr_log) c4iw_log_wr_stats(wq, hw_cqe); t4_sq_consume(wq); } else { if (!srq) { pr_debug("completing rq idx %u\n", wq->rq.cidx); *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id; if (c4iw_wr_log) c4iw_log_wr_stats(wq, hw_cqe); t4_rq_consume(wq); } else { *cookie = reap_srq_cqe(hw_cqe, srq); } wq->rq.msn++; goto skip_cqe; } flush_wq: /* * Flush any completed cqes that are now in-order. */ flush_completed_wrs(wq, cq); skip_cqe: if (SW_CQE(hw_cqe)) { pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n", cq, cq->cqid, cq->sw_cidx); t4_swcq_consume(cq); } else { pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n", cq, cq->cqid, cq->cidx); t4_hwcq_consume(cq); } return ret; } static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp, struct ib_wc *wc, struct c4iw_srq *srq) { struct t4_cqe cqe; struct t4_wq *wq = qhp ? &qhp->wq : NULL; u32 credit = 0; u8 cqe_flushed; u64 cookie = 0; int ret; ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit, srq ? &srq->wq : NULL); if (ret) goto out; wc->wr_id = cookie; wc->qp = &qhp->ibqp; wc->vendor_err = CQE_STATUS(&cqe); wc->wc_flags = 0; /* * Simulate a SRQ_LIMIT_REACHED HW notification if required. 
*/ if (srq && !(srq->flags & T4_SRQ_LIMIT_SUPPORT) && srq->armed && srq->wq.in_use < srq->srq_limit) c4iw_dispatch_srq_limit_reached_event(srq); pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n", CQE_QPID(&cqe), CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe), CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie); if (CQE_TYPE(&cqe) == 0) { if (!CQE_STATUS(&cqe)) wc->byte_len = CQE_LEN(&cqe); else wc->byte_len = 0; switch (CQE_OPCODE(&cqe)) { case FW_RI_SEND: wc->opcode = IB_WC_RECV; break; case FW_RI_SEND_WITH_INV: case FW_RI_SEND_WITH_SE_INV: wc->opcode = IB_WC_RECV; wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe); wc->wc_flags |= IB_WC_WITH_INVALIDATE; c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); break; case FW_RI_WRITE_IMMEDIATE: wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; wc->ex.imm_data = CQE_IMM_DATA(&cqe); wc->wc_flags |= IB_WC_WITH_IMM; break; default: pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n", CQE_OPCODE(&cqe), CQE_QPID(&cqe)); ret = -EINVAL; goto out; } } else { switch (CQE_OPCODE(&cqe)) { case FW_RI_WRITE_IMMEDIATE: case FW_RI_RDMA_WRITE: wc->opcode = IB_WC_RDMA_WRITE; break; case FW_RI_READ_REQ: wc->opcode = IB_WC_RDMA_READ; wc->byte_len = CQE_LEN(&cqe); break; case FW_RI_SEND_WITH_INV: case FW_RI_SEND_WITH_SE_INV: wc->opcode = IB_WC_SEND; wc->wc_flags |= IB_WC_WITH_INVALIDATE; break; case FW_RI_SEND: case FW_RI_SEND_WITH_SE: wc->opcode = IB_WC_SEND; break; case FW_RI_LOCAL_INV: wc->opcode = IB_WC_LOCAL_INV; break; case FW_RI_FAST_REGISTER: wc->opcode = IB_WC_REG_MR; /* Invalidate the MR if the fastreg failed */ if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS) c4iw_invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe)); break; default: pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n", CQE_OPCODE(&cqe), CQE_QPID(&cqe)); ret = -EINVAL; goto out; } } if (cqe_flushed) wc->status = IB_WC_WR_FLUSH_ERR; else { switch (CQE_STATUS(&cqe)) { case T4_ERR_SUCCESS: wc->status = IB_WC_SUCCESS; break; case T4_ERR_STAG: wc->status = IB_WC_LOC_ACCESS_ERR; break; case T4_ERR_PDID: wc->status = IB_WC_LOC_PROT_ERR; break; case T4_ERR_QPID: case T4_ERR_ACCESS: wc->status = IB_WC_LOC_ACCESS_ERR; break; case T4_ERR_WRAP: wc->status = IB_WC_GENERAL_ERR; break; case T4_ERR_BOUND: wc->status = IB_WC_LOC_LEN_ERR; break; case T4_ERR_INVALIDATE_SHARED_MR: case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND: wc->status = IB_WC_MW_BIND_ERR; break; case T4_ERR_CRC: case T4_ERR_MARKER: case T4_ERR_PDU_LEN_ERR: case T4_ERR_OUT_OF_RQE: case T4_ERR_DDP_VERSION: case T4_ERR_RDMA_VERSION: case T4_ERR_DDP_QUEUE_NUM: case T4_ERR_MSN: case T4_ERR_TBIT: case T4_ERR_MO: case T4_ERR_MSN_RANGE: case T4_ERR_IRD_OVERFLOW: case T4_ERR_OPCODE: case T4_ERR_INTERNAL_ERR: wc->status = IB_WC_FATAL_ERR; break; case T4_ERR_SWFLUSH: wc->status = IB_WC_WR_FLUSH_ERR; break; default: pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n", CQE_STATUS(&cqe), CQE_QPID(&cqe)); wc->status = IB_WC_FATAL_ERR; } } out: return ret; } /* * Get one cq entry from c4iw and map it to openib. 
* * Returns: * 0 cqe returned * -ENODATA EMPTY; * -EAGAIN caller must try again * any other -errno fatal error */ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) { struct c4iw_srq *srq = NULL; struct c4iw_qp *qhp = NULL; struct t4_cqe *rd_cqe; int ret; ret = t4_next_cqe(&chp->cq, &rd_cqe); if (ret) return ret; qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe)); if (qhp) { spin_lock(&qhp->lock); srq = qhp->srq; if (srq) spin_lock(&srq->lock); ret = __c4iw_poll_cq_one(chp, qhp, wc, srq); spin_unlock(&qhp->lock); if (srq) spin_unlock(&srq->lock); } else { ret = __c4iw_poll_cq_one(chp, NULL, wc, NULL); } return ret; } int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) { struct c4iw_cq *chp; unsigned long flags; int npolled; int err = 0; chp = to_c4iw_cq(ibcq); spin_lock_irqsave(&chp->lock, flags); for (npolled = 0; npolled < num_entries; ++npolled) { do { err = c4iw_poll_cq_one(chp, wc + npolled); } while (err == -EAGAIN); if (err) break; } spin_unlock_irqrestore(&chp->lock, flags); return !err || err == -ENODATA ? npolled : err; } void c4iw_cq_rem_ref(struct c4iw_cq *chp) { if (refcount_dec_and_test(&chp->refcnt)) complete(&chp->cq_rel_comp); } int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) { struct c4iw_cq *chp; struct c4iw_ucontext *ucontext; pr_debug("ib_cq %p\n", ib_cq); chp = to_c4iw_cq(ib_cq); xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid); c4iw_cq_rem_ref(chp); wait_for_completion(&chp->cq_rel_comp); ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext, ibucontext); destroy_cq(&chp->rhp->rdev, &chp->cq, ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx, chp->destroy_skb, chp->wr_waitp); c4iw_put_wr_wait(chp->wr_waitp); return 0; } int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) { struct ib_device *ibdev = ibcq->device; int entries = attr->cqe; int vector = attr->comp_vector; struct c4iw_dev *rhp = to_c4iw_dev(ibcq->device); struct c4iw_cq *chp = to_c4iw_cq(ibcq); struct c4iw_create_cq ucmd; struct c4iw_create_cq_resp uresp; int ret, wr_len; size_t memsize, hwentries; struct c4iw_mm_entry *mm, *mm2; struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context( udata, struct c4iw_ucontext, ibucontext); pr_debug("ib_dev %p entries %d\n", ibdev, entries); if (attr->flags) return -EOPNOTSUPP; if (entries < 1 || entries > ibdev->attrs.max_cqe) return -EINVAL; if (vector >= rhp->rdev.lldi.nciq) return -EINVAL; if (udata) { if (udata->inlen < sizeof(ucmd)) ucontext->is_32b_cqe = 1; } chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); if (!chp->wr_waitp) { ret = -ENOMEM; goto err_free_chp; } c4iw_init_wr_wait(chp->wr_waitp); wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res); chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL); if (!chp->destroy_skb) { ret = -ENOMEM; goto err_free_wr_wait; } /* account for the status page. */ entries++; /* IQ needs one extra entry to differentiate full vs empty. */ entries++; /* * entries must be multiple of 16 for HW. */ entries = roundup(entries, 16); /* * Make actual HW queue 2x to avoid cdix_inc overflows. */ hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size); /* * Make HW queue at least 64 entries so GTS updates aren't too * frequent. */ if (hwentries < 64) hwentries = 64; memsize = hwentries * ((ucontext && ucontext->is_32b_cqe) ? (sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue)); /* * memsize must be a multiple of the page size if its a user cq. 
*/ if (udata) memsize = roundup(memsize, PAGE_SIZE); chp->cq.size = hwentries; chp->cq.memsize = memsize; chp->cq.vector = vector; ret = create_cq(&rhp->rdev, &chp->cq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, chp->wr_waitp); if (ret) goto err_free_skb; chp->rhp = rhp; chp->cq.size--; /* status page */ chp->ibcq.cqe = entries - 2; spin_lock_init(&chp->lock); spin_lock_init(&chp->comp_handler_lock); refcount_set(&chp->refcnt, 1); init_completion(&chp->cq_rel_comp); ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL); if (ret) goto err_destroy_cq; if (ucontext) { ret = -ENOMEM; mm = kmalloc(sizeof(*mm), GFP_KERNEL); if (!mm) goto err_remove_handle; mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL); if (!mm2) goto err_free_mm; memset(&uresp, 0, sizeof(uresp)); uresp.qid_mask = rhp->rdev.cqmask; uresp.cqid = chp->cq.cqid; uresp.size = chp->cq.size; uresp.memsize = chp->cq.memsize; spin_lock(&ucontext->mmap_lock); uresp.key = ucontext->key; ucontext->key += PAGE_SIZE; uresp.gts_key = ucontext->key; ucontext->key += PAGE_SIZE; /* communicate to the userspace that * kernel driver supports 64B CQE */ uresp.flags |= C4IW_64B_CQE; spin_unlock(&ucontext->mmap_lock); ret = ib_copy_to_udata(udata, &uresp, ucontext->is_32b_cqe ? sizeof(uresp) - sizeof(uresp.flags) : sizeof(uresp)); if (ret) goto err_free_mm2; mm->key = uresp.key; mm->addr = virt_to_phys(chp->cq.queue); mm->len = chp->cq.memsize; insert_mmap(ucontext, mm); mm2->key = uresp.gts_key; mm2->addr = chp->cq.bar2_pa; mm2->len = PAGE_SIZE; insert_mmap(ucontext, mm2); } pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr %pad\n", chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize, &chp->cq.dma_addr); return 0; err_free_mm2: kfree(mm2); err_free_mm: kfree(mm); err_remove_handle: xa_erase_irq(&rhp->cqs, chp->cq.cqid); err_destroy_cq: destroy_cq(&chp->rhp->rdev, &chp->cq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, chp->destroy_skb, chp->wr_waitp); err_free_skb: kfree_skb(chp->destroy_skb); err_free_wr_wait: c4iw_put_wr_wait(chp->wr_waitp); err_free_chp: return ret; } int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct c4iw_cq *chp; int ret = 0; unsigned long flag; chp = to_c4iw_cq(ibcq); spin_lock_irqsave(&chp->lock, flag); t4_arm_cq(&chp->cq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED); if (flags & IB_CQ_REPORT_MISSED_EVENTS) ret = t4_cq_notempty(&chp->cq); spin_unlock_irqrestore(&chp->lock, flag); return ret; } void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx) { struct c4iw_cq *rchp = to_c4iw_cq(qhp->ibqp.recv_cq); unsigned long flag; /* locking heirarchy: cq lock first, then qp lock. */ spin_lock_irqsave(&rchp->lock, flag); spin_lock(&qhp->lock); /* create a SRQ RECV CQE for srqidx */ insert_recv_cqe(&qhp->wq, &rchp->cq, srqidx); spin_unlock(&qhp->lock); spin_unlock_irqrestore(&rchp->lock, flag); }
linux-master
drivers/infiniband/hw/cxgb4/cq.c
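The poll and arm entry points in the file above (c4iw_poll_cq() and c4iw_arm_cq()) are reached through the core verbs layer. The sketch below is illustrative only, not driver code: the handler name and the drain/re-arm/drain-again pattern are assumptions, while ib_poll_cq(), ib_req_notify_cq() and struct ib_wc are the real ib_verbs API that dispatches to this driver.

/*
 * Minimal sketch (not part of the driver): how a kernel ULP might drain
 * completions from a CQ backed by c4iw_poll_cq() and then re-arm it via
 * c4iw_arm_cq(). The "drain until empty, re-arm, drain once more" pattern
 * and all demo_* names are illustrative assumptions.
 */
#include <linux/printk.h>
#include <rdma/ib_verbs.h>

#define DEMO_CQ_BATCH 16

static void demo_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[DEMO_CQ_BATCH];
	int n, i;

	do {
		/* the driver's poll_cq returns the number of entries filled in */
		n = ib_poll_cq(cq, DEMO_CQ_BATCH, wc);
		for (i = 0; i < n; i++) {
			if (wc[i].status != IB_WC_SUCCESS) {
				pr_warn("demo: wrid %llu failed, status %d\n",
					wc[i].wr_id, wc[i].status);
				continue;
			}
			if (wc[i].opcode == IB_WC_RECV)
				pr_debug("demo: recv of %u bytes\n",
					 wc[i].byte_len);
		}
	} while (n == DEMO_CQ_BATCH);
}

/* Completion handler registered at ib_create_cq() time in this sketch. */
static void demo_cq_event(struct ib_cq *cq, void *ctx)
{
	demo_drain_cq(cq);
	/* Re-arm; the driver reports missed events when asked to. */
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
			     IB_CQ_REPORT_MISSED_EVENTS) > 0)
		demo_drain_cq(cq);
}

Draining once more when ib_req_notify_cq() returns a positive value closes the window where a completion arrived between the last poll and the re-arm; that is exactly the case c4iw_arm_cq() reports via t4_cq_notempty() when IB_CQ_REPORT_MISSED_EVENTS is set.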
/* * Copyright (c) 2011 Chelsio Communications. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/kernel.h> #include <linux/random.h> #include "iw_cxgb4.h" #define RANDOM_SKIP 16 /* * Trivial bitmap-based allocator. If the random flag is set, the * allocator is designed to: * - pseudo-randomize the id returned such that it is not trivially predictable. * - avoid reuse of recently used id (at the expense of predictability) */ u32 c4iw_id_alloc(struct c4iw_id_table *alloc) { unsigned long flags; u32 obj; spin_lock_irqsave(&alloc->lock, flags); obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last); if (obj >= alloc->max) obj = find_first_zero_bit(alloc->table, alloc->max); if (obj < alloc->max) { if (alloc->flags & C4IW_ID_TABLE_F_RANDOM) alloc->last += get_random_u32_below(RANDOM_SKIP); else alloc->last = obj + 1; if (alloc->last >= alloc->max) alloc->last = 0; __set_bit(obj, alloc->table); obj += alloc->start; } else obj = -1; spin_unlock_irqrestore(&alloc->lock, flags); return obj; } void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj) { unsigned long flags; obj -= alloc->start; spin_lock_irqsave(&alloc->lock, flags); __clear_bit(obj, alloc->table); spin_unlock_irqrestore(&alloc->lock, flags); } int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num, u32 reserved, u32 flags) { alloc->start = start; alloc->flags = flags; if (flags & C4IW_ID_TABLE_F_RANDOM) alloc->last = get_random_u32_below(RANDOM_SKIP); else alloc->last = 0; alloc->max = num; spin_lock_init(&alloc->lock); alloc->table = bitmap_zalloc(num, GFP_KERNEL); if (!alloc->table) return -ENOMEM; if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY)) bitmap_set(alloc->table, 0, reserved); return 0; } void c4iw_id_table_free(struct c4iw_id_table *alloc) { bitmap_free(alloc->table); }
linux-master
drivers/infiniband/hw/cxgb4/id_table.c
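The bitmap allocator above hands out bounded integer ids to the rest of the driver. The sketch below is hypothetical usage, not driver code: the table name, the 0..1023 range and the 32 reserved ids are invented for illustration, while the function names, signatures and the C4IW_ID_TABLE_F_RANDOM flag are exactly those defined in id_table.c.

/*
 * Illustrative-only usage of the bitmap id allocator above. All demo_*
 * names and the chosen sizes are invented for the example; on exhaustion
 * c4iw_id_alloc() returns (u32)-1.
 */
#include <linux/errno.h>
#include "iw_cxgb4.h"

static struct c4iw_id_table demo_qid_table;

static int demo_id_table_example(void)
{
	u32 id;
	int ret;

	/* ids 0..1023, first 32 marked reserved, randomized handout */
	ret = c4iw_id_table_alloc(&demo_qid_table, 0, 1024, 32,
				  C4IW_ID_TABLE_F_RANDOM);
	if (ret)
		return ret;

	id = c4iw_id_alloc(&demo_qid_table);
	if (id == (u32)-1) {
		c4iw_id_table_free(&demo_qid_table);
		return -ENOMEM;
	}

	/* ... use the id, e.g. as a queue identifier ... */

	c4iw_id_free(&demo_qid_table, id);
	c4iw_id_table_free(&demo_qid_table);
	return 0;
}

Passing C4IW_ID_TABLE_F_RANDOM trades predictability for delayed reuse of recently freed ids, as the allocator's header comment explains; omitting it makes c4iw_id_alloc() advance roughly sequentially from the last allocation.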
/* * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/list.h> #include <linux/workqueue.h> #include <linux/skbuff.h> #include <linux/timer.h> #include <linux/notifier.h> #include <linux/inetdevice.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/if_vlan.h> #include <net/neighbour.h> #include <net/netevent.h> #include <net/route.h> #include <net/tcp.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <rdma/ib_addr.h> #include <libcxgb_cm.h> #include "iw_cxgb4.h" #include "clip_tbl.h" static char *states[] = { "idle", "listen", "connecting", "mpa_wait_req", "mpa_req_sent", "mpa_req_rcvd", "mpa_rep_sent", "fpdu_mode", "aborting", "closing", "moribund", "dead", NULL, }; static int nocong; module_param(nocong, int, 0644); MODULE_PARM_DESC(nocong, "Turn of congestion control (default=0)"); static int enable_ecn; module_param(enable_ecn, int, 0644); MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)"); static int dack_mode; module_param(dack_mode, int, 0644); MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)"); uint c4iw_max_read_depth = 32; module_param(c4iw_max_read_depth, int, 0644); MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=32)"); static int enable_tcp_timestamps; module_param(enable_tcp_timestamps, int, 0644); MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)"); static int enable_tcp_sack; module_param(enable_tcp_sack, int, 0644); MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)"); static int enable_tcp_window_scaling = 1; module_param(enable_tcp_window_scaling, int, 0644); MODULE_PARM_DESC(enable_tcp_window_scaling, "Enable tcp window scaling (default=1)"); static int peer2peer = 1; module_param(peer2peer, int, 0644); MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)"); static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; module_param(p2p_type, int, 0644); MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: " "1=RDMA_READ 0=RDMA_WRITE (default 1)"); static int ep_timeout_secs = 60; module_param(ep_timeout_secs, int, 0644); MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " "in seconds (default=60)"); static int mpa_rev = 2; 
module_param(mpa_rev, int, 0644); MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " "1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft" " compliant (default=2)"); static int markers_enabled; module_param(markers_enabled, int, 0644); MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)"); static int crc_enabled = 1; module_param(crc_enabled, int, 0644); MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); static int rcv_win = 256 * 1024; module_param(rcv_win, int, 0644); MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); static int snd_win = 128 * 1024; module_param(snd_win, int, 0644); MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)"); static struct workqueue_struct *workq; static struct sk_buff_head rxq; static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); static void ep_timeout(struct timer_list *t); static void connect_reply_upcall(struct c4iw_ep *ep, int status); static int sched(struct c4iw_dev *dev, struct sk_buff *skb); static LIST_HEAD(timeout_list); static DEFINE_SPINLOCK(timeout_lock); static void deref_cm_id(struct c4iw_ep_common *epc) { epc->cm_id->rem_ref(epc->cm_id); epc->cm_id = NULL; set_bit(CM_ID_DEREFED, &epc->history); } static void ref_cm_id(struct c4iw_ep_common *epc) { set_bit(CM_ID_REFED, &epc->history); epc->cm_id->add_ref(epc->cm_id); } static void deref_qp(struct c4iw_ep *ep) { c4iw_qp_rem_ref(&ep->com.qp->ibqp); clear_bit(QP_REFERENCED, &ep->com.flags); set_bit(QP_DEREFED, &ep->com.history); } static void ref_qp(struct c4iw_ep *ep) { set_bit(QP_REFERENCED, &ep->com.flags); set_bit(QP_REFED, &ep->com.history); c4iw_qp_add_ref(&ep->com.qp->ibqp); } static void start_ep_timer(struct c4iw_ep *ep) { pr_debug("ep %p\n", ep); if (timer_pending(&ep->timer)) { pr_err("%s timer already started! ep %p\n", __func__, ep); return; } clear_bit(TIMEOUT, &ep->com.flags); c4iw_get_ep(&ep->com); ep->timer.expires = jiffies + ep_timeout_secs * HZ; add_timer(&ep->timer); } static int stop_ep_timer(struct c4iw_ep *ep) { pr_debug("ep %p stopping\n", ep); del_timer_sync(&ep->timer); if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { c4iw_put_ep(&ep->com); return 0; } return 1; } static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, struct l2t_entry *l2e) { int error = 0; if (c4iw_fatal_error(rdev)) { kfree_skb(skb); pr_err("%s - device in error state - dropping\n", __func__); return -EIO; } error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); if (error < 0) kfree_skb(skb); else if (error == NET_XMIT_DROP) return -ENOMEM; return error < 0 ? error : 0; } int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) { int error = 0; if (c4iw_fatal_error(rdev)) { kfree_skb(skb); pr_err("%s - device in error state - dropping\n", __func__); return -EIO; } error = cxgb4_ofld_send(rdev->lldi.ports[0], skb); if (error < 0) kfree_skb(skb); return error < 0 ? error : 0; } static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) { u32 len = roundup(sizeof(struct cpl_tid_release), 16); skb = get_skb(skb, len, GFP_KERNEL); if (!skb) return; cxgb_mk_tid_release(skb, len, hwtid, 0); c4iw_ofld_send(rdev, skb); return; } static void set_emss(struct c4iw_ep *ep, u16 opt) { ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] - ((AF_INET == ep->com.remote_addr.ss_family) ? 
sizeof(struct iphdr) : sizeof(struct ipv6hdr)) - sizeof(struct tcphdr); ep->mss = ep->emss; if (TCPOPT_TSTAMP_G(opt)) ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4); if (ep->emss < 128) ep->emss = 128; if (ep->emss & 7) pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss, ep->emss); pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss, ep->emss); } static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) { enum c4iw_ep_state state; mutex_lock(&epc->mutex); state = epc->state; mutex_unlock(&epc->mutex); return state; } static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) { epc->state = new; } static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) { mutex_lock(&epc->mutex); pr_debug("%s -> %s\n", states[epc->state], states[new]); __state_set(epc, new); mutex_unlock(&epc->mutex); return; } static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size) { struct sk_buff *skb; unsigned int i; size_t len; len = roundup(sizeof(union cpl_wr_size), 16); for (i = 0; i < size; i++) { skb = alloc_skb(len, GFP_KERNEL); if (!skb) goto fail; skb_queue_tail(ep_skb_list, skb); } return 0; fail: skb_queue_purge(ep_skb_list); return -ENOMEM; } static void *alloc_ep(int size, gfp_t gfp) { struct c4iw_ep_common *epc; epc = kzalloc(size, gfp); if (epc) { epc->wr_waitp = c4iw_alloc_wr_wait(gfp); if (!epc->wr_waitp) { kfree(epc); epc = NULL; goto out; } kref_init(&epc->kref); mutex_init(&epc->mutex); c4iw_init_wr_wait(epc->wr_waitp); } pr_debug("alloc ep %p\n", epc); out: return epc; } static void remove_ep_tid(struct c4iw_ep *ep) { unsigned long flags; xa_lock_irqsave(&ep->com.dev->hwtids, flags); __xa_erase(&ep->com.dev->hwtids, ep->hwtid); if (xa_empty(&ep->com.dev->hwtids)) wake_up(&ep->com.dev->wait); xa_unlock_irqrestore(&ep->com.dev->hwtids, flags); } static int insert_ep_tid(struct c4iw_ep *ep) { unsigned long flags; int err; xa_lock_irqsave(&ep->com.dev->hwtids, flags); err = __xa_insert(&ep->com.dev->hwtids, ep->hwtid, ep, GFP_KERNEL); xa_unlock_irqrestore(&ep->com.dev->hwtids, flags); return err; } /* * Atomically lookup the ep ptr given the tid and grab a reference on the ep. */ static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid) { struct c4iw_ep *ep; unsigned long flags; xa_lock_irqsave(&dev->hwtids, flags); ep = xa_load(&dev->hwtids, tid); if (ep) c4iw_get_ep(&ep->com); xa_unlock_irqrestore(&dev->hwtids, flags); return ep; } /* * Atomically lookup the ep ptr given the stid and grab a reference on the ep. 
*/ static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev, unsigned int stid) { struct c4iw_listen_ep *ep; unsigned long flags; xa_lock_irqsave(&dev->stids, flags); ep = xa_load(&dev->stids, stid); if (ep) c4iw_get_ep(&ep->com); xa_unlock_irqrestore(&dev->stids, flags); return ep; } void _c4iw_free_ep(struct kref *kref) { struct c4iw_ep *ep; ep = container_of(kref, struct c4iw_ep, com.kref); pr_debug("ep %p state %s\n", ep, states[ep->com.state]); if (test_bit(QP_REFERENCED, &ep->com.flags)) deref_qp(ep); if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { if (ep->com.remote_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &ep->com.local_addr; cxgb4_clip_release( ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, 1); } cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid, ep->com.local_addr.ss_family); dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); kfree_skb(ep->mpa_skb); } if (!skb_queue_empty(&ep->com.ep_skb_list)) skb_queue_purge(&ep->com.ep_skb_list); c4iw_put_wr_wait(ep->com.wr_waitp); kfree(ep); } static void release_ep_resources(struct c4iw_ep *ep) { set_bit(RELEASE_RESOURCES, &ep->com.flags); /* * If we have a hwtid, then remove it from the idr table * so lookups will no longer find this endpoint. Otherwise * we have a race where one thread finds the ep ptr just * before the other thread is freeing the ep memory. */ if (ep->hwtid != -1) remove_ep_tid(ep); c4iw_put_ep(&ep->com); } static int status2errno(int status) { switch (status) { case CPL_ERR_NONE: return 0; case CPL_ERR_CONN_RESET: return -ECONNRESET; case CPL_ERR_ARP_MISS: return -EHOSTUNREACH; case CPL_ERR_CONN_TIMEDOUT: return -ETIMEDOUT; case CPL_ERR_TCAM_FULL: return -ENOMEM; case CPL_ERR_CONN_EXIST: return -EADDRINUSE; default: return -EIO; } } /* * Try and reuse skbs already allocated... */ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) { if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) { skb_trim(skb, 0); skb_get(skb); skb_reset_transport_header(skb); } else { skb = alloc_skb(len, gfp); if (!skb) return NULL; } t4_set_arp_err_handler(skb, NULL, NULL); return skb; } static struct net_device *get_real_dev(struct net_device *egress_dev) { return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev; } static void arp_failure_discard(void *handle, struct sk_buff *skb) { pr_err("ARP failure\n"); kfree_skb(skb); } static void mpa_start_arp_failure(void *handle, struct sk_buff *skb) { pr_err("ARP failure during MPA Negotiation - Closing Connection\n"); } enum { NUM_FAKE_CPLS = 2, FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0, FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1, }; static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); release_ep_resources(ep); return 0; } static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); c4iw_put_ep(&ep->parent_ep->com); release_ep_resources(ep); return 0; } /* * Fake up a special CPL opcode and call sched() so process_work() will call * _put_ep_safe() in a safe context to free the ep resources. This is needed * because ARP error handlers are called in an ATOMIC context, and * _c4iw_free_ep() needs to block. 
*/ static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb, int cpl) { struct cpl_act_establish *rpl = cplhdr(skb); /* Set our special ARP_FAILURE opcode */ rpl->ot.opcode = cpl; /* * Save ep in the skb->cb area, after where sched() will save the dev * ptr. */ *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep; sched(ep->com.dev, skb); } /* Handle an ARP failure for an accept */ static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb) { struct c4iw_ep *ep = handle; pr_err("ARP failure during accept - tid %u - dropping connection\n", ep->hwtid); __state_set(&ep->com, DEAD); queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE); } /* * Handle an ARP failure for an active open. */ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) { struct c4iw_ep *ep = handle; pr_err("ARP failure during connect\n"); connect_reply_upcall(ep, -EHOSTUNREACH); __state_set(&ep->com, DEAD); if (ep->com.remote_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ep->com.local_addr; cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, 1); } xa_erase_irq(&ep->com.dev->atids, ep->atid); cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); } /* * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant * and send it along. */ static void abort_arp_failure(void *handle, struct sk_buff *skb) { int ret; struct c4iw_ep *ep = handle; struct c4iw_rdev *rdev = &ep->com.dev->rdev; struct cpl_abort_req *req = cplhdr(skb); pr_debug("rdev %p\n", rdev); req->cmd = CPL_ABORT_NO_RST; skb_get(skb); ret = c4iw_ofld_send(rdev, skb); if (ret) { __state_set(&ep->com, DEAD); queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); } else kfree_skb(skb); } static int send_flowc(struct c4iw_ep *ep) { struct fw_flowc_wr *flowc; struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list); u16 vlan = ep->l2t->vlan; int nparams; int flowclen, flowclen16; if (WARN_ON(!skb)) return -ENOMEM; if (vlan == CPL_L2T_VLAN_NONE) nparams = 9; else nparams = 10; flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]); flowclen16 = DIV_ROUND_UP(flowclen, 16); flowclen = flowclen16 * 16; flowc = __skb_put(skb, flowclen); memset(flowc, 0, flowclen); flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams)); flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(ep->hwtid)); flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V (ep->com.dev->rdev.lldi.pf)); flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; flowc->mnemval[6].val = cpu_to_be32(ep->snd_win); flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; flowc->mnemval[7].val = cpu_to_be32(ep->emss); flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_RCV_SCALE; flowc->mnemval[8].val = cpu_to_be32(ep->snd_wscale); if (nparams == 10) { u16 pri; pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; flowc->mnemval[9].mnemonic = 
FW_FLOWC_MNEM_SCHEDCLASS; flowc->mnemval[9].val = cpu_to_be32(pri); } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); return c4iw_ofld_send(&ep->com.dev->rdev, skb); } static int send_halfclose(struct c4iw_ep *ep) { struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list); u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16); pr_debug("ep %p tid %u\n", ep, ep->hwtid); if (WARN_ON(!skb)) return -ENOMEM; cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx, NULL, arp_failure_discard); return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } static void read_tcb(struct c4iw_ep *ep) { struct sk_buff *skb; struct cpl_get_tcb *req; int wrlen = roundup(sizeof(*req), 16); skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); if (WARN_ON(!skb)) return; set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); req = (struct cpl_get_tcb *) skb_put(skb, wrlen); memset(req, 0, wrlen); INIT_TP_WR(req, ep->hwtid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_GET_TCB, ep->hwtid)); req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(ep->rss_qid)); /* * keep a ref on the ep so the tcb is not unlocked before this * cpl completes. The ref is released in read_tcb_rpl(). */ c4iw_get_ep(&ep->com); if (WARN_ON(c4iw_ofld_send(&ep->com.dev->rdev, skb))) c4iw_put_ep(&ep->com); } static int send_abort_req(struct c4iw_ep *ep) { u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16); struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list); pr_debug("ep %p tid %u\n", ep, ep->hwtid); if (WARN_ON(!req_skb)) return -ENOMEM; cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx, ep, abort_arp_failure); return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t); } static int send_abort(struct c4iw_ep *ep) { if (!ep->com.qp || !ep->com.qp->srq) { send_abort_req(ep); return 0; } set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags); read_tcb(ep); return 0; } static int send_connect(struct c4iw_ep *ep) { struct cpl_act_open_req *req = NULL; struct cpl_t5_act_open_req *t5req = NULL; struct cpl_t6_act_open_req *t6req = NULL; struct cpl_act_open_req6 *req6 = NULL; struct cpl_t5_act_open_req6 *t5req6 = NULL; struct cpl_t6_act_open_req6 *t6req6 = NULL; struct sk_buff *skb; u64 opt0; u32 opt2; unsigned int mtu_idx; u32 wscale; int win, sizev4, sizev6, wrlen; struct sockaddr_in *la = (struct sockaddr_in *) &ep->com.local_addr; struct sockaddr_in *ra = (struct sockaddr_in *) &ep->com.remote_addr; struct sockaddr_in6 *la6 = (struct sockaddr_in6 *) &ep->com.local_addr; struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *) &ep->com.remote_addr; int ret; enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; u32 isn = (get_random_u32() & ~7UL) - 1; struct net_device *netdev; u64 params; netdev = ep->com.dev->rdev.lldi.ports[0]; switch (CHELSIO_CHIP_VERSION(adapter_type)) { case CHELSIO_T4: sizev4 = sizeof(struct cpl_act_open_req); sizev6 = sizeof(struct cpl_act_open_req6); break; case CHELSIO_T5: sizev4 = sizeof(struct cpl_t5_act_open_req); sizev6 = sizeof(struct cpl_t5_act_open_req6); break; case CHELSIO_T6: sizev4 = sizeof(struct cpl_t6_act_open_req); sizev6 = sizeof(struct cpl_t6_act_open_req6); break; default: pr_err("T%d Chip is not supported\n", CHELSIO_CHIP_VERSION(adapter_type)); return -EINVAL; } wrlen = (ep->com.remote_addr.ss_family == AF_INET) ? 
roundup(sizev4, 16) : roundup(sizev6, 16); pr_debug("ep %p atid %u\n", ep, ep->atid); skb = get_skb(NULL, wrlen, GFP_KERNEL); if (!skb) { pr_err("%s - failed to alloc skb\n", __func__); return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, enable_tcp_timestamps, (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); wscale = cxgb_compute_wscale(rcv_win); /* * Specify the largest window that will fit in opt0. The * remainder will be specified in the rx_data_ack. */ win = ep->rcv_win >> 10; if (win > RCV_BUFSIZ_M) win = RCV_BUFSIZ_M; opt0 = (nocong ? NO_CONG_F : 0) | KEEP_ALIVE_F | DELACK_F | WND_SCALE_V(wscale) | MSS_IDX_V(mtu_idx) | L2T_IDX_V(ep->l2t->idx) | TX_CHAN_V(ep->tx_chan) | SMAC_SEL_V(ep->smac_idx) | DSCP_V(ep->tos >> 2) | ULP_MODE_V(ULP_MODE_TCPDDP) | RCV_BUFSIZ_V(win); opt2 = RX_CHANNEL_V(0) | CCTRL_ECN_V(enable_ecn) | RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); if (enable_tcp_timestamps) opt2 |= TSTAMPS_EN_F; if (enable_tcp_sack) opt2 |= SACK_EN_F; if (wscale && enable_tcp_window_scaling) opt2 |= WND_SCALE_EN_F; if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) { if (peer2peer) isn += 4; opt2 |= T5_OPT_2_VALID_F; opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); opt2 |= T5_ISS_F; } params = cxgb4_select_ntuple(netdev, ep->l2t); if (ep->com.remote_addr.ss_family == AF_INET6) cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0], (const u32 *)&la6->sin6_addr.s6_addr, 1); t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); if (ep->com.remote_addr.ss_family == AF_INET) { switch (CHELSIO_CHIP_VERSION(adapter_type)) { case CHELSIO_T4: req = skb_put(skb, wrlen); INIT_TP_WR(req, 0); break; case CHELSIO_T5: t5req = skb_put(skb, wrlen); INIT_TP_WR(t5req, 0); req = (struct cpl_act_open_req *)t5req; break; case CHELSIO_T6: t6req = skb_put(skb, wrlen); INIT_TP_WR(t6req, 0); req = (struct cpl_act_open_req *)t6req; t5req = (struct cpl_t5_act_open_req *)t6req; break; default: pr_err("T%d Chip is not supported\n", CHELSIO_CHIP_VERSION(adapter_type)); ret = -EINVAL; goto clip_release; } OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14) | ep->atid))); req->local_port = la->sin_port; req->peer_port = ra->sin_port; req->local_ip = la->sin_addr.s_addr; req->peer_ip = ra->sin_addr.s_addr; req->opt0 = cpu_to_be64(opt0); if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { req->params = cpu_to_be32(params); req->opt2 = cpu_to_be32(opt2); } else { if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { t5req->params = cpu_to_be64(FILTER_TUPLE_V(params)); t5req->rsvd = cpu_to_be32(isn); pr_debug("snd_isn %u\n", t5req->rsvd); t5req->opt2 = cpu_to_be32(opt2); } else { t6req->params = cpu_to_be64(FILTER_TUPLE_V(params)); t6req->rsvd = cpu_to_be32(isn); pr_debug("snd_isn %u\n", t6req->rsvd); t6req->opt2 = cpu_to_be32(opt2); } } } else { switch (CHELSIO_CHIP_VERSION(adapter_type)) { case CHELSIO_T4: req6 = skb_put(skb, wrlen); INIT_TP_WR(req6, 0); break; case CHELSIO_T5: t5req6 = skb_put(skb, wrlen); INIT_TP_WR(t5req6, 0); req6 = (struct cpl_act_open_req6 *)t5req6; break; case CHELSIO_T6: t6req6 = skb_put(skb, wrlen); INIT_TP_WR(t6req6, 0); req6 = (struct cpl_act_open_req6 *)t6req6; t5req6 = (struct cpl_t5_act_open_req6 *)t6req6; break; default: pr_err("T%d Chip is not supported\n", CHELSIO_CHIP_VERSION(adapter_type)); ret = -EINVAL; goto clip_release; } OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, ((ep->rss_qid<<14)|ep->atid))); req6->local_port = la6->sin6_port; req6->peer_port = ra6->sin6_port; 
req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr)); req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8)); req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr)); req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8)); req6->opt0 = cpu_to_be64(opt0); if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { req6->params = cpu_to_be32(cxgb4_select_ntuple(netdev, ep->l2t)); req6->opt2 = cpu_to_be32(opt2); } else { if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { t5req6->params = cpu_to_be64(FILTER_TUPLE_V(params)); t5req6->rsvd = cpu_to_be32(isn); pr_debug("snd_isn %u\n", t5req6->rsvd); t5req6->opt2 = cpu_to_be32(opt2); } else { t6req6->params = cpu_to_be64(FILTER_TUPLE_V(params)); t6req6->rsvd = cpu_to_be32(isn); pr_debug("snd_isn %u\n", t6req6->rsvd); t6req6->opt2 = cpu_to_be32(opt2); } } } set_bit(ACT_OPEN_REQ, &ep->com.history); ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); clip_release: if (ret && ep->com.remote_addr.ss_family == AF_INET6) cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], (const u32 *)&la6->sin6_addr.s6_addr, 1); return ret; } static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, u8 mpa_rev_to_use) { int mpalen, wrlen, ret; struct fw_ofld_tx_data_wr *req; struct mpa_message *mpa; struct mpa_v2_conn_params mpa_v2_params; pr_debug("ep %p tid %u pd_len %d\n", ep, ep->hwtid, ep->plen); mpalen = sizeof(*mpa) + ep->plen; if (mpa_rev_to_use == 2) mpalen += sizeof(struct mpa_v2_conn_params); wrlen = roundup(mpalen + sizeof(*req), 16); skb = get_skb(skb, wrlen, GFP_KERNEL); if (!skb) { connect_reply_upcall(ep, -ENOMEM); return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); req = skb_put_zero(skb, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL_F | FW_WR_IMMDLEN_V(mpalen)); req->flowid_len16 = cpu_to_be32( FW_WR_FLOWID_V(ep->hwtid) | FW_WR_LEN16_V(wrlen >> 4)); req->plen = cpu_to_be32(mpalen); req->tunnel_to_proxy = cpu_to_be32( FW_OFLD_TX_DATA_WR_FLUSH_F | FW_OFLD_TX_DATA_WR_SHOVE_F); mpa = (struct mpa_message *)(req + 1); memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); mpa->flags = 0; if (crc_enabled) mpa->flags |= MPA_CRC; if (markers_enabled) { mpa->flags |= MPA_MARKERS; ep->mpa_attr.recv_marker_enabled = 1; } else { ep->mpa_attr.recv_marker_enabled = 0; } if (mpa_rev_to_use == 2) mpa->flags |= MPA_ENHANCED_RDMA_CONN; mpa->private_data_size = htons(ep->plen); mpa->revision = mpa_rev_to_use; if (mpa_rev_to_use == 1) { ep->tried_with_mpa_v1 = 1; ep->retry_with_mpa_v1 = 0; } if (mpa_rev_to_use == 2) { mpa->private_data_size = htons(ntohs(mpa->private_data_size) + sizeof(struct mpa_v2_conn_params)); pr_debug("initiator ird %u ord %u\n", ep->ird, ep->ord); mpa_v2_params.ird = htons((u16)ep->ird); mpa_v2_params.ord = htons((u16)ep->ord); if (peer2peer) { mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) mpa_v2_params.ord |= htons(MPA_V2_RDMA_WRITE_RTR); else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) mpa_v2_params.ord |= htons(MPA_V2_RDMA_READ_RTR); } memcpy(mpa->private_data, &mpa_v2_params, sizeof(struct mpa_v2_conn_params)); if (ep->plen) memcpy(mpa->private_data + sizeof(struct mpa_v2_conn_params), ep->mpa_pkt + sizeof(*mpa), ep->plen); } else if (ep->plen) memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen); /* * Reference the mpa skb. This ensures the data area * will remain in memory until the hw acks the tx. * Function fw4_ack() will deref it. 
*/ skb_get(skb); t4_set_arp_err_handler(skb, NULL, arp_failure_discard); ep->mpa_skb = skb; ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); if (ret) return ret; start_ep_timer(ep); __state_set(&ep->com, MPA_REQ_SENT); ep->mpa_attr.initiator = 1; ep->snd_seq += mpalen; return ret; } static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) { int mpalen, wrlen; struct fw_ofld_tx_data_wr *req; struct mpa_message *mpa; struct sk_buff *skb; struct mpa_v2_conn_params mpa_v2_params; pr_debug("ep %p tid %u pd_len %d\n", ep, ep->hwtid, ep->plen); mpalen = sizeof(*mpa) + plen; if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) mpalen += sizeof(struct mpa_v2_conn_params); wrlen = roundup(mpalen + sizeof(*req), 16); skb = get_skb(NULL, wrlen, GFP_KERNEL); if (!skb) { pr_err("%s - cannot alloc skb!\n", __func__); return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); req = skb_put_zero(skb, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL_F | FW_WR_IMMDLEN_V(mpalen)); req->flowid_len16 = cpu_to_be32( FW_WR_FLOWID_V(ep->hwtid) | FW_WR_LEN16_V(wrlen >> 4)); req->plen = cpu_to_be32(mpalen); req->tunnel_to_proxy = cpu_to_be32( FW_OFLD_TX_DATA_WR_FLUSH_F | FW_OFLD_TX_DATA_WR_SHOVE_F); mpa = (struct mpa_message *)(req + 1); memset(mpa, 0, sizeof(*mpa)); memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); mpa->flags = MPA_REJECT; mpa->revision = ep->mpa_attr.version; mpa->private_data_size = htons(plen); if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { mpa->flags |= MPA_ENHANCED_RDMA_CONN; mpa->private_data_size = htons(ntohs(mpa->private_data_size) + sizeof(struct mpa_v2_conn_params)); mpa_v2_params.ird = htons(((u16)ep->ird) | (peer2peer ? MPA_V2_PEER2PEER_MODEL : 0)); mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE ? MPA_V2_RDMA_WRITE_RTR : p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ ? MPA_V2_RDMA_READ_RTR : 0) : 0)); memcpy(mpa->private_data, &mpa_v2_params, sizeof(struct mpa_v2_conn_params)); if (ep->plen) memcpy(mpa->private_data + sizeof(struct mpa_v2_conn_params), pdata, plen); } else if (plen) memcpy(mpa->private_data, pdata, plen); /* * Reference the mpa skb again. This ensures the data area * will remain in memory until the hw acks the tx. * Function fw4_ack() will deref it. 
*/ skb_get(skb); set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure); ep->mpa_skb = skb; ep->snd_seq += mpalen; return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) { int mpalen, wrlen; struct fw_ofld_tx_data_wr *req; struct mpa_message *mpa; struct sk_buff *skb; struct mpa_v2_conn_params mpa_v2_params; pr_debug("ep %p tid %u pd_len %d\n", ep, ep->hwtid, ep->plen); mpalen = sizeof(*mpa) + plen; if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) mpalen += sizeof(struct mpa_v2_conn_params); wrlen = roundup(mpalen + sizeof(*req), 16); skb = get_skb(NULL, wrlen, GFP_KERNEL); if (!skb) { pr_err("%s - cannot alloc skb!\n", __func__); return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); req = skb_put_zero(skb, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL_F | FW_WR_IMMDLEN_V(mpalen)); req->flowid_len16 = cpu_to_be32( FW_WR_FLOWID_V(ep->hwtid) | FW_WR_LEN16_V(wrlen >> 4)); req->plen = cpu_to_be32(mpalen); req->tunnel_to_proxy = cpu_to_be32( FW_OFLD_TX_DATA_WR_FLUSH_F | FW_OFLD_TX_DATA_WR_SHOVE_F); mpa = (struct mpa_message *)(req + 1); memset(mpa, 0, sizeof(*mpa)); memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); mpa->flags = 0; if (ep->mpa_attr.crc_enabled) mpa->flags |= MPA_CRC; if (ep->mpa_attr.recv_marker_enabled) mpa->flags |= MPA_MARKERS; mpa->revision = ep->mpa_attr.version; mpa->private_data_size = htons(plen); if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { mpa->flags |= MPA_ENHANCED_RDMA_CONN; mpa->private_data_size = htons(ntohs(mpa->private_data_size) + sizeof(struct mpa_v2_conn_params)); mpa_v2_params.ird = htons((u16)ep->ird); mpa_v2_params.ord = htons((u16)ep->ord); if (peer2peer && (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED)) { mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) mpa_v2_params.ord |= htons(MPA_V2_RDMA_WRITE_RTR); else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) mpa_v2_params.ord |= htons(MPA_V2_RDMA_READ_RTR); } memcpy(mpa->private_data, &mpa_v2_params, sizeof(struct mpa_v2_conn_params)); if (ep->plen) memcpy(mpa->private_data + sizeof(struct mpa_v2_conn_params), pdata, plen); } else if (plen) memcpy(mpa->private_data, pdata, plen); /* * Reference the mpa skb. This ensures the data area * will remain in memory until the hw acks the tx. * Function fw4_ack() will deref it. 
*/ skb_get(skb); t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure); ep->mpa_skb = skb; __state_set(&ep->com, MPA_REP_SENT); ep->snd_seq += mpalen; return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct cpl_act_establish *req = cplhdr(skb); unsigned short tcp_opt = ntohs(req->tcp_opt); unsigned int tid = GET_TID(req); unsigned int atid = TID_TID_G(ntohl(req->tos_atid)); struct tid_info *t = dev->rdev.lldi.tids; int ret; ep = lookup_atid(t, atid); pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid, be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); mutex_lock(&ep->com.mutex); dst_confirm(ep->dst); /* setup the hwtid for this connection */ ep->hwtid = tid; cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family); insert_ep_tid(ep); ep->snd_seq = be32_to_cpu(req->snd_isn); ep->rcv_seq = be32_to_cpu(req->rcv_isn); ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); set_emss(ep, tcp_opt); /* dealloc the atid */ xa_erase_irq(&ep->com.dev->atids, atid); cxgb4_free_atid(t, atid); set_bit(ACT_ESTAB, &ep->com.history); /* start MPA negotiation */ ret = send_flowc(ep); if (ret) goto err; if (ep->retry_with_mpa_v1) ret = send_mpa_req(ep, skb, 1); else ret = send_mpa_req(ep, skb, mpa_rev); if (ret) goto err; mutex_unlock(&ep->com.mutex); return 0; err: mutex_unlock(&ep->com.mutex); connect_reply_upcall(ep, -ENOMEM); c4iw_ep_disconnect(ep, 0, GFP_KERNEL); return 0; } static void close_complete_upcall(struct c4iw_ep *ep, int status) { struct iw_cm_event event; pr_debug("ep %p tid %u\n", ep, ep->hwtid); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CLOSE; event.status = status; if (ep->com.cm_id) { pr_debug("close complete delivered ep %p cm_id %p tid %u\n", ep, ep->com.cm_id, ep->hwtid); ep->com.cm_id->event_handler(ep->com.cm_id, &event); deref_cm_id(&ep->com); set_bit(CLOSE_UPCALL, &ep->com.history); } } static void peer_close_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; pr_debug("ep %p tid %u\n", ep, ep->hwtid); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_DISCONNECT; if (ep->com.cm_id) { pr_debug("peer close delivered ep %p cm_id %p tid %u\n", ep, ep->com.cm_id, ep->hwtid); ep->com.cm_id->event_handler(ep->com.cm_id, &event); set_bit(DISCONN_UPCALL, &ep->com.history); } } static void peer_abort_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; pr_debug("ep %p tid %u\n", ep, ep->hwtid); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CLOSE; event.status = -ECONNRESET; if (ep->com.cm_id) { pr_debug("abort delivered ep %p cm_id %p tid %u\n", ep, ep->com.cm_id, ep->hwtid); ep->com.cm_id->event_handler(ep->com.cm_id, &event); deref_cm_id(&ep->com); set_bit(ABORT_UPCALL, &ep->com.history); } } static void connect_reply_upcall(struct c4iw_ep *ep, int status) { struct iw_cm_event event; pr_debug("ep %p tid %u status %d\n", ep, ep->hwtid, status); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CONNECT_REPLY; event.status = status; memcpy(&event.local_addr, &ep->com.local_addr, sizeof(ep->com.local_addr)); memcpy(&event.remote_addr, &ep->com.remote_addr, sizeof(ep->com.remote_addr)); if ((status == 0) || (status == -ECONNREFUSED)) { if (!ep->tried_with_mpa_v1) { /* this means MPA_v2 is used */ event.ord = ep->ird; event.ird = ep->ord; event.private_data_len = ep->plen - sizeof(struct mpa_v2_conn_params); event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + sizeof(struct mpa_v2_conn_params); } else { /* this means MPA_v1 
is used */ event.ord = cur_max_read_depth(ep->com.dev); event.ird = cur_max_read_depth(ep->com.dev); event.private_data_len = ep->plen; event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); } } pr_debug("ep %p tid %u status %d\n", ep, ep->hwtid, status); set_bit(CONN_RPL_UPCALL, &ep->com.history); ep->com.cm_id->event_handler(ep->com.cm_id, &event); if (status < 0) deref_cm_id(&ep->com); } static int connect_request_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; int ret; pr_debug("ep %p tid %u\n", ep, ep->hwtid); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CONNECT_REQUEST; memcpy(&event.local_addr, &ep->com.local_addr, sizeof(ep->com.local_addr)); memcpy(&event.remote_addr, &ep->com.remote_addr, sizeof(ep->com.remote_addr)); event.provider_data = ep; if (!ep->tried_with_mpa_v1) { /* this means MPA_v2 is used */ event.ord = ep->ord; event.ird = ep->ird; event.private_data_len = ep->plen - sizeof(struct mpa_v2_conn_params); event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + sizeof(struct mpa_v2_conn_params); } else { /* this means MPA_v1 is used. Send max supported */ event.ord = cur_max_read_depth(ep->com.dev); event.ird = cur_max_read_depth(ep->com.dev); event.private_data_len = ep->plen; event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); } c4iw_get_ep(&ep->com); ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id, &event); if (ret) c4iw_put_ep(&ep->com); set_bit(CONNREQ_UPCALL, &ep->com.history); c4iw_put_ep(&ep->parent_ep->com); return ret; } static void established_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; pr_debug("ep %p tid %u\n", ep, ep->hwtid); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_ESTABLISHED; event.ird = ep->ord; event.ord = ep->ird; if (ep->com.cm_id) { pr_debug("ep %p tid %u\n", ep, ep->hwtid); ep->com.cm_id->event_handler(ep->com.cm_id, &event); set_bit(ESTAB_UPCALL, &ep->com.history); } } static int update_rx_credits(struct c4iw_ep *ep, u32 credits) { struct sk_buff *skb; u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16); u32 credit_dack; pr_debug("ep %p tid %u credits %u\n", ep, ep->hwtid, credits); skb = get_skb(NULL, wrlen, GFP_KERNEL); if (!skb) { pr_err("update_rx_credits - cannot alloc skb!\n"); return 0; } /* * If we couldn't specify the entire rcv window at connection setup * due to the limit in the number of bits in the RCV_BUFSIZ field, * then add the overage in to the credits returned. */ if (ep->rcv_win > RCV_BUFSIZ_M * 1024) credits += ep->rcv_win - RCV_BUFSIZ_M * 1024; credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F | RX_DACK_MODE_V(dack_mode); cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx, credit_dack); c4iw_ofld_send(&ep->com.dev->rdev, skb); return credits; } #define RELAXED_IRD_NEGOTIATION 1 /* * process_mpa_reply - process streaming mode MPA reply * * Returns: * * 0 upon success indicating a connect request was delivered to the ULP * or the mpa request is incomplete but valid so far. * * 1 if a failure requires the caller to close the connection. * * 2 if a failure requires the caller to abort the connection. 
*/ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) { struct mpa_message *mpa; struct mpa_v2_conn_params *mpa_v2_params; u16 plen; u16 resp_ird, resp_ord; u8 rtr_mismatch = 0, insuff_ird = 0; struct c4iw_qp_attributes attrs; enum c4iw_qp_attr_mask mask; int err; int disconnect = 0; pr_debug("ep %p tid %u\n", ep, ep->hwtid); /* * If we get more than the supported amount of private data * then we must fail this connection. */ if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { err = -EINVAL; goto err_stop_timer; } /* * copy the new data into our accumulation buffer. */ skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), skb->len); ep->mpa_pkt_len += skb->len; /* * if we don't even have the mpa message, then bail. */ if (ep->mpa_pkt_len < sizeof(*mpa)) return 0; mpa = (struct mpa_message *) ep->mpa_pkt; /* Validate MPA header. */ if (mpa->revision > mpa_rev) { pr_err("%s MPA version mismatch. Local = %d, Received = %d\n", __func__, mpa_rev, mpa->revision); err = -EPROTO; goto err_stop_timer; } if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { err = -EPROTO; goto err_stop_timer; } plen = ntohs(mpa->private_data_size); /* * Fail if there's too much private data. */ if (plen > MPA_MAX_PRIVATE_DATA) { err = -EPROTO; goto err_stop_timer; } /* * If plen does not account for pkt size */ if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { err = -EPROTO; goto err_stop_timer; } ep->plen = (u8) plen; /* * If we don't have all the pdata yet, then bail. * We'll continue process when more data arrives. */ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) return 0; if (mpa->flags & MPA_REJECT) { err = -ECONNREFUSED; goto err_stop_timer; } /* * Stop mpa timer. If it expired, then * we ignore the MPA reply. process_timeout() * will abort the connection. */ if (stop_ep_timer(ep)) return 0; /* * If we get here we have accumulated the entire mpa * start reply message including private data. And * the MPA header is valid. */ __state_set(&ep->com, FPDU_MODE); ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; ep->mpa_attr.version = mpa->revision; ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; if (mpa->revision == 2) { ep->mpa_attr.enhanced_rdma_conn = mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; if (ep->mpa_attr.enhanced_rdma_conn) { mpa_v2_params = (struct mpa_v2_conn_params *) (ep->mpa_pkt + sizeof(*mpa)); resp_ird = ntohs(mpa_v2_params->ird) & MPA_V2_IRD_ORD_MASK; resp_ord = ntohs(mpa_v2_params->ord) & MPA_V2_IRD_ORD_MASK; pr_debug("responder ird %u ord %u ep ird %u ord %u\n", resp_ird, resp_ord, ep->ird, ep->ord); /* * This is a double-check. 
Ideally, below checks are * not required since ird/ord stuff has been taken * care of in c4iw_accept_cr */ if (ep->ird < resp_ord) { if (RELAXED_IRD_NEGOTIATION && resp_ord <= ep->com.dev->rdev.lldi.max_ordird_qp) ep->ird = resp_ord; else insuff_ird = 1; } else if (ep->ird > resp_ord) { ep->ird = resp_ord; } if (ep->ord > resp_ird) { if (RELAXED_IRD_NEGOTIATION) ep->ord = resp_ird; else insuff_ird = 1; } if (insuff_ird) { err = -ENOMEM; ep->ird = resp_ord; ep->ord = resp_ird; } if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) { if (ntohs(mpa_v2_params->ord) & MPA_V2_RDMA_WRITE_RTR) ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_RDMA_WRITE; else if (ntohs(mpa_v2_params->ord) & MPA_V2_RDMA_READ_RTR) ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; } } } else if (mpa->revision == 1) if (peer2peer) ep->mpa_attr.p2p_type = p2p_type; pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n", ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, ep->mpa_attr.p2p_type, p2p_type); /* * If responder's RTR does not match with that of initiator, assign * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not * generated when moving QP to RTS state. * A TERM message will be sent after QP has moved to RTS state */ if ((ep->mpa_attr.version == 2) && peer2peer && (ep->mpa_attr.p2p_type != p2p_type)) { ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; rtr_mismatch = 1; } attrs.mpa_attr = ep->mpa_attr; attrs.max_ird = ep->ird; attrs.max_ord = ep->ord; attrs.llp_stream_handle = ep; attrs.next_state = C4IW_QP_STATE_RTS; mask = C4IW_QP_ATTR_NEXT_STATE | C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; /* bind QP and TID with INIT_WR */ err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); if (err) goto err; /* * If responder's RTR requirement did not match with what initiator * supports, generate TERM message */ if (rtr_mismatch) { pr_err("%s: RTR mismatch, sending TERM\n", __func__); attrs.layer_etype = LAYER_MPA | DDP_LLP; attrs.ecode = MPA_NOMATCH_RTR; attrs.next_state = C4IW_QP_STATE_TERMINATE; attrs.send_term = 1; err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); err = -ENOMEM; disconnect = 1; goto out; } /* * Generate TERM if initiator IRD is not sufficient for responder * provided ORD. Currently, we do the same behaviour even when * responder provided IRD is also not sufficient as regards to * initiator ORD. */ if (insuff_ird) { pr_err("%s: Insufficient IRD, sending TERM\n", __func__); attrs.layer_etype = LAYER_MPA | DDP_LLP; attrs.ecode = MPA_INSUFF_IRD; attrs.next_state = C4IW_QP_STATE_TERMINATE; attrs.send_term = 1; err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); err = -ENOMEM; disconnect = 1; goto out; } goto out; err_stop_timer: stop_ep_timer(ep); err: disconnect = 2; out: connect_reply_upcall(ep, err); return disconnect; } /* * process_mpa_request - process streaming mode MPA request * * Returns: * * 0 upon success indicating a connect request was delivered to the ULP * or the mpa request is incomplete but valid so far. * * 1 if a failure requires the caller to close the connection. * * 2 if a failure requires the caller to abort the connection. 
*/ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) { struct mpa_message *mpa; struct mpa_v2_conn_params *mpa_v2_params; u16 plen; pr_debug("ep %p tid %u\n", ep, ep->hwtid); /* * If we get more than the supported amount of private data * then we must fail this connection. */ if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) goto err_stop_timer; pr_debug("enter (%s line %u)\n", __FILE__, __LINE__); /* * Copy the new data into our accumulation buffer. */ skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), skb->len); ep->mpa_pkt_len += skb->len; /* * If we don't even have the mpa message, then bail. * We'll continue process when more data arrives. */ if (ep->mpa_pkt_len < sizeof(*mpa)) return 0; pr_debug("enter (%s line %u)\n", __FILE__, __LINE__); mpa = (struct mpa_message *) ep->mpa_pkt; /* * Validate MPA Header. */ if (mpa->revision > mpa_rev) { pr_err("%s MPA version mismatch. Local = %d, Received = %d\n", __func__, mpa_rev, mpa->revision); goto err_stop_timer; } if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) goto err_stop_timer; plen = ntohs(mpa->private_data_size); /* * Fail if there's too much private data. */ if (plen > MPA_MAX_PRIVATE_DATA) goto err_stop_timer; /* * If plen does not account for pkt size */ if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) goto err_stop_timer; ep->plen = (u8) plen; /* * If we don't have all the pdata yet, then bail. */ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) return 0; /* * If we get here we have accumulated the entire mpa * start reply message including private data. */ ep->mpa_attr.initiator = 0; ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; ep->mpa_attr.recv_marker_enabled = markers_enabled; ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; ep->mpa_attr.version = mpa->revision; if (mpa->revision == 1) ep->tried_with_mpa_v1 = 1; ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; if (mpa->revision == 2) { ep->mpa_attr.enhanced_rdma_conn = mpa->flags & MPA_ENHANCED_RDMA_CONN ? 
1 : 0; if (ep->mpa_attr.enhanced_rdma_conn) { mpa_v2_params = (struct mpa_v2_conn_params *) (ep->mpa_pkt + sizeof(*mpa)); ep->ird = ntohs(mpa_v2_params->ird) & MPA_V2_IRD_ORD_MASK; ep->ird = min_t(u32, ep->ird, cur_max_read_depth(ep->com.dev)); ep->ord = ntohs(mpa_v2_params->ord) & MPA_V2_IRD_ORD_MASK; ep->ord = min_t(u32, ep->ord, cur_max_read_depth(ep->com.dev)); pr_debug("initiator ird %u ord %u\n", ep->ird, ep->ord); if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) if (peer2peer) { if (ntohs(mpa_v2_params->ord) & MPA_V2_RDMA_WRITE_RTR) ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_RDMA_WRITE; else if (ntohs(mpa_v2_params->ord) & MPA_V2_RDMA_READ_RTR) ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; } } } else if (mpa->revision == 1) if (peer2peer) ep->mpa_attr.p2p_type = p2p_type; pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n", ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, ep->mpa_attr.p2p_type); __state_set(&ep->com, MPA_REQ_RCVD); /* drive upcall */ mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING); if (ep->parent_ep->com.state != DEAD) { if (connect_request_upcall(ep)) goto err_unlock_parent; } else { goto err_unlock_parent; } mutex_unlock(&ep->parent_ep->com.mutex); return 0; err_unlock_parent: mutex_unlock(&ep->parent_ep->com.mutex); goto err_out; err_stop_timer: (void)stop_ep_timer(ep); err_out: return 2; } static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct cpl_rx_data *hdr = cplhdr(skb); unsigned int dlen = ntohs(hdr->len); unsigned int tid = GET_TID(hdr); __u8 status = hdr->status; int disconnect = 0; ep = get_ep_from_tid(dev, tid); if (!ep) return 0; pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen); skb_pull(skb, sizeof(*hdr)); skb_trim(skb, dlen); mutex_lock(&ep->com.mutex); switch (ep->com.state) { case MPA_REQ_SENT: update_rx_credits(ep, dlen); ep->rcv_seq += dlen; disconnect = process_mpa_reply(ep, skb); break; case MPA_REQ_WAIT: update_rx_credits(ep, dlen); ep->rcv_seq += dlen; disconnect = process_mpa_request(ep, skb); break; case FPDU_MODE: { struct c4iw_qp_attributes attrs; update_rx_credits(ep, dlen); if (status) pr_err("%s Unexpected streaming data." \ " qpid %u ep %p state %d tid %u status %d\n", __func__, ep->com.qp->wq.sq.qid, ep, ep->com.state, ep->hwtid, status); attrs.next_state = C4IW_QP_STATE_TERMINATE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); disconnect = 1; break; } default: break; } mutex_unlock(&ep->com.mutex); if (disconnect) c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL); c4iw_put_ep(&ep->com); return 0; } static void complete_cached_srq_buffers(struct c4iw_ep *ep, u32 srqidx) { enum chip_type adapter_type; adapter_type = ep->com.dev->rdev.lldi.adapter_type; /* * If this TCB had a srq buffer cached, then we must complete * it. For user mode, that means saving the srqidx in the * user/kernel status page for this qp. For kernel mode, just * synthesize the CQE now. 
*/ if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T5 && srqidx) { if (ep->com.qp->ibqp.uobject) t4_set_wq_in_error(&ep->com.qp->wq, srqidx); else c4iw_flush_srqidx(ep->com.qp, srqidx); } } static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { u32 srqidx; struct c4iw_ep *ep; struct cpl_abort_rpl_rss6 *rpl = cplhdr(skb); int release = 0; unsigned int tid = GET_TID(rpl); ep = get_ep_from_tid(dev, tid); if (!ep) { pr_warn("Abort rpl to freed endpoint\n"); return 0; } if (ep->com.qp && ep->com.qp->srq) { srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(rpl->srqidx_status)); complete_cached_srq_buffers(ep, srqidx ? srqidx : ep->srqe_idx); } pr_debug("ep %p tid %u\n", ep, ep->hwtid); mutex_lock(&ep->com.mutex); switch (ep->com.state) { case ABORTING: c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); __state_set(&ep->com, DEAD); release = 1; break; default: pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state); break; } mutex_unlock(&ep->com.mutex); if (release) { close_complete_upcall(ep, -ECONNRESET); release_ep_resources(ep); } c4iw_put_ep(&ep->com); return 0; } static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) { struct sk_buff *skb; struct fw_ofld_connection_wr *req; unsigned int mtu_idx; u32 wscale; struct sockaddr_in *sin; int win; skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); req = __skb_put_zero(skb, sizeof(*req)); req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR)); req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); req->le.filter = cpu_to_be32(cxgb4_select_ntuple( ep->com.dev->rdev.lldi.ports[0], ep->l2t)); sin = (struct sockaddr_in *)&ep->com.local_addr; req->le.lport = sin->sin_port; req->le.u.ipv4.lip = sin->sin_addr.s_addr; sin = (struct sockaddr_in *)&ep->com.remote_addr; req->le.pport = sin->sin_port; req->le.u.ipv4.pip = sin->sin_addr.s_addr; req->tcb.t_state_to_astid = htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) | FW_OFLD_CONNECTION_WR_ASTID_V(atid)); req->tcb.cplrxdataack_cplpassacceptrpl = htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F); req->tcb.tx_max = (__force __be32) jiffies; req->tcb.rcv_adv = htons(1); cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, enable_tcp_timestamps, (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); wscale = cxgb_compute_wscale(rcv_win); /* * Specify the largest window that will fit in opt0. The * remainder will be specified in the rx_data_ack. */ win = ep->rcv_win >> 10; if (win > RCV_BUFSIZ_M) win = RCV_BUFSIZ_M; req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F | (nocong ? 
NO_CONG_F : 0) | KEEP_ALIVE_F | DELACK_F | WND_SCALE_V(wscale) | MSS_IDX_V(mtu_idx) | L2T_IDX_V(ep->l2t->idx) | TX_CHAN_V(ep->tx_chan) | SMAC_SEL_V(ep->smac_idx) | DSCP_V(ep->tos >> 2) | ULP_MODE_V(ULP_MODE_TCPDDP) | RCV_BUFSIZ_V(win)); req->tcb.opt2 = (__force __be32) (PACE_V(1) | TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | RX_CHANNEL_V(0) | CCTRL_ECN_V(enable_ecn) | RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid)); if (enable_tcp_timestamps) req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F; if (enable_tcp_sack) req->tcb.opt2 |= (__force __be32)SACK_EN_F; if (wscale && enable_tcp_window_scaling) req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F; req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0); req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2); set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); set_bit(ACT_OFLD_CONN, &ep->com.history); return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } /* * Some of the error codes above implicitly indicate that there is no TID * allocated with the result of an ACT_OPEN. We use this predicate to make * that explicit. */ static inline int act_open_has_tid(int status) { return (status != CPL_ERR_TCAM_PARITY && status != CPL_ERR_TCAM_MISS && status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST_SYNRECV && status != CPL_ERR_CONN_EXIST); } static char *neg_adv_str(unsigned int status) { switch (status) { case CPL_ERR_RTX_NEG_ADVICE: return "Retransmit timeout"; case CPL_ERR_PERSIST_NEG_ADVICE: return "Persist timeout"; case CPL_ERR_KEEPALV_NEG_ADVICE: return "Keepalive timeout"; default: return "Unknown"; } } static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi) { ep->snd_win = snd_win; ep->rcv_win = rcv_win; pr_debug("snd_win %d rcv_win %d\n", ep->snd_win, ep->rcv_win); } #define ACT_OPEN_RETRY_COUNT 2 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, struct dst_entry *dst, struct c4iw_dev *cdev, bool clear_mpa_v1, enum chip_type adapter_type, u8 tos) { struct neighbour *n; int err, step; struct net_device *pdev; n = dst_neigh_lookup(dst, peer_ip); if (!n) return -ENODEV; rcu_read_lock(); err = -ENOMEM; if (n->dev->flags & IFF_LOOPBACK) { if (iptype == 4) pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip); else if (IS_ENABLED(CONFIG_IPV6)) for_each_netdev(&init_net, pdev) { if (ipv6_chk_addr(&init_net, (struct in6_addr *)peer_ip, pdev, 1)) break; } else pdev = NULL; if (!pdev) { err = -ENODEV; goto out; } ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, n, pdev, rt_tos2priority(tos)); if (!ep->l2t) { dev_put(pdev); goto out; } ep->mtu = pdev->mtu; ep->tx_chan = cxgb4_port_chan(pdev); ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx; step = cdev->rdev.lldi.ntxq / cdev->rdev.lldi.nchan; ep->txq_idx = cxgb4_port_idx(pdev) * step; step = cdev->rdev.lldi.nrxq / cdev->rdev.lldi.nchan; ep->ctrlq_idx = cxgb4_port_idx(pdev); ep->rss_qid = cdev->rdev.lldi.rxq_ids[ cxgb4_port_idx(pdev) * step]; set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); dev_put(pdev); } else { pdev = get_real_dev(n->dev); ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, n, pdev, rt_tos2priority(tos)); if (!ep->l2t) goto out; ep->mtu = dst_mtu(dst); ep->tx_chan = cxgb4_port_chan(pdev); ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx; step = cdev->rdev.lldi.ntxq / cdev->rdev.lldi.nchan; ep->txq_idx = cxgb4_port_idx(pdev) * step; ep->ctrlq_idx = cxgb4_port_idx(pdev); step = cdev->rdev.lldi.nrxq / cdev->rdev.lldi.nchan; ep->rss_qid = cdev->rdev.lldi.rxq_ids[ cxgb4_port_idx(pdev) * step]; 
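/* set_tcp_window() simply copies the driver-wide snd_win/rcv_win values
 * into this endpoint.
 */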
set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); if (clear_mpa_v1) { ep->retry_with_mpa_v1 = 0; ep->tried_with_mpa_v1 = 0; } } err = 0; out: rcu_read_unlock(); neigh_release(n); return err; } static int c4iw_reconnect(struct c4iw_ep *ep) { int err = 0; int size = 0; struct sockaddr_in *laddr = (struct sockaddr_in *) &ep->com.cm_id->m_local_addr; struct sockaddr_in *raddr = (struct sockaddr_in *) &ep->com.cm_id->m_remote_addr; struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *) &ep->com.cm_id->m_local_addr; struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *) &ep->com.cm_id->m_remote_addr; int iptype; __u8 *ra; pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id); c4iw_init_wr_wait(ep->com.wr_waitp); /* When MPA revision is different on nodes, the node with MPA_rev=2 * tries to reconnect with MPA_rev 1 for the same EP through * c4iw_reconnect(), where the same EP is assigned with new tid for * further connection establishment. As we are using the same EP pointer * for reconnect, few skbs are used during the previous c4iw_connect(), * which leaves the EP with inadequate skbs for further * c4iw_reconnect(), Further causing a crash due to an empty * skb_list() during peer_abort(). Allocate skbs which is already used. */ size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list)); if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) { err = -ENOMEM; goto fail1; } /* * Allocate an active TID to initiate a TCP connection. */ ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); if (ep->atid == -1) { pr_err("%s - cannot alloc atid\n", __func__); err = -ENOMEM; goto fail2; } err = xa_insert_irq(&ep->com.dev->atids, ep->atid, ep, GFP_KERNEL); if (err) goto fail2a; /* find a route */ if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) { ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev, laddr->sin_addr.s_addr, raddr->sin_addr.s_addr, laddr->sin_port, raddr->sin_port, ep->com.cm_id->tos); iptype = 4; ra = (__u8 *)&raddr->sin_addr; } else { ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi, get_real_dev, laddr6->sin6_addr.s6_addr, raddr6->sin6_addr.s6_addr, laddr6->sin6_port, raddr6->sin6_port, ep->com.cm_id->tos, raddr6->sin6_scope_id); iptype = 6; ra = (__u8 *)&raddr6->sin6_addr; } if (!ep->dst) { pr_err("%s - cannot find route\n", __func__); err = -EHOSTUNREACH; goto fail3; } err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false, ep->com.dev->rdev.lldi.adapter_type, ep->com.cm_id->tos); if (err) { pr_err("%s - cannot alloc l2e\n", __func__); goto fail4; } pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, ep->l2t->idx); state_set(&ep->com, CONNECTING); ep->tos = ep->com.cm_id->tos; /* send connect request to rnic */ err = send_connect(ep); if (!err) goto out; cxgb4_l2t_release(ep->l2t); fail4: dst_release(ep->dst); fail3: xa_erase_irq(&ep->com.dev->atids, ep->atid); fail2a: cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); fail2: /* * remember to send notification to upper layer. * We are in here so the upper layer is not aware that this is * re-connect attempt and so, upper layer is still waiting for * response of 1st connect request. 
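 * Completing that request with -ECONNRESET below keeps the CM from
 * waiting forever on a reply that will never arrive.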
*/ connect_reply_upcall(ep, -ECONNRESET); fail1: c4iw_put_ep(&ep->com); out: return err; } static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct cpl_act_open_rpl *rpl = cplhdr(skb); unsigned int atid = TID_TID_G(AOPEN_ATID_G( ntohl(rpl->atid_status))); struct tid_info *t = dev->rdev.lldi.tids; int status = AOPEN_STATUS_G(ntohl(rpl->atid_status)); struct sockaddr_in *la; struct sockaddr_in *ra; struct sockaddr_in6 *la6; struct sockaddr_in6 *ra6; int ret = 0; ep = lookup_atid(t, atid); la = (struct sockaddr_in *)&ep->com.local_addr; ra = (struct sockaddr_in *)&ep->com.remote_addr; la6 = (struct sockaddr_in6 *)&ep->com.local_addr; ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr; pr_debug("ep %p atid %u status %u errno %d\n", ep, atid, status, status2errno(status)); if (cxgb_is_neg_adv(status)) { pr_debug("Connection problems for atid %u status %u (%s)\n", atid, status, neg_adv_str(status)); ep->stats.connect_neg_adv++; mutex_lock(&dev->rdev.stats.lock); dev->rdev.stats.neg_adv++; mutex_unlock(&dev->rdev.stats.lock); return 0; } set_bit(ACT_OPEN_RPL, &ep->com.history); /* * Log interesting failures. */ switch (status) { case CPL_ERR_CONN_RESET: case CPL_ERR_CONN_TIMEDOUT: break; case CPL_ERR_TCAM_FULL: mutex_lock(&dev->rdev.stats.lock); dev->rdev.stats.tcam_full++; mutex_unlock(&dev->rdev.stats.lock); if (ep->com.local_addr.ss_family == AF_INET && dev->rdev.lldi.enable_fw_ofld_conn) { ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G( ntohl(rpl->atid_status)))); if (ret) goto fail; return 0; } break; case CPL_ERR_CONN_EXIST: if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { set_bit(ACT_RETRY_INUSE, &ep->com.history); if (ep->com.remote_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &ep->com.local_addr; cxgb4_clip_release( ep->com.dev->rdev.lldi.ports[0], (const u32 *) &sin6->sin6_addr.s6_addr, 1); } xa_erase_irq(&ep->com.dev->atids, atid); cxgb4_free_atid(t, atid); dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); c4iw_reconnect(ep); return 0; } break; default: if (ep->com.local_addr.ss_family == AF_INET) { pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n", atid, status, status2errno(status), &la->sin_addr.s_addr, ntohs(la->sin_port), &ra->sin_addr.s_addr, ntohs(ra->sin_port)); } else { pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n", atid, status, status2errno(status), la6->sin6_addr.s6_addr, ntohs(la6->sin6_port), ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port)); } break; } fail: connect_reply_upcall(ep, status2errno(status)); state_set(&ep->com, DEAD); if (ep->com.remote_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ep->com.local_addr; cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, 1); } if (status && act_open_has_tid(status)) cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl), ep->com.local_addr.ss_family); xa_erase_irq(&ep->com.dev->atids, atid); cxgb4_free_atid(t, atid); dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); c4iw_put_ep(&ep->com); return 0; } static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_pass_open_rpl *rpl = cplhdr(skb); unsigned int stid = GET_TID(rpl); struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); if (!ep) { pr_warn("%s stid %d lookup failure!\n", __func__, stid); goto out; } pr_debug("ep %p status %d error %d\n", ep, rpl->status, status2errno(rpl->status)); c4iw_wake_up_noref(ep->com.wr_waitp, 
status2errno(rpl->status)); c4iw_put_ep(&ep->com); out: return 0; } static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); unsigned int stid = GET_TID(rpl); struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); if (!ep) { pr_warn("%s stid %d lookup failure!\n", __func__, stid); goto out; } pr_debug("ep %p\n", ep); c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status)); c4iw_put_ep(&ep->com); out: return 0; } static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, struct cpl_pass_accept_req *req) { struct cpl_pass_accept_rpl *rpl; unsigned int mtu_idx; u64 opt0; u32 opt2; u32 wscale; struct cpl_t5_pass_accept_rpl *rpl5 = NULL; int win; enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; pr_debug("ep %p tid %u\n", ep, ep->hwtid); cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, enable_tcp_timestamps && req->tcpopt.tstamp, (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); wscale = cxgb_compute_wscale(rcv_win); /* * Specify the largest window that will fit in opt0. The * remainder will be specified in the rx_data_ack. */ win = ep->rcv_win >> 10; if (win > RCV_BUFSIZ_M) win = RCV_BUFSIZ_M; opt0 = (nocong ? NO_CONG_F : 0) | KEEP_ALIVE_F | DELACK_F | WND_SCALE_V(wscale) | MSS_IDX_V(mtu_idx) | L2T_IDX_V(ep->l2t->idx) | TX_CHAN_V(ep->tx_chan) | SMAC_SEL_V(ep->smac_idx) | DSCP_V(ep->tos >> 2) | ULP_MODE_V(ULP_MODE_TCPDDP) | RCV_BUFSIZ_V(win); opt2 = RX_CHANNEL_V(0) | RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); if (enable_tcp_timestamps && req->tcpopt.tstamp) opt2 |= TSTAMPS_EN_F; if (enable_tcp_sack && req->tcpopt.sack) opt2 |= SACK_EN_F; if (wscale && enable_tcp_window_scaling) opt2 |= WND_SCALE_EN_F; if (enable_ecn) { const struct tcphdr *tcph; u32 hlen = ntohl(req->hdr_len); if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5) tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen); else tcph = (const void *)(req + 1) + T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen); if (tcph->ece && tcph->cwr) opt2 |= CCTRL_ECN_V(1); } if (!is_t4(adapter_type)) { u32 isn = (get_random_u32() & ~7UL) - 1; skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL); rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16)); rpl = (void *)rpl5; INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid); opt2 |= T5_OPT_2_VALID_F; opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); opt2 |= T5_ISS_F; if (peer2peer) isn += 4; rpl5->iss = cpu_to_be32(isn); pr_debug("iss %u\n", be32_to_cpu(rpl5->iss)); } else { skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); rpl = __skb_put_zero(skb, sizeof(*rpl)); INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid); } rpl->opt0 = cpu_to_be64(opt0); rpl->opt2 = cpu_to_be32(opt2); set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure); return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb) { pr_debug("c4iw_dev %p tid %u\n", dev, hwtid); skb_trim(skb, sizeof(struct cpl_tid_release)); release_tid(&dev->rdev, hwtid, skb); return; } static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *child_ep = NULL, *parent_ep; struct cpl_pass_accept_req *req = cplhdr(skb); unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); struct tid_info *t = dev->rdev.lldi.tids; unsigned int hwtid = GET_TID(req); struct dst_entry *dst; __u8 local_ip[16], peer_ip[16]; __be16 local_port, peer_port; struct sockaddr_in6 *sin6; int err; u16 
peer_mss = ntohs(req->tcpopt.mss); int iptype; unsigned short hdrs; u8 tos; parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid); if (!parent_ep) { pr_err("%s connect request on invalid stid %d\n", __func__, stid); goto reject; } if (state_read(&parent_ep->com) != LISTEN) { pr_err("%s - listening ep not in LISTEN\n", __func__); goto reject; } if (parent_ep->com.cm_id->tos_set) tos = parent_ep->com.cm_id->tos; else tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, &iptype, local_ip, peer_ip, &local_port, &peer_port); /* Find output route */ if (iptype == 4) { pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n" , parent_ep, hwtid, local_ip, peer_ip, ntohs(local_port), ntohs(peer_port), peer_mss); dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev, *(__be32 *)local_ip, *(__be32 *)peer_ip, local_port, peer_port, tos); } else { pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n" , parent_ep, hwtid, local_ip, peer_ip, ntohs(local_port), ntohs(peer_port), peer_mss); dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev, local_ip, peer_ip, local_port, peer_port, tos, ((struct sockaddr_in6 *) &parent_ep->com.local_addr)->sin6_scope_id); } if (!dst) { pr_err("%s - failed to find dst entry!\n", __func__); goto reject; } child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); if (!child_ep) { pr_err("%s - failed to allocate ep entry!\n", __func__); dst_release(dst); goto reject; } err = import_ep(child_ep, iptype, peer_ip, dst, dev, false, parent_ep->com.dev->rdev.lldi.adapter_type, tos); if (err) { pr_err("%s - failed to allocate l2t entry!\n", __func__); dst_release(dst); kfree(child_ep); goto reject; } hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + sizeof(struct tcphdr) + ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 
12 : 0); if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) child_ep->mtu = peer_mss + hdrs; skb_queue_head_init(&child_ep->com.ep_skb_list); if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF)) goto fail; state_set(&child_ep->com, CONNECTING); child_ep->com.dev = dev; child_ep->com.cm_id = NULL; if (iptype == 4) { struct sockaddr_in *sin = (struct sockaddr_in *) &child_ep->com.local_addr; sin->sin_family = AF_INET; sin->sin_port = local_port; sin->sin_addr.s_addr = *(__be32 *)local_ip; sin = (struct sockaddr_in *)&child_ep->com.local_addr; sin->sin_family = AF_INET; sin->sin_port = ((struct sockaddr_in *) &parent_ep->com.local_addr)->sin_port; sin->sin_addr.s_addr = *(__be32 *)local_ip; sin = (struct sockaddr_in *)&child_ep->com.remote_addr; sin->sin_family = AF_INET; sin->sin_port = peer_port; sin->sin_addr.s_addr = *(__be32 *)peer_ip; } else { sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; sin6->sin6_family = PF_INET6; sin6->sin6_port = local_port; memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; sin6->sin6_family = PF_INET6; sin6->sin6_port = ((struct sockaddr_in6 *) &parent_ep->com.local_addr)->sin6_port; memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr; sin6->sin6_family = PF_INET6; sin6->sin6_port = peer_port; memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); } c4iw_get_ep(&parent_ep->com); child_ep->parent_ep = parent_ep; child_ep->tos = tos; child_ep->dst = dst; child_ep->hwtid = hwtid; pr_debug("tx_chan %u smac_idx %u rss_qid %u\n", child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); timer_setup(&child_ep->timer, ep_timeout, 0); cxgb4_insert_tid(t, child_ep, hwtid, child_ep->com.local_addr.ss_family); insert_ep_tid(child_ep); if (accept_cr(child_ep, skb, req)) { c4iw_put_ep(&parent_ep->com); release_ep_resources(child_ep); } else { set_bit(PASS_ACCEPT_REQ, &child_ep->com.history); } if (iptype == 6) { sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, 1); } goto out; fail: c4iw_put_ep(&child_ep->com); reject: reject_cr(dev, hwtid, skb); out: if (parent_ep) c4iw_put_ep(&parent_ep->com); return 0; } static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct cpl_pass_establish *req = cplhdr(skb); unsigned int tid = GET_TID(req); int ret; u16 tcp_opt = ntohs(req->tcp_opt); ep = get_ep_from_tid(dev, tid); if (!ep) return 0; pr_debug("ep %p tid %u\n", ep, ep->hwtid); ep->snd_seq = be32_to_cpu(req->snd_isn); ep->rcv_seq = be32_to_cpu(req->rcv_isn); ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid, tcp_opt); set_emss(ep, tcp_opt); dst_confirm(ep->dst); mutex_lock(&ep->com.mutex); ep->com.state = MPA_REQ_WAIT; start_ep_timer(ep); set_bit(PASS_ESTAB, &ep->com.history); ret = send_flowc(ep); mutex_unlock(&ep->com.mutex); if (ret) c4iw_ep_disconnect(ep, 1, GFP_KERNEL); c4iw_put_ep(&ep->com); return 0; } static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_peer_close *hdr = cplhdr(skb); struct c4iw_ep *ep; struct c4iw_qp_attributes attrs; int disconnect = 1; int release = 0; unsigned int tid = GET_TID(hdr); int ret; ep = get_ep_from_tid(dev, tid); if (!ep) return 0; pr_debug("ep %p tid %u\n", ep, ep->hwtid); dst_confirm(ep->dst); set_bit(PEER_CLOSE, &ep->com.history); mutex_lock(&ep->com.mutex); switch (ep->com.state) { case MPA_REQ_WAIT: 
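/* Peer closed before we ever received its MPA request; just mark the
 * connection CLOSING and let the common disconnect path below run.
 */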
__state_set(&ep->com, CLOSING); break; case MPA_REQ_SENT: __state_set(&ep->com, CLOSING); connect_reply_upcall(ep, -ECONNRESET); break; case MPA_REQ_RCVD: /* * We're gonna mark this puppy DEAD, but keep * the reference on it until the ULP accepts or * rejects the CR. Also wake up anyone waiting * in rdma connection migration (see c4iw_accept_cr()). */ __state_set(&ep->com, CLOSING); pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid); c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); break; case MPA_REP_SENT: __state_set(&ep->com, CLOSING); pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid); c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); break; case FPDU_MODE: start_ep_timer(ep); __state_set(&ep->com, CLOSING); attrs.next_state = C4IW_QP_STATE_CLOSING; ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); if (ret != -ECONNRESET) { peer_close_upcall(ep); disconnect = 1; } break; case ABORTING: disconnect = 0; break; case CLOSING: __state_set(&ep->com, MORIBUND); disconnect = 0; break; case MORIBUND: (void)stop_ep_timer(ep); if (ep->com.cm_id && ep->com.qp) { attrs.next_state = C4IW_QP_STATE_IDLE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } close_complete_upcall(ep, 0); __state_set(&ep->com, DEAD); release = 1; disconnect = 0; break; case DEAD: disconnect = 0; break; default: WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); } mutex_unlock(&ep->com.mutex); if (disconnect) c4iw_ep_disconnect(ep, 0, GFP_KERNEL); if (release) release_ep_resources(ep); c4iw_put_ep(&ep->com); return 0; } static void finish_peer_abort(struct c4iw_dev *dev, struct c4iw_ep *ep) { complete_cached_srq_buffers(ep, ep->srqe_idx); if (ep->com.cm_id && ep->com.qp) { struct c4iw_qp_attributes attrs; attrs.next_state = C4IW_QP_STATE_ERROR; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } peer_abort_upcall(ep); release_ep_resources(ep); c4iw_put_ep(&ep->com); } static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_abort_req_rss6 *req = cplhdr(skb); struct c4iw_ep *ep; struct sk_buff *rpl_skb; struct c4iw_qp_attributes attrs; int ret; int release = 0; unsigned int tid = GET_TID(req); u8 status; u32 srqidx; u32 len = roundup(sizeof(struct cpl_abort_rpl), 16); ep = get_ep_from_tid(dev, tid); if (!ep) return 0; status = ABORT_RSS_STATUS_G(be32_to_cpu(req->srqidx_status)); if (cxgb_is_neg_adv(status)) { pr_debug("Negative advice on abort- tid %u status %d (%s)\n", ep->hwtid, status, neg_adv_str(status)); ep->stats.abort_neg_adv++; mutex_lock(&dev->rdev.stats.lock); dev->rdev.stats.neg_adv++; mutex_unlock(&dev->rdev.stats.lock); goto deref_ep; } pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state); set_bit(PEER_ABORT, &ep->com.history); /* * Wake up any threads in rdma_init() or rdma_fini(). * However, this is not needed if com state is just * MPA_REQ_SENT */ if (ep->com.state != MPA_REQ_SENT) c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); mutex_lock(&ep->com.mutex); switch (ep->com.state) { case CONNECTING: c4iw_put_ep(&ep->parent_ep->com); break; case MPA_REQ_WAIT: (void)stop_ep_timer(ep); break; case MPA_REQ_SENT: (void)stop_ep_timer(ep); if (status != CPL_ERR_CONN_RESET || mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1)) connect_reply_upcall(ep, -ECONNRESET); else { /* * we just don't send notification upwards because we * want to retry with mpa_v1 without upper layers even * knowing it. 
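 * (The actual retry happens further down in this function: once
 * retry_with_mpa_v1 is set, the old tid, L2T entry and route are torn
 * down and c4iw_reconnect() is called.)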
* * do some housekeeping so as to re-initiate the * connection */ pr_info("%s: mpa_rev=%d. Retrying with mpav1\n", __func__, mpa_rev); ep->retry_with_mpa_v1 = 1; } break; case MPA_REP_SENT: break; case MPA_REQ_RCVD: break; case MORIBUND: case CLOSING: stop_ep_timer(ep); fallthrough; case FPDU_MODE: if (ep->com.qp && ep->com.qp->srq) { srqidx = ABORT_RSS_SRQIDX_G( be32_to_cpu(req->srqidx_status)); if (srqidx) { complete_cached_srq_buffers(ep, srqidx); } else { /* Hold ep ref until finish_peer_abort() */ c4iw_get_ep(&ep->com); __state_set(&ep->com, ABORTING); set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags); read_tcb(ep); break; } } if (ep->com.cm_id && ep->com.qp) { attrs.next_state = C4IW_QP_STATE_ERROR; ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); if (ret) pr_err("%s - qp <- error failed!\n", __func__); } peer_abort_upcall(ep); break; case ABORTING: break; case DEAD: pr_warn("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); mutex_unlock(&ep->com.mutex); goto deref_ep; default: WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); break; } dst_confirm(ep->dst); if (ep->com.state != ABORTING) { __state_set(&ep->com, DEAD); /* we don't release if we want to retry with mpa_v1 */ if (!ep->retry_with_mpa_v1) release = 1; } mutex_unlock(&ep->com.mutex); rpl_skb = skb_dequeue(&ep->com.ep_skb_list); if (WARN_ON(!rpl_skb)) { release = 1; goto out; } cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx); c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); out: if (release) release_ep_resources(ep); else if (ep->retry_with_mpa_v1) { if (ep->com.remote_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &ep->com.local_addr; cxgb4_clip_release( ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, 1); } xa_erase_irq(&ep->com.dev->hwtids, ep->hwtid); cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid, ep->com.local_addr.ss_family); dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); c4iw_reconnect(ep); } deref_ep: c4iw_put_ep(&ep->com); /* Dereferencing ep, referenced in peer_abort_intr() */ c4iw_put_ep(&ep->com); return 0; } static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct c4iw_qp_attributes attrs; struct cpl_close_con_rpl *rpl = cplhdr(skb); int release = 0; unsigned int tid = GET_TID(rpl); ep = get_ep_from_tid(dev, tid); if (!ep) return 0; pr_debug("ep %p tid %u\n", ep, ep->hwtid); /* The cm_id may be null if we failed to connect */ mutex_lock(&ep->com.mutex); set_bit(CLOSE_CON_RPL, &ep->com.history); switch (ep->com.state) { case CLOSING: __state_set(&ep->com, MORIBUND); break; case MORIBUND: (void)stop_ep_timer(ep); if ((ep->com.cm_id) && (ep->com.qp)) { attrs.next_state = C4IW_QP_STATE_IDLE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } close_complete_upcall(ep, 0); __state_set(&ep->com, DEAD); release = 1; break; case ABORTING: case DEAD: break; default: WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); break; } mutex_unlock(&ep->com.mutex); if (release) release_ep_resources(ep); c4iw_put_ep(&ep->com); return 0; } static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_rdma_terminate *rpl = cplhdr(skb); unsigned int tid = GET_TID(rpl); struct c4iw_ep *ep; struct c4iw_qp_attributes attrs; ep = get_ep_from_tid(dev, tid); if (ep) { if (ep->com.qp) { pr_warn("TERM received tid %u qpid %u\n", tid, ep->com.qp->wq.sq.qid); attrs.next_state = C4IW_QP_STATE_TERMINATE; c4iw_modify_qp(ep->com.qp->rhp, 
ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3, * when entering the TERM state the RNIC MUST initiate a CLOSE. */ c4iw_ep_disconnect(ep, 1, GFP_KERNEL); c4iw_put_ep(&ep->com); } else pr_warn("TERM received tid %u no ep/qp\n", tid); return 0; } /* * Upcall from the adapter indicating data has been transmitted. * For us its just the single MPA request or reply. We can now free * the skb holding the mpa message. */ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct cpl_fw4_ack *hdr = cplhdr(skb); u8 credits = hdr->credits; unsigned int tid = GET_TID(hdr); ep = get_ep_from_tid(dev, tid); if (!ep) return 0; pr_debug("ep %p tid %u credits %u\n", ep, ep->hwtid, credits); if (credits == 0) { pr_debug("0 credit ack ep %p tid %u state %u\n", ep, ep->hwtid, state_read(&ep->com)); goto out; } dst_confirm(ep->dst); if (ep->mpa_skb) { pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n", ep, ep->hwtid, state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0); mutex_lock(&ep->com.mutex); kfree_skb(ep->mpa_skb); ep->mpa_skb = NULL; if (test_bit(STOP_MPA_TIMER, &ep->com.flags)) stop_ep_timer(ep); mutex_unlock(&ep->com.mutex); } out: c4iw_put_ep(&ep->com); return 0; } int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) { int abort; struct c4iw_ep *ep = to_ep(cm_id); pr_debug("ep %p tid %u\n", ep, ep->hwtid); mutex_lock(&ep->com.mutex); if (ep->com.state != MPA_REQ_RCVD) { mutex_unlock(&ep->com.mutex); c4iw_put_ep(&ep->com); return -ECONNRESET; } set_bit(ULP_REJECT, &ep->com.history); if (mpa_rev == 0) abort = 1; else abort = send_mpa_reject(ep, pdata, pdata_len); mutex_unlock(&ep->com.mutex); stop_ep_timer(ep); c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL); c4iw_put_ep(&ep->com); return 0; } int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { int err; struct c4iw_qp_attributes attrs; enum c4iw_qp_attr_mask mask; struct c4iw_ep *ep = to_ep(cm_id); struct c4iw_dev *h = to_c4iw_dev(cm_id->device); struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); int abort = 0; pr_debug("ep %p tid %u\n", ep, ep->hwtid); mutex_lock(&ep->com.mutex); if (ep->com.state != MPA_REQ_RCVD) { err = -ECONNRESET; goto err_out; } if (!qp) { err = -EINVAL; goto err_out; } set_bit(ULP_ACCEPT, &ep->com.history); if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) || (conn_param->ird > cur_max_read_depth(ep->com.dev))) { err = -EINVAL; goto err_abort; } if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { if (conn_param->ord > ep->ird) { if (RELAXED_IRD_NEGOTIATION) { conn_param->ord = ep->ird; } else { ep->ird = conn_param->ird; ep->ord = conn_param->ord; send_mpa_reject(ep, conn_param->private_data, conn_param->private_data_len); err = -ENOMEM; goto err_abort; } } if (conn_param->ird < ep->ord) { if (RELAXED_IRD_NEGOTIATION && ep->ord <= h->rdev.lldi.max_ordird_qp) { conn_param->ird = ep->ord; } else { err = -ENOMEM; goto err_abort; } } } ep->ird = conn_param->ird; ep->ord = conn_param->ord; if (ep->mpa_attr.version == 1) { if (peer2peer && ep->ird == 0) ep->ird = 1; } else { if (peer2peer && (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) && (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0) ep->ird = 1; } pr_debug("ird %d ord %d\n", ep->ird, ep->ord); ep->com.cm_id = cm_id; ref_cm_id(&ep->com); ep->com.qp = qp; ref_qp(ep); /* bind QP to EP and move to RTS */ attrs.mpa_attr = ep->mpa_attr; attrs.max_ird = ep->ird; attrs.max_ord = 
ep->ord; attrs.llp_stream_handle = ep; attrs.next_state = C4IW_QP_STATE_RTS; /* bind QP and TID with INIT_WR */ mask = C4IW_QP_ATTR_NEXT_STATE | C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); if (err) goto err_deref_cm_id; set_bit(STOP_MPA_TIMER, &ep->com.flags); err = send_mpa_reply(ep, conn_param->private_data, conn_param->private_data_len); if (err) goto err_deref_cm_id; __state_set(&ep->com, FPDU_MODE); established_upcall(ep); mutex_unlock(&ep->com.mutex); c4iw_put_ep(&ep->com); return 0; err_deref_cm_id: deref_cm_id(&ep->com); err_abort: abort = 1; err_out: mutex_unlock(&ep->com.mutex); if (abort) c4iw_ep_disconnect(ep, 1, GFP_KERNEL); c4iw_put_ep(&ep->com); return err; } static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) { struct in_device *ind; int found = 0; struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr; struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; const struct in_ifaddr *ifa; ind = in_dev_get(dev->rdev.lldi.ports[0]); if (!ind) return -EADDRNOTAVAIL; rcu_read_lock(); in_dev_for_each_ifa_rcu(ifa, ind) { if (ifa->ifa_flags & IFA_F_SECONDARY) continue; laddr->sin_addr.s_addr = ifa->ifa_address; raddr->sin_addr.s_addr = ifa->ifa_address; found = 1; break; } rcu_read_unlock(); in_dev_put(ind); return found ? 0 : -EADDRNOTAVAIL; } static int get_lladdr(struct net_device *dev, struct in6_addr *addr, unsigned char banned_flags) { struct inet6_dev *idev; int err = -EADDRNOTAVAIL; rcu_read_lock(); idev = __in6_dev_get(dev); if (idev != NULL) { struct inet6_ifaddr *ifp; read_lock_bh(&idev->lock); list_for_each_entry(ifp, &idev->addr_list, if_list) { if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) { memcpy(addr, &ifp->addr, 16); err = 0; break; } } read_unlock_bh(&idev->lock); } rcu_read_unlock(); return err; } static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) { struct in6_addr addr; struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) { memcpy(la6->sin6_addr.s6_addr, &addr, 16); memcpy(ra6->sin6_addr.s6_addr, &addr, 16); return 0; } return -EADDRNOTAVAIL; } int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); struct c4iw_ep *ep; int err = 0; struct sockaddr_in *laddr; struct sockaddr_in *raddr; struct sockaddr_in6 *laddr6; struct sockaddr_in6 *raddr6; __u8 *ra; int iptype; if ((conn_param->ord > cur_max_read_depth(dev)) || (conn_param->ird > cur_max_read_depth(dev))) { err = -EINVAL; goto out; } ep = alloc_ep(sizeof(*ep), GFP_KERNEL); if (!ep) { pr_err("%s - cannot alloc ep\n", __func__); err = -ENOMEM; goto out; } skb_queue_head_init(&ep->com.ep_skb_list); if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) { err = -ENOMEM; goto fail1; } timer_setup(&ep->timer, ep_timeout, 0); ep->plen = conn_param->private_data_len; if (ep->plen) memcpy(ep->mpa_pkt + sizeof(struct mpa_message), conn_param->private_data, ep->plen); ep->ird = conn_param->ird; ep->ord = conn_param->ord; if (peer2peer && ep->ord == 0) ep->ord = 1; ep->com.cm_id = cm_id; ref_cm_id(&ep->com); cm_id->provider_data = ep; ep->com.dev = dev; ep->com.qp = get_qhp(dev, conn_param->qpn); if (!ep->com.qp) { pr_warn("%s qpn 0x%x not found!\n", __func__, 
conn_param->qpn); err = -EINVAL; goto fail2; } ref_qp(ep); pr_debug("qpn 0x%x qp %p cm_id %p\n", conn_param->qpn, ep->com.qp, cm_id); /* * Allocate an active TID to initiate a TCP connection. */ ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); if (ep->atid == -1) { pr_err("%s - cannot alloc atid\n", __func__); err = -ENOMEM; goto fail2; } err = xa_insert_irq(&dev->atids, ep->atid, ep, GFP_KERNEL); if (err) goto fail5; memcpy(&ep->com.local_addr, &cm_id->m_local_addr, sizeof(ep->com.local_addr)); memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr, sizeof(ep->com.remote_addr)); laddr = (struct sockaddr_in *)&ep->com.local_addr; raddr = (struct sockaddr_in *)&ep->com.remote_addr; laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr; raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr; if (cm_id->m_remote_addr.ss_family == AF_INET) { iptype = 4; ra = (__u8 *)&raddr->sin_addr; /* * Handle loopback requests to INADDR_ANY. */ if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) { err = pick_local_ipaddrs(dev, cm_id); if (err) goto fail3; } /* find a route */ pr_debug("saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n", &laddr->sin_addr, ntohs(laddr->sin_port), ra, ntohs(raddr->sin_port)); ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev, laddr->sin_addr.s_addr, raddr->sin_addr.s_addr, laddr->sin_port, raddr->sin_port, cm_id->tos); } else { iptype = 6; ra = (__u8 *)&raddr6->sin6_addr; /* * Handle loopback requests to INADDR_ANY. */ if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { err = pick_local_ip6addrs(dev, cm_id); if (err) goto fail3; } /* find a route */ pr_debug("saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n", laddr6->sin6_addr.s6_addr, ntohs(laddr6->sin6_port), raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port)); ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev, laddr6->sin6_addr.s6_addr, raddr6->sin6_addr.s6_addr, laddr6->sin6_port, raddr6->sin6_port, cm_id->tos, raddr6->sin6_scope_id); } if (!ep->dst) { pr_err("%s - cannot find route\n", __func__); err = -EHOSTUNREACH; goto fail3; } err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true, ep->com.dev->rdev.lldi.adapter_type, cm_id->tos); if (err) { pr_err("%s - cannot alloc l2e\n", __func__); goto fail4; } pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, ep->l2t->idx); state_set(&ep->com, CONNECTING); ep->tos = cm_id->tos; /* send connect request to rnic */ err = send_connect(ep); if (!err) goto out; cxgb4_l2t_release(ep->l2t); fail4: dst_release(ep->dst); fail3: xa_erase_irq(&ep->com.dev->atids, ep->atid); fail5: cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); fail2: skb_queue_purge(&ep->com.ep_skb_list); deref_cm_id(&ep->com); fail1: c4iw_put_ep(&ep->com); out: return err; } static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) { int err; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &ep->com.local_addr; if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) { err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, 1); if (err) return err; } c4iw_init_wr_wait(ep->com.wr_waitp); err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0], ep->stid, &sin6->sin6_addr, sin6->sin6_port, ep->com.dev->rdev.lldi.rxq_ids[0]); if (!err) err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp, 0, 0, __func__); else if (err > 0) err = net_xmit_errno(err); if (err) { cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, 1); 
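/* CLIP reference dropped above; log which address/stid failed. */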
pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n", err, ep->stid, sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port)); } return err; } static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) { int err; struct sockaddr_in *sin = (struct sockaddr_in *) &ep->com.local_addr; if (dev->rdev.lldi.enable_fw_ofld_conn) { do { err = cxgb4_create_server_filter( ep->com.dev->rdev.lldi.ports[0], ep->stid, sin->sin_addr.s_addr, sin->sin_port, 0, ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0); if (err == -EBUSY) { if (c4iw_fatal_error(&ep->com.dev->rdev)) { err = -EIO; break; } set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(usecs_to_jiffies(100)); } } while (err == -EBUSY); } else { c4iw_init_wr_wait(ep->com.wr_waitp); err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid, sin->sin_addr.s_addr, sin->sin_port, 0, ep->com.dev->rdev.lldi.rxq_ids[0]); if (!err) err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp, 0, 0, __func__); else if (err > 0) err = net_xmit_errno(err); } if (err) pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n" , err, ep->stid, &sin->sin_addr, ntohs(sin->sin_port)); return err; } int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) { int err = 0; struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); struct c4iw_listen_ep *ep; might_sleep(); ep = alloc_ep(sizeof(*ep), GFP_KERNEL); if (!ep) { pr_err("%s - cannot alloc ep\n", __func__); err = -ENOMEM; goto fail1; } skb_queue_head_init(&ep->com.ep_skb_list); pr_debug("ep %p\n", ep); ep->com.cm_id = cm_id; ref_cm_id(&ep->com); ep->com.dev = dev; ep->backlog = backlog; memcpy(&ep->com.local_addr, &cm_id->m_local_addr, sizeof(ep->com.local_addr)); /* * Allocate a server TID. */ if (dev->rdev.lldi.enable_fw_ofld_conn && ep->com.local_addr.ss_family == AF_INET) ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, cm_id->m_local_addr.ss_family, ep); else ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, cm_id->m_local_addr.ss_family, ep); if (ep->stid == -1) { pr_err("%s - cannot alloc stid\n", __func__); err = -ENOMEM; goto fail2; } err = xa_insert_irq(&dev->stids, ep->stid, ep, GFP_KERNEL); if (err) goto fail3; state_set(&ep->com, LISTEN); if (ep->com.local_addr.ss_family == AF_INET) err = create_server4(dev, ep); else err = create_server6(dev, ep); if (!err) { cm_id->provider_data = ep; goto out; } xa_erase_irq(&ep->com.dev->stids, ep->stid); fail3: cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, ep->com.local_addr.ss_family); fail2: deref_cm_id(&ep->com); c4iw_put_ep(&ep->com); fail1: out: return err; } int c4iw_destroy_listen(struct iw_cm_id *cm_id) { int err; struct c4iw_listen_ep *ep = to_listen_ep(cm_id); pr_debug("ep %p\n", ep); might_sleep(); state_set(&ep->com, DEAD); if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn && ep->com.local_addr.ss_family == AF_INET) { err = cxgb4_remove_server_filter( ep->com.dev->rdev.lldi.ports[0], ep->stid, ep->com.dev->rdev.lldi.rxq_ids[0], false); } else { struct sockaddr_in6 *sin6; c4iw_init_wr_wait(ep->com.wr_waitp); err = cxgb4_remove_server( ep->com.dev->rdev.lldi.ports[0], ep->stid, ep->com.dev->rdev.lldi.rxq_ids[0], ep->com.local_addr.ss_family == AF_INET6); if (err) goto done; err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp, 0, 0, __func__); sin6 = (struct sockaddr_in6 *)&ep->com.local_addr; cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, 1); } xa_erase_irq(&ep->com.dev->stids, ep->stid); 
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, ep->com.local_addr.ss_family); done: deref_cm_id(&ep->com); c4iw_put_ep(&ep->com); return err; } int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) { int ret = 0; int close = 0; int fatal = 0; struct c4iw_rdev *rdev; mutex_lock(&ep->com.mutex); pr_debug("ep %p state %s, abrupt %d\n", ep, states[ep->com.state], abrupt); /* * Ref the ep here in case we have fatal errors causing the * ep to be released and freed. */ c4iw_get_ep(&ep->com); rdev = &ep->com.dev->rdev; if (c4iw_fatal_error(rdev)) { fatal = 1; close_complete_upcall(ep, -EIO); ep->com.state = DEAD; } switch (ep->com.state) { case MPA_REQ_WAIT: case MPA_REQ_SENT: case MPA_REQ_RCVD: case MPA_REP_SENT: case FPDU_MODE: case CONNECTING: close = 1; if (abrupt) ep->com.state = ABORTING; else { ep->com.state = CLOSING; /* * if we close before we see the fw4_ack() then we fix * up the timer state since we're reusing it. */ if (ep->mpa_skb && test_bit(STOP_MPA_TIMER, &ep->com.flags)) { clear_bit(STOP_MPA_TIMER, &ep->com.flags); stop_ep_timer(ep); } start_ep_timer(ep); } set_bit(CLOSE_SENT, &ep->com.flags); break; case CLOSING: if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { close = 1; if (abrupt) { (void)stop_ep_timer(ep); ep->com.state = ABORTING; } else ep->com.state = MORIBUND; } break; case MORIBUND: case ABORTING: case DEAD: pr_debug("ignoring disconnect ep %p state %u\n", ep, ep->com.state); break; default: WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); break; } if (close) { if (abrupt) { set_bit(EP_DISC_ABORT, &ep->com.history); ret = send_abort(ep); } else { set_bit(EP_DISC_CLOSE, &ep->com.history); ret = send_halfclose(ep); } if (ret) { set_bit(EP_DISC_FAIL, &ep->com.history); if (!abrupt) { stop_ep_timer(ep); close_complete_upcall(ep, -EIO); } if (ep->com.qp) { struct c4iw_qp_attributes attrs; attrs.next_state = C4IW_QP_STATE_ERROR; ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); if (ret) pr_err("%s - qp <- error failed!\n", __func__); } fatal = 1; } } mutex_unlock(&ep->com.mutex); c4iw_put_ep(&ep->com); if (fatal) release_ep_resources(ep); return ret; } static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, struct cpl_fw6_msg_ofld_connection_wr_rpl *req) { struct c4iw_ep *ep; int atid = be32_to_cpu(req->tid); ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, (__force u32) req->tid); if (!ep) return; switch (req->retval) { case FW_ENOMEM: set_bit(ACT_RETRY_NOMEM, &ep->com.history); if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { send_fw_act_open_req(ep, atid); return; } fallthrough; case FW_EADDRINUSE: set_bit(ACT_RETRY_INUSE, &ep->com.history); if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { send_fw_act_open_req(ep, atid); return; } break; default: pr_info("%s unexpected ofld conn wr retval %d\n", __func__, req->retval); break; } pr_err("active ofld_connect_wr failure %d atid %d\n", req->retval, atid); mutex_lock(&dev->rdev.stats.lock); dev->rdev.stats.act_ofld_conn_fails++; mutex_unlock(&dev->rdev.stats.lock); connect_reply_upcall(ep, status2errno(req->retval)); state_set(&ep->com, DEAD); if (ep->com.remote_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ep->com.local_addr; cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, 1); } xa_erase_irq(&dev->atids, atid); cxgb4_free_atid(dev->rdev.lldi.tids, atid); dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); c4iw_put_ep(&ep->com); } static void 
passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, struct cpl_fw6_msg_ofld_connection_wr_rpl *req) { struct sk_buff *rpl_skb; struct cpl_pass_accept_req *cpl; int ret; rpl_skb = (struct sk_buff *)(unsigned long)req->cookie; if (req->retval) { pr_err("%s passive open failure %d\n", __func__, req->retval); mutex_lock(&dev->rdev.stats.lock); dev->rdev.stats.pas_ofld_conn_fails++; mutex_unlock(&dev->rdev.stats.lock); kfree_skb(rpl_skb); } else { cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb); OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, (__force u32) htonl( (__force u32) req->tid))); ret = pass_accept_req(dev, rpl_skb); if (!ret) kfree_skb(rpl_skb); } return; } static inline u64 t4_tcb_get_field64(__be64 *tcb, u16 word) { u64 tlo = be64_to_cpu(tcb[((31 - word) / 2)]); u64 thi = be64_to_cpu(tcb[((31 - word) / 2) - 1]); u64 t; u32 shift = 32; t = (thi << shift) | (tlo >> shift); return t; } static inline u32 t4_tcb_get_field32(__be64 *tcb, u16 word, u32 mask, u32 shift) { u32 v; u64 t = be64_to_cpu(tcb[(31 - word) / 2]); if (word & 0x1) shift += 32; v = (t >> shift) & mask; return v; } static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_get_tcb_rpl *rpl = cplhdr(skb); __be64 *tcb = (__be64 *)(rpl + 1); unsigned int tid = GET_TID(rpl); struct c4iw_ep *ep; u64 t_flags_64; u32 rx_pdu_out; ep = get_ep_from_tid(dev, tid); if (!ep) return 0; /* Examine the TF_RX_PDU_OUT (bit 49 of the t_flags) in order to * determine if there's a rx PDU feedback event pending. * * If that bit is set, it means we'll need to re-read the TCB's * rq_start value. The final value is the one present in a TCB * with the TF_RX_PDU_OUT bit cleared. */ t_flags_64 = t4_tcb_get_field64(tcb, TCB_T_FLAGS_W); rx_pdu_out = (t_flags_64 & TF_RX_PDU_OUT_V(1)) >> TF_RX_PDU_OUT_S; c4iw_put_ep(&ep->com); /* from get_ep_from_tid() */ c4iw_put_ep(&ep->com); /* from read_tcb() */ /* If TF_RX_PDU_OUT bit is set, re-read the TCB */ if (rx_pdu_out) { if (++ep->rx_pdu_out_cnt >= 2) { WARN_ONCE(1, "tcb re-read() reached the guard limit, finishing the cleanup\n"); goto cleanup; } read_tcb(ep); return 0; } ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M, TCB_RQ_START_S); cleanup: pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx); if (test_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) finish_peer_abort(dev, ep); else if (test_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) send_abort_req(ep); else WARN_ONCE(1, "unexpected state!"); return 0; } static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_fw6_msg *rpl = cplhdr(skb); struct cpl_fw6_msg_ofld_connection_wr_rpl *req; switch (rpl->type) { case FW6_TYPE_CQE: c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); break; case FW6_TYPE_OFLD_CONNECTION_WR_RPL: req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data; switch (req->t_state) { case TCP_SYN_SENT: active_ofld_conn_reply(dev, skb, req); break; case TCP_SYN_RECV: passive_ofld_conn_reply(dev, skb, req); break; default: pr_err("%s unexpected ofld conn wr state %d\n", __func__, req->t_state); break; } break; } return 0; } static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) { __be32 l2info; __be16 hdr_len, vlantag, len; u16 eth_hdr_len; int tcp_hdr_len, ip_hdr_len; u8 intf; struct cpl_rx_pkt *cpl = cplhdr(skb); struct cpl_pass_accept_req *req; struct tcp_options_received tmp_opt; struct c4iw_dev *dev; enum chip_type type; dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); /* Store values 
from cpl_rx_pkt in temporary location. */ vlantag = cpl->vlan; len = cpl->len; l2info = cpl->l2info; hdr_len = cpl->hdr_len; intf = cpl->iff; __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header)); /* * We need to parse the TCP options from SYN packet. * to generate cpl_pass_accept_req. */ memset(&tmp_opt, 0, sizeof(tmp_opt)); tcp_clear_options(&tmp_opt); tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL); req = __skb_push(skb, sizeof(*req)); memset(req, 0, sizeof(*req)); req->l2info = cpu_to_be16(SYN_INTF_V(intf) | SYN_MAC_IDX_V(RX_MACIDX_G( be32_to_cpu(l2info))) | SYN_XACT_MATCH_F); type = dev->rdev.lldi.adapter_type; tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len)); ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len)); req->hdr_len = cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info)))); if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) { eth_hdr_len = is_t4(type) ? RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) : RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info)); req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) | IP_HDR_LEN_V(ip_hdr_len) | ETH_HDR_LEN_V(eth_hdr_len)); } else { /* T6 and later */ eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info)); req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) | T6_IP_HDR_LEN_V(ip_hdr_len) | T6_ETH_HDR_LEN_V(eth_hdr_len)); } req->vlan = vlantag; req->len = len; req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) | PASS_OPEN_TOS_V(tos)); req->tcpopt.mss = htons(tmp_opt.mss_clamp); if (tmp_opt.wscale_ok) req->tcpopt.wsf = tmp_opt.snd_wscale; req->tcpopt.tstamp = tmp_opt.saw_tstamp; if (tmp_opt.sack_ok) req->tcpopt.sack = 1; OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0)); return; } static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, __be32 laddr, __be16 lport, __be32 raddr, __be16 rport, u32 rcv_isn, u32 filter, u16 window, u32 rss_qid, u8 port_id) { struct sk_buff *req_skb; struct fw_ofld_connection_wr *req; struct cpl_pass_accept_req *cpl = cplhdr(skb); int ret; req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); if (!req_skb) return; req = __skb_put_zero(req_skb, sizeof(*req)); req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F); req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F); req->le.filter = (__force __be32) filter; req->le.lport = lport; req->le.pport = rport; req->le.u.ipv4.lip = laddr; req->le.u.ipv4.pip = raddr; req->tcb.rcv_nxt = htonl(rcv_isn + 1); req->tcb.rcv_adv = htons(window); req->tcb.t_state_to_astid = htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) | FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) | FW_OFLD_CONNECTION_WR_ASTID_V( PASS_OPEN_TID_G(ntohl(cpl->tos_stid)))); /* * We store the qid in opt2 which will be used by the firmware * to send us the wr response. */ req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid)); /* * We initialize the MSS index in TCB to 0xF. * So that when driver sends cpl_pass_accept_rpl * TCB picks up the correct value. If this was 0 * TP will ignore any value > 0 for MSS index. */ req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF)); req->cookie = (uintptr_t)skb; set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); if (ret < 0) { pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__, ret); kfree_skb(skb); kfree_skb(req_skb); } } /* * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt * messages when a filter is being used instead of server to * redirect a syn packet. 
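 * (Such filters are installed by create_server4() when
 * enable_fw_ofld_conn is set.)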
When packets hit filter they are redirected * to the offload queue and driver tries to establish the connection * using firmware work request. */ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) { int stid; unsigned int filter; struct ethhdr *eh = NULL; struct vlan_ethhdr *vlan_eh = NULL; struct iphdr *iph; struct tcphdr *tcph; struct rss_header *rss = (void *)skb->data; struct cpl_rx_pkt *cpl = (void *)skb->data; struct cpl_pass_accept_req *req = (void *)(rss + 1); struct l2t_entry *e; struct dst_entry *dst; struct c4iw_ep *lep = NULL; u16 window; struct port_info *pi; struct net_device *pdev; u16 rss_qid, eth_hdr_len; int step; struct neighbour *neigh; /* Drop all non-SYN packets */ if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F))) goto reject; /* * Drop all packets which did not hit the filter. * Unlikely to happen. */ if (!(rss->filter_hit && rss->filter_tid)) goto reject; /* * Calculate the server tid from filter hit index from cpl_rx_pkt. */ stid = (__force int) cpu_to_be32((__force u32) rss->hash_val); lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid); if (!lep) { pr_warn("%s connect request on invalid stid %d\n", __func__, stid); goto reject; } switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) { case CHELSIO_T4: eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info)); break; case CHELSIO_T5: eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info)); break; case CHELSIO_T6: eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info)); break; default: pr_err("T%d Chip is not supported\n", CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)); goto reject; } if (eth_hdr_len == ETH_HLEN) { eh = (struct ethhdr *)(req + 1); iph = (struct iphdr *)(eh + 1); } else { vlan_eh = (struct vlan_ethhdr *)(req + 1); iph = (struct iphdr *)(vlan_eh + 1); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan)); } if (iph->version != 0x4) goto reject; tcph = (struct tcphdr *)(iph + 1); skb_set_network_header(skb, (void *)iph - (void *)rss); skb_set_transport_header(skb, (void *)tcph - (void *)rss); skb_get(skb); pr_debug("lip 0x%x lport %u pip 0x%x pport %u tos %d\n", ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr), ntohs(tcph->source), iph->tos); dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev, iph->daddr, iph->saddr, tcph->dest, tcph->source, iph->tos); if (!dst) { pr_err("%s - failed to find dst entry!\n", __func__); goto reject; } neigh = dst_neigh_lookup_skb(dst, skb); if (!neigh) { pr_err("%s - failed to allocate neigh!\n", __func__); goto free_dst; } if (neigh->dev->flags & IFF_LOOPBACK) { pdev = ip_dev_find(&init_net, iph->daddr); if (!pdev) { pr_err("%s - failed to find device!\n", __func__); goto free_dst; } e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0); pi = (struct port_info *)netdev_priv(pdev); dev_put(pdev); } else { pdev = get_real_dev(neigh->dev); e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0); pi = (struct port_info *)netdev_priv(pdev); } neigh_release(neigh); if (!e) { pr_err("%s - failed to allocate l2t entry!\n", __func__); goto free_dst; } step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; window = (__force u16) htons((__force u16)tcph->window); /* Calcuate filter portion for LE region. */ filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple( dev->rdev.lldi.ports[0], e)); /* * Synthesize the cpl_pass_accept_req. We have everything except the * TID. 
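 * (send_fw_pass_open_req() below stashes this skb in the work request's
 * cookie field so that passive_ofld_conn_reply() can retrieve it.)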
Once firmware sends a reply with TID we update the TID field * in cpl and pass it through the regular cpl_pass_accept_req path. */ build_cpl_pass_accept_req(skb, stid, iph->tos); send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr, tcph->source, ntohl(tcph->seq), filter, window, rss_qid, pi->port_id); cxgb4_l2t_release(e); free_dst: dst_release(dst); reject: if (lep) c4iw_put_ep(&lep->com); return 0; } /* * These are the real handlers that are called from a * work queue. */ static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = { [CPL_ACT_ESTABLISH] = act_establish, [CPL_ACT_OPEN_RPL] = act_open_rpl, [CPL_RX_DATA] = rx_data, [CPL_ABORT_RPL_RSS] = abort_rpl, [CPL_ABORT_RPL] = abort_rpl, [CPL_PASS_OPEN_RPL] = pass_open_rpl, [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, [CPL_PASS_ACCEPT_REQ] = pass_accept_req, [CPL_PASS_ESTABLISH] = pass_establish, [CPL_PEER_CLOSE] = peer_close, [CPL_ABORT_REQ_RSS] = peer_abort, [CPL_CLOSE_CON_RPL] = close_con_rpl, [CPL_RDMA_TERMINATE] = terminate, [CPL_FW4_ACK] = fw4_ack, [CPL_GET_TCB_RPL] = read_tcb_rpl, [CPL_FW6_MSG] = deferred_fw6_msg, [CPL_RX_PKT] = rx_pkt, [FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe, [FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe }; static void process_timeout(struct c4iw_ep *ep) { struct c4iw_qp_attributes attrs; int abort = 1; mutex_lock(&ep->com.mutex); pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state); set_bit(TIMEDOUT, &ep->com.history); switch (ep->com.state) { case MPA_REQ_SENT: connect_reply_upcall(ep, -ETIMEDOUT); break; case MPA_REQ_WAIT: case MPA_REQ_RCVD: case MPA_REP_SENT: case FPDU_MODE: break; case CLOSING: case MORIBUND: if (ep->com.cm_id && ep->com.qp) { attrs.next_state = C4IW_QP_STATE_ERROR; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } close_complete_upcall(ep, -ETIMEDOUT); break; case ABORTING: case DEAD: /* * These states are expected if the ep timed out at the same * time as another thread was calling stop_ep_timer(). * So we silently do nothing for these states. */ abort = 0; break; default: WARN(1, "%s unexpected state ep %p tid %u state %u\n", __func__, ep, ep->hwtid, ep->com.state); abort = 0; } mutex_unlock(&ep->com.mutex); if (abort) c4iw_ep_disconnect(ep, 1, GFP_KERNEL); c4iw_put_ep(&ep->com); } static void process_timedout_eps(void) { struct c4iw_ep *ep; spin_lock_irq(&timeout_lock); while (!list_empty(&timeout_list)) { struct list_head *tmp; tmp = timeout_list.next; list_del(tmp); tmp->next = NULL; tmp->prev = NULL; spin_unlock_irq(&timeout_lock); ep = list_entry(tmp, struct c4iw_ep, entry); process_timeout(ep); spin_lock_irq(&timeout_lock); } spin_unlock_irq(&timeout_lock); } static void process_work(struct work_struct *work) { struct sk_buff *skb = NULL; struct c4iw_dev *dev; struct cpl_act_establish *rpl; unsigned int opcode; int ret; process_timedout_eps(); while ((skb = skb_dequeue(&rxq))) { rpl = cplhdr(skb); dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); opcode = rpl->ot.opcode; if (opcode >= ARRAY_SIZE(work_handlers) || !work_handlers[opcode]) { pr_err("No handler for opcode 0x%x.\n", opcode); kfree_skb(skb); } else { ret = work_handlers[opcode](dev, skb); if (!ret) kfree_skb(skb); } process_timedout_eps(); } } static DECLARE_WORK(skb_work, process_work); static void ep_timeout(struct timer_list *t) { struct c4iw_ep *ep = from_timer(ep, t, timer); int kickit = 0; spin_lock(&timeout_lock); if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { /* * Only insert if it is not already on the list. 
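 * The !ep->entry.next test works because process_timedout_eps() resets
 * entry.next to NULL whenever it takes an ep off timeout_list.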
*/ if (!ep->entry.next) { list_add_tail(&ep->entry, &timeout_list); kickit = 1; } } spin_unlock(&timeout_lock); if (kickit) queue_work(workq, &skb_work); } /* * All the CM events are handled on a work queue to have a safe context. */ static int sched(struct c4iw_dev *dev, struct sk_buff *skb) { /* * Save dev in the skb->cb area. */ *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; /* * Queue the skb and schedule the worker thread. */ skb_queue_tail(&rxq, skb); queue_work(workq, &skb_work); return 0; } static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_set_tcb_rpl *rpl = cplhdr(skb); if (rpl->status != CPL_ERR_NONE) { pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n", rpl->status, GET_TID(rpl)); } kfree_skb(skb); return 0; } static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_fw6_msg *rpl = cplhdr(skb); struct c4iw_wr_wait *wr_waitp; int ret; pr_debug("type %u\n", rpl->type); switch (rpl->type) { case FW6_TYPE_WR_RPL: ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; pr_debug("wr_waitp %p ret %u\n", wr_waitp, ret); if (wr_waitp) c4iw_wake_up_deref(wr_waitp, ret ? -ret : 0); kfree_skb(skb); break; case FW6_TYPE_CQE: case FW6_TYPE_OFLD_CONNECTION_WR_RPL: sched(dev, skb); break; default: pr_err("%s unexpected fw6 msg type %u\n", __func__, rpl->type); kfree_skb(skb); break; } return 0; } static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_abort_req_rss *req = cplhdr(skb); struct c4iw_ep *ep; unsigned int tid = GET_TID(req); ep = get_ep_from_tid(dev, tid); /* This EP will be dereferenced in peer_abort() */ if (!ep) { pr_warn("Abort on non-existent endpoint, tid %d\n", tid); kfree_skb(skb); return 0; } if (cxgb_is_neg_adv(req->status)) { pr_debug("Negative advice on abort- tid %u status %d (%s)\n", ep->hwtid, req->status, neg_adv_str(req->status)); goto out; } pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state); c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); out: sched(dev, skb); return 0; } /* * Most upcalls from the T4 Core go to sched() to * schedule the processing on a work queue. */ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { [CPL_ACT_ESTABLISH] = sched, [CPL_ACT_OPEN_RPL] = sched, [CPL_RX_DATA] = sched, [CPL_ABORT_RPL_RSS] = sched, [CPL_ABORT_RPL] = sched, [CPL_PASS_OPEN_RPL] = sched, [CPL_CLOSE_LISTSRV_RPL] = sched, [CPL_PASS_ACCEPT_REQ] = sched, [CPL_PASS_ESTABLISH] = sched, [CPL_PEER_CLOSE] = sched, [CPL_CLOSE_CON_RPL] = sched, [CPL_ABORT_REQ_RSS] = peer_abort_intr, [CPL_RDMA_TERMINATE] = sched, [CPL_FW4_ACK] = sched, [CPL_SET_TCB_RPL] = set_tcb_rpl, [CPL_GET_TCB_RPL] = sched, [CPL_FW6_MSG] = fw6_msg, [CPL_RX_PKT] = sched }; int __init c4iw_cm_init(void) { skb_queue_head_init(&rxq); workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM); if (!workq) return -ENOMEM; return 0; } void c4iw_cm_term(void) { WARN_ON(!list_empty(&timeout_list)); destroy_workqueue(workq); }
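/*
 * Editor's illustrative sketch (not part of the upstream iw_cxgb4 driver):
 * the two handler tables above implement a common two-stage dispatch
 * pattern -- c4iw_handlers[] runs in the fast path and mostly just queues
 * work via sched(), while work_handlers[] does the real CPL processing
 * from the ordered workqueue in process_work(). The minimal, self-contained
 * model below shows that pattern in plain C using hypothetical demo_* names;
 * it makes no claim about the actual Chelsio CPL formats or kernel APIs.
 * The design point is that the split keeps the interrupt-context path short
 * while letting the heavy handlers run in a context where they may sleep.
 */
enum { DEMO_CPL_A = 0, DEMO_CPL_B = 1, DEMO_NUM_CPLS = 2 };

struct demo_msg {
	int opcode;
	int payload;
};

typedef int (*demo_handler_t)(struct demo_msg *msg);

static int demo_handle_a(struct demo_msg *msg) { return msg->payload;     }
static int demo_handle_b(struct demo_msg *msg) { return msg->payload * 2; }

/* Stage 2: the "real" handlers, analogous to work_handlers[]. */
static demo_handler_t demo_work_handlers[DEMO_NUM_CPLS] = {
	[DEMO_CPL_A] = demo_handle_a,
	[DEMO_CPL_B] = demo_handle_b,
};

/* Tiny FIFO standing in for the skb queue drained by process_work(). */
static struct demo_msg demo_queue[16];
static unsigned int demo_head, demo_tail;

/* Stage 1: fast-path entry point, analogous to sched(): just enqueue. */
static int demo_sched(struct demo_msg *msg)
{
	if (demo_head - demo_tail >= 16)
		return -1;			/* queue full: drop */
	demo_queue[demo_head++ & 15] = *msg;
	return 0;
}

/* Worker: drain the queue and dispatch by opcode, like process_work(). */
static void demo_process_work(void)
{
	while (demo_tail != demo_head) {
		struct demo_msg *msg = &demo_queue[demo_tail++ & 15];

		if (msg->opcode < 0 || msg->opcode >= DEMO_NUM_CPLS ||
		    !demo_work_handlers[msg->opcode])
			continue;		/* no handler: drop message */
		demo_work_handlers[msg->opcode](msg);
	}
}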
linux-master
drivers/infiniband/hw/cxgb4/cm.c
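/*
 * Editor's illustrative sketch, appended after the cm.c record above and
 * before the mad.c record that follows (it belongs to neither file): cm.c
 * defers endpoint timeouts by having ep_timeout() put each timed-out ep on
 * a global list under a spinlock, then process_timedout_eps() drains that
 * list from the workqueue, dropping the lock while each entry is handled.
 * The self-contained model below shows that drain-outside-the-lock shape
 * with hypothetical demo_* names and a plain pthread mutex standing in for
 * the kernel spinlock; it is a sketch of the pattern, not the driver code.
 */
#include <pthread.h>
#include <stddef.h>

struct demo_ep {
	struct demo_ep *next;
	int queued;
	int id;
};

static pthread_mutex_t demo_timeout_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_ep *demo_timeout_list;

/* Timeout handling itself (abort vs. ignore based on state) is elided. */
static void demo_handle_timeout(struct demo_ep *ep)
{
	(void)ep;
}

/* Timer callback analogue: queue the ep at most once, under the lock. */
static void demo_ep_timeout(struct demo_ep *ep)
{
	pthread_mutex_lock(&demo_timeout_lock);
	if (!ep->queued) {
		ep->next = demo_timeout_list;
		demo_timeout_list = ep;
		ep->queued = 1;
	}
	pthread_mutex_unlock(&demo_timeout_lock);
}

/* Worker analogue: pop entries under the lock, handle them outside it so
 * the handler may sleep or re-arm timers without deadlocking. */
static void demo_process_timedout_eps(void)
{
	pthread_mutex_lock(&demo_timeout_lock);
	while (demo_timeout_list) {
		struct demo_ep *ep = demo_timeout_list;

		demo_timeout_list = ep->next;
		ep->next = NULL;
		ep->queued = 0;
		pthread_mutex_unlock(&demo_timeout_lock);
		demo_handle_timeout(ep);
		pthread_mutex_lock(&demo_timeout_lock);
	}
	pthread_mutex_unlock(&demo_timeout_lock);
}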
/* * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <rdma/ib_mad.h> #include <rdma/ib_smi.h> #include <rdma/ib_sa.h> #include <rdma/ib_cache.h> #include <linux/random.h> #include <linux/mlx4/cmd.h> #include <linux/gfp.h> #include <rdma/ib_pma.h> #include <linux/ip.h> #include <net/ipv6.h> #include <linux/mlx4/driver.h> #include "mlx4_ib.h" enum { MLX4_IB_VENDOR_CLASS1 = 0x9, MLX4_IB_VENDOR_CLASS2 = 0xa }; #define MLX4_TUN_SEND_WRID_SHIFT 34 #define MLX4_TUN_QPN_SHIFT 32 #define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT) #define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT) #define MLX4_TUN_IS_RECV(a) (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1) #define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3) /* Port mgmt change event handling */ #define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr) #define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask) #define NUM_IDX_IN_PKEY_TBL_BLK 32 #define GUID_TBL_ENTRY_SIZE 8 /* size in bytes */ #define GUID_TBL_BLK_NUM_ENTRIES 8 #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES) struct mlx4_mad_rcv_buf { struct ib_grh grh; u8 payload[256]; } __packed; struct mlx4_mad_snd_buf { u8 payload[256]; } __packed; struct mlx4_tunnel_mad { struct ib_grh grh; struct mlx4_ib_tunnel_header hdr; struct ib_mad mad; } __packed; struct mlx4_rcv_tunnel_mad { struct mlx4_rcv_tunnel_hdr hdr; struct ib_grh grh; struct ib_mad mad; } __packed; static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u32 port_num); static void handle_lid_change_event(struct mlx4_ib_dev *dev, u32 port_num); static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num, int block, u32 change_bitmap); __be64 mlx4_ib_gen_node_guid(void) { #define NODE_GUID_HI ((u64) (((u64)IB_OPENIB_OUI) << 40)) return cpu_to_be64(NODE_GUID_HI | get_random_u32()); } __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx) { return cpu_to_be64(atomic_inc_return(&ctx->tid)) | cpu_to_be64(0xff00000000000000LL); } int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags, int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const void 
*in_mad, void *response_mad) { struct mlx4_cmd_mailbox *inmailbox, *outmailbox; void *inbox; int err; u32 in_modifier = port; u8 op_modifier = 0; inmailbox = mlx4_alloc_cmd_mailbox(dev->dev); if (IS_ERR(inmailbox)) return PTR_ERR(inmailbox); inbox = inmailbox->buf; outmailbox = mlx4_alloc_cmd_mailbox(dev->dev); if (IS_ERR(outmailbox)) { mlx4_free_cmd_mailbox(dev->dev, inmailbox); return PTR_ERR(outmailbox); } memcpy(inbox, in_mad, 256); /* * Key check traps can't be generated unless we have in_wc to * tell us where to send the trap. */ if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc) op_modifier |= 0x1; if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc) op_modifier |= 0x2; if (mlx4_is_mfunc(dev->dev) && (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc)) op_modifier |= 0x8; if (in_wc) { struct { __be32 my_qpn; u32 reserved1; __be32 rqpn; u8 sl; u8 g_path; u16 reserved2[2]; __be16 pkey; u32 reserved3[11]; u8 grh[40]; } *ext_info; memset(inbox + 256, 0, 256); ext_info = inbox + 256; ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num); ext_info->rqpn = cpu_to_be32(in_wc->src_qp); ext_info->sl = in_wc->sl << 4; ext_info->g_path = in_wc->dlid_path_bits | (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0); ext_info->pkey = cpu_to_be16(in_wc->pkey_index); if (in_grh) memcpy(ext_info->grh, in_grh, 40); op_modifier |= 0x4; in_modifier |= ib_lid_cpu16(in_wc->slid) << 16; } err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier, mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier, MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED); if (!err) memcpy(response_mad, outmailbox->buf, 256); mlx4_free_cmd_mailbox(dev->dev, inmailbox); mlx4_free_cmd_mailbox(dev->dev, outmailbox); return err; } static void update_sm_ah(struct mlx4_ib_dev *dev, u32 port_num, u16 lid, u8 sl) { struct ib_ah *new_ah; struct rdma_ah_attr ah_attr; unsigned long flags; if (!dev->send_agent[port_num - 1][0]) return; memset(&ah_attr, 0, sizeof ah_attr); ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num); rdma_ah_set_dlid(&ah_attr, lid); rdma_ah_set_sl(&ah_attr, sl); rdma_ah_set_port_num(&ah_attr, port_num); new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd, &ah_attr, 0); if (IS_ERR(new_ah)) return; spin_lock_irqsave(&dev->sm_lock, flags); if (dev->sm_ah[port_num - 1]) rdma_destroy_ah(dev->sm_ah[port_num - 1], 0); dev->sm_ah[port_num - 1] = new_ah; spin_unlock_irqrestore(&dev->sm_lock, flags); } /* * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can * synthesize LID change, Client-Rereg, GID change, and P_Key change events. 
*/ static void smp_snoop(struct ib_device *ibdev, u32 port_num, const struct ib_mad *mad, u16 prev_lid) { struct ib_port_info *pinfo; u16 lid; __be16 *base; u32 bn, pkey_change_bitmap; int i; struct mlx4_ib_dev *dev = to_mdev(ibdev); if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && mad->mad_hdr.method == IB_MGMT_METHOD_SET) switch (mad->mad_hdr.attr_id) { case IB_SMP_ATTR_PORT_INFO: if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) return; pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data; lid = be16_to_cpu(pinfo->lid); update_sm_ah(dev, port_num, be16_to_cpu(pinfo->sm_lid), pinfo->neighbormtu_mastersmsl & 0xf); if (pinfo->clientrereg_resv_subnetto & 0x80) handle_client_rereg_event(dev, port_num); if (prev_lid != lid) handle_lid_change_event(dev, port_num); break; case IB_SMP_ATTR_PKEY_TABLE: if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) return; if (!mlx4_is_mfunc(dev->dev)) { mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_PKEY_CHANGE); break; } /* at this point, we are running in the master. * Slaves do not receive SMPs. */ bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF; base = (__be16 *) &(((struct ib_smp *)mad)->data[0]); pkey_change_bitmap = 0; for (i = 0; i < 32; i++) { pr_debug("PKEY[%d] = x%x\n", i + bn*32, be16_to_cpu(base[i])); if (be16_to_cpu(base[i]) != dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) { pkey_change_bitmap |= (1 << i); dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] = be16_to_cpu(base[i]); } } pr_debug("PKEY Change event: port=%u, " "block=0x%x, change_bitmap=0x%x\n", port_num, bn, pkey_change_bitmap); if (pkey_change_bitmap) { mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_PKEY_CHANGE); if (!dev->sriov.is_going_down) __propagate_pkey_ev(dev, port_num, bn, pkey_change_bitmap); } break; case IB_SMP_ATTR_GUID_INFO: if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) return; /* paravirtualized master's guid is guid 0 -- does not change */ if (!mlx4_is_master(dev->dev)) mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_GID_CHANGE); /*if master, notify relevant slaves*/ if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) { bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod); mlx4_ib_update_cache_on_guid_change(dev, bn, port_num, (u8 *)(&((struct ib_smp *)mad)->data)); mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num, (u8 *)(&((struct ib_smp *)mad)->data)); } break; case IB_SMP_ATTR_SL_TO_VL_TABLE: /* cache sl to vl mapping changes for use in * filling QP1 LRH VL field when sending packets */ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV && dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT) return; if (!mlx4_is_slave(dev->dev)) { union sl2vl_tbl_to_u64 sl2vl64; int jj; for (jj = 0; jj < 8; jj++) { sl2vl64.sl8[jj] = ((struct ib_smp *)mad)->data[jj]; pr_debug("port %u, sl2vl[%d] = %02x\n", port_num, jj, sl2vl64.sl8[jj]); } atomic64_set(&dev->sl2vl[port_num - 1], sl2vl64.sl64); } break; default: break; } } static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num, int block, u32 change_bitmap) { int i, ix, slave, err; int have_event = 0; for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) { if (slave == mlx4_master_func_num(dev->dev)) continue; if (!mlx4_is_slave_active(dev->dev, slave)) continue; have_event = 0; for (i = 0; i < 32; i++) { if (!(change_bitmap & (1 << i))) continue; for (ix = 0; ix < dev->dev->caps.pkey_table_len[port_num]; ix++) { if 
(dev->pkeys.virt2phys_pkey[slave][port_num - 1] [ix] == i + 32 * block) { err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num); pr_debug("propagate_pkey_ev: slave %d," " port %d, ix %d (%d)\n", slave, port_num, ix, err); have_event = 1; break; } } if (have_event) break; } } } static void node_desc_override(struct ib_device *dev, struct ib_mad *mad) { unsigned long flags; if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP && mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) { spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags); memcpy(((struct ib_smp *) mad)->data, dev->node_desc, IB_DEVICE_NODE_DESC_MAX); spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags); } } static void forward_trap(struct mlx4_ib_dev *dev, u32 port_num, const struct ib_mad *mad) { int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; struct ib_mad_send_buf *send_buf; struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; int ret; unsigned long flags; if (agent) { send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC, IB_MGMT_BASE_VERSION); if (IS_ERR(send_buf)) return; /* * We rely here on the fact that MLX QPs don't use the * address handle after the send is posted (this is * wrong following the IB spec strictly, but we know * it's OK for our devices). */ spin_lock_irqsave(&dev->sm_lock, flags); memcpy(send_buf->mad, mad, sizeof *mad); if ((send_buf->ah = dev->sm_ah[port_num - 1])) ret = ib_post_send_mad(send_buf, NULL); else ret = -EINVAL; spin_unlock_irqrestore(&dev->sm_lock, flags); if (ret) ib_free_send_mad(send_buf); } } static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave, struct ib_sa_mad *sa_mad) { int ret = 0; /* dispatch to different sa handlers */ switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) { case IB_SA_ATTR_MC_MEMBER_REC: ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad); break; default: break; } return ret; } int mlx4_ib_find_real_gid(struct ib_device *ibdev, u32 port, __be64 guid) { struct mlx4_ib_dev *dev = to_mdev(ibdev); int i; for (i = 0; i < dev->dev->caps.sqp_demux; i++) { if (dev->sriov.demux[port - 1].guid_cache[i] == guid) return i; } return -1; } static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave, u32 port, u16 pkey, u16 *ix) { int i, ret; u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF; u16 slot_pkey; if (slave == mlx4_master_func_num(dev->dev)) return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix); unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1; for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) { if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix) continue; pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i]; ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey); if (ret) continue; if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) { if (slot_pkey & 0x8000) { *ix = (u16) pkey_ix; return 0; } else { /* take first partial pkey index found */ if (partial_ix == 0xFF) partial_ix = pkey_ix; } } } if (partial_ix < 0xFF) { *ix = (u16) partial_ix; return 0; } return -EINVAL; } static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid, union ib_gid *dgid) { int version = ib_get_rdma_header_version((const union rdma_network_hdr *)grh); enum rdma_network_type net_type; if (version == 4) net_type = RDMA_NETWORK_IPV4; else if (version == 6) net_type = RDMA_NETWORK_IPV6; else return 
-EINVAL; return ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type, sgid, dgid); } static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave) { int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave; return (qpn >= proxy_start && qpn <= proxy_start + 1); } int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u32 port, enum ib_qp_type dest_qpt, struct ib_wc *wc, struct ib_grh *grh, struct ib_mad *mad) { struct ib_sge list; struct ib_ud_wr wr; const struct ib_send_wr *bad_wr; struct mlx4_ib_demux_pv_ctx *tun_ctx; struct mlx4_ib_demux_pv_qp *tun_qp; struct mlx4_rcv_tunnel_mad *tun_mad; struct rdma_ah_attr attr; struct ib_ah *ah; struct ib_qp *src_qp = NULL; unsigned tun_tx_ix = 0; int dqpn; int ret = 0; u16 tun_pkey_ix; u16 cached_pkey; u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH; if (dest_qpt > IB_QPT_GSI) { pr_debug("dest_qpt (%d) > IB_QPT_GSI\n", dest_qpt); return -EINVAL; } tun_ctx = dev->sriov.demux[port-1].tun[slave]; /* check if proxy qp created */ if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE) return -EAGAIN; if (!dest_qpt) tun_qp = &tun_ctx->qp[0]; else tun_qp = &tun_ctx->qp[1]; /* compute P_Key index to put in tunnel header for slave */ if (dest_qpt) { u16 pkey_ix; ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey); if (ret) { pr_debug("unable to get %s cached pkey for index %d, ret %d\n", is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI", wc->pkey_index, ret); return -EINVAL; } ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix); if (ret) { pr_debug("unable to get %s pkey ix for pkey 0x%x, ret %d\n", is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI", cached_pkey, ret); return -EINVAL; } tun_pkey_ix = pkey_ix; } else tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0]; dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1; /* get tunnel tx data buf for slave */ src_qp = tun_qp->qp; /* create ah. Just need an empty one with the port num for the post send. * The driver will set the force loopback bit in post_send */ memset(&attr, 0, sizeof attr); attr.type = rdma_ah_find_type(&dev->ib_dev, port); rdma_ah_set_port_num(&attr, port); if (is_eth) { union ib_gid sgid; union ib_gid dgid; if (get_gids_from_l3_hdr(grh, &sgid, &dgid)) return -EINVAL; rdma_ah_set_grh(&attr, &dgid, 0, 0, 0, 0); } ah = rdma_create_ah(tun_ctx->pd, &attr, 0); if (IS_ERR(ah)) return -ENOMEM; /* allocate tunnel tx buf after pass failure returns */ spin_lock(&tun_qp->tx_lock); if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >= (MLX4_NUM_TUNNEL_BUFS - 1)) ret = -EAGAIN; else tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1); spin_unlock(&tun_qp->tx_lock); if (ret) goto end; tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr); if (tun_qp->tx_ring[tun_tx_ix].ah) rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah, 0); tun_qp->tx_ring[tun_tx_ix].ah = ah; ib_dma_sync_single_for_cpu(&dev->ib_dev, tun_qp->tx_ring[tun_tx_ix].buf.map, sizeof (struct mlx4_rcv_tunnel_mad), DMA_TO_DEVICE); /* copy over to tunnel buffer */ if (grh) memcpy(&tun_mad->grh, grh, sizeof *grh); memcpy(&tun_mad->mad, mad, sizeof *mad); /* adjust tunnel data */ tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix); tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF); tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 
0x80 : 0; if (is_eth) { u16 vlan = 0; if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan, NULL)) { /* VST mode */ if (vlan != wc->vlan_id) /* Packet vlan is not the VST-assigned vlan. * Drop the packet. */ goto out; else /* Remove the vlan tag before forwarding * the packet to the VF. */ vlan = 0xffff; } else { vlan = wc->vlan_id; } tun_mad->hdr.sl_vid = cpu_to_be16(vlan); memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4); memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2); } else { tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12); tun_mad->hdr.slid_mac_47_32 = ib_lid_be16(wc->slid); } ib_dma_sync_single_for_device(&dev->ib_dev, tun_qp->tx_ring[tun_tx_ix].buf.map, sizeof (struct mlx4_rcv_tunnel_mad), DMA_TO_DEVICE); list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map; list.length = sizeof (struct mlx4_rcv_tunnel_mad); list.lkey = tun_ctx->pd->local_dma_lkey; wr.ah = ah; wr.port_num = port; wr.remote_qkey = IB_QP_SET_QKEY; wr.remote_qpn = dqpn; wr.wr.next = NULL; wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt); wr.wr.sg_list = &list; wr.wr.num_sge = 1; wr.wr.opcode = IB_WR_SEND; wr.wr.send_flags = IB_SEND_SIGNALED; ret = ib_post_send(src_qp, &wr.wr, &bad_wr); if (!ret) return 0; out: spin_lock(&tun_qp->tx_lock); tun_qp->tx_ix_tail++; spin_unlock(&tun_qp->tx_lock); tun_qp->tx_ring[tun_tx_ix].ah = NULL; end: rdma_destroy_ah(ah, 0); return ret; } static int mlx4_ib_demux_mad(struct ib_device *ibdev, u32 port, struct ib_wc *wc, struct ib_grh *grh, struct ib_mad *mad) { struct mlx4_ib_dev *dev = to_mdev(ibdev); int err, other_port; int slave = -1; u8 *slave_id; int is_eth = 0; if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND) is_eth = 0; else is_eth = 1; if (is_eth) { union ib_gid dgid; union ib_gid sgid; if (get_gids_from_l3_hdr(grh, &sgid, &dgid)) return -EINVAL; if (!(wc->wc_flags & IB_WC_GRH)) { mlx4_ib_warn(ibdev, "RoCE grh not present.\n"); return -EINVAL; } if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) { mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n"); return -EINVAL; } err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave); if (err && mlx4_is_mf_bonded(dev->dev)) { other_port = (port == 1) ? 2 : 1; err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave); if (!err) { port = other_port; pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n", slave, grh->dgid.raw, port, other_port); } } if (err) { mlx4_ib_warn(ibdev, "failed matching grh\n"); return -ENOENT; } if (slave >= dev->dev->caps.sqp_demux) { mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n", slave, dev->dev->caps.sqp_demux); return -ENOENT; } if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad)) return 0; err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad); if (err) pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n", is_proxy_qp0(dev, wc->src_qp, slave) ? 
"SMI" : "GSI", slave, err); return 0; } /* Initially assume that this mad is for us */ slave = mlx4_master_func_num(dev->dev); /* See if the slave id is encoded in a response mad */ if (mad->mad_hdr.method & 0x80) { slave_id = (u8 *) &mad->mad_hdr.tid; slave = *slave_id; if (slave != 255) /*255 indicates the dom0*/ *slave_id = 0; /* remap tid */ } /* If a grh is present, we demux according to it */ if (wc->wc_flags & IB_WC_GRH) { if (grh->dgid.global.interface_id == cpu_to_be64(IB_SA_WELL_KNOWN_GUID) && grh->dgid.global.subnet_prefix == cpu_to_be64( atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) { slave = 0; } else { slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id); if (slave < 0) { mlx4_ib_warn(ibdev, "failed matching grh\n"); return -ENOENT; } } } /* Class-specific handling */ switch (mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_LID_ROUTED: case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: /* 255 indicates the dom0 */ if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) { if (!mlx4_vf_smi_enabled(dev->dev, slave, port)) return -EPERM; /* for a VF. drop unsolicited MADs */ if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) { mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n", slave, mad->mad_hdr.mgmt_class, mad->mad_hdr.method); return -EINVAL; } } break; case IB_MGMT_CLASS_SUBN_ADM: if (mlx4_ib_demux_sa_handler(ibdev, port, slave, (struct ib_sa_mad *) mad)) return 0; break; case IB_MGMT_CLASS_CM: if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad)) return 0; break; case IB_MGMT_CLASS_DEVICE_MGMT: if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP) return 0; break; default: /* Drop unsupported classes for slaves in tunnel mode */ if (slave != mlx4_master_func_num(dev->dev)) { pr_debug("dropping unsupported ingress mad from class:%d " "for slave:%d\n", mad->mad_hdr.mgmt_class, slave); return 0; } } /*make sure that no slave==255 was not handled yet.*/ if (slave >= dev->dev->caps.sqp_demux) { mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n", slave, dev->dev->caps.sqp_demux); return -ENOENT; } err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad); if (err) pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n", is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI", slave, err); return 0; } static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const struct ib_mad *in_mad, struct ib_mad *out_mad) { u16 slid, prev_lid = 0; int err; struct ib_port_attr pattr; slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE); if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) { forward_trap(to_mdev(ibdev), port_num, in_mad); return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; } if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && in_mad->mad_hdr.method != IB_MGMT_METHOD_SET && in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) return IB_MAD_RESULT_SUCCESS; /* * Don't process SMInfo queries -- the SMA can't handle them. 
*/ if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO) return IB_MAD_RESULT_SUCCESS; } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 || in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2 || in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) { if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && in_mad->mad_hdr.method != IB_MGMT_METHOD_SET) return IB_MAD_RESULT_SUCCESS; } else return IB_MAD_RESULT_SUCCESS; if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && in_mad->mad_hdr.method == IB_MGMT_METHOD_SET && in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO && !ib_query_port(ibdev, port_num, &pattr)) prev_lid = ib_lid_cpu16(pattr.lid); err = mlx4_MAD_IFC(to_mdev(ibdev), (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) | (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) | MLX4_MAD_IFC_NET_VIEW, port_num, in_wc, in_grh, in_mad, out_mad); if (err) return IB_MAD_RESULT_FAILURE; if (!out_mad->mad_hdr.status) { smp_snoop(ibdev, port_num, in_mad, prev_lid); /* slaves get node desc from FW */ if (!mlx4_is_slave(to_mdev(ibdev)->dev)) node_desc_override(ibdev, out_mad); } /* set return bit in status of directed route responses */ if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) out_mad->mad_hdr.status |= cpu_to_be16(1 << 15); if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) /* no response for trap repress */ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; } static void edit_counter(struct mlx4_counter *cnt, void *counters, __be16 attr_id) { switch (attr_id) { case IB_PMA_PORT_COUNTERS: { struct ib_pma_portcounters *pma_cnt = (struct ib_pma_portcounters *)counters; ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data, (be64_to_cpu(cnt->tx_bytes) >> 2)); ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data, (be64_to_cpu(cnt->rx_bytes) >> 2)); ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets, be64_to_cpu(cnt->tx_frames)); ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets, be64_to_cpu(cnt->rx_frames)); break; } case IB_PMA_PORT_COUNTERS_EXT: { struct ib_pma_portcounters_ext *pma_cnt_ext = (struct ib_pma_portcounters_ext *)counters; pma_cnt_ext->port_xmit_data = cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2); pma_cnt_ext->port_rcv_data = cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2); pma_cnt_ext->port_xmit_packets = cnt->tx_frames; pma_cnt_ext->port_rcv_packets = cnt->rx_frames; break; } } } static int iboe_process_mad_port_info(void *out_mad) { struct ib_class_port_info cpi = {}; cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH; memcpy(out_mad, &cpi, sizeof(cpi)); return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; } static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const struct ib_mad *in_mad, struct ib_mad *out_mad) { struct mlx4_counter counter_stats; struct mlx4_ib_dev *dev = to_mdev(ibdev); struct counter_index *tmp_counter; int err = IB_MAD_RESULT_FAILURE, stats_avail = 0; if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT) return -EINVAL; if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) return iboe_process_mad_port_info((void *)(out_mad->data + 40)); memset(&counter_stats, 0, sizeof(counter_stats)); mutex_lock(&dev->counters_table[port_num - 1].mutex); list_for_each_entry(tmp_counter, &dev->counters_table[port_num - 1].counters_list, list) { err = 
mlx4_get_counter_stats(dev->dev, tmp_counter->index, &counter_stats, 0); if (err) { err = IB_MAD_RESULT_FAILURE; stats_avail = 0; break; } stats_avail = 1; } mutex_unlock(&dev->counters_table[port_num - 1].mutex); if (stats_avail) { switch (counter_stats.counter_mode & 0xf) { case 0: edit_counter(&counter_stats, (void *)(out_mad->data + 40), in_mad->mad_hdr.attr_id); err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; break; default: err = IB_MAD_RESULT_FAILURE; } } return err; } int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const struct ib_mad *in, struct ib_mad *out, size_t *out_mad_size, u16 *out_mad_pkey_index) { struct mlx4_ib_dev *dev = to_mdev(ibdev); enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num); /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA * queries, should be called only by VFs and for that specific purpose */ if (link == IB_LINK_LAYER_INFINIBAND) { if (mlx4_is_slave(dev->dev) && (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && (in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS || in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT || in->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO))) return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh, in, out); return ib_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh, in, out); } if (link == IB_LINK_LAYER_ETHERNET) return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh, in, out); return -EINVAL; } static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *mad_send_wc) { if (mad_send_wc->send_buf->context[0]) rdma_destroy_ah(mad_send_wc->send_buf->context[0], 0); ib_free_send_mad(mad_send_wc->send_buf); } int mlx4_ib_mad_init(struct mlx4_ib_dev *dev) { struct ib_mad_agent *agent; int p, q; int ret; enum rdma_link_layer ll; for (p = 0; p < dev->num_ports; ++p) { ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1); for (q = 0; q <= 1; ++q) { if (ll == IB_LINK_LAYER_INFINIBAND) { agent = ib_register_mad_agent(&dev->ib_dev, p + 1, q ? 
IB_QPT_GSI : IB_QPT_SMI, NULL, 0, send_handler, NULL, NULL, 0); if (IS_ERR(agent)) { ret = PTR_ERR(agent); goto err; } dev->send_agent[p][q] = agent; } else dev->send_agent[p][q] = NULL; } } return 0; err: for (p = 0; p < dev->num_ports; ++p) for (q = 0; q <= 1; ++q) if (dev->send_agent[p][q]) ib_unregister_mad_agent(dev->send_agent[p][q]); return ret; } void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev) { struct ib_mad_agent *agent; int p, q; for (p = 0; p < dev->num_ports; ++p) { for (q = 0; q <= 1; ++q) { agent = dev->send_agent[p][q]; if (agent) { dev->send_agent[p][q] = NULL; ib_unregister_mad_agent(agent); } } if (dev->sm_ah[p]) rdma_destroy_ah(dev->sm_ah[p], 0); } } static void handle_lid_change_event(struct mlx4_ib_dev *dev, u32 port_num) { mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE); if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num, MLX4_EQ_PORT_INFO_LID_CHANGE_MASK); } static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u32 port_num) { /* re-configure the alias-guid and mcg's */ if (mlx4_is_master(dev->dev)) { mlx4_ib_invalidate_all_guid_record(dev, port_num); if (!dev->sriov.is_going_down) { mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0); mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num, MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK); } } /* Update the sl to vl table from inside client rereg * only if in secure-host mode (snooping is not possible) * and the sl-to-vl change event is not generated by FW. */ if (!mlx4_is_slave(dev->dev) && dev->dev->flags & MLX4_FLAG_SECURE_HOST && !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) { if (mlx4_is_master(dev->dev)) /* already in work queue from mlx4_ib_event queueing * mlx4_handle_port_mgmt_change_event, which calls * this procedure. Therefore, call sl2vl_update directly. 
*/ mlx4_ib_sl2vl_update(dev, port_num); else mlx4_sched_ib_sl2vl_update_work(dev, port_num); } mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER); } static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num, struct mlx4_eqe *eqe) { __propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe), GET_MASK_FROM_EQE(eqe)); } static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u32 port_num, u32 guid_tbl_blk_num, u32 change_bitmap) { struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; u16 i; if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev)) return; in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; guid_tbl_blk_num *= 4; for (i = 0; i < 4; i++) { if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff))) continue; memset(in_mad, 0, sizeof *in_mad); memset(out_mad, 0, sizeof *out_mad); in_mad->base_version = 1; in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; in_mad->class_version = 1; in_mad->method = IB_MGMT_METHOD_GET; in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; in_mad->attr_mod = cpu_to_be32(guid_tbl_blk_num + i); if (mlx4_MAD_IFC(dev, MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW, port_num, NULL, NULL, in_mad, out_mad)) { mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n"); goto out; } mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i, port_num, (u8 *)(&((struct ib_smp *)out_mad)->data)); mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i, port_num, (u8 *)(&((struct ib_smp *)out_mad)->data)); } out: kfree(in_mad); kfree(out_mad); return; } void handle_port_mgmt_change_event(struct work_struct *work) { struct ib_event_work *ew = container_of(work, struct ib_event_work, work); struct mlx4_ib_dev *dev = ew->ib_dev; struct mlx4_eqe *eqe = &(ew->ib_eqe); u32 port = eqe->event.port_mgmt_change.port; u32 changed_attr; u32 tbl_block; u32 change_bitmap; switch (eqe->subtype) { case MLX4_DEV_PMC_SUBTYPE_PORT_INFO: changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr); /* Update the SM ah - This should be done before handling the other changed attributes so that MADs can be sent to the SM */ if (changed_attr & MSTR_SM_CHANGE_MASK) { u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid); u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf; update_sm_ah(dev, port, lid, sl); } /* Check if it is a lid change event */ if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK) handle_lid_change_event(dev, port); /* Generate GUID changed event */ if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) { if (mlx4_is_master(dev->dev)) { union ib_gid gid; int err = 0; if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix) err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1); else gid.global.subnet_prefix = eqe->event.port_mgmt_change.params.port_info.gid_prefix; if (err) { pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n", port, err); } else { pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. 
new=0x%llx\n", port, (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix), be64_to_cpu(gid.global.subnet_prefix)); atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix, be64_to_cpu(gid.global.subnet_prefix)); } } mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); /*if master, notify all slaves*/ if (mlx4_is_master(dev->dev)) mlx4_gen_slaves_port_mgt_ev(dev->dev, port, MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK); } if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK) handle_client_rereg_event(dev, port); break; case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE: mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE); if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) propagate_pkey_ev(dev, port, eqe); break; case MLX4_DEV_PMC_SUBTYPE_GUID_INFO: /* paravirtualized master's guid is guid 0 -- does not change */ if (!mlx4_is_master(dev->dev)) mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); /*if master, notify relevant slaves*/ else if (!dev->sriov.is_going_down) { tbl_block = GET_BLK_PTR_FROM_EQE(eqe); change_bitmap = GET_MASK_FROM_EQE(eqe); handle_slaves_guid_change(dev, port, tbl_block, change_bitmap); } break; case MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP: /* cache sl to vl mapping changes for use in * filling QP1 LRH VL field when sending packets */ if (!mlx4_is_slave(dev->dev)) { union sl2vl_tbl_to_u64 sl2vl64; int jj; for (jj = 0; jj < 8; jj++) { sl2vl64.sl8[jj] = eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table[jj]; pr_debug("port %u, sl2vl[%d] = %02x\n", port, jj, sl2vl64.sl8[jj]); } atomic64_set(&dev->sl2vl[port - 1], sl2vl64.sl64); } break; default: pr_warn("Unsupported subtype 0x%x for " "Port Management Change event\n", eqe->subtype); } kfree(ew); } void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u32 port_num, enum ib_event_type type) { struct ib_event event; event.device = &dev->ib_dev; event.element.port_num = port_num; event.event = type; ib_dispatch_event(&event); } static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg) { unsigned long flags; struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context; struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); spin_lock_irqsave(&dev->sriov.going_down_lock, flags); if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) queue_work(ctx->wq, &ctx->work); spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); } static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg) { unsigned long flags; struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context; struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); spin_lock_irqsave(&dev->sriov.going_down_lock, flags); if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) queue_work(ctx->wi_wq, &ctx->work); spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); } static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx, struct mlx4_ib_demux_pv_qp *tun_qp, int index) { struct ib_sge sg_list; struct ib_recv_wr recv_wr; const struct ib_recv_wr *bad_recv_wr; int size; size = (tun_qp->qp->qp_type == IB_QPT_UD) ? 
sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf); sg_list.addr = tun_qp->ring[index].map; sg_list.length = size; sg_list.lkey = ctx->pd->local_dma_lkey; recv_wr.next = NULL; recv_wr.sg_list = &sg_list; recv_wr.num_sge = 1; recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV | MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt); ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map, size, DMA_FROM_DEVICE); return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr); } static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port, int slave, struct ib_sa_mad *sa_mad) { int ret = 0; /* dispatch to different sa handlers */ switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) { case IB_SA_ATTR_MC_MEMBER_REC: ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad); break; default: break; } return ret; } int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u32 port, enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn, u32 qkey, struct rdma_ah_attr *attr, u8 *s_mac, u16 vlan_id, struct ib_mad *mad) { struct ib_sge list; struct ib_ud_wr wr; const struct ib_send_wr *bad_wr; struct mlx4_ib_demux_pv_ctx *sqp_ctx; struct mlx4_ib_demux_pv_qp *sqp; struct mlx4_mad_snd_buf *sqp_mad; struct ib_ah *ah; struct ib_qp *send_qp = NULL; unsigned wire_tx_ix = 0; u16 wire_pkey_ix; int src_qpnum; int ret; sqp_ctx = dev->sriov.sqps[port-1]; /* check if proxy qp created */ if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE) return -EAGAIN; if (dest_qpt == IB_QPT_SMI) { src_qpnum = 0; sqp = &sqp_ctx->qp[0]; wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0]; } else { src_qpnum = 1; sqp = &sqp_ctx->qp[1]; wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index]; } send_qp = sqp->qp; ah = rdma_zalloc_drv_obj(sqp_ctx->pd->device, ib_ah); if (!ah) return -ENOMEM; ah->device = sqp_ctx->pd->device; ah->pd = sqp_ctx->pd; /* create ah */ ret = mlx4_ib_create_ah_slave(ah, attr, rdma_ah_retrieve_grh(attr)->sgid_index, s_mac, vlan_id); if (ret) goto out; spin_lock(&sqp->tx_lock); if (sqp->tx_ix_head - sqp->tx_ix_tail >= (MLX4_NUM_WIRE_BUFS - 1)) ret = -EAGAIN; else wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_WIRE_BUFS - 1); spin_unlock(&sqp->tx_lock); if (ret) goto out; sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); kfree(sqp->tx_ring[wire_tx_ix].ah); sqp->tx_ring[wire_tx_ix].ah = ah; ib_dma_sync_single_for_cpu(&dev->ib_dev, sqp->tx_ring[wire_tx_ix].buf.map, sizeof (struct mlx4_mad_snd_buf), DMA_TO_DEVICE); memcpy(&sqp_mad->payload, mad, sizeof *mad); ib_dma_sync_single_for_device(&dev->ib_dev, sqp->tx_ring[wire_tx_ix].buf.map, sizeof (struct mlx4_mad_snd_buf), DMA_TO_DEVICE); list.addr = sqp->tx_ring[wire_tx_ix].buf.map; list.length = sizeof (struct mlx4_mad_snd_buf); list.lkey = sqp_ctx->pd->local_dma_lkey; wr.ah = ah; wr.port_num = port; wr.pkey_index = wire_pkey_ix; wr.remote_qkey = qkey; wr.remote_qpn = remote_qpn; wr.wr.next = NULL; wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum); wr.wr.sg_list = &list; wr.wr.num_sge = 1; wr.wr.opcode = IB_WR_SEND; wr.wr.send_flags = IB_SEND_SIGNALED; ret = ib_post_send(send_qp, &wr.wr, &bad_wr); if (!ret) return 0; spin_lock(&sqp->tx_lock); sqp->tx_ix_tail++; spin_unlock(&sqp->tx_lock); sqp->tx_ring[wire_tx_ix].ah = NULL; out: kfree(ah); return ret; } static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port) { if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND) return slave; return mlx4_get_base_gid_ix(dev->dev, slave, port); } 
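/*
 * Editor's illustrative sketch (not part of the upstream mlx4_ib driver):
 * the tunnel and wire send paths above (mlx4_ib_send_to_slave,
 * mlx4_ib_send_to_wire) account for their TX rings with two free-running
 * counters -- tx_ix_head advances when a buffer is claimed, tx_ix_tail
 * advances when the send completes (or right away if posting fails) -- and
 * the ring slot is the counter masked by the power-of-two ring size. The
 * self-contained model below shows that accounting with hypothetical
 * demo_* names; it does not use the real mlx4 structures or locking.
 */
#define DEMO_RING_SIZE 16	/* power of two, like MLX4_NUM_TUNNEL_BUFS */

struct demo_tx_ring {
	unsigned int head;	/* incremented when a slot is claimed */
	unsigned int tail;	/* incremented when a send completes */
	void *slot[DEMO_RING_SIZE];
};

/* Claim the next slot, or return -1 if the ring is full. One slot is
 * always kept free, matching the ">= N - 1" test in the driver. */
static int demo_ring_claim(struct demo_tx_ring *r)
{
	if (r->head - r->tail >= DEMO_RING_SIZE - 1)
		return -1;
	return (int)(++r->head & (DEMO_RING_SIZE - 1));
}

/* Called from the completion path -- or after a failed post -- to retire
 * the oldest outstanding slot and make it reusable. */
static void demo_ring_complete(struct demo_tx_ring *r)
{
	r->tail++;
}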
static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port, struct rdma_ah_attr *ah_attr) { struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr); if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND) grh->sgid_index = slave; else grh->sgid_index += get_slave_base_gid_ix(dev, slave, port); } static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc) { struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)]; int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1); struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr; struct mlx4_ib_ah ah; struct rdma_ah_attr ah_attr; u8 *slave_id; int slave; int port; u16 vlan_id; u8 qos; u8 *dmac; int sts; /* Get slave that sent this packet */ if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn || wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX || (wc->src_qp & 0x1) != ctx->port - 1 || wc->src_qp & 0x4) { mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp); return; } slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8; if (slave != ctx->slave) { mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: " "belongs to another slave\n", wc->src_qp); return; } /* Map transaction ID */ ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map, sizeof (struct mlx4_tunnel_mad), DMA_FROM_DEVICE); switch (tunnel->mad.mad_hdr.method) { case IB_MGMT_METHOD_SET: case IB_MGMT_METHOD_GET: case IB_MGMT_METHOD_REPORT: case IB_SA_METHOD_GET_TABLE: case IB_SA_METHOD_DELETE: case IB_SA_METHOD_GET_MULTI: case IB_SA_METHOD_GET_TRACE_TBL: slave_id = (u8 *) &tunnel->mad.mad_hdr.tid; if (*slave_id) { mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d " "class:%d slave:%d\n", *slave_id, tunnel->mad.mad_hdr.mgmt_class, slave); return; } else *slave_id = slave; break; default: /* nothing */; } /* Class-specific handling */ switch (tunnel->mad.mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_LID_ROUTED: case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: if (slave != mlx4_master_func_num(dev->dev) && !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port)) return; break; case IB_MGMT_CLASS_SUBN_ADM: if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave, (struct ib_sa_mad *) &tunnel->mad)) return; break; case IB_MGMT_CLASS_CM: if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave, (struct ib_mad *) &tunnel->mad)) return; break; case IB_MGMT_CLASS_DEVICE_MGMT: if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET && tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET) return; break; default: /* Drop unsupported classes for slaves in tunnel mode */ if (slave != mlx4_master_func_num(dev->dev)) { mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d " "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave); return; } } /* We are using standard ib_core services to send the mad, so generate a * stadard address handle by decoding the tunnelled mlx4_ah fields */ memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av)); ah.ibah.device = ctx->ib_dev; port = be32_to_cpu(ah.av.ib.port_pd) >> 24; port = mlx4_slave_convert_port(dev->dev, slave, port); if (port < 0) return; ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff)); ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port); mlx4_ib_query_ah(&ah.ibah, &ah_attr); if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH) fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr); dmac = 
rdma_ah_retrieve_dmac(&ah_attr); if (dmac) memcpy(dmac, tunnel->hdr.mac, ETH_ALEN); vlan_id = be16_to_cpu(tunnel->hdr.vlan); /* if slave have default vlan use it */ if (mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave, &vlan_id, &qos)) rdma_ah_set_sl(&ah_attr, qos); sts = mlx4_ib_send_to_wire(dev, slave, ctx->port, is_proxy_qp0(dev, wc->src_qp, slave) ? IB_QPT_SMI : IB_QPT_GSI, be16_to_cpu(tunnel->hdr.pkey_index), be32_to_cpu(tunnel->hdr.remote_qpn), be32_to_cpu(tunnel->hdr.qkey), &ah_attr, wc->smac, vlan_id, &tunnel->mad); if (sts) pr_debug("failed sending %s to wire on behalf of slave %d (%d)\n", is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI", slave, sts); } static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx, enum ib_qp_type qp_type, int is_tun) { int i; struct mlx4_ib_demux_pv_qp *tun_qp; int rx_buf_size, tx_buf_size; const int nmbr_bufs = is_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS; if (qp_type > IB_QPT_GSI) return -EINVAL; tun_qp = &ctx->qp[qp_type]; tun_qp->ring = kcalloc(nmbr_bufs, sizeof(struct mlx4_ib_buf), GFP_KERNEL); if (!tun_qp->ring) return -ENOMEM; tun_qp->tx_ring = kcalloc(nmbr_bufs, sizeof (struct mlx4_ib_tun_tx_buf), GFP_KERNEL); if (!tun_qp->tx_ring) { kfree(tun_qp->ring); tun_qp->ring = NULL; return -ENOMEM; } if (is_tun) { rx_buf_size = sizeof (struct mlx4_tunnel_mad); tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad); } else { rx_buf_size = sizeof (struct mlx4_mad_rcv_buf); tx_buf_size = sizeof (struct mlx4_mad_snd_buf); } for (i = 0; i < nmbr_bufs; i++) { tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL); if (!tun_qp->ring[i].addr) goto err; tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev, tun_qp->ring[i].addr, rx_buf_size, DMA_FROM_DEVICE); if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) { kfree(tun_qp->ring[i].addr); goto err; } } for (i = 0; i < nmbr_bufs; i++) { tun_qp->tx_ring[i].buf.addr = kmalloc(tx_buf_size, GFP_KERNEL); if (!tun_qp->tx_ring[i].buf.addr) goto tx_err; tun_qp->tx_ring[i].buf.map = ib_dma_map_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.addr, tx_buf_size, DMA_TO_DEVICE); if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->tx_ring[i].buf.map)) { kfree(tun_qp->tx_ring[i].buf.addr); goto tx_err; } tun_qp->tx_ring[i].ah = NULL; } spin_lock_init(&tun_qp->tx_lock); tun_qp->tx_ix_head = 0; tun_qp->tx_ix_tail = 0; tun_qp->proxy_qpt = qp_type; return 0; tx_err: while (i > 0) { --i; ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map, tx_buf_size, DMA_TO_DEVICE); kfree(tun_qp->tx_ring[i].buf.addr); } i = nmbr_bufs; err: while (i > 0) { --i; ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map, rx_buf_size, DMA_FROM_DEVICE); kfree(tun_qp->ring[i].addr); } kfree(tun_qp->tx_ring); tun_qp->tx_ring = NULL; kfree(tun_qp->ring); tun_qp->ring = NULL; return -ENOMEM; } static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx, enum ib_qp_type qp_type, int is_tun) { int i; struct mlx4_ib_demux_pv_qp *tun_qp; int rx_buf_size, tx_buf_size; const int nmbr_bufs = is_tun ? 
MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS; if (qp_type > IB_QPT_GSI) return; tun_qp = &ctx->qp[qp_type]; if (is_tun) { rx_buf_size = sizeof (struct mlx4_tunnel_mad); tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad); } else { rx_buf_size = sizeof (struct mlx4_mad_rcv_buf); tx_buf_size = sizeof (struct mlx4_mad_snd_buf); } for (i = 0; i < nmbr_bufs; i++) { ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map, rx_buf_size, DMA_FROM_DEVICE); kfree(tun_qp->ring[i].addr); } for (i = 0; i < nmbr_bufs; i++) { ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map, tx_buf_size, DMA_TO_DEVICE); kfree(tun_qp->tx_ring[i].buf.addr); if (tun_qp->tx_ring[i].ah) rdma_destroy_ah(tun_qp->tx_ring[i].ah, 0); } kfree(tun_qp->tx_ring); kfree(tun_qp->ring); } static void mlx4_ib_tunnel_comp_worker(struct work_struct *work) { struct mlx4_ib_demux_pv_ctx *ctx; struct mlx4_ib_demux_pv_qp *tun_qp; struct ib_wc wc; int ret; ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work); ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); while (ib_poll_cq(ctx->cq, 1, &wc) == 1) { tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; if (wc.status == IB_WC_SUCCESS) { switch (wc.opcode) { case IB_WC_RECV: mlx4_ib_multiplex_mad(ctx, &wc); ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)); if (ret) pr_err("Failed reposting tunnel " "buf:%lld\n", wc.wr_id); break; case IB_WC_SEND: rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah = NULL; spin_lock(&tun_qp->tx_lock); tun_qp->tx_ix_tail++; spin_unlock(&tun_qp->tx_lock); break; default: break; } } else { pr_debug("mlx4_ib: completion error in tunnel: %d." " status = %d, wrid = 0x%llx\n", ctx->slave, wc.status, wc.wr_id); if (!MLX4_TUN_IS_RECV(wc.wr_id)) { rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah = NULL; spin_lock(&tun_qp->tx_lock); tun_qp->tx_ix_tail++; spin_unlock(&tun_qp->tx_lock); } } } } static void pv_qp_event_handler(struct ib_event *event, void *qp_context) { struct mlx4_ib_demux_pv_ctx *sqp = qp_context; /* It's worse than that! He's dead, Jim! */ pr_err("Fatal error (%d) on a MAD QP on port %d\n", event->event, sqp->port); } static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx, enum ib_qp_type qp_type, int create_tun) { int i, ret; struct mlx4_ib_demux_pv_qp *tun_qp; struct mlx4_ib_qp_tunnel_init_attr qp_init_attr; struct ib_qp_attr attr; int qp_attr_mask_INIT; const int nmbr_bufs = create_tun ? 
MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS; if (qp_type > IB_QPT_GSI) return -EINVAL; tun_qp = &ctx->qp[qp_type]; memset(&qp_init_attr, 0, sizeof qp_init_attr); qp_init_attr.init_attr.send_cq = ctx->cq; qp_init_attr.init_attr.recv_cq = ctx->cq; qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; qp_init_attr.init_attr.cap.max_send_wr = nmbr_bufs; qp_init_attr.init_attr.cap.max_recv_wr = nmbr_bufs; qp_init_attr.init_attr.cap.max_send_sge = 1; qp_init_attr.init_attr.cap.max_recv_sge = 1; if (create_tun) { qp_init_attr.init_attr.qp_type = IB_QPT_UD; qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP; qp_init_attr.port = ctx->port; qp_init_attr.slave = ctx->slave; qp_init_attr.proxy_qp_type = qp_type; qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT; } else { qp_init_attr.init_attr.qp_type = qp_type; qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP; qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY; } qp_init_attr.init_attr.port_num = ctx->port; qp_init_attr.init_attr.qp_context = ctx; qp_init_attr.init_attr.event_handler = pv_qp_event_handler; tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr); if (IS_ERR(tun_qp->qp)) { ret = PTR_ERR(tun_qp->qp); tun_qp->qp = NULL; pr_err("Couldn't create %s QP (%d)\n", create_tun ? "tunnel" : "special", ret); return ret; } memset(&attr, 0, sizeof attr); attr.qp_state = IB_QPS_INIT; ret = 0; if (create_tun) ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave, ctx->port, IB_DEFAULT_PKEY_FULL, &attr.pkey_index); if (ret || !create_tun) attr.pkey_index = to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0]; attr.qkey = IB_QP1_QKEY; attr.port_num = ctx->port; ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT); if (ret) { pr_err("Couldn't change %s qp state to INIT (%d)\n", create_tun ? "tunnel" : "special", ret); goto err_qp; } attr.qp_state = IB_QPS_RTR; ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE); if (ret) { pr_err("Couldn't change %s qp state to RTR (%d)\n", create_tun ? "tunnel" : "special", ret); goto err_qp; } attr.qp_state = IB_QPS_RTS; attr.sq_psn = 0; ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN); if (ret) { pr_err("Couldn't change %s qp state to RTS (%d)\n", create_tun ? 
"tunnel" : "special", ret); goto err_qp; } for (i = 0; i < nmbr_bufs; i++) { ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i); if (ret) { pr_err(" mlx4_ib_post_pv_buf error" " (err = %d, i = %d)\n", ret, i); goto err_qp; } } return 0; err_qp: ib_destroy_qp(tun_qp->qp); tun_qp->qp = NULL; return ret; } /* * IB MAD completion callback for real SQPs */ static void mlx4_ib_sqp_comp_worker(struct work_struct *work) { struct mlx4_ib_demux_pv_ctx *ctx; struct mlx4_ib_demux_pv_qp *sqp; struct ib_wc wc; struct ib_grh *grh; struct ib_mad *mad; ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work); ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) { sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; if (wc.status == IB_WC_SUCCESS) { switch (wc.opcode) { case IB_WC_SEND: kfree(sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah); sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah = NULL; spin_lock(&sqp->tx_lock); sqp->tx_ix_tail++; spin_unlock(&sqp->tx_lock); break; case IB_WC_RECV: mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *) (sqp->ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].addr))->payload); grh = &(((struct mlx4_mad_rcv_buf *) (sqp->ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].addr))->grh); mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad); if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1))) pr_err("Failed reposting SQP " "buf:%lld\n", wc.wr_id); break; default: break; } } else { pr_debug("mlx4_ib: completion error in tunnel: %d." " status = %d, wrid = 0x%llx\n", ctx->slave, wc.status, wc.wr_id); if (!MLX4_TUN_IS_RECV(wc.wr_id)) { kfree(sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah); sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah = NULL; spin_lock(&sqp->tx_lock); sqp->tx_ix_tail++; spin_unlock(&sqp->tx_lock); } } } } static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port, struct mlx4_ib_demux_pv_ctx **ret_ctx) { struct mlx4_ib_demux_pv_ctx *ctx; *ret_ctx = NULL; ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->ib_dev = &dev->ib_dev; ctx->port = port; ctx->slave = slave; *ret_ctx = ctx; return 0; } static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port) { if (dev->sriov.demux[port - 1].tun[slave]) { kfree(dev->sriov.demux[port - 1].tun[slave]); dev->sriov.demux[port - 1].tun[slave] = NULL; } } static int create_pv_resources(struct ib_device *ibdev, int slave, int port, int create_tun, struct mlx4_ib_demux_pv_ctx *ctx) { int ret, cq_size; struct ib_cq_init_attr cq_attr = {}; const int nmbr_bufs = create_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS; if (ctx->state != DEMUX_PV_STATE_DOWN) return -EEXIST; ctx->state = DEMUX_PV_STATE_STARTING; /* have QP0 only if link layer is IB */ if (rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND) ctx->has_smi = 1; if (ctx->has_smi) { ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun); if (ret) { pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret); goto err_out; } } ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun); if (ret) { pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret); goto err_out_qp0; } cq_size = 2 * nmbr_bufs; if (ctx->has_smi) cq_size *= 2; cq_attr.cqe = cq_size; ctx->cq = ib_create_cq(ctx->ib_dev, create_tun ? 
mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler, NULL, ctx, &cq_attr); if (IS_ERR(ctx->cq)) { ret = PTR_ERR(ctx->cq); pr_err("Couldn't create tunnel CQ (%d)\n", ret); goto err_buf; } ctx->pd = ib_alloc_pd(ctx->ib_dev, 0); if (IS_ERR(ctx->pd)) { ret = PTR_ERR(ctx->pd); pr_err("Couldn't create tunnel PD (%d)\n", ret); goto err_cq; } if (ctx->has_smi) { ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun); if (ret) { pr_err("Couldn't create %s QP0 (%d)\n", create_tun ? "tunnel for" : "", ret); goto err_pd; } } ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun); if (ret) { pr_err("Couldn't create %s QP1 (%d)\n", create_tun ? "tunnel for" : "", ret); goto err_qp0; } if (create_tun) INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker); else INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker); ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq; ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); if (ret) { pr_err("Couldn't arm tunnel cq (%d)\n", ret); goto err_wq; } ctx->state = DEMUX_PV_STATE_ACTIVE; return 0; err_wq: ctx->wq = NULL; ib_destroy_qp(ctx->qp[1].qp); ctx->qp[1].qp = NULL; err_qp0: if (ctx->has_smi) ib_destroy_qp(ctx->qp[0].qp); ctx->qp[0].qp = NULL; err_pd: ib_dealloc_pd(ctx->pd); ctx->pd = NULL; err_cq: ib_destroy_cq(ctx->cq); ctx->cq = NULL; err_buf: mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun); err_out_qp0: if (ctx->has_smi) mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun); err_out: ctx->state = DEMUX_PV_STATE_DOWN; return ret; } static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port, struct mlx4_ib_demux_pv_ctx *ctx, int flush) { if (!ctx) return; if (ctx->state > DEMUX_PV_STATE_DOWN) { ctx->state = DEMUX_PV_STATE_DOWNING; if (flush) flush_workqueue(ctx->wq); if (ctx->has_smi) { ib_destroy_qp(ctx->qp[0].qp); ctx->qp[0].qp = NULL; mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1); } ib_destroy_qp(ctx->qp[1].qp); ctx->qp[1].qp = NULL; mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1); ib_dealloc_pd(ctx->pd); ctx->pd = NULL; ib_destroy_cq(ctx->cq); ctx->cq = NULL; ctx->state = DEMUX_PV_STATE_DOWN; } } static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave, int port, int do_init) { int ret = 0; if (!do_init) { clean_vf_mcast(&dev->sriov.demux[port - 1], slave); /* for master, destroy real sqp resources */ if (slave == mlx4_master_func_num(dev->dev)) destroy_pv_resources(dev, slave, port, dev->sriov.sqps[port - 1], 1); /* destroy the tunnel qp resources */ destroy_pv_resources(dev, slave, port, dev->sriov.demux[port - 1].tun[slave], 1); return 0; } /* create the tunnel qp resources */ ret = create_pv_resources(&dev->ib_dev, slave, port, 1, dev->sriov.demux[port - 1].tun[slave]); /* for master, create the real sqp resources */ if (!ret && slave == mlx4_master_func_num(dev->dev)) ret = create_pv_resources(&dev->ib_dev, slave, port, 0, dev->sriov.sqps[port - 1]); return ret; } void mlx4_ib_tunnels_update_work(struct work_struct *work) { struct mlx4_ib_demux_work *dmxw; dmxw = container_of(work, struct mlx4_ib_demux_work, work); mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port, dmxw->do_init); kfree(dmxw); return; } static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, struct mlx4_ib_demux_ctx *ctx, int port) { char name[12]; int ret = 0; int i; ctx->tun = kcalloc(dev->dev->caps.sqp_demux, sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL); if (!ctx->tun) return -ENOMEM; ctx->dev = dev; ctx->port = port; ctx->ib_dev = &dev->ib_dev; for (i = 0; i < min(dev->dev->caps.sqp_demux, 
(u16)(dev->dev->persist->num_vfs + 1)); i++) { struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev->dev, i); if (!test_bit(port - 1, actv_ports.ports)) continue; ret = alloc_pv_object(dev, i, port, &ctx->tun[i]); if (ret) { ret = -ENOMEM; goto err_mcg; } } ret = mlx4_ib_mcg_port_init(ctx); if (ret) { pr_err("Failed initializing mcg para-virt (%d)\n", ret); goto err_mcg; } snprintf(name, sizeof(name), "mlx4_ibt%d", port); ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); if (!ctx->wq) { pr_err("Failed to create tunnelling WQ for port %d\n", port); ret = -ENOMEM; goto err_wq; } snprintf(name, sizeof(name), "mlx4_ibwi%d", port); ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); if (!ctx->wi_wq) { pr_err("Failed to create wire WQ for port %d\n", port); ret = -ENOMEM; goto err_wiwq; } snprintf(name, sizeof(name), "mlx4_ibud%d", port); ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); if (!ctx->ud_wq) { pr_err("Failed to create up/down WQ for port %d\n", port); ret = -ENOMEM; goto err_udwq; } return 0; err_udwq: destroy_workqueue(ctx->wi_wq); ctx->wi_wq = NULL; err_wiwq: destroy_workqueue(ctx->wq); ctx->wq = NULL; err_wq: mlx4_ib_mcg_port_cleanup(ctx, 1); err_mcg: for (i = 0; i < dev->dev->caps.sqp_demux; i++) free_pv_object(dev, i, port); kfree(ctx->tun); ctx->tun = NULL; return ret; } static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx) { if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) { sqp_ctx->state = DEMUX_PV_STATE_DOWNING; flush_workqueue(sqp_ctx->wq); if (sqp_ctx->has_smi) { ib_destroy_qp(sqp_ctx->qp[0].qp); sqp_ctx->qp[0].qp = NULL; mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0); } ib_destroy_qp(sqp_ctx->qp[1].qp); sqp_ctx->qp[1].qp = NULL; mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0); ib_dealloc_pd(sqp_ctx->pd); sqp_ctx->pd = NULL; ib_destroy_cq(sqp_ctx->cq); sqp_ctx->cq = NULL; sqp_ctx->state = DEMUX_PV_STATE_DOWN; } } static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx) { int i; if (ctx) { struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); mlx4_ib_mcg_port_cleanup(ctx, 1); for (i = 0; i < dev->dev->caps.sqp_demux; i++) { if (!ctx->tun[i]) continue; if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN) ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING; } flush_workqueue(ctx->wq); flush_workqueue(ctx->wi_wq); for (i = 0; i < dev->dev->caps.sqp_demux; i++) { destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0); free_pv_object(dev, i, ctx->port); } kfree(ctx->tun); destroy_workqueue(ctx->ud_wq); destroy_workqueue(ctx->wi_wq); destroy_workqueue(ctx->wq); } } static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init) { int i; if (!mlx4_is_master(dev->dev)) return; /* initialize or tear down tunnel QPs for the master */ for (i = 0; i < dev->dev->caps.num_ports; i++) mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init); return; } int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev) { int i = 0; int err; if (!mlx4_is_mfunc(dev->dev)) return 0; dev->sriov.is_going_down = 0; spin_lock_init(&dev->sriov.going_down_lock); mlx4_ib_cm_paravirt_init(dev); mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n"); if (mlx4_is_slave(dev->dev)) { mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n"); return 0; } for (i = 0; i < dev->dev->caps.sqp_demux; i++) { if (i == mlx4_master_func_num(dev->dev)) mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid); else mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid()); } err = mlx4_ib_init_alias_guid_service(dev); if (err) { mlx4_ib_warn(&dev->ib_dev, 
"Failed init alias guid process.\n"); goto paravirt_err; } err = mlx4_ib_device_register_sysfs(dev); if (err) { mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n"); goto sysfs_err; } mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n", dev->dev->caps.sqp_demux); for (i = 0; i < dev->num_ports; i++) { union ib_gid gid; err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1); if (err) goto demux_err; dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id; atomic64_set(&dev->sriov.demux[i].subnet_prefix, be64_to_cpu(gid.global.subnet_prefix)); err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1, &dev->sriov.sqps[i]); if (err) goto demux_err; err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1); if (err) goto free_pv; } mlx4_ib_master_tunnels(dev, 1); return 0; free_pv: free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1); demux_err: while (--i >= 0) { free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1); mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]); } mlx4_ib_device_unregister_sysfs(dev); sysfs_err: mlx4_ib_destroy_alias_guid_service(dev); paravirt_err: mlx4_ib_cm_paravirt_clean(dev, -1); return err; } void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev) { int i; unsigned long flags; if (!mlx4_is_mfunc(dev->dev)) return; spin_lock_irqsave(&dev->sriov.going_down_lock, flags); dev->sriov.is_going_down = 1; spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); if (mlx4_is_master(dev->dev)) { for (i = 0; i < dev->num_ports; i++) { flush_workqueue(dev->sriov.demux[i].ud_wq); mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]); kfree(dev->sriov.sqps[i]); dev->sriov.sqps[i] = NULL; mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]); } mlx4_ib_cm_paravirt_clean(dev, -1); mlx4_ib_destroy_alias_guid_service(dev); mlx4_ib_device_unregister_sysfs(dev); } }
linux-master
drivers/infiniband/hw/mlx4/mad.c
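The create_pv_sqp() path in the file above walks each proxy/tunnel UD QP through the standard RESET -> INIT -> RTR -> RTS bring-up with ib_modify_qp(). A minimal, self-contained sketch of that sequence follows; the helper name example_ud_qp_bringup and the caller-supplied pkey_index are assumptions made up for this example, not part of the mlx4 driver.

/*
 * Illustrative sketch only: the three-step UD QP bring-up used by
 * create_pv_sqp(), reduced to a generic helper. Function name and the
 * pkey_index parameter are assumptions for the example.
 */
#include <linux/string.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>	/* IB_QP1_QKEY */

static int example_ud_qp_bringup(struct ib_qp *qp, u32 port_num, u16 pkey_index)
{
	struct ib_qp_attr attr = {};
	int ret;

	/* RESET -> INIT: bind the QP to a port, pkey and the well-known QKEY */
	attr.qp_state = IB_QPS_INIT;
	attr.pkey_index = pkey_index;
	attr.qkey = IB_QP1_QKEY;
	attr.port_num = port_num;
	ret = ib_modify_qp(qp, &attr,
			   IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_QKEY | IB_QP_PORT);
	if (ret)
		return ret;

	/* INIT -> RTR: UD QPs need no address vector at this step */
	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret)
		return ret;

	/* RTR -> RTS: pick an initial send PSN so the QP can transmit */
	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
}

As in create_pv_sqp(), any failure after ib_create_qp() must be unwound by destroying the QP; only the state-transition calls themselves are shown here.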
/* * Copyright (c) 2012 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /*#include "core_priv.h"*/ #include "mlx4_ib.h" #include <linux/slab.h> #include <linux/string.h> #include <linux/stat.h> #include <rdma/ib_mad.h> /*show_admin_alias_guid returns the administratively assigned value of that GUID. * Values returned in buf parameter string: * 0 - requests opensm to assign a value. * ffffffffffffffff - delete this entry. * other - value assigned by administrator. */ static ssize_t show_admin_alias_guid(struct device *dev, struct device_attribute *attr, char *buf) { struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry = container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry); struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx; struct mlx4_ib_dev *mdev = port->dev; __be64 sysadmin_ag_val; sysadmin_ag_val = mlx4_get_admin_guid(mdev->dev, mlx4_ib_iov_dentry->entry_num, port->num); return sysfs_emit(buf, "%llx\n", be64_to_cpu(sysadmin_ag_val)); } /* store_admin_alias_guid stores the (new) administratively assigned value of that GUID. * Values in buf parameter string: * 0 - requests opensm to assign a value. * 0xffffffffffffffff - delete this entry. * other - guid value assigned by the administrator. */ static ssize_t store_admin_alias_guid(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int record_num;/*0-15*/ int guid_index_in_rec; /*0 - 7*/ struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry = container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry); struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx; struct mlx4_ib_dev *mdev = port->dev; u64 sysadmin_ag_val; unsigned long flags; record_num = mlx4_ib_iov_dentry->entry_num / 8; guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8; if (0 == record_num && 0 == guid_index_in_rec) { pr_err("GUID 0 block 0 is RO\n"); return count; } spin_lock_irqsave(&mdev->sriov.alias_guid.ag_work_lock, flags); sscanf(buf, "%llx", &sysadmin_ag_val); *(__be64 *)&mdev->sriov.alias_guid.ports_guid[port->num - 1]. all_rec_per_port[record_num]. 
all_recs[GUID_REC_SIZE * guid_index_in_rec] = cpu_to_be64(sysadmin_ag_val); /* Change the state to be pending for update */ mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].status = MLX4_GUID_INFO_STATUS_IDLE ; mlx4_set_admin_guid(mdev->dev, cpu_to_be64(sysadmin_ag_val), mlx4_ib_iov_dentry->entry_num, port->num); /* set the record index */ mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec); spin_unlock_irqrestore(&mdev->sriov.alias_guid.ag_work_lock, flags); mlx4_ib_init_alias_guid_work(mdev, port->num - 1); return count; } static ssize_t show_port_gid(struct device *dev, struct device_attribute *attr, char *buf) { struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry = container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry); struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx; struct mlx4_ib_dev *mdev = port->dev; union ib_gid gid; int ret; __be16 *raw; ret = __mlx4_ib_query_gid(&mdev->ib_dev, port->num, mlx4_ib_iov_dentry->entry_num, &gid, 1); if (ret) return ret; raw = (__be16 *)gid.raw; return sysfs_emit(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", be16_to_cpu(raw[0]), be16_to_cpu(raw[1]), be16_to_cpu(raw[2]), be16_to_cpu(raw[3]), be16_to_cpu(raw[4]), be16_to_cpu(raw[5]), be16_to_cpu(raw[6]), be16_to_cpu(raw[7])); } static ssize_t show_phys_port_pkey(struct device *dev, struct device_attribute *attr, char *buf) { struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry = container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry); struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx; struct mlx4_ib_dev *mdev = port->dev; u16 pkey; ssize_t ret; ret = __mlx4_ib_query_pkey(&mdev->ib_dev, port->num, mlx4_ib_iov_dentry->entry_num, &pkey, 1); if (ret) return ret; return sysfs_emit(buf, "0x%04x\n", pkey); } #define DENTRY_REMOVE(_dentry) \ do { \ sysfs_remove_file((_dentry)->kobj, &(_dentry)->dentry.attr); \ } while (0); static int create_sysfs_entry(void *_ctx, struct mlx4_ib_iov_sysfs_attr *_dentry, char *_name, struct kobject *_kobj, ssize_t (*show)(struct device *dev, struct device_attribute *attr, char *buf), ssize_t (*store)(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) ) { int ret = 0; struct mlx4_ib_iov_sysfs_attr *vdentry = _dentry; vdentry->ctx = _ctx; vdentry->dentry.show = show; vdentry->dentry.store = store; sysfs_attr_init(&vdentry->dentry.attr); vdentry->dentry.attr.name = vdentry->name; vdentry->dentry.attr.mode = 0; vdentry->kobj = _kobj; snprintf(vdentry->name, 15, "%s", _name); if (vdentry->dentry.store) vdentry->dentry.attr.mode |= S_IWUSR; if (vdentry->dentry.show) vdentry->dentry.attr.mode |= S_IRUGO; ret = sysfs_create_file(vdentry->kobj, &vdentry->dentry.attr); if (ret) { pr_err("failed to create %s\n", vdentry->dentry.attr.name); vdentry->ctx = NULL; return ret; } return ret; } int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num, struct attribute *attr) { struct mlx4_ib_iov_port *port = &device->iov_ports[port_num - 1]; int ret; ret = sysfs_create_file(port->mcgs_parent, attr); if (ret) pr_err("failed to create %s\n", attr->name); return ret; } void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num, struct attribute *attr) { struct mlx4_ib_iov_port *port = &device->iov_ports[port_num - 1]; sysfs_remove_file(port->mcgs_parent, attr); } static int add_port_entries(struct mlx4_ib_dev *device, int port_num) { int i; char buff[11]; struct mlx4_ib_iov_port *port = NULL; int ret = 0 
; struct ib_port_attr attr; memset(&attr, 0, sizeof(attr)); /* get the physical gid and pkey table sizes.*/ ret = __mlx4_ib_query_port(&device->ib_dev, port_num, &attr, 1); if (ret) goto err; port = &device->iov_ports[port_num - 1]; port->dev = device; port->num = port_num; /* Directory structure: * iov - * port num - * admin_guids * gids (operational) * mcg_table */ port->dentr_ar = kzalloc(sizeof (struct mlx4_ib_iov_sysfs_attr_ar), GFP_KERNEL); if (!port->dentr_ar) { ret = -ENOMEM; goto err; } sprintf(buff, "%d", port_num); port->cur_port = kobject_create_and_add(buff, kobject_get(device->ports_parent)); if (!port->cur_port) { ret = -ENOMEM; goto kobj_create_err; } /* admin GUIDs */ port->admin_alias_parent = kobject_create_and_add("admin_guids", kobject_get(port->cur_port)); if (!port->admin_alias_parent) { ret = -ENOMEM; goto err_admin_guids; } for (i = 0 ; i < attr.gid_tbl_len; i++) { sprintf(buff, "%d", i); port->dentr_ar->dentries[i].entry_num = i; ret = create_sysfs_entry(port, &port->dentr_ar->dentries[i], buff, port->admin_alias_parent, show_admin_alias_guid, store_admin_alias_guid); if (ret) goto err_admin_alias_parent; } /* gids subdirectory (operational gids) */ port->gids_parent = kobject_create_and_add("gids", kobject_get(port->cur_port)); if (!port->gids_parent) { ret = -ENOMEM; goto err_gids; } for (i = 0 ; i < attr.gid_tbl_len; i++) { sprintf(buff, "%d", i); port->dentr_ar->dentries[attr.gid_tbl_len + i].entry_num = i; ret = create_sysfs_entry(port, &port->dentr_ar->dentries[attr.gid_tbl_len + i], buff, port->gids_parent, show_port_gid, NULL); if (ret) goto err_gids_parent; } /* physical port pkey table */ port->pkeys_parent = kobject_create_and_add("pkeys", kobject_get(port->cur_port)); if (!port->pkeys_parent) { ret = -ENOMEM; goto err_pkeys; } for (i = 0 ; i < attr.pkey_tbl_len; i++) { sprintf(buff, "%d", i); port->dentr_ar->dentries[2 * attr.gid_tbl_len + i].entry_num = i; ret = create_sysfs_entry(port, &port->dentr_ar->dentries[2 * attr.gid_tbl_len + i], buff, port->pkeys_parent, show_phys_port_pkey, NULL); if (ret) goto err_pkeys_parent; } /* MCGs table */ port->mcgs_parent = kobject_create_and_add("mcgs", kobject_get(port->cur_port)); if (!port->mcgs_parent) { ret = -ENOMEM; goto err_mcgs; } return 0; err_mcgs: kobject_put(port->cur_port); err_pkeys_parent: kobject_put(port->pkeys_parent); err_pkeys: kobject_put(port->cur_port); err_gids_parent: kobject_put(port->gids_parent); err_gids: kobject_put(port->cur_port); err_admin_alias_parent: kobject_put(port->admin_alias_parent); err_admin_guids: kobject_put(port->cur_port); kobject_put(port->cur_port); /* once more for create_and_add buff */ kobj_create_err: kobject_put(device->ports_parent); kfree(port->dentr_ar); err: pr_err("add_port_entries FAILED: for port:%d, error: %d\n", port_num, ret); return ret; } static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max) { /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n * with no ARI only 3 last bits are used so when the fn is higher than 8 * need to add it to the dev num, so count in the last number will be * modulo 8 */ snprintf(name, max, "%.8s%.2d.%d", pci_name(dev->dev->persist->pdev), i / 8, i % 8); } struct mlx4_port { struct kobject kobj; struct mlx4_ib_dev *dev; struct attribute_group pkey_group; struct attribute_group gid_group; struct device_attribute enable_smi_admin; struct device_attribute smi_enabled; int slave; u8 port_num; }; static void mlx4_port_release(struct kobject *kobj) { struct mlx4_port *p = container_of(kobj, struct mlx4_port, 
kobj); struct attribute *a; int i; for (i = 0; (a = p->pkey_group.attrs[i]); ++i) kfree(a); kfree(p->pkey_group.attrs); for (i = 0; (a = p->gid_group.attrs[i]); ++i) kfree(a); kfree(p->gid_group.attrs); kfree(p); } struct port_attribute { struct attribute attr; ssize_t (*show)(struct mlx4_port *, struct port_attribute *, char *buf); ssize_t (*store)(struct mlx4_port *, struct port_attribute *, const char *buf, size_t count); }; static ssize_t port_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct port_attribute *port_attr = container_of(attr, struct port_attribute, attr); struct mlx4_port *p = container_of(kobj, struct mlx4_port, kobj); if (!port_attr->show) return -EIO; return port_attr->show(p, port_attr, buf); } static ssize_t port_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t size) { struct port_attribute *port_attr = container_of(attr, struct port_attribute, attr); struct mlx4_port *p = container_of(kobj, struct mlx4_port, kobj); if (!port_attr->store) return -EIO; return port_attr->store(p, port_attr, buf, size); } static const struct sysfs_ops port_sysfs_ops = { .show = port_attr_show, .store = port_attr_store, }; static struct kobj_type port_type = { .release = mlx4_port_release, .sysfs_ops = &port_sysfs_ops, }; struct port_table_attribute { struct port_attribute attr; char name[8]; int index; }; static ssize_t show_port_pkey(struct mlx4_port *p, struct port_attribute *attr, char *buf) { struct port_table_attribute *tab_attr = container_of(attr, struct port_table_attribute, attr); struct pkey_mgt *m = &p->dev->pkeys; u8 key = m->virt2phys_pkey[p->slave][p->port_num - 1][tab_attr->index]; if (key >= p->dev->dev->caps.pkey_table_len[p->port_num]) return sysfs_emit(buf, "none\n"); return sysfs_emit(buf, "%d\n", key); } static ssize_t store_port_pkey(struct mlx4_port *p, struct port_attribute *attr, const char *buf, size_t count) { struct port_table_attribute *tab_attr = container_of(attr, struct port_table_attribute, attr); int idx; int err; /* do not allow remapping Dom0 virtual pkey table */ if (p->slave == mlx4_master_func_num(p->dev->dev)) return -EINVAL; if (!strncasecmp(buf, "no", 2)) idx = p->dev->dev->phys_caps.pkey_phys_table_len[p->port_num] - 1; else if (sscanf(buf, "%i", &idx) != 1 || idx >= p->dev->dev->caps.pkey_table_len[p->port_num] || idx < 0) return -EINVAL; p->dev->pkeys.virt2phys_pkey[p->slave][p->port_num - 1] [tab_attr->index] = idx; mlx4_sync_pkey_table(p->dev->dev, p->slave, p->port_num, tab_attr->index, idx); err = mlx4_gen_pkey_eqe(p->dev->dev, p->slave, p->port_num); if (err) { pr_err("mlx4_gen_pkey_eqe failed for slave %d," " port %d, index %d\n", p->slave, p->port_num, idx); return err; } return count; } static ssize_t show_port_gid_idx(struct mlx4_port *p, struct port_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", p->slave); } static struct attribute ** alloc_group_attrs(ssize_t (*show)(struct mlx4_port *, struct port_attribute *, char *buf), ssize_t (*store)(struct mlx4_port *, struct port_attribute *, const char *buf, size_t count), int len) { struct attribute **tab_attr; struct port_table_attribute *element; int i; tab_attr = kcalloc(1 + len, sizeof (struct attribute *), GFP_KERNEL); if (!tab_attr) return NULL; for (i = 0; i < len; i++) { element = kzalloc(sizeof (struct port_table_attribute), GFP_KERNEL); if (!element) goto err; if (snprintf(element->name, sizeof (element->name), "%d", i) >= sizeof (element->name)) { kfree(element); goto err; } 
sysfs_attr_init(&element->attr.attr); element->attr.attr.name = element->name; if (store) { element->attr.attr.mode = S_IWUSR | S_IRUGO; element->attr.store = store; } else element->attr.attr.mode = S_IRUGO; element->attr.show = show; element->index = i; tab_attr[i] = &element->attr.attr; } return tab_attr; err: while (--i >= 0) kfree(tab_attr[i]); kfree(tab_attr); return NULL; } static ssize_t sysfs_show_smi_enabled(struct device *dev, struct device_attribute *attr, char *buf) { struct mlx4_port *p = container_of(attr, struct mlx4_port, smi_enabled); return sysfs_emit(buf, "%d\n", !!mlx4_vf_smi_enabled(p->dev->dev, p->slave, p->port_num)); } static ssize_t sysfs_show_enable_smi_admin(struct device *dev, struct device_attribute *attr, char *buf) { struct mlx4_port *p = container_of(attr, struct mlx4_port, enable_smi_admin); return sysfs_emit(buf, "%d\n", !!mlx4_vf_get_enable_smi_admin(p->dev->dev, p->slave, p->port_num)); } static ssize_t sysfs_store_enable_smi_admin(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mlx4_port *p = container_of(attr, struct mlx4_port, enable_smi_admin); int enable; if (sscanf(buf, "%i", &enable) != 1 || enable < 0 || enable > 1) return -EINVAL; if (mlx4_vf_set_enable_smi_admin(p->dev->dev, p->slave, p->port_num, enable)) return -EINVAL; return count; } static int add_vf_smi_entries(struct mlx4_port *p) { int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) == IB_LINK_LAYER_ETHERNET; int ret; /* do not display entries if eth transport, or if master */ if (is_eth || p->slave == mlx4_master_func_num(p->dev->dev)) return 0; sysfs_attr_init(&p->smi_enabled.attr); p->smi_enabled.show = sysfs_show_smi_enabled; p->smi_enabled.store = NULL; p->smi_enabled.attr.name = "smi_enabled"; p->smi_enabled.attr.mode = 0444; ret = sysfs_create_file(&p->kobj, &p->smi_enabled.attr); if (ret) { pr_err("failed to create smi_enabled\n"); return ret; } sysfs_attr_init(&p->enable_smi_admin.attr); p->enable_smi_admin.show = sysfs_show_enable_smi_admin; p->enable_smi_admin.store = sysfs_store_enable_smi_admin; p->enable_smi_admin.attr.name = "enable_smi_admin"; p->enable_smi_admin.attr.mode = 0644; ret = sysfs_create_file(&p->kobj, &p->enable_smi_admin.attr); if (ret) { pr_err("failed to create enable_smi_admin\n"); sysfs_remove_file(&p->kobj, &p->smi_enabled.attr); return ret; } return 0; } static void remove_vf_smi_entries(struct mlx4_port *p) { int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) == IB_LINK_LAYER_ETHERNET; if (is_eth || p->slave == mlx4_master_func_num(p->dev->dev)) return; sysfs_remove_file(&p->kobj, &p->smi_enabled.attr); sysfs_remove_file(&p->kobj, &p->enable_smi_admin.attr); } static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave) { struct mlx4_port *p; int i; int ret; int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) == IB_LINK_LAYER_ETHERNET; p = kzalloc(sizeof *p, GFP_KERNEL); if (!p) return -ENOMEM; p->dev = dev; p->port_num = port_num; p->slave = slave; ret = kobject_init_and_add(&p->kobj, &port_type, kobject_get(dev->dev_ports_parent[slave]), "%d", port_num); if (ret) goto err_alloc; p->pkey_group.name = "pkey_idx"; p->pkey_group.attrs = alloc_group_attrs(show_port_pkey, is_eth ? 
NULL : store_port_pkey, dev->dev->caps.pkey_table_len[port_num]); if (!p->pkey_group.attrs) { ret = -ENOMEM; goto err_alloc; } ret = sysfs_create_group(&p->kobj, &p->pkey_group); if (ret) goto err_free_pkey; p->gid_group.name = "gid_idx"; p->gid_group.attrs = alloc_group_attrs(show_port_gid_idx, NULL, 1); if (!p->gid_group.attrs) { ret = -ENOMEM; goto err_free_pkey; } ret = sysfs_create_group(&p->kobj, &p->gid_group); if (ret) goto err_free_gid; ret = add_vf_smi_entries(p); if (ret) goto err_free_gid; list_add_tail(&p->kobj.entry, &dev->pkeys.pkey_port_list[slave]); return 0; err_free_gid: kfree(p->gid_group.attrs[0]); kfree(p->gid_group.attrs); err_free_pkey: for (i = 0; i < dev->dev->caps.pkey_table_len[port_num]; ++i) kfree(p->pkey_group.attrs[i]); kfree(p->pkey_group.attrs); err_alloc: kobject_put(dev->dev_ports_parent[slave]); kfree(p); return ret; } static int register_one_pkey_tree(struct mlx4_ib_dev *dev, int slave) { char name[32]; int err; int port; struct kobject *p, *t; struct mlx4_port *mport; struct mlx4_active_ports actv_ports; get_name(dev, name, slave, sizeof name); dev->pkeys.device_parent[slave] = kobject_create_and_add(name, kobject_get(dev->iov_parent)); if (!dev->pkeys.device_parent[slave]) { err = -ENOMEM; goto fail_dev; } INIT_LIST_HEAD(&dev->pkeys.pkey_port_list[slave]); dev->dev_ports_parent[slave] = kobject_create_and_add("ports", kobject_get(dev->pkeys.device_parent[slave])); if (!dev->dev_ports_parent[slave]) { err = -ENOMEM; goto err_ports; } actv_ports = mlx4_get_active_ports(dev->dev, slave); for (port = 1; port <= dev->dev->caps.num_ports; ++port) { if (!test_bit(port - 1, actv_ports.ports)) continue; err = add_port(dev, port, slave); if (err) goto err_add; } return 0; err_add: list_for_each_entry_safe(p, t, &dev->pkeys.pkey_port_list[slave], entry) { list_del(&p->entry); mport = container_of(p, struct mlx4_port, kobj); sysfs_remove_group(p, &mport->pkey_group); sysfs_remove_group(p, &mport->gid_group); remove_vf_smi_entries(mport); kobject_put(p); } kobject_put(dev->dev_ports_parent[slave]); err_ports: kobject_put(dev->pkeys.device_parent[slave]); /* extra put for the device_parent create_and_add */ kobject_put(dev->pkeys.device_parent[slave]); fail_dev: kobject_put(dev->iov_parent); return err; } static int register_pkey_tree(struct mlx4_ib_dev *device) { int i; if (!mlx4_is_master(device->dev)) return 0; for (i = 0; i <= device->dev->persist->num_vfs; ++i) register_one_pkey_tree(device, i); return 0; } static void unregister_pkey_tree(struct mlx4_ib_dev *device) { int slave; struct kobject *p, *t; struct mlx4_port *port; if (!mlx4_is_master(device->dev)) return; for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) { list_for_each_entry_safe(p, t, &device->pkeys.pkey_port_list[slave], entry) { list_del(&p->entry); port = container_of(p, struct mlx4_port, kobj); sysfs_remove_group(p, &port->pkey_group); sysfs_remove_group(p, &port->gid_group); remove_vf_smi_entries(port); kobject_put(p); kobject_put(device->dev_ports_parent[slave]); } kobject_put(device->dev_ports_parent[slave]); kobject_put(device->pkeys.device_parent[slave]); kobject_put(device->pkeys.device_parent[slave]); kobject_put(device->iov_parent); } } int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev) { unsigned int i; int ret = 0; if (!mlx4_is_master(dev->dev)) return 0; dev->iov_parent = kobject_create_and_add("iov", &dev->ib_dev.dev.kobj); if (!dev->iov_parent) { ret = -ENOMEM; goto err; } dev->ports_parent = kobject_create_and_add("ports", kobject_get(dev->iov_parent)); 
if (!dev->ports_parent) { ret = -ENOMEM; goto err_ports; } rdma_for_each_port(&dev->ib_dev, i) { ret = add_port_entries(dev, i); if (ret) goto err_add_entries; } ret = register_pkey_tree(dev); if (ret) goto err_add_entries; return 0; err_add_entries: kobject_put(dev->ports_parent); err_ports: kobject_put(dev->iov_parent); err: pr_err("mlx4_ib_device_register_sysfs error (%d)\n", ret); return ret; } static void unregister_alias_guid_tree(struct mlx4_ib_dev *device) { struct mlx4_ib_iov_port *p; int i; if (!mlx4_is_master(device->dev)) return; for (i = 0; i < device->dev->caps.num_ports; i++) { p = &device->iov_ports[i]; kobject_put(p->admin_alias_parent); kobject_put(p->gids_parent); kobject_put(p->pkeys_parent); kobject_put(p->mcgs_parent); kobject_put(p->cur_port); kobject_put(p->cur_port); kobject_put(p->cur_port); kobject_put(p->cur_port); kobject_put(p->cur_port); kobject_put(p->dev->ports_parent); kfree(p->dentr_ar); } } void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device) { unregister_alias_guid_tree(device); unregister_pkey_tree(device); kobject_put(device->ports_parent); kobject_put(device->iov_parent); kobject_put(device->iov_parent); }
linux-master
drivers/infiniband/hw/mlx4/sysfs.c
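The sysfs tree built in the file above (iov/<port>/admin_guids, gids, pkeys, mcgs) rests on two primitives: kobject_create_and_add() for directories and sysfs_create_file() for attribute files, with kobject_put() on every error and teardown path. A minimal sketch of that pattern follows; the names example_sysfs_init, example_dir and example_value are assumptions made up for this example and use the generic kobj_attribute type rather than the driver's own wrapper structures.

/*
 * Illustrative sketch only: the generic kobject + attribute pattern that
 * add_port_entries()/create_sysfs_entry() build on. All example_* names
 * are assumptions, not part of the mlx4 driver.
 */
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t example_value_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	/* sysfs_emit() is the preferred way to format a single sysfs value */
	return sysfs_emit(buf, "%d\n", 42);
}

/* read-only file named "example_value", mode 0444 */
static struct kobj_attribute example_value_attr = __ATTR_RO(example_value);

static struct kobject *example_dir;

static int example_sysfs_init(struct kobject *example_parent)
{
	int ret;

	/* creates <parent>/example_dir and holds a reference on success */
	example_dir = kobject_create_and_add("example_dir", example_parent);
	if (!example_dir)
		return -ENOMEM;

	/* exposes <parent>/example_dir/example_value as a read-only file */
	ret = sysfs_create_file(example_dir, &example_value_attr.attr);
	if (ret)
		kobject_put(example_dir);
	return ret;
}

As in the driver's error paths, the kobject must be released with kobject_put() rather than freed directly, since its lifetime is reference-counted.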
/* * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/log2.h> #include <linux/etherdevice.h> #include <net/ip.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <rdma/ib_cache.h> #include <rdma/ib_pack.h> #include <rdma/ib_addr.h> #include <rdma/ib_mad.h> #include <rdma/uverbs_ioctl.h> #include <linux/mlx4/driver.h> #include <linux/mlx4/qp.h> #include "mlx4_ib.h" #include <rdma/mlx4-abi.h> static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq); static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq); static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state, struct ib_udata *udata); enum { MLX4_IB_ACK_REQ_FREQ = 8, }; enum { MLX4_IB_DEFAULT_SCHED_QUEUE = 0x83, MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f, MLX4_IB_LINK_TYPE_IB = 0, MLX4_IB_LINK_TYPE_ETH = 1 }; enum { MLX4_IB_MIN_SQ_STRIDE = 6, MLX4_IB_CACHE_LINE_SIZE = 64, }; enum { MLX4_RAW_QP_MTU = 7, MLX4_RAW_QP_MSGMAX = 31, }; #ifndef ETH_ALEN #define ETH_ALEN 6 #endif static const __be32 mlx4_ib_opcode[] = { [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM), [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ), [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL), [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), [IB_WR_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS), [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA), }; enum mlx4_ib_source_type { MLX4_IB_QP_SRC = 0, MLX4_IB_RWQ_SRC = 1, }; struct mlx4_ib_qp_event_work { struct work_struct work; struct mlx4_qp *qp; enum mlx4_event type; }; static struct workqueue_struct *mlx4_ib_qp_event_wq; static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { if 
(!mlx4_is_master(dev->dev)) return 0; return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX; } static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { int proxy_sqp = 0; int real_sqp = 0; int i; /* PPF or Native -- real SQP */ real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) && qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); if (real_sqp) return 1; /* VF or PF -- proxy SQP */ if (mlx4_is_mfunc(dev->dev)) { for (i = 0; i < dev->dev->caps.num_ports; i++) { if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy || qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) { proxy_sqp = 1; break; } } } if (proxy_sqp) return 1; return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP); } /* used for INIT/CLOSE port logic */ static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { int proxy_qp0 = 0; int real_qp0 = 0; int i; /* PPF or Native -- real QP0 */ real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) && qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); if (real_qp0) return 1; /* VF or PF -- proxy QP0 */ if (mlx4_is_mfunc(dev->dev)) { for (i = 0; i < dev->dev->caps.num_ports; i++) { if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) { proxy_qp0 = 1; break; } } } return proxy_qp0; } static void *get_wqe(struct mlx4_ib_qp *qp, int offset) { return mlx4_buf_offset(&qp->buf, offset); } static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) { return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); } static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) { return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); } /* * Stamp a SQ WQE so that it is invalid if prefetched by marking the * first four bytes of every 64 byte chunk with 0xffffffff, except for * the very first chunk of the WQE. 
*/ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n) { __be32 *wqe; int i; int s; void *buf; struct mlx4_wqe_ctrl_seg *ctrl; buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); ctrl = (struct mlx4_wqe_ctrl_seg *)buf; s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4; for (i = 64; i < s; i += 64) { wqe = buf + i; *wqe = cpu_to_be32(0xffffffff); } } static void mlx4_ib_handle_qp_event(struct work_struct *_work) { struct mlx4_ib_qp_event_work *qpe_work = container_of(_work, struct mlx4_ib_qp_event_work, work); struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp; struct ib_event event = {}; event.device = ibqp->device; event.element.qp = ibqp; switch (qpe_work->type) { case MLX4_EVENT_TYPE_PATH_MIG: event.event = IB_EVENT_PATH_MIG; break; case MLX4_EVENT_TYPE_COMM_EST: event.event = IB_EVENT_COMM_EST; break; case MLX4_EVENT_TYPE_SQ_DRAINED: event.event = IB_EVENT_SQ_DRAINED; break; case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: event.event = IB_EVENT_QP_LAST_WQE_REACHED; break; case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: event.event = IB_EVENT_QP_FATAL; break; case MLX4_EVENT_TYPE_PATH_MIG_FAILED: event.event = IB_EVENT_PATH_MIG_ERR; break; case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: event.event = IB_EVENT_QP_REQ_ERR; break; case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: event.event = IB_EVENT_QP_ACCESS_ERR; break; default: pr_warn("Unexpected event type %d on QP %06x\n", qpe_work->type, qpe_work->qp->qpn); goto out; } ibqp->event_handler(&event, ibqp->qp_context); out: mlx4_put_qp(qpe_work->qp); kfree(qpe_work); } static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) { struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; struct mlx4_ib_qp_event_work *qpe_work; if (type == MLX4_EVENT_TYPE_PATH_MIG) to_mibqp(qp)->port = to_mibqp(qp)->alt_port; if (!ibqp->event_handler) goto out_no_handler; qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC); if (!qpe_work) goto out_no_handler; qpe_work->qp = qp; qpe_work->type = type; INIT_WORK(&qpe_work->work, mlx4_ib_handle_qp_event); queue_work(mlx4_ib_qp_event_wq, &qpe_work->work); return; out_no_handler: mlx4_put_qp(qp); } static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type) { pr_warn_ratelimited("Unexpected event type %d on WQ 0x%06x. Events are not supported for WQs\n", type, qp->qpn); } static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags) { /* * UD WQEs must have a datagram segment. * RC and UC WQEs might have a remote address segment. * MLX WQEs need two extra inline data segments (for the UD * header and space for the ICRC). */ switch (type) { case MLX4_IB_QPT_UD: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_datagram_seg) + ((flags & MLX4_IB_QP_LSO) ? 
MLX4_IB_LSO_HEADER_SPARE : 0); case MLX4_IB_QPT_PROXY_SMI_OWNER: case MLX4_IB_QPT_PROXY_SMI: case MLX4_IB_QPT_PROXY_GSI: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_datagram_seg) + 64; case MLX4_IB_QPT_TUN_SMI_OWNER: case MLX4_IB_QPT_TUN_GSI: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_datagram_seg); case MLX4_IB_QPT_UC: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_raddr_seg); case MLX4_IB_QPT_RC: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_masked_atomic_seg) + sizeof (struct mlx4_wqe_raddr_seg); case MLX4_IB_QPT_SMI: case MLX4_IB_QPT_GSI: return sizeof (struct mlx4_wqe_ctrl_seg) + ALIGN(MLX4_IB_UD_HEADER_SIZE + DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE, MLX4_INLINE_ALIGN) * sizeof (struct mlx4_wqe_inline_seg), sizeof (struct mlx4_wqe_data_seg)) + ALIGN(4 + sizeof (struct mlx4_wqe_inline_seg), sizeof (struct mlx4_wqe_data_seg)); default: return sizeof (struct mlx4_wqe_ctrl_seg); } } static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, bool is_user, bool has_rq, struct mlx4_ib_qp *qp, u32 inl_recv_sz) { /* Sanity check RQ size before proceeding */ if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE || cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)) return -EINVAL; if (!has_rq) { if (cap->max_recv_wr || inl_recv_sz) return -EINVAL; qp->rq.wqe_cnt = qp->rq.max_gs = 0; } else { u32 max_inl_recv_sz = dev->dev->caps.max_rq_sg * sizeof(struct mlx4_wqe_data_seg); u32 wqe_size; /* HW requires >= 1 RQ entry with >= 1 gather entry */ if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge || inl_recv_sz > max_inl_recv_sz)) return -EINVAL; qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg); qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz)); } /* leave userspace return values as they were, so as not to break ABI */ if (is_user) { cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; cap->max_recv_sge = qp->rq.max_gs; } else { cap->max_recv_wr = qp->rq.max_post = min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); cap->max_recv_sge = min(qp->rq.max_gs, min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)); } return 0; } static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) { int s; /* Sanity check SQ size before proceeding */ if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) || cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) || cap->max_inline_data + send_wqe_overhead(type, qp->flags) + sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) return -EINVAL; /* * For MLX transport we need 2 extra S/G entries: * one for the header and one for the checksum at the end */ if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI || type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) && cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg) return -EINVAL; s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg), cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) + send_wqe_overhead(type, qp->flags); if (s > dev->dev->caps.max_sq_desc_sz) return -EINVAL; qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); /* * We need to leave 2 KB + 1 WR of headroom in the SQ to * allow HW to prefetch. 
*/ qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift); qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr + qp->sq_spare_wqes); qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz, (1 << qp->sq.wqe_shift)) - send_wqe_overhead(type, qp->flags)) / sizeof (struct mlx4_wqe_data_seg); qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + (qp->sq.wqe_cnt << qp->sq.wqe_shift); if (qp->rq.wqe_shift > qp->sq.wqe_shift) { qp->rq.offset = 0; qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; } else { qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; qp->sq.offset = 0; } cap->max_send_wr = qp->sq.max_post = qp->sq.wqe_cnt - qp->sq_spare_wqes; cap->max_send_sge = min(qp->sq.max_gs, min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)); /* We don't support inline sends for kernel QPs (yet) */ cap->max_inline_data = 0; return 0; } static int set_user_sq_size(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_ib_create_qp *ucmd) { u32 cnt; /* Sanity check SQ size before proceeding */ if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) || cnt > dev->dev->caps.max_wqes) return -EINVAL; if (ucmd->log_sq_stride > ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) || ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE) return -EINVAL; qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; qp->sq.wqe_shift = ucmd->log_sq_stride; qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + (qp->sq.wqe_cnt << qp->sq.wqe_shift); return 0; } static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) { int i; qp->sqp_proxy_rcv = kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf), GFP_KERNEL); if (!qp->sqp_proxy_rcv) return -ENOMEM; for (i = 0; i < qp->rq.wqe_cnt; i++) { qp->sqp_proxy_rcv[i].addr = kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr), GFP_KERNEL); if (!qp->sqp_proxy_rcv[i].addr) goto err; qp->sqp_proxy_rcv[i].map = ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr, sizeof (struct mlx4_ib_proxy_sqp_hdr), DMA_FROM_DEVICE); if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) { kfree(qp->sqp_proxy_rcv[i].addr); goto err; } } return 0; err: while (i > 0) { --i; ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, sizeof (struct mlx4_ib_proxy_sqp_hdr), DMA_FROM_DEVICE); kfree(qp->sqp_proxy_rcv[i].addr); } kfree(qp->sqp_proxy_rcv); qp->sqp_proxy_rcv = NULL; return -ENOMEM; } static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) { int i; for (i = 0; i < qp->rq.wqe_cnt; i++) { ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, sizeof (struct mlx4_ib_proxy_sqp_hdr), DMA_FROM_DEVICE); kfree(qp->sqp_proxy_rcv[i].addr); } kfree(qp->sqp_proxy_rcv); } static bool qp_has_rq(struct ib_qp_init_attr *attr) { if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT) return false; return !attr->srq; } static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn) { int i; for (i = 0; i < dev->caps.num_ports; i++) { if (qpn == dev->caps.spec_qps[i].qp0_proxy) return !!dev->caps.spec_qps[i].qp0_qkey; } return 0; } static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { mutex_lock(&dev->counters_table[qp->port - 1].mutex); mlx4_counter_free(dev->dev, qp->counter_index->index); list_del(&qp->counter_index->list); mutex_unlock(&dev->counters_table[qp->port - 1].mutex); kfree(qp->counter_index); qp->counter_index = NULL; } static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, struct ib_qp_init_attr *init_attr, struct mlx4_ib_create_qp_rss *ucmd) { rss_ctx->base_qpn_tbl_sz = init_attr->rwq_ind_tbl->ind_tbl[0]->wq_num | 
(init_attr->rwq_ind_tbl->log_ind_tbl_size << 24); if ((ucmd->rx_hash_function == MLX4_IB_RX_HASH_FUNC_TOEPLITZ) && (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) { memcpy(rss_ctx->rss_key, ucmd->rx_hash_key, MLX4_EN_RSS_KEY_SIZE); } else { pr_debug("RX Hash function is not supported\n"); return (-EOPNOTSUPP); } if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 | MLX4_IB_RX_HASH_DST_IPV4 | MLX4_IB_RX_HASH_SRC_IPV6 | MLX4_IB_RX_HASH_DST_IPV6 | MLX4_IB_RX_HASH_SRC_PORT_TCP | MLX4_IB_RX_HASH_DST_PORT_TCP | MLX4_IB_RX_HASH_SRC_PORT_UDP | MLX4_IB_RX_HASH_DST_PORT_UDP | MLX4_IB_RX_HASH_INNER)) { pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n", ucmd->rx_hash_fields_mask); return (-EOPNOTSUPP); } if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) && (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) { rss_ctx->flags = MLX4_RSS_IPV4; } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) || (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) { pr_debug("RX Hash fields_mask is not supported - both IPv4 SRC and DST must be set\n"); return (-EOPNOTSUPP); } if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) && (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) { rss_ctx->flags |= MLX4_RSS_IPV6; } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) || (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) { pr_debug("RX Hash fields_mask is not supported - both IPv6 SRC and DST must be set\n"); return (-EOPNOTSUPP); } if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) && (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) { if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS)) { pr_debug("RX Hash fields_mask for UDP is not supported\n"); return (-EOPNOTSUPP); } if (rss_ctx->flags & MLX4_RSS_IPV4) rss_ctx->flags |= MLX4_RSS_UDP_IPV4; if (rss_ctx->flags & MLX4_RSS_IPV6) rss_ctx->flags |= MLX4_RSS_UDP_IPV6; if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) { pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n"); return (-EOPNOTSUPP); } } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) || (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) { pr_debug("RX Hash fields_mask is not supported - both UDP SRC and DST must be set\n"); return (-EOPNOTSUPP); } if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) && (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { if (rss_ctx->flags & MLX4_RSS_IPV4) rss_ctx->flags |= MLX4_RSS_TCP_IPV4; if (rss_ctx->flags & MLX4_RSS_IPV6) rss_ctx->flags |= MLX4_RSS_TCP_IPV6; if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) { pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n"); return (-EOPNOTSUPP); } } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) || (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n"); return (-EOPNOTSUPP); } if (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_INNER) { if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { /* * Hash according to inner headers if exist, otherwise * according to outer headers. 
*/ rss_ctx->flags |= MLX4_RSS_BY_INNER_HEADERS_IPONLY; } else { pr_debug("RSS Hash for inner headers isn't supported\n"); return (-EOPNOTSUPP); } } return 0; } static int create_qp_rss(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *init_attr, struct mlx4_ib_create_qp_rss *ucmd, struct mlx4_ib_qp *qp) { int qpn; int err; qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage); if (err) return err; err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); if (err) goto err_qpn; INIT_LIST_HEAD(&qp->gid_list); INIT_LIST_HEAD(&qp->steering_rules); qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET; qp->state = IB_QPS_RESET; /* Set dummy send resources to be compatible with HV and PRM */ qp->sq_no_prefetch = 1; qp->sq.wqe_cnt = 1; qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE; qp->mtt = (to_mqp( (struct ib_qp *)init_attr->rwq_ind_tbl->ind_tbl[0]))->mtt; qp->rss_ctx = kzalloc(sizeof(*qp->rss_ctx), GFP_KERNEL); if (!qp->rss_ctx) { err = -ENOMEM; goto err_qp_alloc; } err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd); if (err) goto err; return 0; err: kfree(qp->rss_ctx); err_qp_alloc: mlx4_qp_remove(dev->dev, &qp->mqp); mlx4_qp_free(dev->dev, &qp->mqp); err_qpn: mlx4_qp_release_range(dev->dev, qpn, 1); return err; } static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { struct mlx4_ib_create_qp_rss ucmd = {}; size_t required_cmd_sz; int err; if (!udata) { pr_debug("RSS QP with NULL udata\n"); return -EINVAL; } if (udata->outlen) return -EOPNOTSUPP; required_cmd_sz = offsetof(typeof(ucmd), reserved1) + sizeof(ucmd.reserved1); if (udata->inlen < required_cmd_sz) { pr_debug("invalid inlen\n"); return -EINVAL; } if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) { pr_debug("copy failed\n"); return -EFAULT; } if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved))) return -EOPNOTSUPP; if (ucmd.comp_mask || ucmd.reserved1) return -EOPNOTSUPP; if (udata->inlen > sizeof(ucmd) && !ib_is_udata_cleared(udata, sizeof(ucmd), udata->inlen - sizeof(ucmd))) { pr_debug("inlen is not supported\n"); return -EOPNOTSUPP; } if (init_attr->qp_type != IB_QPT_RAW_PACKET) { pr_debug("RSS QP with unsupported QP type %d\n", init_attr->qp_type); return -EOPNOTSUPP; } if (init_attr->create_flags) { pr_debug("RSS QP doesn't support create flags\n"); return -EOPNOTSUPP; } if (init_attr->send_cq || init_attr->cap.max_send_wr) { pr_debug("RSS QP with unsupported send attributes\n"); return -EOPNOTSUPP; } qp->pri.vid = 0xFFFF; qp->alt.vid = 0xFFFF; err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp); if (err) return err; qp->ibqp.qp_num = qp->mqp.qpn; return 0; } /* * This function allocates a WQN from a range which is consecutive and aligned * to its size. In case the range is full, then it creates a new range and * allocates WQN from it. The new range will be used for following allocations. 
*/ static int mlx4_ib_alloc_wqn(struct mlx4_ib_ucontext *context, struct mlx4_ib_qp *qp, int range_size, int *wqn) { struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device); struct mlx4_wqn_range *range; int err = 0; mutex_lock(&context->wqn_ranges_mutex); range = list_first_entry_or_null(&context->wqn_ranges_list, struct mlx4_wqn_range, list); if (!range || (range->refcount == range->size) || range->dirty) { range = kzalloc(sizeof(*range), GFP_KERNEL); if (!range) { err = -ENOMEM; goto out; } err = mlx4_qp_reserve_range(dev->dev, range_size, range_size, &range->base_wqn, 0, qp->mqp.usage); if (err) { kfree(range); goto out; } range->size = range_size; list_add(&range->list, &context->wqn_ranges_list); } else if (range_size != 1) { /* * Requesting a new range (>1) when last range is still open, is * not valid. */ err = -EINVAL; goto out; } qp->wqn_range = range; *wqn = range->base_wqn + range->refcount; range->refcount++; out: mutex_unlock(&context->wqn_ranges_mutex); return err; } static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context, struct mlx4_ib_qp *qp, bool dirty_release) { struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device); struct mlx4_wqn_range *range; mutex_lock(&context->wqn_ranges_mutex); range = qp->wqn_range; range->refcount--; if (!range->refcount) { mlx4_qp_release_range(dev->dev, range->base_wqn, range->size); list_del(&range->list); kfree(range); } else if (dirty_release) { /* * A range which one of its WQNs is destroyed, won't be able to be * reused for further WQN allocations. * The next created WQ will allocate a new range. */ range->dirty = true; } mutex_unlock(&context->wqn_ranges_mutex); } static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, struct mlx4_ib_qp *qp) { struct mlx4_ib_dev *dev = to_mdev(pd->device); int qpn; int err; struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context( udata, struct mlx4_ib_ucontext, ibucontext); struct mlx4_ib_cq *mcq; unsigned long flags; int range_size; struct mlx4_ib_create_wq wq; size_t copy_len; int shift; int n; qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET; spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); INIT_LIST_HEAD(&qp->gid_list); INIT_LIST_HEAD(&qp->steering_rules); qp->state = IB_QPS_RESET; copy_len = min(sizeof(struct mlx4_ib_create_wq), udata->inlen); if (ib_copy_from_udata(&wq, udata, copy_len)) { err = -EFAULT; goto err; } if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] || wq.reserved[2]) { pr_debug("user command isn't supported\n"); err = -EOPNOTSUPP; goto err; } if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) { pr_debug("WQN range size must be equal or smaller than %d\n", dev->dev->caps.max_rss_tbl_sz); err = -EOPNOTSUPP; goto err; } range_size = 1 << wq.log_range_size; if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) qp->flags |= MLX4_IB_QP_SCATTER_FCS; err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz); if (err) goto err; qp->sq_no_prefetch = 1; qp->sq.wqe_cnt = 1; qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + (qp->sq.wqe_cnt << qp->sq.wqe_shift); qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0); if (IS_ERR(qp->umem)) { err = PTR_ERR(qp->umem); goto err; } shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); if (err) goto err_buf; err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); if (err) goto err_mtt; err = mlx4_ib_db_map_user(udata, wq.db_addr, 
&qp->db); if (err) goto err_mtt; qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn); if (err) goto err_wrid; err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); if (err) goto err_qpn; /* * Hardware wants QPN written in big-endian order (after * shifting) for send doorbell. Precompute this value to save * a little bit when posting sends. */ qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); qp->mqp.event = mlx4_ib_wq_event; spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq), to_mcq(init_attr->recv_cq)); /* Maintain device to QPs access, needed for further handling * via reset flow */ list_add_tail(&qp->qps_list, &dev->qp_list); /* Maintain CQ to QPs access, needed for further handling * via reset flow */ mcq = to_mcq(init_attr->send_cq); list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); mcq = to_mcq(init_attr->recv_cq); list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq), to_mcq(init_attr->recv_cq)); spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); return 0; err_qpn: mlx4_ib_release_wqn(context, qp, 0); err_wrid: mlx4_ib_db_unmap_user(context, &qp->db); err_mtt: mlx4_mtt_cleanup(dev->dev, &qp->mtt); err_buf: ib_umem_release(qp->umem); err: return err; } static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp) { struct mlx4_ib_dev *dev = to_mdev(pd->device); int qpn; int err; struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context( udata, struct mlx4_ib_ucontext, ibucontext); enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type; struct mlx4_ib_cq *mcq; unsigned long flags; /* When tunneling special qps, we use a plain UD qp */ if (sqpn) { if (mlx4_is_mfunc(dev->dev) && (!mlx4_is_master(dev->dev) || !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) { if (init_attr->qp_type == IB_QPT_GSI) qp_type = MLX4_IB_QPT_PROXY_GSI; else { if (mlx4_is_master(dev->dev) || qp0_enabled_vf(dev->dev, sqpn)) qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER; else qp_type = MLX4_IB_QPT_PROXY_SMI; } } qpn = sqpn; /* add extra sg entry for tunneling */ init_attr->cap.max_recv_sge++; } else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) { struct mlx4_ib_qp_tunnel_init_attr *tnl_init = container_of(init_attr, struct mlx4_ib_qp_tunnel_init_attr, init_attr); if ((tnl_init->proxy_qp_type != IB_QPT_SMI && tnl_init->proxy_qp_type != IB_QPT_GSI) || !mlx4_is_master(dev->dev)) return -EINVAL; if (tnl_init->proxy_qp_type == IB_QPT_GSI) qp_type = MLX4_IB_QPT_TUN_GSI; else if (tnl_init->slave == mlx4_master_func_num(dev->dev) || mlx4_vf_smi_enabled(dev->dev, tnl_init->slave, tnl_init->port)) qp_type = MLX4_IB_QPT_TUN_SMI_OWNER; else qp_type = MLX4_IB_QPT_TUN_SMI; /* we are definitely in the PPF here, since we are creating * tunnel QPs. base_tunnel_sqpn is therefore valid. 
*/ qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave + tnl_init->proxy_qp_type * 2 + tnl_init->port - 1; sqpn = qpn; } if (init_attr->qp_type == IB_QPT_SMI || init_attr->qp_type == IB_QPT_GSI || qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI || (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) { qp->sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL); if (!qp->sqp) return -ENOMEM; } qp->mlx4_ib_qp_type = qp_type; spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); INIT_LIST_HEAD(&qp->gid_list); INIT_LIST_HEAD(&qp->steering_rules); qp->state = IB_QPS_RESET; if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); if (udata) { struct mlx4_ib_create_qp ucmd; size_t copy_len; int shift; int n; copy_len = sizeof(struct mlx4_ib_create_qp); if (ib_copy_from_udata(&ucmd, udata, copy_len)) { err = -EFAULT; goto err; } qp->inl_recv_sz = ucmd.inl_recv_sz; if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) { if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) { pr_debug("scatter FCS is unsupported\n"); err = -EOPNOTSUPP; goto err; } qp->flags |= MLX4_IB_QP_SCATTER_FCS; } err = set_rq_size(dev, &init_attr->cap, udata, qp_has_rq(init_attr), qp, qp->inl_recv_sz); if (err) goto err; qp->sq_no_prefetch = ucmd.sq_no_prefetch; err = set_user_sq_size(dev, qp, &ucmd); if (err) goto err; qp->umem = ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0); if (IS_ERR(qp->umem)) { err = PTR_ERR(qp->umem); goto err; } shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); if (err) goto err_buf; err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); if (err) goto err_mtt; if (qp_has_rq(init_attr)) { err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db); if (err) goto err_mtt; } qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; } else { err = set_rq_size(dev, &init_attr->cap, udata, qp_has_rq(init_attr), qp, 0); if (err) goto err; qp->sq_no_prefetch = 0; if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) qp->flags |= MLX4_IB_QP_LSO; if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) { if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) qp->flags |= MLX4_IB_QP_NETIF; else { err = -EINVAL; goto err; } } err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); if (err) goto err; if (qp_has_rq(init_attr)) { err = mlx4_db_alloc(dev->dev, &qp->db, 0); if (err) goto err; *qp->db.db = 0; } if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) { err = -ENOMEM; goto err_db; } err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, &qp->mtt); if (err) goto err_buf; err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); if (err) goto err_mtt; qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL); qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL); if (!qp->sq.wrid || !qp->rq.wrid) { err = -ENOMEM; goto err_wrid; } qp->mqp.usage = MLX4_RES_USAGE_DRIVER; } if (sqpn) { if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) { if (alloc_proxy_bufs(pd->device, qp)) { err = -ENOMEM; goto err_wrid; } } } else { /* Raw packet QPNs may not have bits 6,7 set in their qp_num; * otherwise, the WQE BlueFlame setup flow wrongly causes * VLAN insertion. */ if (init_attr->qp_type == IB_QPT_RAW_PACKET) err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, (init_attr->cap.max_send_wr ? 
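/*
 * Editor's note (illustrative sketch, assumed values): the tunnel QPN
 * computation above packs (slave, special QP type, port) into one flat
 * index:
 *
 *	qpn = base_tunnel_sqpn + 8 * slave + proxy_qp_type * 2 + port - 1
 *
 * e.g. with a hypothetical base_tunnel_sqpn of 0x1000, slave 3,
 * proxy_qp_type IB_QPT_GSI (1) and port 2:
 *
 *	qpn = 0x1000 + 24 + 2 + 1 = 0x101b
 *
 * The stride of 8 per slave leaves room for the SMI and GSI tunnel QPs
 * on each port.
 */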
MLX4_RESERVE_ETH_BF_QP : 0) | (init_attr->cap.max_recv_wr ? MLX4_RESERVE_A0_QP : 0), qp->mqp.usage); else if (qp->flags & MLX4_IB_QP_NETIF) err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn); else err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage); if (err) goto err_proxy; } if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); if (err) goto err_qpn; if (init_attr->qp_type == IB_QPT_XRC_TGT) qp->mqp.qpn |= (1 << 23); /* * Hardware wants QPN written in big-endian order (after * shifting) for send doorbell. Precompute this value to save * a little bit when posting sends. */ qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); qp->mqp.event = mlx4_ib_qp_event; spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq), to_mcq(init_attr->recv_cq)); /* Maintain device to QPs access, needed for further handling * via reset flow */ list_add_tail(&qp->qps_list, &dev->qp_list); /* Maintain CQ to QPs access, needed for further handling * via reset flow */ mcq = to_mcq(init_attr->send_cq); list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); mcq = to_mcq(init_attr->recv_cq); list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq), to_mcq(init_attr->recv_cq)); spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); return 0; err_qpn: if (!sqpn) { if (qp->flags & MLX4_IB_QP_NETIF) mlx4_ib_steer_qp_free(dev, qpn, 1); else mlx4_qp_release_range(dev->dev, qpn, 1); } err_proxy: if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) free_proxy_bufs(pd->device, qp); err_wrid: if (udata) { if (qp_has_rq(init_attr)) mlx4_ib_db_unmap_user(context, &qp->db); } else { kvfree(qp->sq.wrid); kvfree(qp->rq.wrid); } err_mtt: mlx4_mtt_cleanup(dev->dev, &qp->mtt); err_buf: if (!qp->umem) mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); ib_umem_release(qp->umem); err_db: if (!udata && qp_has_rq(init_attr)) mlx4_db_free(dev->dev, &qp->db); err: kfree(qp->sqp); return err; } static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state) { switch (state) { case IB_QPS_RESET: return MLX4_QP_STATE_RST; case IB_QPS_INIT: return MLX4_QP_STATE_INIT; case IB_QPS_RTR: return MLX4_QP_STATE_RTR; case IB_QPS_RTS: return MLX4_QP_STATE_RTS; case IB_QPS_SQD: return MLX4_QP_STATE_SQD; case IB_QPS_SQE: return MLX4_QP_STATE_SQER; case IB_QPS_ERR: return MLX4_QP_STATE_ERR; default: return -1; } } static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) __acquires(&send_cq->lock) __acquires(&recv_cq->lock) { if (send_cq == recv_cq) { spin_lock(&send_cq->lock); __acquire(&recv_cq->lock); } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { spin_lock(&send_cq->lock); spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); } else { spin_lock(&recv_cq->lock); spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); } } static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) __releases(&send_cq->lock) __releases(&recv_cq->lock) { if (send_cq == recv_cq) { __release(&recv_cq->lock); spin_unlock(&send_cq->lock); } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { spin_unlock(&recv_cq->lock); spin_unlock(&send_cq->lock); } else { spin_unlock(&send_cq->lock); spin_unlock(&recv_cq->lock); } } static void del_gid_entries(struct mlx4_ib_qp *qp) { struct mlx4_ib_gid_entry *ge, *tmp; list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { list_del(&ge->list); kfree(ge); } } static struct mlx4_ib_pd 
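/*
 * Editor's note: mlx4_ib_lock_cqs() above always takes the CQ with the
 * lower cqn first, so two paths locking the same (send, recv) pair in
 * opposite roles cannot deadlock.  A minimal sketch of the ordering
 * rule, assuming two hypothetical CQs:
 *
 *	cq_a->mcq.cqn = 5, cq_b->mcq.cqn = 9
 *	mlx4_ib_lock_cqs(cq_a, cq_b)  ->  lock 5, then 9
 *	mlx4_ib_lock_cqs(cq_b, cq_a)  ->  lock 5, then 9 (same global order)
 *
 * mlx4_ib_unlock_cqs() releases them in the reverse order.
 */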
*get_pd(struct mlx4_ib_qp *qp) { if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); else return to_mpd(qp->ibqp.pd); } static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src, struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq) { switch (qp->ibqp.qp_type) { case IB_QPT_XRC_TGT: *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); *recv_cq = *send_cq; break; case IB_QPT_XRC_INI: *send_cq = to_mcq(qp->ibqp.send_cq); *recv_cq = *send_cq; break; default: *recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) : to_mcq(qp->ibwq.cq); *send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) : *recv_cq; break; } } static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { if (qp->state != IB_QPS_RESET) { int i; for (i = 0; i < (1 << qp->ibqp.rwq_ind_tbl->log_ind_tbl_size); i++) { struct ib_wq *ibwq = qp->ibqp.rwq_ind_tbl->ind_tbl[i]; struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq); mutex_lock(&wq->mutex); wq->rss_usecnt--; mutex_unlock(&wq->mutex); } if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) pr_warn("modify QP %06x to RESET failed.\n", qp->mqp.qpn); } mlx4_qp_remove(dev->dev, &qp->mqp); mlx4_qp_free(dev->dev, &qp->mqp); mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); del_gid_entries(qp); } static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src, struct ib_udata *udata) { struct mlx4_ib_cq *send_cq, *recv_cq; unsigned long flags; if (qp->state != IB_QPS_RESET) { if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) pr_warn("modify QP %06x to RESET failed.\n", qp->mqp.qpn); if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); qp->pri.smac = 0; qp->pri.smac_port = 0; } if (qp->alt.smac) { mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); qp->alt.smac = 0; } if (qp->pri.vid < 0x1000) { mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); qp->pri.vid = 0xFFFF; qp->pri.candidate_vid = 0xFFFF; qp->pri.update_vid = 0; } if (qp->alt.vid < 0x1000) { mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); qp->alt.vid = 0xFFFF; qp->alt.candidate_vid = 0xFFFF; qp->alt.update_vid = 0; } } get_cqs(qp, src, &send_cq, &recv_cq); spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); mlx4_ib_lock_cqs(send_cq, recv_cq); /* del from lists under both locks above to protect reset flow paths */ list_del(&qp->qps_list); list_del(&qp->cq_send_list); list_del(&qp->cq_recv_list); if (!udata) { __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, qp->ibqp.srq ? 
to_msrq(qp->ibqp.srq): NULL); if (send_cq != recv_cq) __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); } mlx4_qp_remove(dev->dev, &qp->mqp); mlx4_ib_unlock_cqs(send_cq, recv_cq); spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); mlx4_qp_free(dev->dev, &qp->mqp); if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) { if (qp->flags & MLX4_IB_QP_NETIF) mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1); else if (src == MLX4_IB_RWQ_SRC) mlx4_ib_release_wqn( rdma_udata_to_drv_context( udata, struct mlx4_ib_ucontext, ibucontext), qp, 1); else mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); } mlx4_mtt_cleanup(dev->dev, &qp->mtt); if (udata) { if (qp->rq.wqe_cnt) { struct mlx4_ib_ucontext *mcontext = rdma_udata_to_drv_context( udata, struct mlx4_ib_ucontext, ibucontext); mlx4_ib_db_unmap_user(mcontext, &qp->db); } } else { kvfree(qp->sq.wrid); kvfree(qp->rq.wrid); if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) free_proxy_bufs(&dev->ib_dev, qp); mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); if (qp->rq.wqe_cnt) mlx4_db_free(dev->dev, &qp->db); } ib_umem_release(qp->umem); del_gid_entries(qp); } static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr) { /* Native or PPF */ if (!mlx4_is_mfunc(dev->dev) || (mlx4_is_master(dev->dev) && attr->create_flags & MLX4_IB_SRIOV_SQP)) { return dev->dev->phys_caps.base_sqpn + (attr->qp_type == IB_QPT_SMI ? 0 : 2) + attr->port_num - 1; } /* PF or VF -- creating proxies */ if (attr->qp_type == IB_QPT_SMI) return dev->dev->caps.spec_qps[attr->port_num - 1].qp0_proxy; else return dev->dev->caps.spec_qps[attr->port_num - 1].qp1_proxy; } static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { int err; int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; u16 xrcdn = 0; if (init_attr->rwq_ind_tbl) return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata); /* * We only support LSO, vendor flag1, and multicast loopback blocking, * and only for kernel UD QPs. 
*/ if (init_attr->create_flags & ~(MLX4_IB_QP_LSO | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK | MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP | MLX4_IB_QP_NETIF | MLX4_IB_QP_CREATE_ROCE_V2_GSI)) return -EOPNOTSUPP; if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) { if (init_attr->qp_type != IB_QPT_UD) return -EINVAL; } if (init_attr->create_flags) { if (udata && init_attr->create_flags & ~(sup_u_create_flags)) return -EINVAL; if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP | MLX4_IB_QP_CREATE_ROCE_V2_GSI | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) && init_attr->qp_type != IB_QPT_UD) || (init_attr->create_flags & MLX4_IB_SRIOV_SQP && init_attr->qp_type > IB_QPT_GSI) || (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI && init_attr->qp_type != IB_QPT_GSI)) return -EINVAL; } switch (init_attr->qp_type) { case IB_QPT_XRC_TGT: pd = to_mxrcd(init_attr->xrcd)->pd; xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq; fallthrough; case IB_QPT_XRC_INI: if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) return -ENOSYS; init_attr->recv_cq = init_attr->send_cq; fallthrough; case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_RAW_PACKET: case IB_QPT_UD: qp->pri.vid = 0xFFFF; qp->alt.vid = 0xFFFF; err = create_qp_common(pd, init_attr, udata, 0, qp); if (err) return err; qp->ibqp.qp_num = qp->mqp.qpn; qp->xrcdn = xrcdn; break; case IB_QPT_SMI: case IB_QPT_GSI: { int sqpn; if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) { int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev, 1, 1, &sqpn, 0, MLX4_RES_USAGE_DRIVER); if (res) return res; } else { sqpn = get_sqp_num(to_mdev(pd->device), init_attr); } qp->pri.vid = 0xFFFF; qp->alt.vid = 0xFFFF; err = create_qp_common(pd, init_attr, udata, sqpn, qp); if (err) return err; if (init_attr->create_flags & (MLX4_IB_SRIOV_SQP | MLX4_IB_SRIOV_TUNNEL_QP)) /* Internal QP created with ib_create_qp */ rdma_restrack_no_track(&qp->ibqp.res); qp->port = init_attr->port_num; qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI ? 
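/*
 * Editor's note (sketch of the validation above): for userspace QPs the
 * only accepted create flag is multicast loopback blocking; anything
 * else in the mask is rejected with -EINVAL before any resources are
 * touched.  A hypothetical caller would see:
 *
 *	attr.create_flags = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;  ok
 *	attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;             -EINVAL
 *
 * LSO and NETIF QPs remain kernel-only, per the comment at the top of
 * _mlx4_ib_create_qp().
 */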
sqpn : 1; break; } default: /* Don't support raw QPs */ return -EOPNOTSUPP; } return 0; } int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { struct ib_device *device = ibqp->device; struct mlx4_ib_dev *dev = to_mdev(device); struct mlx4_ib_qp *qp = to_mqp(ibqp); struct ib_pd *pd = ibqp->pd; int ret; mutex_init(&qp->mutex); ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata); if (ret) return ret; if (init_attr->qp_type == IB_QPT_GSI && !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) { struct mlx4_ib_sqp *sqp = qp->sqp; int is_eth = rdma_cap_eth_ah(&dev->ib_dev, init_attr->port_num); if (is_eth && dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) { init_attr->create_flags |= MLX4_IB_QP_CREATE_ROCE_V2_GSI; sqp->roce_v2_gsi = ib_create_qp(pd, init_attr); if (IS_ERR(sqp->roce_v2_gsi)) { pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi)); sqp->roce_v2_gsi = NULL; } else { to_mqp(sqp->roce_v2_gsi)->flags |= MLX4_IB_ROCE_V2_GSI_QP; } init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI; } } return 0; } static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(qp->device); struct mlx4_ib_qp *mqp = to_mqp(qp); if (is_qp0(dev, mqp)) mlx4_CLOSE_PORT(dev->dev, mqp->port); if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI && dev->qp1_proxy[mqp->port - 1] == mqp) { mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]); dev->qp1_proxy[mqp->port - 1] = NULL; mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]); } if (mqp->counter_index) mlx4_ib_free_qp_counter(dev, mqp); if (qp->rwq_ind_tbl) { destroy_qp_rss(dev, mqp); } else { destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, udata); } kfree(mqp->sqp); return 0; } int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) { struct mlx4_ib_qp *mqp = to_mqp(qp); if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) { struct mlx4_ib_sqp *sqp = mqp->sqp; if (sqp->roce_v2_gsi) ib_destroy_qp(sqp->roce_v2_gsi); } return _mlx4_ib_destroy_qp(qp, udata); } static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type) { switch (type) { case MLX4_IB_QPT_RC: return MLX4_QP_ST_RC; case MLX4_IB_QPT_UC: return MLX4_QP_ST_UC; case MLX4_IB_QPT_UD: return MLX4_QP_ST_UD; case MLX4_IB_QPT_XRC_INI: case MLX4_IB_QPT_XRC_TGT: return MLX4_QP_ST_XRC; case MLX4_IB_QPT_SMI: case MLX4_IB_QPT_GSI: case MLX4_IB_QPT_RAW_PACKET: return MLX4_QP_ST_MLX; case MLX4_IB_QPT_PROXY_SMI_OWNER: case MLX4_IB_QPT_TUN_SMI_OWNER: return (mlx4_is_mfunc(dev->dev) ? MLX4_QP_ST_MLX : -1); case MLX4_IB_QPT_PROXY_SMI: case MLX4_IB_QPT_TUN_SMI: case MLX4_IB_QPT_PROXY_GSI: case MLX4_IB_QPT_TUN_GSI: return (mlx4_is_mfunc(dev->dev) ? 
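/*
 * Editor's note: on RoCE ports that support both RoCE v1 and v2, the
 * GSI QP is really a pair, the main QP plus a shadow roce_v2_gsi QP
 * created above with MLX4_IB_QP_CREATE_ROCE_V2_GSI.  Later verbs are
 * mirrored to the shadow, roughly:
 *
 *	mlx4_ib_modify_qp(gsi, attr, mask)
 *		-> _mlx4_ib_modify_qp(gsi, ...)
 *		-> ib_modify_qp(sqp->roce_v2_gsi, attr, mask)
 *
 * and mlx4_ib_destroy_qp() tears the shadow down before the main QP.
 */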
MLX4_QP_ST_UD : -1); default: return -1; } } static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, int attr_mask) { u8 dest_rd_atomic; u32 access_flags; u32 hw_access_flags = 0; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) dest_rd_atomic = attr->max_dest_rd_atomic; else dest_rd_atomic = qp->resp_depth; if (attr_mask & IB_QP_ACCESS_FLAGS) access_flags = attr->qp_access_flags; else access_flags = qp->atomic_rd_en; if (!dest_rd_atomic) access_flags &= IB_ACCESS_REMOTE_WRITE; if (access_flags & IB_ACCESS_REMOTE_READ) hw_access_flags |= MLX4_QP_BIT_RRE; if (access_flags & IB_ACCESS_REMOTE_ATOMIC) hw_access_flags |= MLX4_QP_BIT_RAE; if (access_flags & IB_ACCESS_REMOTE_WRITE) hw_access_flags |= MLX4_QP_BIT_RWE; return cpu_to_be32(hw_access_flags); } static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr, int attr_mask) { if (attr_mask & IB_QP_PKEY_INDEX) sqp->pkey_index = attr->pkey_index; if (attr_mask & IB_QP_QKEY) sqp->qkey = attr->qkey; if (attr_mask & IB_QP_SQ_PSN) sqp->send_psn = attr->sq_psn; } static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port) { path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6); } static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct rdma_ah_attr *ah, u64 smac, u16 vlan_tag, struct mlx4_qp_path *path, struct mlx4_roce_smac_vlan_info *smac_info, u8 port) { int vidx; int smac_index; int err; path->grh_mylmc = rdma_ah_get_path_bits(ah) & 0x7f; path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah)); if (rdma_ah_get_static_rate(ah)) { path->static_rate = rdma_ah_get_static_rate(ah) + MLX4_STAT_RATE_OFFSET; while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET && !(1 << path->static_rate & dev->dev->caps.stat_rate_support)) --path->static_rate; } else path->static_rate = 0; if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) { const struct ib_global_route *grh = rdma_ah_read_grh(ah); int real_sgid_index = mlx4_ib_gid_index_to_real_index(dev, grh->sgid_attr); if (real_sgid_index < 0) return real_sgid_index; if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) { pr_err("sgid_index (%u) too large. max is %d\n", real_sgid_index, dev->dev->caps.gid_table_len[port] - 1); return -1; } path->grh_mylmc |= 1 << 7; path->mgid_index = real_sgid_index; path->hop_limit = grh->hop_limit; path->tclass_flowlabel = cpu_to_be32((grh->traffic_class << 20) | (grh->flow_label)); memcpy(path->rgid, grh->dgid.raw, 16); } if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) { if (!(rdma_ah_get_ah_flags(ah) & IB_AH_GRH)) return -1; path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 7) << 3); path->feup |= MLX4_FEUP_FORCE_ETH_UP; if (vlan_tag < 0x1000) { if (smac_info->vid < 0x1000) { /* both valid vlan ids */ if (smac_info->vid != vlan_tag) { /* different VIDs. unreg old and reg new */ err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx); if (err) return err; smac_info->candidate_vid = vlan_tag; smac_info->candidate_vlan_index = vidx; smac_info->candidate_vlan_port = port; smac_info->update_vid = 1; path->vlan_index = vidx; } else { path->vlan_index = smac_info->vlan_index; } } else { /* no current vlan tag in qp */ err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx); if (err) return err; smac_info->candidate_vid = vlan_tag; smac_info->candidate_vlan_index = vidx; smac_info->candidate_vlan_port = port; smac_info->update_vid = 1; path->vlan_index = vidx; } path->feup |= MLX4_FVL_FORCE_ETH_VLAN; path->fl = 1 << 6; } else { /* have current vlan tag. 
unregister it at modify-qp success */ if (smac_info->vid < 0x1000) { smac_info->candidate_vid = 0xFFFF; smac_info->update_vid = 1; } } /* get smac_index for RoCE use. * If no smac was yet assigned, register one. * If one was already assigned, but the new mac differs, * unregister the old one and register the new one. */ if ((!smac_info->smac && !smac_info->smac_port) || smac_info->smac != smac) { /* register candidate now, unreg if needed, after success */ smac_index = mlx4_register_mac(dev->dev, port, smac); if (smac_index >= 0) { smac_info->candidate_smac_index = smac_index; smac_info->candidate_smac = smac; smac_info->candidate_smac_port = port; } else { return -EINVAL; } } else { smac_index = smac_info->smac_index; } memcpy(path->dmac, ah->roce.dmac, 6); path->ackto = MLX4_IB_LINK_TYPE_ETH; /* put MAC table smac index for IBoE */ path->grh_mylmc = (u8) (smac_index) | 0x80; } else { path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 0xf) << 2); } return 0; } static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, enum ib_qp_attr_mask qp_attr_mask, struct mlx4_ib_qp *mqp, struct mlx4_qp_path *path, u8 port, u16 vlan_id, u8 *smac) { return _mlx4_set_path(dev, &qp->ah_attr, ether_addr_to_u64(smac), vlan_id, path, &mqp->pri, port); } static int mlx4_set_alt_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, enum ib_qp_attr_mask qp_attr_mask, struct mlx4_ib_qp *mqp, struct mlx4_qp_path *path, u8 port) { return _mlx4_set_path(dev, &qp->alt_ah_attr, 0, 0xffff, path, &mqp->alt, port); } static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { struct mlx4_ib_gid_entry *ge, *tmp; list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) { ge->added = 1; ge->port = qp->port; } } } static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_qp_context *context) { u64 u64_mac; int smac_index; u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]); context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); if (!qp->pri.smac && !qp->pri.smac_port) { smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); if (smac_index >= 0) { qp->pri.candidate_smac_index = smac_index; qp->pri.candidate_smac = u64_mac; qp->pri.candidate_smac_port = qp->port; context->pri_path.grh_mylmc = 0x80 | (u8) smac_index; } else { return -ENOENT; } } return 0; } static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { struct counter_index *new_counter_index; int err; u32 tmp_idx; if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) != IB_LINK_LAYER_ETHERNET || !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) || !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK)) return 0; err = mlx4_counter_alloc(dev->dev, &tmp_idx, MLX4_RES_USAGE_DRIVER); if (err) return err; new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL); if (!new_counter_index) { mlx4_counter_free(dev->dev, tmp_idx); return -ENOMEM; } new_counter_index->index = tmp_idx; new_counter_index->allocated = 1; qp->counter_index = new_counter_index; mutex_lock(&dev->counters_table[qp->port - 1].mutex); list_add_tail(&new_counter_index->list, &dev->counters_table[qp->port - 1].counters_list); mutex_unlock(&dev->counters_table[qp->port - 1].mutex); return 0; } enum { MLX4_QPC_ROCE_MODE_1 = 0, MLX4_QPC_ROCE_MODE_2 = 2, MLX4_QPC_ROCE_MODE_UNDEFINED = 0xff }; static u8 gid_type_to_qpc(enum ib_gid_type gid_type) { switch 
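/*
 * Editor's note: the smac/vid "candidate" fields set in _mlx4_set_path()
 * above implement a two-phase commit: the new MAC/VLAN is registered
 * first and only promoted or rolled back once the firmware modify-QP
 * command has completed (see the tail of __mlx4_ib_modify_qp() below).
 * Rough flow, with hypothetical values:
 *
 *	_mlx4_set_path():      candidate_smac = new_mac (registered now)
 *	mlx4_qp_modify() ok:   old smac unregistered, smac = candidate
 *	mlx4_qp_modify() fail: candidate unregistered, smac unchanged
 */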
(gid_type) { case IB_GID_TYPE_ROCE: return MLX4_QPC_ROCE_MODE_1; case IB_GID_TYPE_ROCE_UDP_ENCAP: return MLX4_QPC_ROCE_MODE_2; default: return MLX4_QPC_ROCE_MODE_UNDEFINED; } } /* * Go over all RSS QP's childes (WQs) and apply their HW state according to * their logic state if the RSS QP is the first RSS QP associated for the WQ. */ static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num, struct ib_udata *udata) { int err = 0; int i; for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) { struct ib_wq *ibwq = ind_tbl->ind_tbl[i]; struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq); mutex_lock(&wq->mutex); /* Mlx4_ib restrictions: * WQ's is associated to a port according to the RSS QP it is * associates to. * In case the WQ is associated to a different port by another * RSS QP, return a failure. */ if ((wq->rss_usecnt > 0) && (wq->port != port_num)) { err = -EINVAL; mutex_unlock(&wq->mutex); break; } wq->port = port_num; if ((wq->rss_usecnt == 0) && (ibwq->state == IB_WQS_RDY)) { err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY, udata); if (err) { mutex_unlock(&wq->mutex); break; } } wq->rss_usecnt++; mutex_unlock(&wq->mutex); } if (i && err) { int j; for (j = (i - 1); j >= 0; j--) { struct ib_wq *ibwq = ind_tbl->ind_tbl[j]; struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq); mutex_lock(&wq->mutex); if ((wq->rss_usecnt == 1) && (ibwq->state == IB_WQS_RDY)) if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET, udata)) pr_warn("failed to reverse WQN=0x%06x\n", ibwq->wq_num); wq->rss_usecnt--; mutex_unlock(&wq->mutex); } } return err; } static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, struct ib_udata *udata) { int i; for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) { struct ib_wq *ibwq = ind_tbl->ind_tbl[i]; struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq); mutex_lock(&wq->mutex); if ((wq->rss_usecnt == 1) && (ibwq->state == IB_WQS_RDY)) if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET, udata)) pr_warn("failed to reverse WQN=%x\n", ibwq->wq_num); wq->rss_usecnt--; mutex_unlock(&wq->mutex); } } static void fill_qp_rss_context(struct mlx4_qp_context *context, struct mlx4_ib_qp *qp) { struct mlx4_rss_context *rss_context; rss_context = (void *)context + offsetof(struct mlx4_qp_context, pri_path) + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH; rss_context->base_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz); rss_context->default_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz & 0xffffff); if (qp->rss_ctx->flags & (MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6)) rss_context->base_qpn_udp = rss_context->default_qpn; rss_context->flags = qp->rss_ctx->flags; /* Currently support just toeplitz */ rss_context->hash_fn = MLX4_RSS_HASH_TOP; memcpy(rss_context->rss_key, qp->rss_ctx->rss_key, MLX4_EN_RSS_KEY_SIZE); } static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state, struct ib_udata *udata) { struct ib_srq *ibsrq; const struct ib_gid_attr *gid_attr = NULL; struct ib_rwq_ind_table *rwq_ind_tbl; enum ib_qp_type qp_type; struct mlx4_ib_dev *dev; struct mlx4_ib_qp *qp; struct mlx4_ib_pd *pd; struct mlx4_ib_cq *send_cq, *recv_cq; struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context( udata, struct mlx4_ib_ucontext, ibucontext); struct mlx4_qp_context *context; enum mlx4_qp_optpar optpar = 0; int sqd_event; int steer_qp = 0; int err = -EINVAL; int counter_index; if (src_type == MLX4_IB_RWQ_SRC) { struct ib_wq *ibwq; ibwq = (struct ib_wq *)src; ibsrq = NULL; rwq_ind_tbl = NULL; 
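/*
 * Editor's note (sketch of bringup_rss_rwqs() above): each WQ in the
 * indirection table is moved to hardware RDY only when the first RSS QP
 * references it, and rss_usecnt counts how many RSS QPs share it.  On a
 * mid-loop failure the WQs already touched are walked backwards and
 * reset, e.g. for a hypothetical 4-entry table failing at index 2:
 *
 *	i = 0, 1   -> modify to RDY, usecnt 0 -> 1
 *	i = 2      -> error
 *	j = 1, 0   -> modify back to RESET, usecnt 1 -> 0
 */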
qp_type = IB_QPT_RAW_PACKET; qp = to_mqp((struct ib_qp *)ibwq); dev = to_mdev(ibwq->device); pd = to_mpd(ibwq->pd); } else { struct ib_qp *ibqp; ibqp = (struct ib_qp *)src; ibsrq = ibqp->srq; rwq_ind_tbl = ibqp->rwq_ind_tbl; qp_type = ibqp->qp_type; qp = to_mqp(ibqp); dev = to_mdev(ibqp->device); pd = get_pd(qp); } /* APM is not supported under RoCE */ if (attr_mask & IB_QP_ALT_PATH && rdma_port_get_link_layer(&dev->ib_dev, qp->port) == IB_LINK_LAYER_ETHERNET) return -ENOTSUPP; context = kzalloc(sizeof *context, GFP_KERNEL); if (!context) return -ENOMEM; context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) | (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); if (!(attr_mask & IB_QP_PATH_MIG_STATE)) context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11); else { optpar |= MLX4_QP_OPTPAR_PM_STATE; switch (attr->path_mig_state) { case IB_MIG_MIGRATED: context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11); break; case IB_MIG_REARM: context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11); break; case IB_MIG_ARMED: context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11); break; } } if (qp->inl_recv_sz) context->param3 |= cpu_to_be32(1 << 25); if (qp->flags & MLX4_IB_QP_SCATTER_FCS) context->param3 |= cpu_to_be32(1 << 29); if (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI) context->mtu_msgmax = (IB_MTU_4096 << 5) | 11; else if (qp_type == IB_QPT_RAW_PACKET) context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX; else if (qp_type == IB_QPT_UD) { if (qp->flags & MLX4_IB_QP_LSO) context->mtu_msgmax = (IB_MTU_4096 << 5) | ilog2(dev->dev->caps.max_gso_sz); else context->mtu_msgmax = (IB_MTU_4096 << 5) | 13; } else if (attr_mask & IB_QP_PATH_MTU) { if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { pr_err("path MTU (%u) is invalid\n", attr->path_mtu); goto out; } context->mtu_msgmax = (attr->path_mtu << 5) | ilog2(dev->dev->caps.max_msg_sz); } if (!rwq_ind_tbl) { /* PRM RSS receive side should be left zeros */ if (qp->rq.wqe_cnt) context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; context->rq_size_stride |= qp->rq.wqe_shift - 4; } if (qp->sq.wqe_cnt) context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; context->sq_size_stride |= qp->sq.wqe_shift - 4; if (new_state == IB_QPS_RESET && qp->counter_index) mlx4_ib_free_qp_counter(dev, qp); if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { context->sq_size_stride |= !!qp->sq_no_prefetch << 7; context->xrcd = cpu_to_be32((u32) qp->xrcdn); if (qp_type == IB_QPT_RAW_PACKET) context->param3 |= cpu_to_be32(1 << 30); } if (ucontext) context->usr_page = cpu_to_be32( mlx4_to_hw_uar_index(dev->dev, ucontext->uar.index)); else context->usr_page = cpu_to_be32( mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index)); if (attr_mask & IB_QP_DEST_QPN) context->remote_qpn = cpu_to_be32(attr->dest_qp_num); if (attr_mask & IB_QP_PORT) { if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD && !(attr_mask & IB_QP_AV)) { mlx4_set_sched(&context->pri_path, attr->port_num); optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE; } } if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { err = create_qp_lb_counter(dev, qp); if (err) goto out; counter_index = dev->counters_table[qp->port - 1].default_counter; if (qp->counter_index) counter_index = qp->counter_index->index; if (counter_index != -1) { context->pri_path.counter_index = counter_index; optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX; if (qp->counter_index) { context->pri_path.fl |= MLX4_FL_ETH_SRC_CHECK_MC_LB; context->pri_path.vlan_control |= MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER; } } else 
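/*
 * Editor's note: mtu_msgmax above packs two fields into one byte, the
 * IB path MTU enum in the upper bits and log2 of the maximum message
 * size in the low 5 bits:
 *
 *	mtu_msgmax = (path_mtu << 5) | ilog2(max_msg_sz)
 *
 * so IB_MTU_4096 (5) with a hypothetical 2 GB max message gives
 * (5 << 5) | 31 = 0xbf.  The special cases above use a fixed log-size
 * instead of the device cap (e.g. 11 for GSI/SMI, 13 for plain UD).
 */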
context->pri_path.counter_index = MLX4_SINK_COUNTER_INDEX(dev->dev); if (qp->flags & MLX4_IB_QP_NETIF) { mlx4_ib_steer_qp_reg(dev, qp, 1); steer_qp = 1; } if (qp_type == IB_QPT_GSI) { enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ? IB_GID_TYPE_ROCE_UDP_ENCAP : IB_GID_TYPE_ROCE; u8 qpc_roce_mode = gid_type_to_qpc(gid_type); context->rlkey_roce_mode |= (qpc_roce_mode << 6); } } if (attr_mask & IB_QP_PKEY_INDEX) { if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) context->pri_path.disable_pkey_check = 0x40; context->pri_path.pkey_index = attr->pkey_index; optpar |= MLX4_QP_OPTPAR_PKEY_INDEX; } if (attr_mask & IB_QP_AV) { u8 port_num = mlx4_is_bonded(dev->dev) ? 1 : attr_mask & IB_QP_PORT ? attr->port_num : qp->port; u16 vlan = 0xffff; u8 smac[ETH_ALEN]; int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) && rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH; if (is_eth) { gid_attr = attr->ah_attr.grh.sgid_attr; err = rdma_read_gid_l2_fields(gid_attr, &vlan, &smac[0]); if (err) goto out; } if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path, port_num, vlan, smac)) goto out; optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE); if (is_eth && (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR)) { u8 qpc_roce_mode = gid_type_to_qpc(gid_attr->gid_type); if (qpc_roce_mode == MLX4_QPC_ROCE_MODE_UNDEFINED) { err = -EINVAL; goto out; } context->rlkey_roce_mode |= (qpc_roce_mode << 6); } } if (attr_mask & IB_QP_TIMEOUT) { context->pri_path.ackto |= attr->timeout << 3; optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT; } if (attr_mask & IB_QP_ALT_PATH) { if (attr->alt_port_num == 0 || attr->alt_port_num > dev->dev->caps.num_ports) goto out; if (attr->alt_pkey_index >= dev->dev->caps.pkey_table_len[attr->alt_port_num]) goto out; if (mlx4_set_alt_path(dev, attr, attr_mask, qp, &context->alt_path, attr->alt_port_num)) goto out; context->alt_path.pkey_index = attr->alt_pkey_index; context->alt_path.ackto = attr->alt_timeout << 3; optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH; } context->pd = cpu_to_be32(pd->pdn); if (!rwq_ind_tbl) { context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); get_cqs(qp, src_type, &send_cq, &recv_cq); } else { /* Set dummy CQs to be compatible with HV and PRM */ send_cq = to_mcq(rwq_ind_tbl->ind_tbl[0]->cq); recv_cq = send_cq; } context->cqn_send = cpu_to_be32(send_cq->mcq.cqn); context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn); /* Set "fast registration enabled" for all kernel QPs */ if (!ucontext) context->params1 |= cpu_to_be32(1 << 11); if (attr_mask & IB_QP_RNR_RETRY) { context->params1 |= cpu_to_be32(attr->rnr_retry << 13); optpar |= MLX4_QP_OPTPAR_RNR_RETRY; } if (attr_mask & IB_QP_RETRY_CNT) { context->params1 |= cpu_to_be32(attr->retry_cnt << 16); optpar |= MLX4_QP_OPTPAR_RETRY_COUNT; } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { if (attr->max_rd_atomic) context->params1 |= cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); optpar |= MLX4_QP_OPTPAR_SRA_MAX; } if (attr_mask & IB_QP_SQ_PSN) context->next_send_psn = cpu_to_be32(attr->sq_psn); if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { if (attr->max_dest_rd_atomic) context->params2 |= cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); optpar |= MLX4_QP_OPTPAR_RRA_MAX; } if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) { context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE; } if (ibsrq) context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC); if (attr_mask & IB_QP_MIN_RNR_TIMER) { 
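/*
 * Editor's note: params1 above is a packed word whose shifts place each
 * attribute in its hardware-defined field, roughly:
 *
 *	bits 13..15  rnr_retry
 *	bits 16..18  retry_cnt
 *	bits 21..23  log2 of max_rd_atomic, stored as fls(n - 1)
 *
 * plus the ACK request frequency field (MLX4_IB_ACK_REQ_FREQ << 28).
 * e.g. a hypothetical max_rd_atomic of 16 is encoded as fls(15) = 4.
 */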
context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT; } if (attr_mask & IB_QP_RQ_PSN) context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */ if (attr_mask & IB_QP_QKEY) { if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) context->qkey = cpu_to_be32(IB_QP_SET_QKEY); else { if (mlx4_is_mfunc(dev->dev) && !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) && (attr->qkey & MLX4_RESERVED_QKEY_MASK) == MLX4_RESERVED_QKEY_BASE) { pr_err("Cannot use reserved QKEY" " 0x%x (range 0xffff0000..0xffffffff" " is reserved)\n", attr->qkey); err = -EINVAL; goto out; } context->qkey = cpu_to_be32(attr->qkey); } optpar |= MLX4_QP_OPTPAR_Q_KEY; } if (ibsrq) context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibsrq)->msrq.srqn); if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) context->db_rec_addr = cpu_to_be64(qp->db.dma); if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR && (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI || qp_type == IB_QPT_UD || qp_type == IB_QPT_RAW_PACKET)) { context->pri_path.sched_queue = (qp->port - 1) << 6; if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) { context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE; if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI) context->pri_path.fl = 0x80; } else { if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) context->pri_path.fl = 0x80; context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE; } if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) == IB_LINK_LAYER_ETHERNET) { if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI || qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) context->pri_path.feup = 1 << 7; /* don't fsm */ /* handle smac_index */ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD || qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { err = handle_eth_ud_smac_index(dev, qp, context); if (err) { err = -EINVAL; goto out; } if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) dev->qp1_proxy[qp->port - 1] = qp; } } } if (qp_type == IB_QPT_RAW_PACKET) { context->pri_path.ackto = (context->pri_path.ackto & 0xf8) | MLX4_IB_LINK_TYPE_ETH; if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { /* set QP to receive both tunneled & non-tunneled packets */ if (!rwq_ind_tbl) context->srqn = cpu_to_be32(7 << 28); } } if (qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) { int is_eth = rdma_port_get_link_layer( &dev->ib_dev, qp->port) == IB_LINK_LAYER_ETHERNET; if (is_eth) { context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH; optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH; } } if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) sqd_event = 1; else sqd_event = 0; if (!ucontext && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) context->rlkey_roce_mode |= (1 << 4); /* * Before passing a kernel QP to the HW, make sure that the * ownership bits of the send queue are set and the SQ * headroom is stamped so that the hardware doesn't start * processing stale work requests. 
*/ if (!ucontext && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { struct mlx4_wqe_ctrl_seg *ctrl; int i; for (i = 0; i < qp->sq.wqe_cnt; ++i) { ctrl = get_send_wqe(qp, i); ctrl->owner_opcode = cpu_to_be32(1 << 31); ctrl->qpn_vlan.fence_size = 1 << (qp->sq.wqe_shift - 4); stamp_send_wqe(qp, i); } } if (rwq_ind_tbl && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { fill_qp_rss_context(context, qp); context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET); } err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), to_mlx4_state(new_state), context, optpar, sqd_event, &qp->mqp); if (err) goto out; qp->state = new_state; if (attr_mask & IB_QP_ACCESS_FLAGS) qp->atomic_rd_en = attr->qp_access_flags; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) qp->resp_depth = attr->max_dest_rd_atomic; if (attr_mask & IB_QP_PORT) { qp->port = attr->port_num; update_mcg_macs(dev, qp); } if (attr_mask & IB_QP_ALT_PATH) qp->alt_port = attr->alt_port_num; if (is_sqp(dev, qp)) store_sqp_attrs(qp->sqp, attr, attr_mask); /* * If we moved QP0 to RTR, bring the IB link up; if we moved * QP0 to RESET or ERROR, bring the link back down. */ if (is_qp0(dev, qp)) { if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR) if (mlx4_INIT_PORT(dev->dev, qp->port)) pr_warn("INIT_PORT failed for port %d\n", qp->port); if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR)) mlx4_CLOSE_PORT(dev->dev, qp->port); } /* * If we moved a kernel QP to RESET, clean up all old CQ * entries and reinitialize the QP. */ if (new_state == IB_QPS_RESET) { if (!ucontext) { mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, ibsrq ? to_msrq(ibsrq) : NULL); if (send_cq != recv_cq) mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); qp->rq.head = 0; qp->rq.tail = 0; qp->sq.head = 0; qp->sq.tail = 0; qp->sq_next_wqe = 0; if (qp->rq.wqe_cnt) *qp->db.db = 0; if (qp->flags & MLX4_IB_QP_NETIF) mlx4_ib_steer_qp_reg(dev, qp, 0); } if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); qp->pri.smac = 0; qp->pri.smac_port = 0; } if (qp->alt.smac) { mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); qp->alt.smac = 0; } if (qp->pri.vid < 0x1000) { mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); qp->pri.vid = 0xFFFF; qp->pri.candidate_vid = 0xFFFF; qp->pri.update_vid = 0; } if (qp->alt.vid < 0x1000) { mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); qp->alt.vid = 0xFFFF; qp->alt.candidate_vid = 0xFFFF; qp->alt.update_vid = 0; } } out: if (err && qp->counter_index) mlx4_ib_free_qp_counter(dev, qp); if (err && steer_qp) mlx4_ib_steer_qp_reg(dev, qp, 0); kfree(context); if (qp->pri.candidate_smac || (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) { if (err) { mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); } else { if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); qp->pri.smac = qp->pri.candidate_smac; qp->pri.smac_index = qp->pri.candidate_smac_index; qp->pri.smac_port = qp->pri.candidate_smac_port; } qp->pri.candidate_smac = 0; qp->pri.candidate_smac_index = 0; qp->pri.candidate_smac_port = 0; } if (qp->alt.candidate_smac) { if (err) { mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac); } else { if (qp->alt.smac) mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); qp->alt.smac = qp->alt.candidate_smac; qp->alt.smac_index = 
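/*
 * Editor's note: the RESET->INIT loop above writes 1 << 31 into every
 * send WQE's owner_opcode so that all entries start out owned by
 * software with an invalid opcode, and stamp_send_wqe() overwrites the
 * rest of the stride so the HCA never prefetches stale, seemingly valid
 * work requests before the first doorbell.  Rough picture per entry:
 *
 *	wqe[i].owner_opcode = 0x80000000   (SW ownership)
 *	wqe[i].fence_size   = stride / 16  (1 << (wqe_shift - 4))
 */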
qp->alt.candidate_smac_index; qp->alt.smac_port = qp->alt.candidate_smac_port; } qp->alt.candidate_smac = 0; qp->alt.candidate_smac_index = 0; qp->alt.candidate_smac_port = 0; } if (qp->pri.update_vid) { if (err) { if (qp->pri.candidate_vid < 0x1000) mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port, qp->pri.candidate_vid); } else { if (qp->pri.vid < 0x1000) mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); qp->pri.vid = qp->pri.candidate_vid; qp->pri.vlan_port = qp->pri.candidate_vlan_port; qp->pri.vlan_index = qp->pri.candidate_vlan_index; } qp->pri.candidate_vid = 0xFFFF; qp->pri.update_vid = 0; } if (qp->alt.update_vid) { if (err) { if (qp->alt.candidate_vid < 0x1000) mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port, qp->alt.candidate_vid); } else { if (qp->alt.vid < 0x1000) mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); qp->alt.vid = qp->alt.candidate_vid; qp->alt.vlan_port = qp->alt.candidate_vlan_port; qp->alt.vlan_index = qp->alt.candidate_vlan_index; } qp->alt.candidate_vid = 0xFFFF; qp->alt.update_vid = 0; } return err; } enum { MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK = (IB_QP_STATE | IB_QP_PORT), }; static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(ibqp->device); struct mlx4_ib_qp *qp = to_mqp(ibqp); enum ib_qp_state cur_state, new_state; int err = -EINVAL; mutex_lock(&qp->mutex); cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { pr_debug("qpn 0x%x: invalid attribute mask specified " "for transition %d to %d. qp_type %d," " attr_mask 0x%x\n", ibqp->qp_num, cur_state, new_state, ibqp->qp_type, attr_mask); goto out; } if (ibqp->rwq_ind_tbl) { if (!(((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) || ((cur_state == IB_QPS_INIT) && (new_state == IB_QPS_RTR)))) { pr_debug("qpn 0x%x: RSS QP unsupported transition %d to %d\n", ibqp->qp_num, cur_state, new_state); err = -EOPNOTSUPP; goto out; } if (attr_mask & ~MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK) { pr_debug("qpn 0x%x: RSS QP unsupported attribute mask 0x%x for transition %d to %d\n", ibqp->qp_num, attr_mask, cur_state, new_state); err = -EOPNOTSUPP; goto out; } } if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) { if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) { if ((ibqp->qp_type == IB_QPT_RC) || (ibqp->qp_type == IB_QPT_UD) || (ibqp->qp_type == IB_QPT_UC) || (ibqp->qp_type == IB_QPT_RAW_PACKET) || (ibqp->qp_type == IB_QPT_XRC_INI)) { attr->port_num = mlx4_ib_bond_next_port(dev); } } else { /* no sense in changing port_num * when ports are bonded */ attr_mask &= ~IB_QP_PORT; } } if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || attr->port_num > dev->num_ports)) { pr_debug("qpn 0x%x: invalid port number (%d) specified " "for transition %d to %d. qp_type %d\n", ibqp->qp_num, attr->port_num, cur_state, new_state, ibqp->qp_type); goto out; } if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) && (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) != IB_LINK_LAYER_ETHERNET)) goto out; if (attr_mask & IB_QP_PKEY_INDEX) { int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) { pr_debug("qpn 0x%x: invalid pkey index (%d) specified " "for transition %d to %d. 
qp_type %d\n", ibqp->qp_num, attr->pkey_index, cur_state, new_state, ibqp->qp_type); goto out; } } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) { pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. " "Transition %d to %d. qp_type %d\n", ibqp->qp_num, attr->max_rd_atomic, cur_state, new_state, ibqp->qp_type); goto out; } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) { pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. " "Transition %d to %d. qp_type %d\n", ibqp->qp_num, attr->max_dest_rd_atomic, cur_state, new_state, ibqp->qp_type); goto out; } if (cur_state == new_state && cur_state == IB_QPS_RESET) { err = 0; goto out; } if (ibqp->rwq_ind_tbl && (new_state == IB_QPS_INIT)) { err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num, udata); if (err) goto out; } err = __mlx4_ib_modify_qp(ibqp, MLX4_IB_QP_SRC, attr, attr_mask, cur_state, new_state, udata); if (ibqp->rwq_ind_tbl && err) bring_down_rss_rwqs(ibqp->rwq_ind_tbl, udata); if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) attr->port_num = 1; out: mutex_unlock(&qp->mutex); return err; } int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct mlx4_ib_qp *mqp = to_mqp(ibqp); int ret; if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) return -EOPNOTSUPP; ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata); if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) { struct mlx4_ib_sqp *sqp = mqp->sqp; int err = 0; if (sqp->roce_v2_gsi) err = ib_modify_qp(sqp->roce_v2_gsi, attr, attr_mask); if (err) pr_err("Failed to modify GSI QP for RoCEv2 (%d)\n", err); } return ret; } static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey) { int i; for (i = 0; i < dev->caps.num_ports; i++) { if (qpn == dev->caps.spec_qps[i].qp0_proxy || qpn == dev->caps.spec_qps[i].qp0_tunnel) { *qkey = dev->caps.spec_qps[i].qp0_qkey; return 0; } } return -EINVAL; } static int build_sriov_qp0_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) { struct mlx4_ib_dev *mdev = to_mdev(qp->ibqp.device); struct mlx4_ib_sqp *sqp = qp->sqp; struct ib_device *ib_dev = qp->ibqp.device; struct mlx4_wqe_mlx_seg *mlx = wqe; struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; struct mlx4_ib_ah *ah = to_mah(wr->ah); u16 pkey; u32 qkey; int send_size; int header_size; int spc; int err; int i; if (wr->wr.opcode != IB_WR_SEND) return -EINVAL; send_size = 0; for (i = 0; i < wr->wr.num_sge; ++i) send_size += wr->wr.sg_list[i].length; /* for proxy-qp0 sends, need to add in size of tunnel header */ /* for tunnel-qp0 sends, tunnel header is already in s/g list */ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) send_size += sizeof (struct mlx4_ib_tunnel_header); ib_ud_header_init(send_size, 1, 0, 0, 0, 0, 0, 0, &sqp->ud_header); if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { sqp->ud_header.lrh.service_level = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; sqp->ud_header.lrh.destination_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f); sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f); } mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); /* force loopback */ mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR); mlx->rlid = sqp->ud_header.lrh.destination_lid; sqp->ud_header.lrh.virtual_lane = 0; sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); err = ib_get_cached_pkey(ib_dev, qp->port, 0, &pkey); if 
(err) return err; sqp->ud_header.bth.pkey = cpu_to_be16(pkey); if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); else sqp->ud_header.bth.destination_qpn = cpu_to_be32(mdev->dev->caps.spec_qps[qp->port - 1].qp0_tunnel); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); if (mlx4_is_master(mdev->dev)) { if (mlx4_get_parav_qkey(mdev->dev, qp->mqp.qpn, &qkey)) return -EINVAL; } else { if (vf_get_qp0_qkey(mdev->dev, qp->mqp.qpn, &qkey)) return -EINVAL; } sqp->ud_header.deth.qkey = cpu_to_be32(qkey); sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->mqp.qpn); sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; sqp->ud_header.immediate_present = 0; header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); /* * Inline data segments may not cross a 64 byte boundary. If * our UD header is bigger than the space available up to the * next 64 byte boundary in the WQE, use two inline data * segments to hold the UD header. */ spc = MLX4_INLINE_ALIGN - ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); if (header_size <= spc) { inl->byte_count = cpu_to_be32(1 << 31 | header_size); memcpy(inl + 1, sqp->header_buf, header_size); i = 1; } else { inl->byte_count = cpu_to_be32(1 << 31 | spc); memcpy(inl + 1, sqp->header_buf, spc); inl = (void *) (inl + 1) + spc; memcpy(inl + 1, sqp->header_buf + spc, header_size - spc); /* * Need a barrier here to make sure all the data is * visible before the byte_count field is set. * Otherwise the HCA prefetcher could grab the 64-byte * chunk with this inline segment and get a valid (!= * 0xffffffff) byte count but stale data, and end up * generating a packet with bad headers. * * The first inline segment's byte_count field doesn't * need a barrier, because it comes after a * control/MLX segment and therefore is at an offset * of 16 mod 64. 
*/ wmb(); inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc)); i = 2; } *mlx_seg_len = ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16); return 0; } static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num) { union sl2vl_tbl_to_u64 tmp_vltab; u8 vl; if (sl > 15) return 0xf; tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]); vl = tmp_vltab.sl8[sl >> 1]; if (sl & 1) vl &= 0x0f; else vl >>= 4; return vl; } static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num, int index, union ib_gid *gid, enum ib_gid_type *gid_type) { struct mlx4_ib_iboe *iboe = &ibdev->iboe; struct mlx4_port_gid_table *port_gid_table; unsigned long flags; port_gid_table = &iboe->gids[port_num - 1]; spin_lock_irqsave(&iboe->lock, flags); memcpy(gid, &port_gid_table->gids[index].gid, sizeof(*gid)); *gid_type = port_gid_table->gids[index].gid_type; spin_unlock_irqrestore(&iboe->lock, flags); if (rdma_is_zero_gid(gid)) return -ENOENT; return 0; } #define MLX4_ROCEV2_QP1_SPORT 0xC000 static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) { struct mlx4_ib_sqp *sqp = qp->sqp; struct ib_device *ib_dev = qp->ibqp.device; struct mlx4_ib_dev *ibdev = to_mdev(ib_dev); struct mlx4_wqe_mlx_seg *mlx = wqe; struct mlx4_wqe_ctrl_seg *ctrl = wqe; struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; struct mlx4_ib_ah *ah = to_mah(wr->ah); union ib_gid sgid; u16 pkey; int send_size; int header_size; int spc; int i; int err = 0; u16 vlan = 0xffff; bool is_eth; bool is_vlan = false; bool is_grh; bool is_udp = false; int ip_version = 0; send_size = 0; for (i = 0; i < wr->wr.num_sge; ++i) send_size += wr->wr.sg_list[i].length; is_eth = rdma_port_get_link_layer(qp->ibqp.device, qp->port) == IB_LINK_LAYER_ETHERNET; is_grh = mlx4_ib_ah_grh_present(ah); if (is_eth) { enum ib_gid_type gid_type; if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { /* When multi-function is enabled, the ib_core gid * indexes don't necessarily match the hw ones, so * we must use our own cache */ err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev, be32_to_cpu(ah->av.ib.port_pd) >> 24, ah->av.ib.gid_index, &sgid.raw[0]); if (err) return err; } else { err = fill_gid_by_hw_index(ibdev, qp->port, ah->av.ib.gid_index, &sgid, &gid_type); if (!err) { is_udp = gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP; if (is_udp) { if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) ip_version = 4; else ip_version = 6; is_grh = false; } } else { return err; } } if (ah->av.eth.vlan != cpu_to_be16(0xffff)) { vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff; is_vlan = true; } } err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, ip_version, is_udp, 0, &sqp->ud_header); if (err) return err; if (!is_eth) { sqp->ud_header.lrh.service_level = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid; sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f); } if (is_grh || (ip_version == 6)) { sqp->ud_header.grh.traffic_class = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff; sqp->ud_header.grh.flow_label = ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff); sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit; if (is_eth) { memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16); } else { if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { /* When multi-function is enabled, the ib_core gid * indexes don't necessarily match the hw ones, so * we must use our own cache */ sqp->ud_header.grh.source_gid.global .subnet_prefix = 
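/*
 * Editor's note: sl_to_vl() above treats the cached sl2vl table as 8
 * bytes of packed nibbles, one 4-bit VL per SL.  The byte index is
 * sl >> 1 and sl & 1 selects the nibble (even SLs in the high nibble,
 * odd SLs in the low nibble), e.g. with a hypothetical byte
 * sl8[2] = 0x53:
 *
 *	sl = 4 (even) -> vl = 0x53 >> 4  = 5
 *	sl = 5 (odd)  -> vl = 0x53 & 0xf = 3
 */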
cpu_to_be64(atomic64_read( &(to_mdev(ib_dev) ->sriov .demux[qp->port - 1] .subnet_prefix))); sqp->ud_header.grh.source_gid.global .interface_id = to_mdev(ib_dev) ->sriov.demux[qp->port - 1] .guid_cache[ah->av.ib.gid_index]; } else { sqp->ud_header.grh.source_gid = ah->ibah.sgid_attr->gid; } } memcpy(sqp->ud_header.grh.destination_gid.raw, ah->av.ib.dgid, 16); } if (ip_version == 4) { sqp->ud_header.ip4.tos = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff; sqp->ud_header.ip4.id = 0; sqp->ud_header.ip4.frag_off = htons(IP_DF); sqp->ud_header.ip4.ttl = ah->av.eth.hop_limit; memcpy(&sqp->ud_header.ip4.saddr, sgid.raw + 12, 4); memcpy(&sqp->ud_header.ip4.daddr, ah->av.ib.dgid + 12, 4); sqp->ud_header.ip4.check = ib_ud_ip4_csum(&sqp->ud_header); } if (is_udp) { sqp->ud_header.udp.dport = htons(ROCE_V2_UDP_DPORT); sqp->ud_header.udp.sport = htons(MLX4_ROCEV2_QP1_SPORT); sqp->ud_header.udp.csum = 0; } mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); if (!is_eth) { mlx->flags |= cpu_to_be32((!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) | (sqp->ud_header.lrh.service_level << 8)); if (ah->av.ib.port_pd & cpu_to_be32(0x80000000)) mlx->flags |= cpu_to_be32(0x1); /* force loopback */ mlx->rlid = sqp->ud_header.lrh.destination_lid; } switch (wr->wr.opcode) { case IB_WR_SEND: sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; sqp->ud_header.immediate_present = 0; break; case IB_WR_SEND_WITH_IMM: sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; sqp->ud_header.immediate_present = 1; sqp->ud_header.immediate_data = wr->wr.ex.imm_data; break; default: return -EINVAL; } if (is_eth) { u16 ether_type; u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13; ether_type = (!is_udp) ? ETH_P_IBOE: (ip_version == 4 ? ETH_P_IP : ETH_P_IPV6); mlx->sched_prio = cpu_to_be16(pcp); ether_addr_copy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac); ether_addr_copy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac); memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2); memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4); if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); if (!is_vlan) { sqp->ud_header.eth.type = cpu_to_be16(ether_type); } else { sqp->ud_header.vlan.type = cpu_to_be16(ether_type); sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp); } } else { sqp->ud_header.lrh.virtual_lane = !qp->ibqp.qp_num ? 15 : sl_to_vl(to_mdev(ib_dev), sqp->ud_header.lrh.service_level, qp->port); if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15) return -EINVAL; if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; } sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); if (!qp->ibqp.qp_num) err = ib_get_cached_pkey(ib_dev, qp->port, sqp->pkey_index, &pkey); else err = ib_get_cached_pkey(ib_dev, qp->port, wr->pkey_index, &pkey); if (err) return err; sqp->ud_header.bth.pkey = cpu_to_be16(pkey); sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? 
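/*
 * Editor's note: for a RoCE v2 (UDP-encapsulated) GSI packet, the
 * header built above is layered as
 *
 *	Ethernet / [VLAN] / IPv4 or IPv6 / UDP / BTH / DETH / payload
 *
 * with udp.dport = ROCE_V2_UDP_DPORT (4791), a fixed udp.sport of
 * 0xC000 (MLX4_ROCEV2_QP1_SPORT) and a zero UDP checksum; IPv4 is
 * chosen when the source GID is a v4-mapped address, IPv6 otherwise.
 * Plain RoCE v1 keeps the GRH and uses ethertype ETH_P_IBOE instead.
 */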
sqp->qkey : wr->remote_qkey); sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num); header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); if (0) { pr_err("built UD header of size %d:\n", header_size); for (i = 0; i < header_size / 4; ++i) { if (i % 8 == 0) pr_err(" [%02x] ", i * 4); pr_cont(" %08x", be32_to_cpu(((__be32 *) sqp->header_buf)[i])); if ((i + 1) % 8 == 0) pr_cont("\n"); } pr_err("\n"); } /* * Inline data segments may not cross a 64 byte boundary. If * our UD header is bigger than the space available up to the * next 64 byte boundary in the WQE, use two inline data * segments to hold the UD header. */ spc = MLX4_INLINE_ALIGN - ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); if (header_size <= spc) { inl->byte_count = cpu_to_be32(1 << 31 | header_size); memcpy(inl + 1, sqp->header_buf, header_size); i = 1; } else { inl->byte_count = cpu_to_be32(1 << 31 | spc); memcpy(inl + 1, sqp->header_buf, spc); inl = (void *) (inl + 1) + spc; memcpy(inl + 1, sqp->header_buf + spc, header_size - spc); /* * Need a barrier here to make sure all the data is * visible before the byte_count field is set. * Otherwise the HCA prefetcher could grab the 64-byte * chunk with this inline segment and get a valid (!= * 0xffffffff) byte count but stale data, and end up * generating a packet with bad headers. * * The first inline segment's byte_count field doesn't * need a barrier, because it comes after a * control/MLX segment and therefore is at an offset * of 16 mod 64. */ wmb(); inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc)); i = 2; } *mlx_seg_len = ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16); return 0; } static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq) { unsigned cur; struct mlx4_ib_cq *cq; cur = wq->head - wq->tail; if (likely(cur + nreq < wq->max_post)) return 0; cq = to_mcq(ib_cq); spin_lock(&cq->lock); cur = wq->head - wq->tail; spin_unlock(&cq->lock); return cur + nreq >= wq->max_post; } static __be32 convert_access(int acc) { return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC) : 0) | (acc & IB_ACCESS_REMOTE_WRITE ? cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) | (acc & IB_ACCESS_REMOTE_READ ? cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ) : 0) | (acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) | cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ); } static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg, const struct ib_reg_wr *wr) { struct mlx4_ib_mr *mr = to_mmr(wr->mr); fseg->flags = convert_access(wr->access); fseg->mem_key = cpu_to_be32(wr->key); fseg->buf_list = cpu_to_be64(mr->page_map); fseg->start_addr = cpu_to_be64(mr->ibmr.iova); fseg->reg_len = cpu_to_be64(mr->ibmr.length); fseg->offset = 0; /* XXX -- is this just for ZBVA? 
*/ fseg->page_size = cpu_to_be32(ilog2(mr->ibmr.page_size)); fseg->reserved[0] = 0; fseg->reserved[1] = 0; } static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey) { memset(iseg, 0, sizeof(*iseg)); iseg->mem_key = cpu_to_be32(rkey); } static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg, u64 remote_addr, u32 rkey) { rseg->raddr = cpu_to_be64(remote_addr); rseg->rkey = cpu_to_be32(rkey); rseg->reserved = 0; } static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, const struct ib_atomic_wr *wr) { if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { aseg->swap_add = cpu_to_be64(wr->swap); aseg->compare = cpu_to_be64(wr->compare_add); } else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { aseg->swap_add = cpu_to_be64(wr->compare_add); aseg->compare = cpu_to_be64(wr->compare_add_mask); } else { aseg->swap_add = cpu_to_be64(wr->compare_add); aseg->compare = 0; } } static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg, const struct ib_atomic_wr *wr) { aseg->swap_add = cpu_to_be64(wr->swap); aseg->swap_add_mask = cpu_to_be64(wr->swap_mask); aseg->compare = cpu_to_be64(wr->compare_add); aseg->compare_mask = cpu_to_be64(wr->compare_add_mask); } static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, const struct ib_ud_wr *wr) { memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av)); dseg->dqpn = cpu_to_be32(wr->remote_qpn); dseg->qkey = cpu_to_be32(wr->remote_qkey); dseg->vlan = to_mah(wr->ah)->av.eth.vlan; memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6); } static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, struct mlx4_wqe_datagram_seg *dseg, const struct ib_ud_wr *wr, enum mlx4_ib_qp_type qpt) { union mlx4_ext_av *av = &to_mah(wr->ah)->av; struct mlx4_av sqp_av = {0}; int port = *((u8 *) &av->ib.port_pd) & 0x3; /* force loopback */ sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000); sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */ sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel & cpu_to_be32(0xf0000000); memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av)); if (qpt == MLX4_IB_QPT_PROXY_GSI) dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp1_tunnel); else dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp0_tunnel); /* Use QKEY from the QP context, which is set by master */ dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY); } static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) { struct mlx4_wqe_inline_seg *inl = wqe; struct mlx4_ib_tunnel_header hdr; struct mlx4_ib_ah *ah = to_mah(wr->ah); int spc; int i; memcpy(&hdr.av, &ah->av, sizeof hdr.av); hdr.remote_qpn = cpu_to_be32(wr->remote_qpn); hdr.pkey_index = cpu_to_be16(wr->pkey_index); hdr.qkey = cpu_to_be32(wr->remote_qkey); memcpy(hdr.mac, ah->av.eth.mac, 6); hdr.vlan = ah->av.eth.vlan; spc = MLX4_INLINE_ALIGN - ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); if (sizeof (hdr) <= spc) { memcpy(inl + 1, &hdr, sizeof (hdr)); wmb(); inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr)); i = 1; } else { memcpy(inl + 1, &hdr, spc); wmb(); inl->byte_count = cpu_to_be32(1 << 31 | spc); inl = (void *) (inl + 1) + spc; memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc); wmb(); inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc)); i = 2; } *mlx_seg_len = ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16); } static void set_mlx_icrc_seg(void *dseg) { u32 *t = dseg; struct mlx4_wqe_inline_seg *iseg = dseg; t[1] = 0; /* * Need a barrier here before 
writing the byte_count field to * make sure that all the data is visible before the * byte_count field is set. Otherwise, if the segment begins * a new cacheline, the HCA prefetcher could grab the 64-byte * chunk and get a valid (!= * 0xffffffff) byte count but * stale data, and end up sending the wrong data. */ wmb(); iseg->byte_count = cpu_to_be32((1 << 31) | 4); } static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) { dseg->lkey = cpu_to_be32(sg->lkey); dseg->addr = cpu_to_be64(sg->addr); /* * Need a barrier here before writing the byte_count field to * make sure that all the data is visible before the * byte_count field is set. Otherwise, if the segment begins * a new cacheline, the HCA prefetcher could grab the 64-byte * chunk and get a valid (!= * 0xffffffff) byte count but * stale data, and end up sending the wrong data. */ wmb(); dseg->byte_count = cpu_to_be32(sg->length); } static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) { dseg->byte_count = cpu_to_be32(sg->length); dseg->lkey = cpu_to_be32(sg->lkey); dseg->addr = cpu_to_be64(sg->addr); } static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp, unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh) { unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16); if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE)) *blh = cpu_to_be32(1 << 6); if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && wr->wr.num_sge > qp->sq.max_gs - (halign >> 4))) return -EINVAL; memcpy(wqe->header, wr->header, wr->hlen); *lso_hdr_sz = cpu_to_be32(wr->mss << 16 | wr->hlen); *lso_seg_len = halign; return 0; } static __be32 send_ieth(const struct ib_send_wr *wr) { switch (wr->opcode) { case IB_WR_SEND_WITH_IMM: case IB_WR_RDMA_WRITE_WITH_IMM: return wr->ex.imm_data; case IB_WR_SEND_WITH_INV: return cpu_to_be32(wr->ex.invalidate_rkey); default: return 0; } } static void add_zero_len_inline(void *wqe) { struct mlx4_wqe_inline_seg *inl = wqe; memset(wqe, 0, 16); inl->byte_count = cpu_to_be32(1 << 31); } static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr, bool drain) { struct mlx4_ib_qp *qp = to_mqp(ibqp); void *wqe; struct mlx4_wqe_ctrl_seg *ctrl; struct mlx4_wqe_data_seg *dseg; unsigned long flags; int nreq; int err = 0; unsigned ind; int size; unsigned seglen; __be32 dummy; __be32 *lso_wqe; __be32 lso_hdr_sz; __be32 blh; int i; struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) { struct mlx4_ib_sqp *sqp = qp->sqp; if (sqp->roce_v2_gsi) { struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah); enum ib_gid_type gid_type; union ib_gid gid; if (!fill_gid_by_hw_index(mdev, qp->port, ah->av.ib.gid_index, &gid, &gid_type)) qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? to_mqp(sqp->roce_v2_gsi) : qp; else pr_err("Failed to get gid at index %d. 
RoCEv2 will not work properly\n", ah->av.ib.gid_index); } } spin_lock_irqsave(&qp->sq.lock, flags); if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR && !drain) { err = -EIO; *bad_wr = wr; nreq = 0; goto out; } ind = qp->sq_next_wqe; for (nreq = 0; wr; ++nreq, wr = wr->next) { lso_wqe = &dummy; blh = 0; if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { err = -ENOMEM; *bad_wr = wr; goto out; } if (unlikely(wr->num_sge > qp->sq.max_gs)) { err = -EINVAL; *bad_wr = wr; goto out; } ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; ctrl->srcrb_flags = (wr->send_flags & IB_SEND_SIGNALED ? cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) | (wr->send_flags & IB_SEND_SOLICITED ? cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) | ((wr->send_flags & IB_SEND_IP_CSUM) ? cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) | qp->sq_signal_bits; ctrl->imm = send_ieth(wr); wqe += sizeof *ctrl; size = sizeof *ctrl / 16; switch (qp->mlx4_ib_qp_type) { case MLX4_IB_QPT_RC: case MLX4_IB_QPT_UC: switch (wr->opcode) { case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, atomic_wr(wr)->rkey); wqe += sizeof (struct mlx4_wqe_raddr_seg); set_atomic_seg(wqe, atomic_wr(wr)); wqe += sizeof (struct mlx4_wqe_atomic_seg); size += (sizeof (struct mlx4_wqe_raddr_seg) + sizeof (struct mlx4_wqe_atomic_seg)) / 16; break; case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, atomic_wr(wr)->rkey); wqe += sizeof (struct mlx4_wqe_raddr_seg); set_masked_atomic_seg(wqe, atomic_wr(wr)); wqe += sizeof (struct mlx4_wqe_masked_atomic_seg); size += (sizeof (struct mlx4_wqe_raddr_seg) + sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16; break; case IB_WR_RDMA_READ: case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey); wqe += sizeof (struct mlx4_wqe_raddr_seg); size += sizeof (struct mlx4_wqe_raddr_seg) / 16; break; case IB_WR_LOCAL_INV: ctrl->srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); set_local_inv_seg(wqe, wr->ex.invalidate_rkey); wqe += sizeof (struct mlx4_wqe_local_inval_seg); size += sizeof (struct mlx4_wqe_local_inval_seg) / 16; break; case IB_WR_REG_MR: ctrl->srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); set_reg_seg(wqe, reg_wr(wr)); wqe += sizeof(struct mlx4_wqe_fmr_seg); size += sizeof(struct mlx4_wqe_fmr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case MLX4_IB_QPT_TUN_SMI_OWNER: err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl, &seglen); if (unlikely(err)) { *bad_wr = wr; goto out; } wqe += seglen; size += seglen / 16; break; case MLX4_IB_QPT_TUN_SMI: case MLX4_IB_QPT_TUN_GSI: /* this is a UD qp used in MAD responses to slaves. 
*/ set_datagram_seg(wqe, ud_wr(wr)); /* set the forced-loopback bit in the data seg av */ *(__be32 *) wqe |= cpu_to_be32(0x80000000); wqe += sizeof (struct mlx4_wqe_datagram_seg); size += sizeof (struct mlx4_wqe_datagram_seg) / 16; break; case MLX4_IB_QPT_UD: set_datagram_seg(wqe, ud_wr(wr)); wqe += sizeof (struct mlx4_wqe_datagram_seg); size += sizeof (struct mlx4_wqe_datagram_seg) / 16; if (wr->opcode == IB_WR_LSO) { err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen, &lso_hdr_sz, &blh); if (unlikely(err)) { *bad_wr = wr; goto out; } lso_wqe = (__be32 *) wqe; wqe += seglen; size += seglen / 16; } break; case MLX4_IB_QPT_PROXY_SMI_OWNER: err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl, &seglen); if (unlikely(err)) { *bad_wr = wr; goto out; } wqe += seglen; size += seglen / 16; /* to start tunnel header on a cache-line boundary */ add_zero_len_inline(wqe); wqe += 16; size++; build_tunnel_header(ud_wr(wr), wqe, &seglen); wqe += seglen; size += seglen / 16; break; case MLX4_IB_QPT_PROXY_SMI: case MLX4_IB_QPT_PROXY_GSI: /* If we are tunneling special qps, this is a UD qp. * In this case we first add a UD segment targeting * the tunnel qp, and then add a header with address * information */ set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, ud_wr(wr), qp->mlx4_ib_qp_type); wqe += sizeof (struct mlx4_wqe_datagram_seg); size += sizeof (struct mlx4_wqe_datagram_seg) / 16; build_tunnel_header(ud_wr(wr), wqe, &seglen); wqe += seglen; size += seglen / 16; break; case MLX4_IB_QPT_SMI: case MLX4_IB_QPT_GSI: err = build_mlx_header(qp, ud_wr(wr), ctrl, &seglen); if (unlikely(err)) { *bad_wr = wr; goto out; } wqe += seglen; size += seglen / 16; break; default: break; } /* * Write data segments in reverse order, so as to * overwrite cacheline stamp last within each * cacheline. This avoids issues with WQE * prefetching. */ dseg = wqe; dseg += wr->num_sge - 1; size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16); /* Add one more inline data segment for ICRC for MLX sends */ if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI || qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) { set_mlx_icrc_seg(dseg + 1); size += sizeof (struct mlx4_wqe_data_seg) / 16; } for (i = wr->num_sge - 1; i >= 0; --i, --dseg) set_data_seg(dseg, wr->sg_list + i); /* * Possibly overwrite stamping in cacheline with LSO * segment only after making sure all data segments * are written. */ wmb(); *lso_wqe = lso_hdr_sz; ctrl->qpn_vlan.fence_size = (wr->send_flags & IB_SEND_FENCE ? MLX4_WQE_CTRL_FENCE : 0) | size; /* * Make sure descriptor is fully written before * setting ownership bit (because HW can start * executing as soon as we do). */ wmb(); if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) { *bad_wr = wr; err = -EINVAL; goto out; } ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; /* * We can improve latency by not stamping the last * send queue WQE until after ringing the doorbell, so * only stamp here if there are still more WQEs to post. */ if (wr->next) stamp_send_wqe(qp, ind + qp->sq_spare_wqes); ind++; } out: if (likely(nreq)) { qp->sq.head += nreq; /* * Make sure that descriptors are written before * doorbell record. 
*/ wmb(); writel_relaxed(qp->doorbell_qpn, to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL); stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1); qp->sq_next_wqe = ind; } spin_unlock_irqrestore(&qp->sq.lock, flags); return err; } int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { return _mlx4_ib_post_send(ibqp, wr, bad_wr, false); } static int _mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr, bool drain) { struct mlx4_ib_qp *qp = to_mqp(ibqp); struct mlx4_wqe_data_seg *scat; unsigned long flags; int err = 0; int nreq; int ind; int max_gs; int i; struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); max_gs = qp->rq.max_gs; spin_lock_irqsave(&qp->rq.lock, flags); if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR && !drain) { err = -EIO; *bad_wr = wr; nreq = 0; goto out; } ind = qp->rq.head & (qp->rq.wqe_cnt - 1); for (nreq = 0; wr; ++nreq, wr = wr->next) { if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { err = -ENOMEM; *bad_wr = wr; goto out; } if (unlikely(wr->num_sge > qp->rq.max_gs)) { err = -EINVAL; *bad_wr = wr; goto out; } scat = get_recv_wqe(qp, ind); if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) { ib_dma_sync_single_for_device(ibqp->device, qp->sqp_proxy_rcv[ind].map, sizeof (struct mlx4_ib_proxy_sqp_hdr), DMA_FROM_DEVICE); scat->byte_count = cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr)); /* use dma lkey from upper layer entry */ scat->lkey = cpu_to_be32(wr->sg_list->lkey); scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); scat++; max_gs--; } for (i = 0; i < wr->num_sge; ++i) __set_data_seg(scat + i, wr->sg_list + i); if (i < max_gs) { scat[i].byte_count = 0; scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY); scat[i].addr = 0; } qp->rq.wrid[ind] = wr->wr_id; ind = (ind + 1) & (qp->rq.wqe_cnt - 1); } out: if (likely(nreq)) { qp->rq.head += nreq; /* * Make sure that descriptors are written before * doorbell record. */ wmb(); *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); } spin_unlock_irqrestore(&qp->rq.lock, flags); return err; } int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { return _mlx4_ib_post_recv(ibqp, wr, bad_wr, false); } static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state) { switch (mlx4_state) { case MLX4_QP_STATE_RST: return IB_QPS_RESET; case MLX4_QP_STATE_INIT: return IB_QPS_INIT; case MLX4_QP_STATE_RTR: return IB_QPS_RTR; case MLX4_QP_STATE_RTS: return IB_QPS_RTS; case MLX4_QP_STATE_SQ_DRAINING: case MLX4_QP_STATE_SQD: return IB_QPS_SQD; case MLX4_QP_STATE_SQER: return IB_QPS_SQE; case MLX4_QP_STATE_ERR: return IB_QPS_ERR; default: return -1; } } static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state) { switch (mlx4_mig_state) { case MLX4_QP_PM_ARMED: return IB_MIG_ARMED; case MLX4_QP_PM_REARM: return IB_MIG_REARM; case MLX4_QP_PM_MIGRATED: return IB_MIG_MIGRATED; default: return -1; } } static int to_ib_qp_access_flags(int mlx4_flags) { int ib_flags = 0; if (mlx4_flags & MLX4_QP_BIT_RRE) ib_flags |= IB_ACCESS_REMOTE_READ; if (mlx4_flags & MLX4_QP_BIT_RWE) ib_flags |= IB_ACCESS_REMOTE_WRITE; if (mlx4_flags & MLX4_QP_BIT_RAE) ib_flags |= IB_ACCESS_REMOTE_ATOMIC; return ib_flags; } static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev, struct rdma_ah_attr *ah_attr, struct mlx4_qp_path *path) { struct mlx4_dev *dev = ibdev->dev; u8 port_num = path->sched_queue & 0x40 ? 
2 : 1; memset(ah_attr, 0, sizeof(*ah_attr)); if (port_num == 0 || port_num > dev->caps.num_ports) return; ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num); if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) rdma_ah_set_sl(ah_attr, ((path->sched_queue >> 3) & 0x7) | ((path->sched_queue & 4) << 1)); else rdma_ah_set_sl(ah_attr, (path->sched_queue >> 2) & 0xf); rdma_ah_set_port_num(ah_attr, port_num); rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid)); rdma_ah_set_path_bits(ah_attr, path->grh_mylmc & 0x7f); rdma_ah_set_static_rate(ah_attr, path->static_rate ? path->static_rate - 5 : 0); if (path->grh_mylmc & (1 << 7)) { rdma_ah_set_grh(ah_attr, NULL, be32_to_cpu(path->tclass_flowlabel) & 0xfffff, path->mgid_index, path->hop_limit, (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff); rdma_ah_set_dgid_raw(ah_attr, path->rgid); } } int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { struct mlx4_ib_dev *dev = to_mdev(ibqp->device); struct mlx4_ib_qp *qp = to_mqp(ibqp); struct mlx4_qp_context context; int mlx4_state; int err = 0; if (ibqp->rwq_ind_tbl) return -EOPNOTSUPP; mutex_lock(&qp->mutex); if (qp->state == IB_QPS_RESET) { qp_attr->qp_state = IB_QPS_RESET; goto done; } err = mlx4_qp_query(dev->dev, &qp->mqp, &context); if (err) { err = -EINVAL; goto out; } mlx4_state = be32_to_cpu(context.flags) >> 28; qp->state = to_ib_qp_state(mlx4_state); qp_attr->qp_state = qp->state; qp_attr->path_mtu = context.mtu_msgmax >> 5; qp_attr->path_mig_state = to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3); qp_attr->qkey = be32_to_cpu(context.qkey); qp_attr->rq_psn = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff; qp_attr->sq_psn = be32_to_cpu(context.next_send_psn) & 0xffffff; qp_attr->dest_qp_num = be32_to_cpu(context.remote_qpn) & 0xffffff; qp_attr->qp_access_flags = to_ib_qp_access_flags(be32_to_cpu(context.params2)); if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC || qp->ibqp.qp_type == IB_QPT_XRC_INI || qp->ibqp.qp_type == IB_QPT_XRC_TGT) { to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path); to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path); qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f; qp_attr->alt_port_num = rdma_ah_get_port_num(&qp_attr->alt_ah_attr); } qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f; if (qp_attr->qp_state == IB_QPS_INIT) qp_attr->port_num = qp->port; else qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1; /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING; qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7); qp_attr->max_dest_rd_atomic = 1 << ((be32_to_cpu(context.params2) >> 21) & 0x7); qp_attr->min_rnr_timer = (be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f; qp_attr->timeout = context.pri_path.ackto >> 3; qp_attr->retry_cnt = (be32_to_cpu(context.params1) >> 16) & 0x7; qp_attr->rnr_retry = (be32_to_cpu(context.params1) >> 13) & 0x7; qp_attr->alt_timeout = context.alt_path.ackto >> 3; done: qp_attr->cur_qp_state = qp_attr->qp_state; qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; qp_attr->cap.max_recv_sge = qp->rq.max_gs; if (!ibqp->uobject) { qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; qp_attr->cap.max_send_sge = qp->sq.max_gs; } else { qp_attr->cap.max_send_wr = 0; qp_attr->cap.max_send_sge = 0; } /* * We don't support inline sends for kernel QPs (yet), and we * don't know what userspace's value should be. 
*/ qp_attr->cap.max_inline_data = 0; qp_init_attr->cap = qp_attr->cap; qp_init_attr->create_flags = 0; if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; if (qp->flags & MLX4_IB_QP_LSO) qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO; if (qp->flags & MLX4_IB_QP_NETIF) qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP; qp_init_attr->sq_sig_type = qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; out: mutex_unlock(&qp->mutex); return err; } struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd, struct ib_wq_init_attr *init_attr, struct ib_udata *udata) { struct mlx4_dev *dev = to_mdev(pd->device)->dev; struct ib_qp_init_attr ib_qp_init_attr = {}; struct mlx4_ib_qp *qp; struct mlx4_ib_create_wq ucmd; int err, required_cmd_sz; if (!udata) return ERR_PTR(-EINVAL); required_cmd_sz = offsetof(typeof(ucmd), comp_mask) + sizeof(ucmd.comp_mask); if (udata->inlen < required_cmd_sz) { pr_debug("invalid inlen\n"); return ERR_PTR(-EINVAL); } if (udata->inlen > sizeof(ucmd) && !ib_is_udata_cleared(udata, sizeof(ucmd), udata->inlen - sizeof(ucmd))) { pr_debug("inlen is not supported\n"); return ERR_PTR(-EOPNOTSUPP); } if (udata->outlen) return ERR_PTR(-EOPNOTSUPP); if (init_attr->wq_type != IB_WQT_RQ) { pr_debug("unsupported wq type %d\n", init_attr->wq_type); return ERR_PTR(-EOPNOTSUPP); } if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS || !(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) { pr_debug("unsupported create_flags %u\n", init_attr->create_flags); return ERR_PTR(-EOPNOTSUPP); } qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); mutex_init(&qp->mutex); qp->pri.vid = 0xFFFF; qp->alt.vid = 0xFFFF; ib_qp_init_attr.qp_context = init_attr->wq_context; ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET; ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr; ib_qp_init_attr.cap.max_recv_sge = init_attr->max_sge; ib_qp_init_attr.recv_cq = init_attr->cq; ib_qp_init_attr.send_cq = ib_qp_init_attr.recv_cq; /* Dummy CQ */ if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS; err = create_rq(pd, &ib_qp_init_attr, udata, qp); if (err) { kfree(qp); return ERR_PTR(err); } qp->ibwq.event_handler = init_attr->event_handler; qp->ibwq.wq_num = qp->mqp.qpn; qp->ibwq.state = IB_WQS_RESET; return &qp->ibwq; } static int ib_wq2qp_state(enum ib_wq_state state) { switch (state) { case IB_WQS_RESET: return IB_QPS_RESET; case IB_WQS_RDY: return IB_QPS_RTR; default: return IB_QPS_ERR; } } static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state, struct ib_udata *udata) { struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); enum ib_qp_state qp_cur_state; enum ib_qp_state qp_new_state; int attr_mask; int err; /* ib_qp.state represents the WQ HW state while ib_wq.state represents * the WQ logic state. 
*/ qp_cur_state = qp->state; qp_new_state = ib_wq2qp_state(new_state); if (ib_wq2qp_state(new_state) == qp_cur_state) return 0; if (new_state == IB_WQS_RDY) { struct ib_qp_attr attr = {}; attr.port_num = qp->port; attr_mask = IB_QP_PORT; err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, &attr, attr_mask, IB_QPS_RESET, IB_QPS_INIT, udata); if (err) { pr_debug("WQN=0x%06x failed to apply RST->INIT on the HW QP\n", ibwq->wq_num); return err; } qp_cur_state = IB_QPS_INIT; } attr_mask = 0; err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL, attr_mask, qp_cur_state, qp_new_state, udata); if (err && (qp_cur_state == IB_QPS_INIT)) { qp_new_state = IB_QPS_RESET; if (__mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL, attr_mask, IB_QPS_INIT, IB_QPS_RESET, udata)) { pr_warn("WQN=0x%06x failed with reverting HW's resources failure\n", ibwq->wq_num); qp_new_state = IB_QPS_INIT; } } qp->state = qp_new_state; return err; } int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr, u32 wq_attr_mask, struct ib_udata *udata) { struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); struct mlx4_ib_modify_wq ucmd = {}; size_t required_cmd_sz; enum ib_wq_state cur_state, new_state; int err = 0; required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved); if (udata->inlen < required_cmd_sz) return -EINVAL; if (udata->inlen > sizeof(ucmd) && !ib_is_udata_cleared(udata, sizeof(ucmd), udata->inlen - sizeof(ucmd))) return -EOPNOTSUPP; if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) return -EFAULT; if (ucmd.comp_mask || ucmd.reserved) return -EOPNOTSUPP; if (wq_attr_mask & IB_WQ_FLAGS) return -EOPNOTSUPP; cur_state = wq_attr->curr_wq_state; new_state = wq_attr->wq_state; if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR)) return -EINVAL; if ((new_state == IB_WQS_ERR) && (cur_state == IB_WQS_RESET)) return -EINVAL; /* Need to protect against the parent RSS which also may modify WQ * state. */ mutex_lock(&qp->mutex); /* Can update HW state only if a RSS QP has already associated to this * WQ, so we can apply its port on the WQ. 
*/ if (qp->rss_usecnt) err = _mlx4_ib_modify_wq(ibwq, new_state, udata); if (!err) ibwq->state = new_state; mutex_unlock(&qp->mutex); return err; } int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(ibwq->device); struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); if (qp->counter_index) mlx4_ib_free_qp_counter(dev, qp); destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata); kfree(qp); return 0; } int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table, struct ib_rwq_ind_table_init_attr *init_attr, struct ib_udata *udata) { struct mlx4_ib_create_rwq_ind_tbl_resp resp = {}; unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size; struct ib_device *device = rwq_ind_table->device; unsigned int base_wqn; size_t min_resp_len; int i, err = 0; if (udata->inlen > 0 && !ib_is_udata_cleared(udata, 0, udata->inlen)) return -EOPNOTSUPP; min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); if (udata->outlen && udata->outlen < min_resp_len) return -EINVAL; if (ind_tbl_size > device->attrs.rss_caps.max_rwq_indirection_table_size) { pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n", ind_tbl_size, device->attrs.rss_caps.max_rwq_indirection_table_size); return -EINVAL; } base_wqn = init_attr->ind_tbl[0]->wq_num; if (base_wqn % ind_tbl_size) { pr_debug("WQN=0x%x isn't aligned with indirection table size\n", base_wqn); return -EINVAL; } for (i = 1; i < ind_tbl_size; i++) { if (++base_wqn != init_attr->ind_tbl[i]->wq_num) { pr_debug("indirection table's WQNs aren't consecutive\n"); return -EINVAL; } } if (udata->outlen) { resp.response_length = offsetof(typeof(resp), response_length) + sizeof(resp.response_length); err = ib_copy_to_udata(udata, &resp, resp.response_length); } return err; } struct mlx4_ib_drain_cqe { struct ib_cqe cqe; struct completion done; }; static void mlx4_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) { struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct mlx4_ib_drain_cqe, cqe); complete(&cqe->done); } /* This function returns only once the drained WR was completed */ static void handle_drain_completion(struct ib_cq *cq, struct mlx4_ib_drain_cqe *sdrain, struct mlx4_ib_dev *dev) { struct mlx4_dev *mdev = dev->dev; if (cq->poll_ctx == IB_POLL_DIRECT) { while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0) ib_process_cq_direct(cq, -1); return; } if (mdev->persist->state == MLX4_DEVICE_STATE_INTERNAL_ERROR) { struct mlx4_ib_cq *mcq = to_mcq(cq); bool triggered = false; unsigned long flags; spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); /* Make sure that the CQ handler won't run if wasn't run yet */ if (!mcq->mcq.reset_notify_added) mcq->mcq.reset_notify_added = 1; else triggered = true; spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); if (triggered) { /* Wait for any scheduled/running task to be ended */ switch (cq->poll_ctx) { case IB_POLL_SOFTIRQ: irq_poll_disable(&cq->iop); irq_poll_enable(&cq->iop); break; case IB_POLL_WORKQUEUE: cancel_work_sync(&cq->work); break; default: WARN_ON_ONCE(1); } } /* Run the CQ handler - this makes sure that the drain WR will * be processed if wasn't processed yet. 
*/ mcq->mcq.comp(&mcq->mcq); } wait_for_completion(&sdrain->done); } void mlx4_ib_drain_sq(struct ib_qp *qp) { struct ib_cq *cq = qp->send_cq; struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; struct mlx4_ib_drain_cqe sdrain; const struct ib_send_wr *bad_swr; struct ib_rdma_wr swr = { .wr = { .next = NULL, { .wr_cqe = &sdrain.cqe, }, .opcode = IB_WR_RDMA_WRITE, }, }; int ret; struct mlx4_ib_dev *dev = to_mdev(qp->device); struct mlx4_dev *mdev = dev->dev; ret = ib_modify_qp(qp, &attr, IB_QP_STATE); if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) { WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); return; } sdrain.cqe.done = mlx4_ib_drain_qp_done; init_completion(&sdrain.done); ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true); if (ret) { WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); return; } handle_drain_completion(cq, &sdrain, dev); } void mlx4_ib_drain_rq(struct ib_qp *qp) { struct ib_cq *cq = qp->recv_cq; struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; struct mlx4_ib_drain_cqe rdrain; struct ib_recv_wr rwr = {}; const struct ib_recv_wr *bad_rwr; int ret; struct mlx4_ib_dev *dev = to_mdev(qp->device); struct mlx4_dev *mdev = dev->dev; ret = ib_modify_qp(qp, &attr, IB_QP_STATE); if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) { WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); return; } rwr.wr_cqe = &rdrain.cqe; rdrain.cqe.done = mlx4_ib_drain_qp_done; init_completion(&rdrain.done); ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true); if (ret) { WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); return; } handle_drain_completion(cq, &rdrain, dev); } int mlx4_ib_qp_event_init(void) { mlx4_ib_qp_event_wq = alloc_ordered_workqueue("mlx4_ib_qp_event_wq", 0); if (!mlx4_ib_qp_event_wq) return -ENOMEM; return 0; } void mlx4_ib_qp_event_cleanup(void) { destroy_workqueue(mlx4_ib_qp_event_wq); }
linux-master
drivers/infiniband/hw/mlx4/qp.c
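The UD-header packing in build_mlx_header() and build_tunnel_header() above keeps repeating one idea: an inline segment's payload must not cross a 64-byte boundary, and its byte_count must only become valid after the payload itself is visible, otherwise the HCA prefetcher can latch a good count with stale data. The sketch below is not driver code, only a userspace model of that split (it follows build_tunnel_header's barrier-before-every-byte_count variant); MLX4_INLINE_ALIGN_DEMO, demo_inline_seg, demo_wmb and demo_pack_inline are invented stand-ins for the real mlx4 definitions.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MLX4_INLINE_ALIGN_DEMO 64u             /* stand-in for MLX4_INLINE_ALIGN */

struct demo_inline_seg {                       /* stand-in for struct mlx4_wqe_inline_seg */
        uint32_t byte_count;                   /* bit 31 marks the segment as inline */
};

static void demo_wmb(void)                     /* stand-in for the kernel's wmb() */
{
        atomic_thread_fence(memory_order_release);
}

/*
 * Pack header_size bytes of 'header' into one or two inline segments so that
 * no segment's payload crosses a 64-byte boundary, publishing each payload
 * only after a write barrier.  Returns the number of segments used.
 */
static int demo_pack_inline(void *seg_start, const void *header, unsigned header_size)
{
        struct demo_inline_seg *inl = seg_start;
        unsigned spc = MLX4_INLINE_ALIGN_DEMO -
                       ((uintptr_t)(inl + 1) & (MLX4_INLINE_ALIGN_DEMO - 1));

        if (header_size <= spc) {
                memcpy(inl + 1, header, header_size);
                demo_wmb();                    /* payload visible before byte_count */
                inl->byte_count = (1u << 31) | header_size;
                return 1;
        }

        memcpy(inl + 1, header, spc);
        demo_wmb();
        inl->byte_count = (1u << 31) | spc;

        /* second segment begins exactly on the 64-byte boundary */
        inl = (struct demo_inline_seg *)((char *)(inl + 1) + spc);
        memcpy(inl + 1, (const char *)header + spc, header_size - spc);
        demo_wmb();                            /* same rule for the second chunk */
        inl->byte_count = (1u << 31) | (header_size - spc);
        return 2;
}

int main(void)
{
        unsigned char wqe[256] = { 0 };        /* pretend WQE buffer */
        unsigned char hdr[80];                 /* oversized "UD header" */

        memset(hdr, 0xab, sizeof(hdr));
        printf("inline segments used: %d\n",
               demo_pack_inline(wqe + 16, hdr, sizeof(hdr)));
        return 0;
}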
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <rdma/uverbs_ioctl.h>

#include "mlx4_ib.h"

struct mlx4_ib_user_db_page {
        struct list_head        list;
        struct ib_umem         *umem;
        unsigned long           user_virt;
        int                     refcnt;
};

int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
                        struct mlx4_db *db)
{
        struct mlx4_ib_user_db_page *page;
        int err = 0;
        struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mlx4_ib_ucontext, ibucontext);

        mutex_lock(&context->db_page_mutex);

        list_for_each_entry(page, &context->db_page_list, list)
                if (page->user_virt == (virt & PAGE_MASK))
                        goto found;

        page = kmalloc(sizeof *page, GFP_KERNEL);
        if (!page) {
                err = -ENOMEM;
                goto out;
        }

        page->user_virt = (virt & PAGE_MASK);
        page->refcnt    = 0;
        page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
                                 PAGE_SIZE, 0);
        if (IS_ERR(page->umem)) {
                err = PTR_ERR(page->umem);
                kfree(page);
                goto out;
        }

        list_add(&page->list, &context->db_page_list);

found:
        db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
                  (virt & ~PAGE_MASK);
        db->u.user_page = page;
        ++page->refcnt;

out:
        mutex_unlock(&context->db_page_mutex);

        return err;
}

void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
{
        mutex_lock(&context->db_page_mutex);

        if (!--db->u.user_page->refcnt) {
                list_del(&db->u.user_page->list);
                ib_umem_release(db->u.user_page->umem);
                kfree(db->u.user_page);
        }

        mutex_unlock(&context->db_page_mutex);
}
linux-master
drivers/infiniband/hw/mlx4/doorbell.c
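mlx4_ib_db_map_user() above pins at most one page per page-aligned user address and shares it between all doorbell records that fall inside it, using a per-context list plus a refcount to decide when ib_umem_release() may finally run. The userspace sketch below models only that bookkeeping; db_page, db_map, db_unmap, PAGE_SIZE_DEMO and pinned_pages are invented names, and the real pinning is replaced by a counter, so this is an illustration of the pattern rather than driver code.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE_DEMO 4096UL
#define PAGE_MASK_DEMO (~(PAGE_SIZE_DEMO - 1))

struct db_page {                  /* models struct mlx4_ib_user_db_page */
        struct db_page *next;
        unsigned long user_virt;  /* page-aligned user address */
        int refcnt;
};

static struct db_page *db_list;   /* models context->db_page_list */
static int pinned_pages;          /* stands in for ib_umem_get()/ib_umem_release() */

/* Map a doorbell at 'virt': reuse an already-"pinned" page when one exists. */
static struct db_page *db_map(unsigned long virt)
{
        unsigned long page_addr = virt & PAGE_MASK_DEMO;
        struct db_page *p;

        for (p = db_list; p; p = p->next)
                if (p->user_virt == page_addr)
                        goto found;

        p = calloc(1, sizeof(*p));
        if (!p)
                return NULL;
        p->user_virt = page_addr;
        p->next = db_list;
        db_list = p;
        pinned_pages++;           /* "pin" the page exactly once */
found:
        p->refcnt++;
        return p;
}

static void db_unmap(struct db_page *p)
{
        struct db_page **pp;

        if (--p->refcnt)
                return;

        /* last user of this page: unlink it and "unpin" */
        for (pp = &db_list; *pp; pp = &(*pp)->next) {
                if (*pp == p) {
                        *pp = p->next;
                        break;
                }
        }
        pinned_pages--;
        free(p);
}

int main(void)
{
        struct db_page *a = db_map(0x1000 + 8);   /* two doorbell records ... */
        struct db_page *b = db_map(0x1000 + 64);  /* ... in the same page */

        printf("pinned=%d shared=%s\n", pinned_pages, a == b ? "yes" : "no");
        db_unmap(a);
        db_unmap(b);
        printf("pinned=%d\n", pinned_pages);
        return 0;
}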
/* * Copyright (c) 2012 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <rdma/ib_mad.h> #include <rdma/ib_smi.h> #include <rdma/ib_cache.h> #include <rdma/ib_sa.h> #include <linux/mlx4/cmd.h> #include <linux/rbtree.h> #include <linux/delay.h> #include "mlx4_ib.h" #define MAX_VFS 80 #define MAX_PEND_REQS_PER_FUNC 4 #define MAD_TIMEOUT_MS 2000 #define mcg_warn(fmt, arg...) pr_warn("MCG WARNING: " fmt, ##arg) #define mcg_error(fmt, arg...) pr_err(fmt, ##arg) #define mcg_warn_group(group, format, arg...) \ pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\ (group)->name, group->demux->port, ## arg) #define mcg_debug_group(group, format, arg...) \ pr_debug("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\ (group)->name, (group)->demux->port, ## arg) #define mcg_error_group(group, format, arg...) \ pr_err(" %16s: " format, (group)->name, ## arg) static union ib_gid mgid0; static struct workqueue_struct *clean_wq; enum mcast_state { MCAST_NOT_MEMBER = 0, MCAST_MEMBER, }; enum mcast_group_state { MCAST_IDLE, MCAST_JOIN_SENT, MCAST_LEAVE_SENT, MCAST_RESP_READY }; struct mcast_member { enum mcast_state state; uint8_t join_state; int num_pend_reqs; struct list_head pending; }; struct ib_sa_mcmember_data { union ib_gid mgid; union ib_gid port_gid; __be32 qkey; __be16 mlid; u8 mtusel_mtu; u8 tclass; __be16 pkey; u8 ratesel_rate; u8 lifetmsel_lifetm; __be32 sl_flowlabel_hoplimit; u8 scope_join_state; u8 proxy_join; u8 reserved[2]; } __packed __aligned(4); struct mcast_group { struct ib_sa_mcmember_data rec; struct rb_node node; struct list_head mgid0_list; struct mlx4_ib_demux_ctx *demux; struct mcast_member func[MAX_VFS]; struct mutex lock; struct work_struct work; struct list_head pending_list; int members[3]; enum mcast_group_state state; enum mcast_group_state prev_state; struct ib_sa_mad response_sa_mad; __be64 last_req_tid; char name[33]; /* MGID string */ struct device_attribute dentry; /* refcount is the reference count for the following: 1. Each queued request 2. Each invocation of the worker thread 3. 
Membership of the port at the SA */ atomic_t refcount; /* delayed work to clean pending SM request */ struct delayed_work timeout_work; struct list_head cleanup_list; }; struct mcast_req { int func; struct ib_sa_mad sa_mad; struct list_head group_list; struct list_head func_list; struct mcast_group *group; int clean; }; #define safe_atomic_dec(ref) \ do {\ if (atomic_dec_and_test(ref)) \ mcg_warn_group(group, "did not expect to reach zero\n"); \ } while (0) static const char *get_state_string(enum mcast_group_state state) { switch (state) { case MCAST_IDLE: return "MCAST_IDLE"; case MCAST_JOIN_SENT: return "MCAST_JOIN_SENT"; case MCAST_LEAVE_SENT: return "MCAST_LEAVE_SENT"; case MCAST_RESP_READY: return "MCAST_RESP_READY"; } return "Invalid State"; } static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx, union ib_gid *mgid) { struct rb_node *node = ctx->mcg_table.rb_node; struct mcast_group *group; int ret; while (node) { group = rb_entry(node, struct mcast_group, node); ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid); if (!ret) return group; if (ret < 0) node = node->rb_left; else node = node->rb_right; } return NULL; } static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx, struct mcast_group *group) { struct rb_node **link = &ctx->mcg_table.rb_node; struct rb_node *parent = NULL; struct mcast_group *cur_group; int ret; while (*link) { parent = *link; cur_group = rb_entry(parent, struct mcast_group, node); ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw, sizeof group->rec.mgid); if (ret < 0) link = &(*link)->rb_left; else if (ret > 0) link = &(*link)->rb_right; else return cur_group; } rb_link_node(&group->node, parent, link); rb_insert_color(&group->node, &ctx->mcg_table); return NULL; } static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad) { struct mlx4_ib_dev *dev = ctx->dev; struct rdma_ah_attr ah_attr; unsigned long flags; spin_lock_irqsave(&dev->sm_lock, flags); if (!dev->sm_ah[ctx->port - 1]) { /* port is not yet Active, sm_ah not ready */ spin_unlock_irqrestore(&dev->sm_lock, flags); return -EAGAIN; } mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr); spin_unlock_irqrestore(&dev->sm_lock, flags); return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY, &ah_attr, NULL, 0xffff, mad); } static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad) { struct mlx4_ib_dev *dev = ctx->dev; struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1]; struct ib_wc wc; struct rdma_ah_attr ah_attr; /* Our agent might not yet be registered when mads start to arrive */ if (!agent) return -EAGAIN; rdma_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr); if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index)) return -EINVAL; wc.sl = 0; wc.dlid_path_bits = 0; wc.port_num = ctx->port; wc.slid = rdma_ah_get_dlid(&ah_attr); /* opensm lid */ wc.src_qp = 1; return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad); } static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad) { struct ib_sa_mad mad; struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data; int ret; /* we rely on a mad request as arrived from a VF */ memcpy(&mad, sa_mad, sizeof mad); /* fix port GID to be the real one (slave 0) */ sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0]; /* assign our own TID */ mad.mad_hdr.tid = 
mlx4_ib_get_new_demux_tid(group->demux); group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */ ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad); /* set timeout handler */ if (!ret) { /* calls mlx4_ib_mcg_timeout_handler */ queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, msecs_to_jiffies(MAD_TIMEOUT_MS)); } return ret; } static int send_leave_to_wire(struct mcast_group *group, u8 join_state) { struct ib_sa_mad mad; struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data; int ret; memset(&mad, 0, sizeof mad); mad.mad_hdr.base_version = 1; mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM; mad.mad_hdr.class_version = 2; mad.mad_hdr.method = IB_SA_METHOD_DELETE; mad.mad_hdr.status = cpu_to_be16(0); mad.mad_hdr.class_specific = cpu_to_be16(0); mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux); group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */ mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC); mad.mad_hdr.attr_mod = cpu_to_be32(0); mad.sa_hdr.sm_key = 0x0; mad.sa_hdr.attr_offset = cpu_to_be16(7); mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE; *sa_data = group->rec; sa_data->scope_join_state = join_state; ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad); if (ret) group->state = MCAST_IDLE; /* set timeout handler */ if (!ret) { /* calls mlx4_ib_mcg_timeout_handler */ queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, msecs_to_jiffies(MAD_TIMEOUT_MS)); } return ret; } static int send_reply_to_slave(int slave, struct mcast_group *group, struct ib_sa_mad *req_sa_mad, u16 status) { struct ib_sa_mad mad; struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data; struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data; int ret; memset(&mad, 0, sizeof mad); mad.mad_hdr.base_version = 1; mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM; mad.mad_hdr.class_version = 2; mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP; mad.mad_hdr.status = cpu_to_be16(status); mad.mad_hdr.class_specific = cpu_to_be16(0); mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid; *(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */ mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC); mad.mad_hdr.attr_mod = cpu_to_be32(0); mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key; mad.sa_hdr.attr_offset = cpu_to_be16(7); mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */ *sa_data = group->rec; /* reconstruct VF's requested join_state and port_gid */ sa_data->scope_join_state &= 0xf0; sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f); memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid); ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad); return ret; } static int check_selector(ib_sa_comp_mask comp_mask, ib_sa_comp_mask selector_mask, ib_sa_comp_mask value_mask, u8 src_value, u8 dst_value) { int err; u8 selector = dst_value >> 6; dst_value &= 0x3f; src_value &= 0x3f; if (!(comp_mask & selector_mask) || !(comp_mask & value_mask)) return 0; switch (selector) { case IB_SA_GT: err = (src_value <= dst_value); break; case IB_SA_LT: err = (src_value >= dst_value); break; case IB_SA_EQ: err = (src_value != dst_value); break; default: err = 0; break; } return err; } static u16 cmp_rec(struct ib_sa_mcmember_data *src, struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask) { /* src is group record, dst is request record */ /* MGID must 
already match */ /* Port_GID we always replace to our Port_GID, so it is a match */ #define MAD_STATUS_REQ_INVALID 0x0200 if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey) return MAD_STATUS_REQ_INVALID; if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid) return MAD_STATUS_REQ_INVALID; if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR, IB_SA_MCMEMBER_REC_MTU, src->mtusel_mtu, dst->mtusel_mtu)) return MAD_STATUS_REQ_INVALID; if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS && src->tclass != dst->tclass) return MAD_STATUS_REQ_INVALID; if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey) return MAD_STATUS_REQ_INVALID; if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR, IB_SA_MCMEMBER_REC_RATE, src->ratesel_rate, dst->ratesel_rate)) return MAD_STATUS_REQ_INVALID; if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR, IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME, src->lifetmsel_lifetm, dst->lifetmsel_lifetm)) return MAD_STATUS_REQ_INVALID; if (comp_mask & IB_SA_MCMEMBER_REC_SL && (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) != (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000)) return MAD_STATUS_REQ_INVALID; if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL && (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) != (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00)) return MAD_STATUS_REQ_INVALID; if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT && (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) != (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff)) return MAD_STATUS_REQ_INVALID; if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && (src->scope_join_state & 0xf0) != (dst->scope_join_state & 0xf0)) return MAD_STATUS_REQ_INVALID; /* join_state checked separately, proxy_join ignored */ return 0; } /* release group, return 1 if this was last release and group is destroyed * timout work is canceled sync */ static int release_group(struct mcast_group *group, int from_timeout_handler) { struct mlx4_ib_demux_ctx *ctx = group->demux; int nzgroup; mutex_lock(&ctx->mcg_table_lock); mutex_lock(&group->lock); if (atomic_dec_and_test(&group->refcount)) { if (!from_timeout_handler) { if (group->state != MCAST_IDLE && !cancel_delayed_work(&group->timeout_work)) { atomic_inc(&group->refcount); mutex_unlock(&group->lock); mutex_unlock(&ctx->mcg_table_lock); return 0; } } nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0); if (nzgroup) del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); if (!list_empty(&group->pending_list)) mcg_warn_group(group, "releasing a group with non empty pending list\n"); if (nzgroup) rb_erase(&group->node, &ctx->mcg_table); list_del_init(&group->mgid0_list); mutex_unlock(&group->lock); mutex_unlock(&ctx->mcg_table_lock); kfree(group); return 1; } else { mutex_unlock(&group->lock); mutex_unlock(&ctx->mcg_table_lock); } return 0; } static void adjust_membership(struct mcast_group *group, u8 join_state, int inc) { int i; for (i = 0; i < 3; i++, join_state >>= 1) if (join_state & 0x1) group->members[i] += inc; } static u8 get_leave_state(struct mcast_group *group) { u8 leave_state = 0; int i; for (i = 0; i < 3; i++) if (!group->members[i]) leave_state |= (1 << i); return leave_state & (group->rec.scope_join_state & 0xf); } static int join_group(struct mcast_group *group, int slave, u8 join_mask) { int ret = 0; u8 join_state; /* remove bits that slave is already member of, and adjust */ join_state = join_mask & (~group->func[slave].join_state); adjust_membership(group, 
join_state, 1); group->func[slave].join_state |= join_state; if (group->func[slave].state != MCAST_MEMBER && join_state) { group->func[slave].state = MCAST_MEMBER; ret = 1; } return ret; } static int leave_group(struct mcast_group *group, int slave, u8 leave_state) { int ret = 0; adjust_membership(group, leave_state, -1); group->func[slave].join_state &= ~leave_state; if (!group->func[slave].join_state) { group->func[slave].state = MCAST_NOT_MEMBER; ret = 1; } return ret; } static int check_leave(struct mcast_group *group, int slave, u8 leave_mask) { if (group->func[slave].state != MCAST_MEMBER) return MAD_STATUS_REQ_INVALID; /* make sure we're not deleting unset bits */ if (~group->func[slave].join_state & leave_mask) return MAD_STATUS_REQ_INVALID; if (!leave_mask) return MAD_STATUS_REQ_INVALID; return 0; } static void mlx4_ib_mcg_timeout_handler(struct work_struct *work) { struct delayed_work *delay = to_delayed_work(work); struct mcast_group *group; struct mcast_req *req = NULL; group = container_of(delay, typeof(*group), timeout_work); mutex_lock(&group->lock); if (group->state == MCAST_JOIN_SENT) { if (!list_empty(&group->pending_list)) { req = list_first_entry(&group->pending_list, struct mcast_req, group_list); list_del(&req->group_list); list_del(&req->func_list); --group->func[req->func].num_pend_reqs; mutex_unlock(&group->lock); kfree(req); if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) { if (release_group(group, 1)) return; } else { kfree(group); return; } mutex_lock(&group->lock); } else mcg_warn_group(group, "DRIVER BUG\n"); } else if (group->state == MCAST_LEAVE_SENT) { if (group->rec.scope_join_state & 0xf) group->rec.scope_join_state &= 0xf0; group->state = MCAST_IDLE; mutex_unlock(&group->lock); if (release_group(group, 1)) return; mutex_lock(&group->lock); } else mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state)); group->state = MCAST_IDLE; atomic_inc(&group->refcount); if (!queue_work(group->demux->mcg_wq, &group->work)) safe_atomic_dec(&group->refcount); mutex_unlock(&group->lock); } static int handle_leave_req(struct mcast_group *group, u8 leave_mask, struct mcast_req *req) { u16 status; if (req->clean) leave_mask = group->func[req->func].join_state; status = check_leave(group, req->func, leave_mask); if (!status) leave_group(group, req->func, leave_mask); if (!req->clean) send_reply_to_slave(req->func, group, &req->sa_mad, status); --group->func[req->func].num_pend_reqs; list_del(&req->group_list); list_del(&req->func_list); kfree(req); return 1; } static int handle_join_req(struct mcast_group *group, u8 join_mask, struct mcast_req *req) { u8 group_join_state = group->rec.scope_join_state & 0xf; int ref = 0; u16 status; struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; if (join_mask == (group_join_state & join_mask)) { /* port's membership need not change */ status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask); if (!status) join_group(group, req->func, join_mask); --group->func[req->func].num_pend_reqs; send_reply_to_slave(req->func, group, &req->sa_mad, status); list_del(&req->group_list); list_del(&req->func_list); kfree(req); ++ref; } else { /* port's membership needs to be updated */ group->prev_state = group->state; if (send_join_to_wire(group, &req->sa_mad)) { --group->func[req->func].num_pend_reqs; list_del(&req->group_list); list_del(&req->func_list); kfree(req); ref = 1; group->state = group->prev_state; } else group->state = MCAST_JOIN_SENT; } return ref; } static void 
mlx4_ib_mcg_work_handler(struct work_struct *work) { struct mcast_group *group; struct mcast_req *req = NULL; struct ib_sa_mcmember_data *sa_data; u8 req_join_state; int rc = 1; /* release_count - this is for the scheduled work */ u16 status; u8 method; group = container_of(work, typeof(*group), work); mutex_lock(&group->lock); /* First, let's see if a response from SM is waiting regarding this group. * If so, we need to update the group's REC. If this is a bad response, we * may need to send a bad response to a VF waiting for it. If VF is waiting * and this is a good response, the VF will be answered later in this func. */ if (group->state == MCAST_RESP_READY) { /* cancels mlx4_ib_mcg_timeout_handler */ cancel_delayed_work(&group->timeout_work); status = be16_to_cpu(group->response_sa_mad.mad_hdr.status); method = group->response_sa_mad.mad_hdr.method; if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) { mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n", be64_to_cpu(group->response_sa_mad.mad_hdr.tid), be64_to_cpu(group->last_req_tid)); group->state = group->prev_state; goto process_requests; } if (status) { if (!list_empty(&group->pending_list)) req = list_first_entry(&group->pending_list, struct mcast_req, group_list); if (method == IB_MGMT_METHOD_GET_RESP) { if (req) { send_reply_to_slave(req->func, group, &req->sa_mad, status); --group->func[req->func].num_pend_reqs; list_del(&req->group_list); list_del(&req->func_list); kfree(req); ++rc; } else mcg_warn_group(group, "no request for failed join\n"); } else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing) ++rc; } else { u8 resp_join_state; u8 cur_join_state; resp_join_state = ((struct ib_sa_mcmember_data *) group->response_sa_mad.data)->scope_join_state & 0xf; cur_join_state = group->rec.scope_join_state & 0xf; if (method == IB_MGMT_METHOD_GET_RESP) { /* successfull join */ if (!cur_join_state && resp_join_state) --rc; } else if (!resp_join_state) ++rc; memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec); } group->state = MCAST_IDLE; } process_requests: /* We should now go over pending join/leave requests, as long as we are idle. */ while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) { req = list_first_entry(&group->pending_list, struct mcast_req, group_list); sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; req_join_state = sa_data->scope_join_state & 0xf; /* For a leave request, we will immediately answer the VF, and * update our internal counters. The actual leave will be sent * to SM later, if at all needed. We dequeue the request now. 
*/ if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE) rc += handle_leave_req(group, req_join_state, req); else rc += handle_join_req(group, req_join_state, req); } /* Handle leaves */ if (group->state == MCAST_IDLE) { req_join_state = get_leave_state(group); if (req_join_state) { group->rec.scope_join_state &= ~req_join_state; group->prev_state = group->state; if (send_leave_to_wire(group, req_join_state)) { group->state = group->prev_state; ++rc; } else group->state = MCAST_LEAVE_SENT; } } if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) goto process_requests; mutex_unlock(&group->lock); while (rc--) release_group(group, 0); } static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx, __be64 tid, union ib_gid *new_mgid) { struct mcast_group *group = NULL, *cur_group, *n; struct mcast_req *req; mutex_lock(&ctx->mcg_table_lock); list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) { mutex_lock(&group->lock); if (group->last_req_tid == tid) { if (memcmp(new_mgid, &mgid0, sizeof mgid0)) { group->rec.mgid = *new_mgid; sprintf(group->name, "%016llx%016llx", be64_to_cpu(group->rec.mgid.global.subnet_prefix), be64_to_cpu(group->rec.mgid.global.interface_id)); list_del_init(&group->mgid0_list); cur_group = mcast_insert(ctx, group); if (cur_group) { /* A race between our code and SM. Silently cleaning the new one */ req = list_first_entry(&group->pending_list, struct mcast_req, group_list); --group->func[req->func].num_pend_reqs; list_del(&req->group_list); list_del(&req->func_list); kfree(req); mutex_unlock(&group->lock); mutex_unlock(&ctx->mcg_table_lock); release_group(group, 0); return NULL; } atomic_inc(&group->refcount); add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); mutex_unlock(&group->lock); mutex_unlock(&ctx->mcg_table_lock); return group; } else { struct mcast_req *tmp1, *tmp2; list_del(&group->mgid0_list); if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE) cancel_delayed_work_sync(&group->timeout_work); list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) { list_del(&tmp1->group_list); kfree(tmp1); } mutex_unlock(&group->lock); mutex_unlock(&ctx->mcg_table_lock); kfree(group); return NULL; } } mutex_unlock(&group->lock); } mutex_unlock(&ctx->mcg_table_lock); return NULL; } static ssize_t sysfs_show_group(struct device *dev, struct device_attribute *attr, char *buf); static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx, union ib_gid *mgid, int create) { struct mcast_group *group, *cur_group; int is_mgid0; int i; is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0); if (!is_mgid0) { group = mcast_find(ctx, mgid); if (group) goto found; } if (!create) return ERR_PTR(-ENOENT); group = kzalloc(sizeof(*group), GFP_KERNEL); if (!group) return ERR_PTR(-ENOMEM); group->demux = ctx; group->rec.mgid = *mgid; INIT_LIST_HEAD(&group->pending_list); INIT_LIST_HEAD(&group->mgid0_list); for (i = 0; i < MAX_VFS; ++i) INIT_LIST_HEAD(&group->func[i].pending); INIT_WORK(&group->work, mlx4_ib_mcg_work_handler); INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler); mutex_init(&group->lock); sprintf(group->name, "%016llx%016llx", be64_to_cpu(group->rec.mgid.global.subnet_prefix), be64_to_cpu(group->rec.mgid.global.interface_id)); sysfs_attr_init(&group->dentry.attr); group->dentry.show = sysfs_show_group; group->dentry.store = NULL; group->dentry.attr.name = group->name; group->dentry.attr.mode = 0400; group->state = MCAST_IDLE; if (is_mgid0) { 
list_add(&group->mgid0_list, &ctx->mcg_mgid0_list); goto found; } cur_group = mcast_insert(ctx, group); if (cur_group) { mcg_warn("group just showed up %s - confused\n", cur_group->name); kfree(group); return ERR_PTR(-EINVAL); } add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); found: atomic_inc(&group->refcount); return group; } static void queue_req(struct mcast_req *req) { struct mcast_group *group = req->group; atomic_inc(&group->refcount); /* for the request */ atomic_inc(&group->refcount); /* for scheduling the work */ list_add_tail(&req->group_list, &group->pending_list); list_add_tail(&req->func_list, &group->func[req->func].pending); /* calls mlx4_ib_mcg_work_handler */ if (!queue_work(group->demux->mcg_wq, &group->work)) safe_atomic_dec(&group->refcount); } int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave, struct ib_sa_mad *mad) { struct mlx4_ib_dev *dev = to_mdev(ibdev); struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data; struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1]; struct mcast_group *group; switch (mad->mad_hdr.method) { case IB_MGMT_METHOD_GET_RESP: case IB_SA_METHOD_DELETE_RESP: mutex_lock(&ctx->mcg_table_lock); group = acquire_group(ctx, &rec->mgid, 0); mutex_unlock(&ctx->mcg_table_lock); if (IS_ERR(group)) { if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) { __be64 tid = mad->mad_hdr.tid; *(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */ group = search_relocate_mgid0_group(ctx, tid, &rec->mgid); } else group = NULL; } if (!group) return 1; mutex_lock(&group->lock); group->response_sa_mad = *mad; group->prev_state = group->state; group->state = MCAST_RESP_READY; /* calls mlx4_ib_mcg_work_handler */ atomic_inc(&group->refcount); if (!queue_work(ctx->mcg_wq, &group->work)) safe_atomic_dec(&group->refcount); mutex_unlock(&group->lock); release_group(group, 0); return 1; /* consumed */ case IB_MGMT_METHOD_SET: case IB_SA_METHOD_GET_TABLE: case IB_SA_METHOD_GET_TABLE_RESP: case IB_SA_METHOD_DELETE: return 0; /* not consumed, pass-through to guest over tunnel */ default: mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n", port, mad->mad_hdr.method); return 1; /* consumed */ } } int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, int slave, struct ib_sa_mad *sa_mad) { struct mlx4_ib_dev *dev = to_mdev(ibdev); struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data; struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1]; struct mcast_group *group; struct mcast_req *req; int may_create = 0; if (ctx->flushing) return -EAGAIN; switch (sa_mad->mad_hdr.method) { case IB_MGMT_METHOD_SET: may_create = 1; fallthrough; case IB_SA_METHOD_DELETE: req = kzalloc(sizeof *req, GFP_KERNEL); if (!req) return -ENOMEM; req->func = slave; req->sa_mad = *sa_mad; mutex_lock(&ctx->mcg_table_lock); group = acquire_group(ctx, &rec->mgid, may_create); mutex_unlock(&ctx->mcg_table_lock); if (IS_ERR(group)) { kfree(req); return PTR_ERR(group); } mutex_lock(&group->lock); if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) { mutex_unlock(&group->lock); mcg_debug_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n", port, slave, MAX_PEND_REQS_PER_FUNC); release_group(group, 0); kfree(req); return -ENOMEM; } ++group->func[slave].num_pend_reqs; req->group = group; queue_req(req); mutex_unlock(&group->lock); release_group(group, 0); return 1; /* consumed */ case IB_SA_METHOD_GET_TABLE: case 
IB_MGMT_METHOD_GET_RESP: case IB_SA_METHOD_GET_TABLE_RESP: case IB_SA_METHOD_DELETE_RESP: return 0; /* not consumed, pass-through */ default: mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n", port, slave, sa_mad->mad_hdr.method); return 1; /* consumed */ } } static ssize_t sysfs_show_group(struct device *dev, struct device_attribute *attr, char *buf) { struct mcast_group *group = container_of(attr, struct mcast_group, dentry); struct mcast_req *req = NULL; char state_str[40]; char pending_str[40]; int len; int i; u32 hoplimit; if (group->state == MCAST_IDLE) scnprintf(state_str, sizeof(state_str), "%s", get_state_string(group->state)); else scnprintf(state_str, sizeof(state_str), "%s(TID=0x%llx)", get_state_string(group->state), be64_to_cpu(group->last_req_tid)); if (list_empty(&group->pending_list)) { scnprintf(pending_str, sizeof(pending_str), "No"); } else { req = list_first_entry(&group->pending_list, struct mcast_req, group_list); scnprintf(pending_str, sizeof(pending_str), "Yes(TID=0x%llx)", be64_to_cpu(req->sa_mad.mad_hdr.tid)); } len = sysfs_emit(buf, "%1d [%02d,%02d,%02d] %4d %4s %5s ", group->rec.scope_join_state & 0xf, group->members[2], group->members[1], group->members[0], atomic_read(&group->refcount), pending_str, state_str); for (i = 0; i < MAX_VFS; i++) { if (group->func[i].state == MCAST_MEMBER) len += sysfs_emit_at(buf, len, "%d[%1x] ", i, group->func[i].join_state); } hoplimit = be32_to_cpu(group->rec.sl_flowlabel_hoplimit); len += sysfs_emit_at(buf, len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x %4x %4x %2x %2x)\n", be16_to_cpu(group->rec.pkey), be32_to_cpu(group->rec.qkey), (group->rec.mtusel_mtu & 0xc0) >> 6, (group->rec.mtusel_mtu & 0x3f), group->rec.tclass, (group->rec.ratesel_rate & 0xc0) >> 6, (group->rec.ratesel_rate & 0x3f), (hoplimit & 0xf0000000) >> 28, (hoplimit & 0x0fffff00) >> 8, (hoplimit & 0x000000ff), group->rec.proxy_join); return len; } int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx) { char name[20]; atomic_set(&ctx->tid, 0); sprintf(name, "mlx4_ib_mcg%d", ctx->port); ctx->mcg_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); if (!ctx->mcg_wq) return -ENOMEM; mutex_init(&ctx->mcg_table_lock); ctx->mcg_table = RB_ROOT; INIT_LIST_HEAD(&ctx->mcg_mgid0_list); ctx->flushing = 0; return 0; } static void force_clean_group(struct mcast_group *group) { struct mcast_req *req, *tmp ; list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) { list_del(&req->group_list); kfree(req); } del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr); rb_erase(&group->node, &group->demux->mcg_table); kfree(group); } static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq) { int i; struct rb_node *p; struct mcast_group *group; unsigned long end; int count; for (i = 0; i < MAX_VFS; ++i) clean_vf_mcast(ctx, i); end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000); do { count = 0; mutex_lock(&ctx->mcg_table_lock); for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) ++count; mutex_unlock(&ctx->mcg_table_lock); if (!count) break; usleep_range(1000, 2000); } while (time_after(end, jiffies)); flush_workqueue(ctx->mcg_wq); if (destroy_wq) destroy_workqueue(ctx->mcg_wq); mutex_lock(&ctx->mcg_table_lock); while ((p = rb_first(&ctx->mcg_table)) != NULL) { group = rb_entry(p, struct mcast_group, node); if (atomic_read(&group->refcount)) mcg_debug_group(group, "group refcount %d!!! 
(pointer %p)\n", atomic_read(&group->refcount), group); force_clean_group(group); } mutex_unlock(&ctx->mcg_table_lock); } struct clean_work { struct work_struct work; struct mlx4_ib_demux_ctx *ctx; int destroy_wq; }; static void mcg_clean_task(struct work_struct *work) { struct clean_work *cw = container_of(work, struct clean_work, work); _mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq); cw->ctx->flushing = 0; kfree(cw); } void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq) { struct clean_work *work; if (ctx->flushing) return; ctx->flushing = 1; if (destroy_wq) { _mlx4_ib_mcg_port_cleanup(ctx, destroy_wq); ctx->flushing = 0; return; } work = kmalloc(sizeof *work, GFP_KERNEL); if (!work) { ctx->flushing = 0; return; } work->ctx = ctx; work->destroy_wq = destroy_wq; INIT_WORK(&work->work, mcg_clean_task); queue_work(clean_wq, &work->work); } static void build_leave_mad(struct mcast_req *req) { struct ib_sa_mad *mad = &req->sa_mad; mad->mad_hdr.method = IB_SA_METHOD_DELETE; } static void clear_pending_reqs(struct mcast_group *group, int vf) { struct mcast_req *req, *tmp, *group_first = NULL; int clear; int pend = 0; if (!list_empty(&group->pending_list)) group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list); list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) { clear = 1; if (group_first == req && (group->state == MCAST_JOIN_SENT || group->state == MCAST_LEAVE_SENT)) { clear = cancel_delayed_work(&group->timeout_work); pend = !clear; group->state = MCAST_IDLE; } if (clear) { --group->func[vf].num_pend_reqs; list_del(&req->group_list); list_del(&req->func_list); kfree(req); atomic_dec(&group->refcount); } } if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) { mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n", list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs); } } static int push_deleteing_req(struct mcast_group *group, int slave) { struct mcast_req *req; struct mcast_req *pend_req; if (!group->func[slave].join_state) return 0; req = kzalloc(sizeof *req, GFP_KERNEL); if (!req) return -ENOMEM; if (!list_empty(&group->func[slave].pending)) { pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list); if (pend_req->clean) { kfree(req); return 0; } } req->clean = 1; req->func = slave; req->group = group; ++group->func[slave].num_pend_reqs; build_leave_mad(req); queue_req(req); return 0; } void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave) { struct mcast_group *group; struct rb_node *p; mutex_lock(&ctx->mcg_table_lock); for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) { group = rb_entry(p, struct mcast_group, node); mutex_lock(&group->lock); if (atomic_read(&group->refcount)) { /* clear pending requests of this VF */ clear_pending_reqs(group, slave); push_deleteing_req(group, slave); } mutex_unlock(&group->lock); } mutex_unlock(&ctx->mcg_table_lock); } int mlx4_ib_mcg_init(void) { clean_wq = alloc_ordered_workqueue("mlx4_ib_mcg", WQ_MEM_RECLAIM); if (!clean_wq) return -ENOMEM; return 0; } void mlx4_ib_mcg_destroy(void) { destroy_workqueue(clean_wq); }
linux-master
drivers/infiniband/hw/mlx4/mcg.c
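A recurring idiom in the mcg.c record above is the pairing of the per-group reference count with the per-port workqueue: a reference is taken before work is queued for a group so the group cannot be released while the handler is pending, and that reference is dropped immediately when queue_work() reports the work was already queued. The fragment below is a minimal, self-contained kernel-module-style sketch of that pattern only; it is not code from the driver, and the demo_* names, the "demo_mcg" workqueue name, and the init/exit scaffolding are invented for illustration.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/printk.h>

/* Illustrative stand-in for struct mcast_group: a refcount plus deferred work. */
struct demo_group {
	atomic_t refcount;
	struct work_struct work;
};

static struct workqueue_struct *demo_wq;

static void demo_group_put(struct demo_group *g)
{
	/* Last reference gone: no handler or caller can still reach g. */
	if (atomic_dec_and_test(&g->refcount))
		kfree(g);
}

static void demo_work_handler(struct work_struct *work)
{
	struct demo_group *g = container_of(work, struct demo_group, work);

	pr_info("demo: handling deferred work for group %p\n", g);
	demo_group_put(g);	/* drop the reference taken when queueing */
}

/*
 * Take a reference before queueing so the group cannot be freed while the
 * handler is pending; if the work was already pending, queue_work() returns
 * false and the extra reference is dropped at once -- the same shape as the
 * "for scheduling the work" reference in queue_req()/safe_atomic_dec().
 */
static void demo_queue_group_work(struct demo_group *g)
{
	atomic_inc(&g->refcount);
	if (!queue_work(demo_wq, &g->work))
		demo_group_put(g);
}

static int __init demo_init(void)
{
	struct demo_group *g;

	demo_wq = alloc_ordered_workqueue("demo_mcg", WQ_MEM_RECLAIM);
	if (!demo_wq)
		return -ENOMEM;

	g = kzalloc(sizeof(*g), GFP_KERNEL);
	if (!g) {
		destroy_workqueue(demo_wq);
		return -ENOMEM;
	}
	atomic_set(&g->refcount, 1);		/* creator's reference */
	INIT_WORK(&g->work, demo_work_handler);

	demo_queue_group_work(g);		/* runs the handler once */
	demo_queue_group_work(g);		/* may coalesce with the first */

	flush_workqueue(demo_wq);
	demo_group_put(g);			/* drop the creator's reference */
	destroy_workqueue(demo_wq);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The driver allocates the per-port workqueue with alloc_ordered_workqueue(), so at most one group handler runs at a time for a given port; combined with group->lock, this keeps the pending_list processing and the state transitions in the handler single-threaded.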
/* * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> #include <linux/sched/mm.h> #include <linux/sched/task.h> #include <net/ipv6.h> #include <net/addrconf.h> #include <net/devlink.h> #include <rdma/ib_smi.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> #include <net/bonding.h> #include <linux/mlx4/driver.h> #include <linux/mlx4/cmd.h> #include <linux/mlx4/qp.h> #include "mlx4_ib.h" #include <rdma/mlx4-abi.h> #define DRV_NAME MLX4_IB_DRV_NAME #define DRV_VERSION "4.0-0" #define MLX4_IB_FLOW_MAX_PRIO 0xFFF #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF #define MLX4_IB_CARD_REV_A0 0xA0 MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver"); MODULE_LICENSE("Dual BSD/GPL"); int mlx4_ib_sm_guid_assign = 0; module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444); MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)"); static const char mlx4_ib_version[] = DRV_NAME ": Mellanox ConnectX InfiniBand driver v" DRV_VERSION "\n"; static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init); static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num); static int mlx4_ib_event(struct notifier_block *this, unsigned long event, void *param); static struct workqueue_struct *wq; static int check_flow_steering_support(struct mlx4_dev *dev) { int eth_num_ports = 0; int ib_num_ports = 0; int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED; if (dmfs) { int i; mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) eth_num_ports++; mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) ib_num_ports++; dmfs &= (!ib_num_ports || (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) && (!eth_num_ports || (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)); if (ib_num_ports && mlx4_is_mfunc(dev)) { pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n"); dmfs = 
0; } } return dmfs; } static int num_ib_ports(struct mlx4_dev *dev) { int ib_ports = 0; int i; mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) ib_ports++; return ib_ports; } static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u32 port_num) { struct mlx4_ib_dev *ibdev = to_mdev(device); struct net_device *dev, *ret = NULL; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { if (dev->dev.parent != ibdev->ib_dev.dev.parent || dev->dev_port + 1 != port_num) continue; if (mlx4_is_bonded(ibdev->dev)) { struct net_device *upper; upper = netdev_master_upper_dev_get_rcu(dev); if (upper) { struct net_device *active; active = bond_option_active_slave_get_rcu(netdev_priv(upper)); if (active) dev = active; } } dev_hold(dev); ret = dev; break; } rcu_read_unlock(); return ret; } static int mlx4_ib_update_gids_v1(struct gid_entry *gids, struct mlx4_ib_dev *ibdev, u32 port_num) { struct mlx4_cmd_mailbox *mailbox; int err; struct mlx4_dev *dev = ibdev->dev; int i; union ib_gid *gid_tbl; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return -ENOMEM; gid_tbl = mailbox->buf; for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid)); err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | port_num, 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); if (mlx4_is_bonded(dev)) err += mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | 2, 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); mlx4_free_cmd_mailbox(dev, mailbox); return err; } static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids, struct mlx4_ib_dev *ibdev, u32 port_num) { struct mlx4_cmd_mailbox *mailbox; int err; struct mlx4_dev *dev = ibdev->dev; int i; struct { union ib_gid gid; __be32 rsrvd1[2]; __be16 rsrvd2; u8 type; u8 version; __be32 rsrvd3; } *gid_tbl; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return -ENOMEM; gid_tbl = mailbox->buf; for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) { memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid)); if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { gid_tbl[i].version = 2; if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid)) gid_tbl[i].type = 1; } } err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_ROCE_ADDR << 8 | port_num, 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); if (mlx4_is_bonded(dev)) err += mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_ROCE_ADDR << 8 | 2, 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); mlx4_free_cmd_mailbox(dev, mailbox); return err; } static int mlx4_ib_update_gids(struct gid_entry *gids, struct mlx4_ib_dev *ibdev, u32 port_num) { if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num); return mlx4_ib_update_gids_v1(gids, ibdev, port_num); } static void free_gid_entry(struct gid_entry *entry) { memset(&entry->gid, 0, sizeof(entry->gid)); kfree(entry->ctx); entry->ctx = NULL; } static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context) { struct mlx4_ib_dev *ibdev = to_mdev(attr->device); struct mlx4_ib_iboe *iboe = &ibdev->iboe; struct mlx4_port_gid_table *port_gid_table; int free = -1, found = -1; int ret = 0; int hw_update = 0; int i; struct gid_entry *gids; u16 vlan_id = 0xffff; u8 mac[ETH_ALEN]; if (!rdma_cap_roce_gid_table(attr->device, attr->port_num)) return -EINVAL; if (attr->port_num > MLX4_MAX_PORTS) return -EINVAL; if (!context) return -EINVAL; ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]); if (ret) return ret; 
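/* Descriptive note (added, not in the original source): the software copy of
 * the per-port GID table below is protected by iboe->lock and each entry is
 * reference counted, so adding a GID that already exists with the same type
 * and vlan_id only bumps the existing entry's refcount instead of consuming
 * another hardware slot; the firmware table is rewritten only when hw_update
 * is set.
 */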
port_gid_table = &iboe->gids[attr->port_num - 1]; spin_lock_bh(&iboe->lock); for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) { if (!memcmp(&port_gid_table->gids[i].gid, &attr->gid, sizeof(attr->gid)) && port_gid_table->gids[i].gid_type == attr->gid_type && port_gid_table->gids[i].vlan_id == vlan_id) { found = i; break; } if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid)) free = i; /* HW has space */ } if (found < 0) { if (free < 0) { ret = -ENOSPC; } else { port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC); if (!port_gid_table->gids[free].ctx) { ret = -ENOMEM; } else { *context = port_gid_table->gids[free].ctx; port_gid_table->gids[free].gid = attr->gid; port_gid_table->gids[free].gid_type = attr->gid_type; port_gid_table->gids[free].vlan_id = vlan_id; port_gid_table->gids[free].ctx->real_index = free; port_gid_table->gids[free].ctx->refcount = 1; hw_update = 1; } } } else { struct gid_cache_context *ctx = port_gid_table->gids[found].ctx; *context = ctx; ctx->refcount++; } if (!ret && hw_update) { gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids), GFP_ATOMIC); if (!gids) { ret = -ENOMEM; *context = NULL; free_gid_entry(&port_gid_table->gids[free]); } else { for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) { memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); gids[i].gid_type = port_gid_table->gids[i].gid_type; } } } spin_unlock_bh(&iboe->lock); if (!ret && hw_update) { ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num); if (ret) { spin_lock_bh(&iboe->lock); *context = NULL; free_gid_entry(&port_gid_table->gids[free]); spin_unlock_bh(&iboe->lock); } kfree(gids); } return ret; } static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context) { struct gid_cache_context *ctx = *context; struct mlx4_ib_dev *ibdev = to_mdev(attr->device); struct mlx4_ib_iboe *iboe = &ibdev->iboe; struct mlx4_port_gid_table *port_gid_table; int ret = 0; int hw_update = 0; struct gid_entry *gids; if (!rdma_cap_roce_gid_table(attr->device, attr->port_num)) return -EINVAL; if (attr->port_num > MLX4_MAX_PORTS) return -EINVAL; port_gid_table = &iboe->gids[attr->port_num - 1]; spin_lock_bh(&iboe->lock); if (ctx) { ctx->refcount--; if (!ctx->refcount) { unsigned int real_index = ctx->real_index; free_gid_entry(&port_gid_table->gids[real_index]); hw_update = 1; } } if (!ret && hw_update) { int i; gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids), GFP_ATOMIC); if (!gids) { ret = -ENOMEM; } else { for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) { memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); gids[i].gid_type = port_gid_table->gids[i].gid_type; } } } spin_unlock_bh(&iboe->lock); if (!ret && hw_update) { ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num); kfree(gids); } return ret; } int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev, const struct ib_gid_attr *attr) { struct mlx4_ib_iboe *iboe = &ibdev->iboe; struct gid_cache_context *ctx = NULL; struct mlx4_port_gid_table *port_gid_table; int real_index = -EINVAL; int i; unsigned long flags; u32 port_num = attr->port_num; if (port_num > MLX4_MAX_PORTS) return -EINVAL; if (mlx4_is_bonded(ibdev->dev)) port_num = 1; if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num)) return attr->index; spin_lock_irqsave(&iboe->lock, flags); port_gid_table = &iboe->gids[port_num - 1]; for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) if (!memcmp(&port_gid_table->gids[i].gid, &attr->gid, sizeof(attr->gid)) && attr->gid_type == port_gid_table->gids[i].gid_type) { ctx = 
port_gid_table->gids[i].ctx; break; } if (ctx) real_index = ctx->real_index; spin_unlock_irqrestore(&iboe->lock, flags); return real_index; } static int mlx4_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *uhw) { struct mlx4_ib_dev *dev = to_mdev(ibdev); struct ib_smp *in_mad; struct ib_smp *out_mad; int err; int have_ib_ports; struct mlx4_uverbs_ex_query_device cmd; struct mlx4_uverbs_ex_query_device_resp resp = {}; struct mlx4_clock_params clock_params; if (uhw->inlen) { if (uhw->inlen < sizeof(cmd)) return -EINVAL; err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd)); if (err) return err; if (cmd.comp_mask) return -EINVAL; if (cmd.reserved) return -EINVAL; } resp.response_length = offsetof(typeof(resp), response_length) + sizeof(resp.response_length); in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); err = -ENOMEM; if (!in_mad || !out_mad) goto out; ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS, 1, NULL, NULL, in_mad, out_mad); if (err) goto out; memset(props, 0, sizeof *props); have_ib_ports = num_ib_ports(dev->dev); props->fw_ver = dev->dev->caps.fw_ver; props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN; props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK; if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR) props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR) props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports) props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT) props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; if (dev->dev->caps.max_gso_sz && (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) && (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)) props->kernel_cap_flags |= IBK_UD_TSO; if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY) props->kernel_cap_flags |= IBK_LOCAL_DMA_LKEY; if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) && (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) && (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR)) props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) props->device_cap_flags |= IB_DEVICE_XRC; if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW) props->device_cap_flags |= IB_DEVICE_MEM_WINDOW; if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) { if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B) props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; else props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; } if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 0xffffff; props->vendor_part_id = dev->dev->persist->pdev->device; props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); memcpy(&props->sys_image_guid, out_mad->data + 4, 8); props->max_mr_size = ~0ull; props->page_size_cap = dev->dev->caps.page_size_cap; props->max_qp = dev->dev->quotas.qp; props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; 
props->max_send_sge = min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg); props->max_recv_sge = min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg); props->max_sge_rd = MLX4_MAX_SGE_RD; props->max_cq = dev->dev->quotas.cq; props->max_cqe = dev->dev->caps.max_cqes; props->max_mr = dev->dev->quotas.mpt; props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds; props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma; props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma; props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; props->max_srq = dev->dev->quotas.srq; props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1; props->max_srq_sge = dev->dev->caps.max_srq_sge; props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES; props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay; props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ? IB_ATOMIC_HCA : IB_ATOMIC_NONE; props->masked_atomic_cap = props->atomic_cap; props->max_pkeys = dev->dev->caps.pkey_table_len[1]; props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms; props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm; props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL; props->timestamp_mask = 0xFFFFFFFFFFFFULL; props->max_ah = INT_MAX; if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET || mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) { if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) { props->rss_caps.max_rwq_indirection_tables = props->max_qp; props->rss_caps.max_rwq_indirection_table_size = dev->dev->caps.max_rss_tbl_sz; props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET; props->max_wq_type_rq = props->max_qp; } if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS; } props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT; props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD; if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) { resp.response_length += sizeof(resp.hca_core_clock_offset); if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) { resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET; resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE; } } if (uhw->outlen >= resp.response_length + sizeof(resp.max_inl_recv_sz)) { resp.response_length += sizeof(resp.max_inl_recv_sz); resp.max_inl_recv_sz = dev->dev->caps.max_rq_sg * sizeof(struct mlx4_wqe_data_seg); } if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) { if (props->rss_caps.supported_qpts) { resp.rss_caps.rx_hash_function = MLX4_IB_RX_HASH_FUNC_TOEPLITZ; resp.rss_caps.rx_hash_fields_mask = MLX4_IB_RX_HASH_SRC_IPV4 | MLX4_IB_RX_HASH_DST_IPV4 | MLX4_IB_RX_HASH_SRC_IPV6 | MLX4_IB_RX_HASH_DST_IPV6 | MLX4_IB_RX_HASH_SRC_PORT_TCP | MLX4_IB_RX_HASH_DST_PORT_TCP | MLX4_IB_RX_HASH_SRC_PORT_UDP | MLX4_IB_RX_HASH_DST_PORT_UDP; if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) resp.rss_caps.rx_hash_fields_mask |= MLX4_IB_RX_HASH_INNER; } resp.response_length = offsetof(typeof(resp), rss_caps) + sizeof(resp.rss_caps); } if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) { if (dev->dev->caps.max_gso_sz && ((mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) || (mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET))) { resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz; resp.tso_caps.supported_qpts |= 1 << 
IB_QPT_RAW_PACKET; } resp.response_length = offsetof(typeof(resp), tso_caps) + sizeof(resp.tso_caps); } if (uhw->outlen) { err = ib_copy_to_udata(uhw, &resp, resp.response_length); if (err) goto out; } out: kfree(in_mad); kfree(out_mad); return err; } static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num) { struct mlx4_dev *dev = to_mdev(device)->dev; return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ? IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET; } static int ib_link_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props, int netw_view) { struct ib_smp *in_mad; struct ib_smp *out_mad; int ext_active_speed; int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; int err = -ENOMEM; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view) mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW; err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL, in_mad, out_mad); if (err) goto out; props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); props->lmc = out_mad->data[34] & 0x7; props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); props->sm_sl = out_mad->data[36] & 0xf; props->state = out_mad->data[32] & 0xf; props->phys_state = out_mad->data[33] >> 4; props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); if (netw_view) props->gid_tbl_len = out_mad->data[50]; else props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz; props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port]; props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); props->active_width = out_mad->data[31] & 0xf; props->active_speed = out_mad->data[35] >> 4; props->max_mtu = out_mad->data[41] & 0xf; props->active_mtu = out_mad->data[36] >> 4; props->subnet_timeout = out_mad->data[51] & 0x1f; props->max_vl_num = out_mad->data[37] >> 4; props->init_type_reply = out_mad->data[41] >> 4; /* Check if extended speeds (EDR/FDR/...) are supported */ if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) { ext_active_speed = out_mad->data[62] >> 4; switch (ext_active_speed) { case 1: props->active_speed = IB_SPEED_FDR; break; case 2: props->active_speed = IB_SPEED_EDR; break; } } /* If reported active speed is QDR, check if is FDR-10 */ if (props->active_speed == IB_SPEED_QDR) { ib_init_query_mad(in_mad); in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL, in_mad, out_mad); if (err) goto out; /* Checking LinkSpeedActive for FDR-10 */ if (out_mad->data[15] & 0x1) props->active_speed = IB_SPEED_FDR10; } /* Avoid wrong speed value returned by FW if the IB link is down. */ if (props->state == IB_PORT_DOWN) props->active_speed = IB_SPEED_SDR; out: kfree(in_mad); kfree(out_mad); return err; } static u8 state_to_phys_state(enum ib_port_state state) { return state == IB_PORT_ACTIVE ? 
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED; } static int eth_link_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props) { struct mlx4_ib_dev *mdev = to_mdev(ibdev); struct mlx4_ib_iboe *iboe = &mdev->iboe; struct net_device *ndev; enum ib_mtu tmp; struct mlx4_cmd_mailbox *mailbox; int err = 0; int is_bonded = mlx4_is_bonded(mdev->dev); mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); if (err) goto out; props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) || (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ? IB_WIDTH_4X : IB_WIDTH_1X; props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ? IB_SPEED_FDR : IB_SPEED_QDR; props->port_cap_flags = IB_PORT_CM_SUP; props->ip_gids = true; props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; props->max_msg_sz = mdev->dev->caps.max_msg_sz; if (mdev->dev->caps.pkey_table_len[port]) props->pkey_tbl_len = 1; props->max_mtu = IB_MTU_4096; props->max_vl_num = 2; props->state = IB_PORT_DOWN; props->phys_state = state_to_phys_state(props->state); props->active_mtu = IB_MTU_256; spin_lock_bh(&iboe->lock); ndev = iboe->netdevs[port - 1]; if (ndev && is_bonded) { rcu_read_lock(); /* required to get upper dev */ ndev = netdev_master_upper_dev_get_rcu(ndev); rcu_read_unlock(); } if (!ndev) goto out_unlock; tmp = iboe_get_mtu(ndev->mtu); props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256; props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ? IB_PORT_ACTIVE : IB_PORT_DOWN; props->phys_state = state_to_phys_state(props->state); out_unlock: spin_unlock_bh(&iboe->lock); out: mlx4_free_cmd_mailbox(mdev->dev, mailbox); return err; } int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props, int netw_view) { int err; /* props being zeroed by the caller, avoid zeroing it here */ err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ? 
ib_link_query_port(ibdev, port, props, netw_view) : eth_link_query_port(ibdev, port, props); return err; } static int mlx4_ib_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props) { /* returns host view */ return __mlx4_ib_query_port(ibdev, port, props, 0); } int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index, union ib_gid *gid, int netw_view) { struct ib_smp *in_mad; struct ib_smp *out_mad; int err = -ENOMEM; struct mlx4_ib_dev *dev = to_mdev(ibdev); int clear = 0; int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); if (mlx4_is_mfunc(dev->dev) && netw_view) mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW; err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad); if (err) goto out; memcpy(gid->raw, out_mad->data + 8, 8); if (mlx4_is_mfunc(dev->dev) && !netw_view) { if (index) { /* For any index > 0, return the null guid */ err = 0; clear = 1; goto out; } } ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; in_mad->attr_mod = cpu_to_be32(index / 8); err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad); if (err) goto out; memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); out: if (clear) memset(gid->raw + 8, 0, 8); kfree(in_mad); kfree(out_mad); return err; } static int mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index, union ib_gid *gid) { if (rdma_protocol_ib(ibdev, port)) return __mlx4_ib_query_gid(ibdev, port, index, gid, 0); return 0; } static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port, u64 *sl2vl_tbl) { union sl2vl_tbl_to_u64 sl2vl64; struct ib_smp *in_mad; struct ib_smp *out_mad; int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; int err = -ENOMEM; int jj; if (mlx4_is_slave(to_mdev(ibdev)->dev)) { *sl2vl_tbl = 0; return 0; } in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); if (!in_mad || !out_mad) goto out; ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE; in_mad->attr_mod = 0; if (mlx4_is_mfunc(to_mdev(ibdev)->dev)) mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW; err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL, in_mad, out_mad); if (err) goto out; for (jj = 0; jj < 8; jj++) sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj]; *sl2vl_tbl = sl2vl64.sl64; out: kfree(in_mad); kfree(out_mad); return err; } static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev) { u64 sl2vl; int i; int err; for (i = 1; i <= mdev->dev->caps.num_ports; i++) { if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) continue; err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl); if (err) { pr_err("Unable to get default sl to vl mapping for port %d. 
Using all zeroes (%d)\n", i, err); sl2vl = 0; } atomic64_set(&mdev->sl2vl[i - 1], sl2vl); } } int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey, int netw_view) { struct ib_smp *in_mad; struct ib_smp *out_mad; int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; int err = -ENOMEM; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; in_mad->attr_mod = cpu_to_be32(index / 32); if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view) mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW; err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL, in_mad, out_mad); if (err) goto out; *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]); out: kfree(in_mad); kfree(out_mad); return err; } static int mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey) { return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0); } static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask, struct ib_device_modify *props) { struct mlx4_cmd_mailbox *mailbox; unsigned long flags; if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) return -EOPNOTSUPP; if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) return 0; if (mlx4_is_slave(to_mdev(ibdev)->dev)) return -EOPNOTSUPP; spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags); memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX); spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags); /* * If possible, pass node desc to FW, so it can generate * a 144 trap. If cmd fails, just ignore. */ mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev); if (IS_ERR(mailbox)) return 0; memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX); mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0, MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox); return 0; } static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u32 port, int reset_qkey_viols, u32 cap_mask) { struct mlx4_cmd_mailbox *mailbox; int err; mailbox = mlx4_alloc_cmd_mailbox(dev->dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { *(u8 *) mailbox->buf = !!reset_qkey_viols << 6; ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask); } else { ((u8 *) mailbox->buf)[3] = !!reset_qkey_viols; ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask); } err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); mlx4_free_cmd_mailbox(dev->dev, mailbox); return err; } static int mlx4_ib_modify_port(struct ib_device *ibdev, u32 port, int mask, struct ib_port_modify *props) { struct mlx4_ib_dev *mdev = to_mdev(ibdev); u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH; struct ib_port_attr attr; u32 cap_mask; int err; /* return OK if this is RoCE. CM calls ib_modify_port() regardless * of whether port link layer is ETH or IB. For ETH ports, qkey * violations and port capabilities are not meaningful. 
*/ if (is_eth) return 0; mutex_lock(&mdev->cap_mask_mutex); err = ib_query_port(ibdev, port, &attr); if (err) goto out; cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) & ~props->clr_port_cap_mask; err = mlx4_ib_SET_PORT(mdev, port, !!(mask & IB_PORT_RESET_QKEY_CNTR), cap_mask); out: mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); return err; } static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) { struct ib_device *ibdev = uctx->device; struct mlx4_ib_dev *dev = to_mdev(ibdev); struct mlx4_ib_ucontext *context = to_mucontext(uctx); struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3; struct mlx4_ib_alloc_ucontext_resp resp; int err; if (!dev->ib_active) return -EAGAIN; if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) { resp_v3.qp_tab_size = dev->dev->caps.num_qps; resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size; resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page; } else { resp.dev_caps = dev->dev->caps.userspace_caps; resp.qp_tab_size = dev->dev->caps.num_qps; resp.bf_reg_size = dev->dev->caps.bf_reg_size; resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page; resp.cqe_size = dev->dev->caps.cqe_size; } err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar); if (err) return err; INIT_LIST_HEAD(&context->db_page_list); mutex_init(&context->db_page_mutex); INIT_LIST_HEAD(&context->wqn_ranges_list); mutex_init(&context->wqn_ranges_mutex); if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3)); else err = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (err) { mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar); return -EFAULT; } return err; } static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) { struct mlx4_ib_ucontext *context = to_mucontext(ibcontext); mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar); } static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) { } static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { struct mlx4_ib_dev *dev = to_mdev(context->device); switch (vma->vm_pgoff) { case 0: return rdma_user_mmap_io(context, vma, to_mucontext(context)->uar.pfn, PAGE_SIZE, pgprot_noncached(vma->vm_page_prot), NULL); case 1: if (dev->dev->caps.bf_reg_size == 0) return -EINVAL; return rdma_user_mmap_io( context, vma, to_mucontext(context)->uar.pfn + dev->dev->caps.num_uars, PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot), NULL); case 3: { struct mlx4_clock_params params; int ret; ret = mlx4_get_internal_clock_params(dev->dev, &params); if (ret) return ret; return rdma_user_mmap_io( context, vma, (pci_resource_start(dev->dev->persist->pdev, params.bar) + params.offset) >> PAGE_SHIFT, PAGE_SIZE, pgprot_noncached(vma->vm_page_prot), NULL); } default: return -EINVAL; } } static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct mlx4_ib_pd *pd = to_mpd(ibpd); struct ib_device *ibdev = ibpd->device; int err; err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn); if (err) return err; if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) { mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn); return -EFAULT; } return 0; } static int mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) { mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn); return 0; } static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device); struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd); struct 
ib_cq_init_attr cq_attr = {}; int err; if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) return -EOPNOTSUPP; err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn); if (err) return err; xrcd->pd = ib_alloc_pd(ibxrcd->device, 0); if (IS_ERR(xrcd->pd)) { err = PTR_ERR(xrcd->pd); goto err2; } cq_attr.cqe = 1; xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr); if (IS_ERR(xrcd->cq)) { err = PTR_ERR(xrcd->cq); goto err3; } return 0; err3: ib_dealloc_pd(xrcd->pd); err2: mlx4_xrcd_free(dev->dev, xrcd->xrcdn); return err; } static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata) { ib_destroy_cq(to_mxrcd(xrcd)->cq); ib_dealloc_pd(to_mxrcd(xrcd)->pd); mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn); return 0; } static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid) { struct mlx4_ib_qp *mqp = to_mqp(ibqp); struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); struct mlx4_ib_gid_entry *ge; ge = kzalloc(sizeof *ge, GFP_KERNEL); if (!ge) return -ENOMEM; ge->gid = *gid; if (mlx4_ib_add_mc(mdev, mqp, gid)) { ge->port = mqp->port; ge->added = 1; } mutex_lock(&mqp->mutex); list_add_tail(&ge->list, &mqp->gid_list); mutex_unlock(&mqp->mutex); return 0; } static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev, struct mlx4_ib_counters *ctr_table) { struct counter_index *counter, *tmp_count; mutex_lock(&ctr_table->mutex); list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list, list) { if (counter->allocated) mlx4_counter_free(ibdev->dev, counter->index); list_del(&counter->list); kfree(counter); } mutex_unlock(&ctr_table->mutex); } int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, union ib_gid *gid) { struct net_device *ndev; int ret = 0; if (!mqp->port) return 0; spin_lock_bh(&mdev->iboe.lock); ndev = mdev->iboe.netdevs[mqp->port - 1]; dev_hold(ndev); spin_unlock_bh(&mdev->iboe.lock); if (ndev) { ret = 1; dev_put(ndev); } return ret; } struct mlx4_ib_steering { struct list_head list; struct mlx4_flow_reg_id reg_id; union ib_gid gid; }; #define LAST_ETH_FIELD vlan_tag #define LAST_IB_FIELD sl #define LAST_IPV4_FIELD dst_ip #define LAST_TCP_UDP_FIELD src_port /* Field is the last supported field */ #define FIELDS_NOT_SUPPORTED(filter, field)\ memchr_inv((void *)&filter.field +\ sizeof(filter.field), 0,\ sizeof(filter) -\ offsetof(typeof(filter), field) -\ sizeof(filter.field)) static int parse_flow_attr(struct mlx4_dev *dev, u32 qp_num, union ib_flow_spec *ib_spec, struct _rule_hw *mlx4_spec) { enum mlx4_net_trans_rule_id type; switch (ib_spec->type) { case IB_FLOW_SPEC_ETH: if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)) return -ENOTSUPP; type = MLX4_NET_TRANS_RULE_ID_ETH; memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac, ETH_ALEN); memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac, ETH_ALEN); mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag; mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag; break; case IB_FLOW_SPEC_IB: if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD)) return -ENOTSUPP; type = MLX4_NET_TRANS_RULE_ID_IB; mlx4_spec->ib.l3_qpn = cpu_to_be32(qp_num); mlx4_spec->ib.qpn_mask = cpu_to_be32(MLX4_IB_FLOW_QPN_MASK); break; case IB_FLOW_SPEC_IPV4: if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD)) return -ENOTSUPP; type = MLX4_NET_TRANS_RULE_ID_IPV4; mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip; mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip; mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip; mlx4_spec->ipv4.dst_ip_msk = 
ib_spec->ipv4.mask.dst_ip; break; case IB_FLOW_SPEC_TCP: case IB_FLOW_SPEC_UDP: if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD)) return -ENOTSUPP; type = ib_spec->type == IB_FLOW_SPEC_TCP ? MLX4_NET_TRANS_RULE_ID_TCP : MLX4_NET_TRANS_RULE_ID_UDP; mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port; mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port; mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port; mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port; break; default: return -EINVAL; } if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 || mlx4_hw_rule_sz(dev, type) < 0) return -EINVAL; mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type)); mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2; return mlx4_hw_rule_sz(dev, type); } struct default_rules { __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS]; __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS]; __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS]; __u8 link_layer; }; static const struct default_rules default_table[] = { { .mandatory_fields = {IB_FLOW_SPEC_IPV4}, .mandatory_not_fields = {IB_FLOW_SPEC_ETH}, .rules_create_list = {IB_FLOW_SPEC_IB}, .link_layer = IB_LINK_LAYER_INFINIBAND } }; static int __mlx4_ib_default_rules_match(struct ib_qp *qp, struct ib_flow_attr *flow_attr) { int i, j, k; void *ib_flow; const struct default_rules *pdefault_rules = default_table; u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port); for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) { __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS]; memset(&field_types, 0, sizeof(field_types)); if (link_layer != pdefault_rules->link_layer) continue; ib_flow = flow_attr + 1; /* we assume the specs are sorted */ for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS && j < flow_attr->num_of_specs; k++) { union ib_flow_spec *current_flow = (union ib_flow_spec *)ib_flow; /* same layer but different type */ if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) == (pdefault_rules->mandatory_fields[k] & IB_FLOW_SPEC_LAYER_MASK)) && (current_flow->type != pdefault_rules->mandatory_fields[k])) goto out; /* same layer, try match next one */ if (current_flow->type == pdefault_rules->mandatory_fields[k]) { j++; ib_flow += ((union ib_flow_spec *)ib_flow)->size; } } ib_flow = flow_attr + 1; for (j = 0; j < flow_attr->num_of_specs; j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size) for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++) /* same layer and same type */ if (((union ib_flow_spec *)ib_flow)->type == pdefault_rules->mandatory_not_fields[k]) goto out; return i; } out: return -1; } static int __mlx4_ib_create_default_rules( struct mlx4_ib_dev *mdev, struct ib_qp *qp, const struct default_rules *pdefault_rules, struct _rule_hw *mlx4_spec) { int size = 0; int i; for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) { union ib_flow_spec ib_spec = {}; int ret; switch (pdefault_rules->rules_create_list[i]) { case 0: /* no rule */ continue; case IB_FLOW_SPEC_IB: ib_spec.type = IB_FLOW_SPEC_IB; ib_spec.size = sizeof(struct ib_flow_spec_ib); break; default: /* invalid rule */ return -EINVAL; } /* We must put empty rule, qpn is being ignored */ ret = parse_flow_attr(mdev->dev, 0, &ib_spec, mlx4_spec); if (ret < 0) { pr_info("invalid parsing\n"); return -EINVAL; } mlx4_spec = (void *)mlx4_spec + ret; size += ret; } return size; } static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, int domain, enum mlx4_net_trans_promisc_mode 
flow_type, u64 *reg_id) { int ret, i; int size = 0; void *ib_flow; struct mlx4_ib_dev *mdev = to_mdev(qp->device); struct mlx4_cmd_mailbox *mailbox; struct mlx4_net_trans_rule_hw_ctrl *ctrl; int default_flow; if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) { pr_err("Invalid priority value %d\n", flow_attr->priority); return -EINVAL; } if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0) return -EINVAL; mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); ctrl = mailbox->buf; ctrl->prio = cpu_to_be16(domain | flow_attr->priority); ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type); ctrl->port = flow_attr->port; ctrl->qpn = cpu_to_be32(qp->qp_num); ib_flow = flow_attr + 1; size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); /* Add default flows */ default_flow = __mlx4_ib_default_rules_match(qp, flow_attr); if (default_flow >= 0) { ret = __mlx4_ib_create_default_rules( mdev, qp, default_table + default_flow, mailbox->buf + size); if (ret < 0) { mlx4_free_cmd_mailbox(mdev->dev, mailbox); return -EINVAL; } size += ret; } for (i = 0; i < flow_attr->num_of_specs; i++) { ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow, mailbox->buf + size); if (ret < 0) { mlx4_free_cmd_mailbox(mdev->dev, mailbox); return -EINVAL; } ib_flow += ((union ib_flow_spec *) ib_flow)->size; size += ret; } if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR && flow_attr->num_of_specs == 1) { struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1); enum ib_flow_spec_type header_spec = ((union ib_flow_spec *)(flow_attr + 1))->type; if (header_spec == IB_FLOW_SPEC_ETH) mlx4_handle_eth_header_mcast_prio(ctrl, rule_header); } ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0, MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (ret == -ENOMEM) pr_err("mcg table is full. Fail to register network rule.\n"); else if (ret == -ENXIO) pr_err("Device managed flow steering is disabled. Fail to register network rule.\n"); else if (ret) pr_err("Invalid argument. Fail to register network rule.\n"); mlx4_free_cmd_mailbox(mdev->dev, mailbox); return ret; } static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id) { int err; err = mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) pr_err("Fail to detach network rule. 
registration id = 0x%llx\n", reg_id); return err; } static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr, u64 *reg_id) { void *ib_flow; union ib_flow_spec *ib_spec; struct mlx4_dev *dev = to_mdev(qp->device)->dev; int err = 0; if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) return 0; /* do nothing */ ib_flow = flow_attr + 1; ib_spec = (union ib_flow_spec *)ib_flow; if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1) return 0; /* do nothing */ err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac, flow_attr->port, qp->qp_num, MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff), reg_id); return err; } static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev, struct ib_flow_attr *flow_attr, enum mlx4_net_trans_promisc_mode *type) { int err = 0; if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) || (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) || (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) { return -EOPNOTSUPP; } if (flow_attr->num_of_specs == 0) { type[0] = MLX4_FS_MC_SNIFFER; type[1] = MLX4_FS_UC_SNIFFER; } else { union ib_flow_spec *ib_spec; ib_spec = (union ib_flow_spec *)(flow_attr + 1); if (ib_spec->type != IB_FLOW_SPEC_ETH) return -EINVAL; /* if all is zero than MC and UC */ if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) { type[0] = MLX4_FS_MC_SNIFFER; type[1] = MLX4_FS_UC_SNIFFER; } else { u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01, ib_spec->eth.mask.dst_mac[1], ib_spec->eth.mask.dst_mac[2], ib_spec->eth.mask.dst_mac[3], ib_spec->eth.mask.dst_mac[4], ib_spec->eth.mask.dst_mac[5]}; /* Above xor was only on MC bit, non empty mask is valid * only if this bit is set and rest are zero. 
*/ if (!is_zero_ether_addr(&mac[0])) return -EINVAL; if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac)) type[0] = MLX4_FS_MC_SNIFFER; else type[0] = MLX4_FS_UC_SNIFFER; } } return err; } static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, struct ib_udata *udata) { int err = 0, i = 0, j = 0; struct mlx4_ib_flow *mflow; enum mlx4_net_trans_promisc_mode type[2]; struct mlx4_dev *dev = (to_mdev(qp->device))->dev; int is_bonded = mlx4_is_bonded(dev); if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP) return ERR_PTR(-EOPNOTSUPP); if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && (flow_attr->type != IB_FLOW_ATTR_NORMAL)) return ERR_PTR(-EOPNOTSUPP); if (udata && udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen)) return ERR_PTR(-EOPNOTSUPP); memset(type, 0, sizeof(type)); mflow = kzalloc(sizeof(*mflow), GFP_KERNEL); if (!mflow) { err = -ENOMEM; goto err_free; } switch (flow_attr->type) { case IB_FLOW_ATTR_NORMAL: /* If dont trap flag (continue match) is set, under specific * condition traffic be replicated to given qp, * without stealing it */ if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) { err = mlx4_ib_add_dont_trap_rule(dev, flow_attr, type); if (err) goto err_free; } else { type[0] = MLX4_FS_REGULAR; } break; case IB_FLOW_ATTR_ALL_DEFAULT: type[0] = MLX4_FS_ALL_DEFAULT; break; case IB_FLOW_ATTR_MC_DEFAULT: type[0] = MLX4_FS_MC_DEFAULT; break; case IB_FLOW_ATTR_SNIFFER: type[0] = MLX4_FS_MIRROR_RX_PORT; type[1] = MLX4_FS_MIRROR_SX_PORT; break; default: err = -EINVAL; goto err_free; } while (i < ARRAY_SIZE(type) && type[i]) { err = __mlx4_ib_create_flow(qp, flow_attr, MLX4_DOMAIN_UVERBS, type[i], &mflow->reg_id[i].id); if (err) goto err_create_flow; if (is_bonded) { /* Application always sees one port so the mirror rule * must be on port #2 */ flow_attr->port = 2; err = __mlx4_ib_create_flow(qp, flow_attr, MLX4_DOMAIN_UVERBS, type[j], &mflow->reg_id[j].mirror); flow_attr->port = 1; if (err) goto err_create_flow; j++; } i++; } if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) { err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i].id); if (err) goto err_create_flow; if (is_bonded) { flow_attr->port = 2; err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[j].mirror); flow_attr->port = 1; if (err) goto err_create_flow; j++; } /* function to create mirror rule */ i++; } return &mflow->ibflow; err_create_flow: while (i) { (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i].id); i--; } while (j) { (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[j].mirror); j--; } err_free: kfree(mflow); return ERR_PTR(err); } static int mlx4_ib_destroy_flow(struct ib_flow *flow_id) { int err, ret = 0; int i = 0; struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device); struct mlx4_ib_flow *mflow = to_mflow(flow_id); while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) { err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id); if (err) ret = err; if (mflow->reg_id[i].mirror) { err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].mirror); if (err) ret = err; } i++; } kfree(mflow); return ret; } static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { int err; struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); struct mlx4_dev *dev = mdev->dev; struct mlx4_ib_qp *mqp = to_mqp(ibqp); struct mlx4_ib_steering *ib_steering = NULL; enum mlx4_protocol prot = MLX4_PROT_IB_IPV6; struct mlx4_flow_reg_id reg_id; if 
(mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL); if (!ib_steering) return -ENOMEM; } err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port, !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), prot, &reg_id.id); if (err) { pr_err("multicast attach op failed, err %d\n", err); goto err_malloc; } reg_id.mirror = 0; if (mlx4_is_bonded(dev)) { err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, (mqp->port == 1) ? 2 : 1, !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), prot, &reg_id.mirror); if (err) goto err_add; } err = add_gid_entry(ibqp, gid); if (err) goto err_add; if (ib_steering) { memcpy(ib_steering->gid.raw, gid->raw, 16); ib_steering->reg_id = reg_id; mutex_lock(&mqp->mutex); list_add(&ib_steering->list, &mqp->steering_rules); mutex_unlock(&mqp->mutex); } return 0; err_add: mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, prot, reg_id.id); if (reg_id.mirror) mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, prot, reg_id.mirror); err_malloc: kfree(ib_steering); return err; } static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw) { struct mlx4_ib_gid_entry *ge; struct mlx4_ib_gid_entry *tmp; struct mlx4_ib_gid_entry *ret = NULL; list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { if (!memcmp(raw, ge->gid.raw, 16)) { ret = ge; break; } } return ret; } static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { int err; struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); struct mlx4_dev *dev = mdev->dev; struct mlx4_ib_qp *mqp = to_mqp(ibqp); struct net_device *ndev; struct mlx4_ib_gid_entry *ge; struct mlx4_flow_reg_id reg_id = {0, 0}; enum mlx4_protocol prot = MLX4_PROT_IB_IPV6; if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { struct mlx4_ib_steering *ib_steering; mutex_lock(&mqp->mutex); list_for_each_entry(ib_steering, &mqp->steering_rules, list) { if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) { list_del(&ib_steering->list); break; } } mutex_unlock(&mqp->mutex); if (&ib_steering->list == &mqp->steering_rules) { pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n"); return -EINVAL; } reg_id = ib_steering->reg_id; kfree(ib_steering); } err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, prot, reg_id.id); if (err) return err; if (mlx4_is_bonded(dev)) { err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, prot, reg_id.mirror); if (err) return err; } mutex_lock(&mqp->mutex); ge = find_gid_entry(mqp, gid->raw); if (ge) { spin_lock_bh(&mdev->iboe.lock); ndev = ge->added ? 
mdev->iboe.netdevs[ge->port - 1] : NULL; dev_hold(ndev); spin_unlock_bh(&mdev->iboe.lock); dev_put(ndev); list_del(&ge->list); kfree(ge); } else pr_warn("could not find mgid entry\n"); mutex_unlock(&mqp->mutex); return 0; } static int init_node_data(struct mlx4_ib_dev *dev) { struct ib_smp *in_mad; struct ib_smp *out_mad; int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; int err = -ENOMEM; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; if (mlx4_is_master(dev->dev)) mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW; err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad); if (err) goto out; memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad); if (err) goto out; dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32)); memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); out: kfree(in_mad); kfree(out_mad); return err; } static ssize_t hca_type_show(struct device *device, struct device_attribute *attr, char *buf) { struct mlx4_ib_dev *dev = rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev); return sysfs_emit(buf, "MT%d\n", dev->dev->persist->pdev->device); } static DEVICE_ATTR_RO(hca_type); static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, char *buf) { struct mlx4_ib_dev *dev = rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev); return sysfs_emit(buf, "%x\n", dev->dev->rev_id); } static DEVICE_ATTR_RO(hw_rev); static ssize_t board_id_show(struct device *device, struct device_attribute *attr, char *buf) { struct mlx4_ib_dev *dev = rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev); return sysfs_emit(buf, "%.*s\n", MLX4_BOARD_ID_LEN, dev->dev->board_id); } static DEVICE_ATTR_RO(board_id); static struct attribute *mlx4_class_attributes[] = { &dev_attr_hw_rev.attr, &dev_attr_hca_type.attr, &dev_attr_board_id.attr, NULL }; static const struct attribute_group mlx4_attr_group = { .attrs = mlx4_class_attributes, }; struct diag_counter { const char *name; u32 offset; }; #define DIAG_COUNTER(_name, _offset) \ { .name = #_name, .offset = _offset } static const struct diag_counter diag_basic[] = { DIAG_COUNTER(rq_num_lle, 0x00), DIAG_COUNTER(sq_num_lle, 0x04), DIAG_COUNTER(rq_num_lqpoe, 0x08), DIAG_COUNTER(sq_num_lqpoe, 0x0C), DIAG_COUNTER(rq_num_lpe, 0x18), DIAG_COUNTER(sq_num_lpe, 0x1C), DIAG_COUNTER(rq_num_wrfe, 0x20), DIAG_COUNTER(sq_num_wrfe, 0x24), DIAG_COUNTER(sq_num_mwbe, 0x2C), DIAG_COUNTER(sq_num_bre, 0x34), DIAG_COUNTER(sq_num_rire, 0x44), DIAG_COUNTER(rq_num_rire, 0x48), DIAG_COUNTER(sq_num_rae, 0x4C), DIAG_COUNTER(rq_num_rae, 0x50), DIAG_COUNTER(sq_num_roe, 0x54), DIAG_COUNTER(sq_num_tree, 0x5C), DIAG_COUNTER(sq_num_rree, 0x64), DIAG_COUNTER(rq_num_rnr, 0x68), DIAG_COUNTER(sq_num_rnr, 0x6C), DIAG_COUNTER(rq_num_oos, 0x100), DIAG_COUNTER(sq_num_oos, 0x104), }; static const struct diag_counter diag_ext[] = { DIAG_COUNTER(rq_num_dup, 0x130), DIAG_COUNTER(sq_num_to, 0x134), }; static const struct diag_counter diag_device_only[] = { DIAG_COUNTER(num_cqovf, 0x1A0), DIAG_COUNTER(rq_num_udsdprd, 0x118), }; static struct rdma_hw_stats * mlx4_ib_alloc_hw_device_stats(struct ib_device *ibdev) { struct mlx4_ib_dev *dev = to_mdev(ibdev); struct mlx4_ib_diag_counters *diag = dev->diag_counters; if (!diag[0].descs) return NULL; return 
rdma_alloc_hw_stats_struct(diag[0].descs, diag[0].num_counters, RDMA_HW_STATS_DEFAULT_LIFESPAN); } static struct rdma_hw_stats * mlx4_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) { struct mlx4_ib_dev *dev = to_mdev(ibdev); struct mlx4_ib_diag_counters *diag = dev->diag_counters; if (!diag[1].descs) return NULL; return rdma_alloc_hw_stats_struct(diag[1].descs, diag[1].num_counters, RDMA_HW_STATS_DEFAULT_LIFESPAN); } static int mlx4_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, u32 port, int index) { struct mlx4_ib_dev *dev = to_mdev(ibdev); struct mlx4_ib_diag_counters *diag = dev->diag_counters; u32 hw_value[ARRAY_SIZE(diag_device_only) + ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {}; int ret; int i; ret = mlx4_query_diag_counters(dev->dev, MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS, diag[!!port].offset, hw_value, diag[!!port].num_counters, port); if (ret) return ret; for (i = 0; i < diag[!!port].num_counters; i++) stats->value[i] = hw_value[i]; return diag[!!port].num_counters; } static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev, struct rdma_stat_desc **pdescs, u32 **offset, u32 *num, bool port) { u32 num_counters; num_counters = ARRAY_SIZE(diag_basic); if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) num_counters += ARRAY_SIZE(diag_ext); if (!port) num_counters += ARRAY_SIZE(diag_device_only); *pdescs = kcalloc(num_counters, sizeof(struct rdma_stat_desc), GFP_KERNEL); if (!*pdescs) return -ENOMEM; *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL); if (!*offset) goto err; *num = num_counters; return 0; err: kfree(*pdescs); return -ENOMEM; } static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev, struct rdma_stat_desc *descs, u32 *offset, bool port) { int i; int j; for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) { descs[i].name = diag_basic[i].name; offset[i] = diag_basic[i].offset; } if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) { for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) { descs[j].name = diag_ext[i].name; offset[j] = diag_ext[i].offset; } } if (!port) { for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) { descs[j].name = diag_device_only[i].name; offset[j] = diag_device_only[i].offset; } } } static const struct ib_device_ops mlx4_ib_hw_stats_ops = { .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats, .alloc_hw_port_stats = mlx4_ib_alloc_hw_port_stats, .get_hw_stats = mlx4_ib_get_hw_stats, }; static const struct ib_device_ops mlx4_ib_hw_stats_ops1 = { .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats, .get_hw_stats = mlx4_ib_get_hw_stats, }; static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev) { struct mlx4_ib_diag_counters *diag = ibdev->diag_counters; int i; int ret; bool per_port = !!(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT); if (mlx4_is_slave(ibdev->dev)) return 0; for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { /* * i == 1 means we are building port counters, set a different * stats ops without port stats callback. 
*/ if (i && !per_port) { ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops1); return 0; } ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs, &diag[i].offset, &diag[i].num_counters, i); if (ret) goto err_alloc; mlx4_ib_fill_diag_counters(ibdev, diag[i].descs, diag[i].offset, i); } ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops); return 0; err_alloc: if (i) { kfree(diag[i - 1].descs); kfree(diag[i - 1].offset); } return ret; } static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev) { int i; for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { kfree(ibdev->diag_counters[i].offset); kfree(ibdev->diag_counters[i].descs); } } #define MLX4_IB_INVALID_MAC ((u64)-1) static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, struct net_device *dev, int port) { u64 new_smac = 0; u64 release_mac = MLX4_IB_INVALID_MAC; struct mlx4_ib_qp *qp; new_smac = ether_addr_to_u64(dev->dev_addr); atomic64_set(&ibdev->iboe.mac[port - 1], new_smac); /* no need for update QP1 and mac registration in non-SRIOV */ if (!mlx4_is_mfunc(ibdev->dev)) return; mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); qp = ibdev->qp1_proxy[port - 1]; if (qp) { int new_smac_index; u64 old_smac; struct mlx4_update_qp_params update_params; mutex_lock(&qp->mutex); old_smac = qp->pri.smac; if (new_smac == old_smac) goto unlock; new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac); if (new_smac_index < 0) goto unlock; update_params.smac_index = new_smac_index; if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC, &update_params)) { release_mac = new_smac; goto unlock; } /* if old port was zero, no mac was yet registered for this QP */ if (qp->pri.smac_port) release_mac = old_smac; qp->pri.smac = new_smac; qp->pri.smac_port = port; qp->pri.smac_index = new_smac_index; } unlock: if (release_mac != MLX4_IB_INVALID_MAC) mlx4_unregister_mac(ibdev->dev, port, release_mac); if (qp) mutex_unlock(&qp->mutex); mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); } static void mlx4_ib_scan_netdev(struct mlx4_ib_dev *ibdev, struct net_device *dev, unsigned long event) { struct mlx4_ib_iboe *iboe = &ibdev->iboe; ASSERT_RTNL(); if (dev->dev.parent != ibdev->ib_dev.dev.parent) return; spin_lock_bh(&iboe->lock); iboe->netdevs[dev->dev_port] = event != NETDEV_UNREGISTER ? dev : NULL; if (event == NETDEV_UP || event == NETDEV_DOWN) { enum ib_port_state port_state; struct ib_event ibev = { }; if (ib_get_cached_port_state(&ibdev->ib_dev, dev->dev_port + 1, &port_state)) goto iboe_out; if (event == NETDEV_UP && (port_state != IB_PORT_ACTIVE || iboe->last_port_state[dev->dev_port] != IB_PORT_DOWN)) goto iboe_out; if (event == NETDEV_DOWN && (port_state != IB_PORT_DOWN || iboe->last_port_state[dev->dev_port] != IB_PORT_ACTIVE)) goto iboe_out; iboe->last_port_state[dev->dev_port] = port_state; ibev.device = &ibdev->ib_dev; ibev.element.port_num = dev->dev_port + 1; ibev.event = event == NETDEV_UP ? 
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; ib_dispatch_event(&ibev); } iboe_out: spin_unlock_bh(&iboe->lock); if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || event == NETDEV_UP || event == NETDEV_CHANGE) mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1); } static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct mlx4_ib_dev *ibdev; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); mlx4_ib_scan_netdev(ibdev, dev, event); return NOTIFY_DONE; } static void init_pkeys(struct mlx4_ib_dev *ibdev) { int port; int slave; int i; if (mlx4_is_master(ibdev->dev)) { for (slave = 0; slave <= ibdev->dev->persist->num_vfs; ++slave) { for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { for (i = 0; i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; ++i) { ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] = /* master has the identity virt2phys pkey mapping */ (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i : ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1; mlx4_sync_pkey_table(ibdev->dev, slave, port, i, ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]); } } } /* initialize pkey cache */ for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { for (i = 0; i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; ++i) ibdev->pkeys.phys_pkey_cache[port-1][i] = (i) ? 0 : 0xFFFF; } } } static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) { int i, j, eq = 0, total_eqs = 0; ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors, sizeof(ibdev->eq_table[0]), GFP_KERNEL); if (!ibdev->eq_table) return; for (i = 1; i <= dev->caps.num_ports; i++) { for (j = 0; j < mlx4_get_eqs_per_port(dev, i); j++, total_eqs++) { if (i > 1 && mlx4_is_eq_shared(dev, total_eqs)) continue; ibdev->eq_table[eq] = total_eqs; if (!mlx4_assign_eq(dev, i, &ibdev->eq_table[eq])) eq++; else ibdev->eq_table[eq] = -1; } } for (i = eq; i < dev->caps.num_comp_vectors; ibdev->eq_table[i++] = -1) ; /* Advertise the new number of EQs to clients */ ibdev->ib_dev.num_comp_vectors = eq; } static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) { int i; int total_eqs = ibdev->ib_dev.num_comp_vectors; /* no eqs were allocated */ if (!ibdev->eq_table) return; /* Reset the advertised EQ number */ ibdev->ib_dev.num_comp_vectors = 0; for (i = 0; i < total_eqs; i++) mlx4_release_eq(dev, ibdev->eq_table[i]); kfree(ibdev->eq_table); ibdev->eq_table = NULL; } static int mlx4_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { struct ib_port_attr attr; struct mlx4_ib_dev *mdev = to_mdev(ibdev); int err; if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) { immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; immutable->max_mad_size = IB_MGMT_MAD_SIZE; } else { if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE | RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET; if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE | RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP)) immutable->max_mad_size = IB_MGMT_MAD_SIZE; } err = ib_query_port(ibdev, port_num, &attr); if (err) return err; immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; return 0; } 
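/*
 * The ib_device_ops tables below wire the mlx4 verbs entry points into the
 * RDMA core.  mlx4_ib_dev_ops is always registered from mlx4_ib_probe();
 * the WQ/RSS, memory-window, XRC and flow-steering op sets are only layered
 * on top when the corresponding device capabilities (and, for the WQ/RSS
 * set, an Ethernet port) are present.
 */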
static void get_fw_ver_str(struct ib_device *device, char *str) { struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev, ib_dev); snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d", (int) (dev->dev->caps.fw_ver >> 32), (int) (dev->dev->caps.fw_ver >> 16) & 0xffff, (int) dev->dev->caps.fw_ver & 0xffff); } static const struct ib_device_ops mlx4_ib_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_MLX4, .uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION, .add_gid = mlx4_ib_add_gid, .alloc_mr = mlx4_ib_alloc_mr, .alloc_pd = mlx4_ib_alloc_pd, .alloc_ucontext = mlx4_ib_alloc_ucontext, .attach_mcast = mlx4_ib_mcg_attach, .create_ah = mlx4_ib_create_ah, .create_cq = mlx4_ib_create_cq, .create_qp = mlx4_ib_create_qp, .create_srq = mlx4_ib_create_srq, .dealloc_pd = mlx4_ib_dealloc_pd, .dealloc_ucontext = mlx4_ib_dealloc_ucontext, .del_gid = mlx4_ib_del_gid, .dereg_mr = mlx4_ib_dereg_mr, .destroy_ah = mlx4_ib_destroy_ah, .destroy_cq = mlx4_ib_destroy_cq, .destroy_qp = mlx4_ib_destroy_qp, .destroy_srq = mlx4_ib_destroy_srq, .detach_mcast = mlx4_ib_mcg_detach, .device_group = &mlx4_attr_group, .disassociate_ucontext = mlx4_ib_disassociate_ucontext, .drain_rq = mlx4_ib_drain_rq, .drain_sq = mlx4_ib_drain_sq, .get_dev_fw_str = get_fw_ver_str, .get_dma_mr = mlx4_ib_get_dma_mr, .get_link_layer = mlx4_ib_port_link_layer, .get_netdev = mlx4_ib_get_netdev, .get_port_immutable = mlx4_port_immutable, .map_mr_sg = mlx4_ib_map_mr_sg, .mmap = mlx4_ib_mmap, .modify_cq = mlx4_ib_modify_cq, .modify_device = mlx4_ib_modify_device, .modify_port = mlx4_ib_modify_port, .modify_qp = mlx4_ib_modify_qp, .modify_srq = mlx4_ib_modify_srq, .poll_cq = mlx4_ib_poll_cq, .post_recv = mlx4_ib_post_recv, .post_send = mlx4_ib_post_send, .post_srq_recv = mlx4_ib_post_srq_recv, .process_mad = mlx4_ib_process_mad, .query_ah = mlx4_ib_query_ah, .query_device = mlx4_ib_query_device, .query_gid = mlx4_ib_query_gid, .query_pkey = mlx4_ib_query_pkey, .query_port = mlx4_ib_query_port, .query_qp = mlx4_ib_query_qp, .query_srq = mlx4_ib_query_srq, .reg_user_mr = mlx4_ib_reg_user_mr, .req_notify_cq = mlx4_ib_arm_cq, .rereg_user_mr = mlx4_ib_rereg_user_mr, .resize_cq = mlx4_ib_resize_cq, INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_qp, mlx4_ib_qp, ibqp), INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq), INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext), }; static const struct ib_device_ops mlx4_ib_dev_wq_ops = { .create_rwq_ind_table = mlx4_ib_create_rwq_ind_table, .create_wq = mlx4_ib_create_wq, .destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table, .destroy_wq = mlx4_ib_destroy_wq, .modify_wq = mlx4_ib_modify_wq, INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table, ib_rwq_ind_tbl), }; static const struct ib_device_ops mlx4_ib_dev_mw_ops = { .alloc_mw = mlx4_ib_alloc_mw, .dealloc_mw = mlx4_ib_dealloc_mw, INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw), }; static const struct ib_device_ops mlx4_ib_dev_xrc_ops = { .alloc_xrcd = mlx4_ib_alloc_xrcd, .dealloc_xrcd = mlx4_ib_dealloc_xrcd, INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd), }; static const struct ib_device_ops mlx4_ib_dev_fs_ops = { .create_flow = mlx4_ib_create_flow, .destroy_flow = mlx4_ib_destroy_flow, }; static int mlx4_ib_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev); struct mlx4_dev *dev = madev->mdev; struct mlx4_ib_dev *ibdev; int num_ports = 
0; int i, j; int err; struct mlx4_ib_iboe *iboe; int ib_num_ports = 0; int num_req_counters; int allocated; u32 counter_index; struct counter_index *new_counter_index; pr_info_once("%s", mlx4_ib_version); num_ports = 0; mlx4_foreach_ib_transport_port(i, dev) num_ports++; /* No point in registering a device with no ports... */ if (num_ports == 0) return -ENODEV; ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev); if (!ibdev) { dev_err(&dev->persist->pdev->dev, "Device struct alloc failed\n"); return -ENOMEM; } iboe = &ibdev->iboe; err = mlx4_pd_alloc(dev, &ibdev->priv_pdn); if (err) goto err_dealloc; err = mlx4_uar_alloc(dev, &ibdev->priv_uar); if (err) goto err_pd; ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); if (!ibdev->uar_map) { err = -ENOMEM; goto err_uar; } MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); ibdev->dev = dev; ibdev->bond_next_port = 0; ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey; ibdev->num_ports = num_ports; ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports; ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev; ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops); if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) && ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) == IB_LINK_LAYER_ETHERNET) || (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) == IB_LINK_LAYER_ETHERNET))) ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops); if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops); if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) { ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops); } if (check_flow_steering_support(dev)) { ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED; ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops); } if (!dev->caps.userspace_caps) ibdev->ib_dev.ops.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION; mlx4_ib_alloc_eqs(dev, ibdev); spin_lock_init(&iboe->lock); err = init_node_data(ibdev); if (err) goto err_map; mlx4_init_sl2vl_tbl(ibdev); for (i = 0; i < ibdev->num_ports; ++i) { mutex_init(&ibdev->counters_table[i].mutex); INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list); iboe->last_port_state[i] = IB_PORT_DOWN; } num_req_counters = mlx4_is_bonded(dev) ? 
1 : ibdev->num_ports; for (i = 0; i < num_req_counters; ++i) { mutex_init(&ibdev->qp1_proxy_lock[i]); allocated = 0; if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == IB_LINK_LAYER_ETHERNET) { err = mlx4_counter_alloc(ibdev->dev, &counter_index, MLX4_RES_USAGE_DRIVER); /* if failed to allocate a new counter, use default */ if (err) counter_index = mlx4_get_default_counter_index(dev, i + 1); else allocated = 1; } else { /* IB_LINK_LAYER_INFINIBAND use the default counter */ counter_index = mlx4_get_default_counter_index(dev, i + 1); } new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL); if (!new_counter_index) { err = -ENOMEM; if (allocated) mlx4_counter_free(ibdev->dev, counter_index); goto err_counter; } new_counter_index->index = counter_index; new_counter_index->allocated = allocated; list_add_tail(&new_counter_index->list, &ibdev->counters_table[i].counters_list); ibdev->counters_table[i].default_counter = counter_index; pr_info("counter index %d for port %d allocated %d\n", counter_index, i + 1, allocated); } if (mlx4_is_bonded(dev)) for (i = 1; i < ibdev->num_ports ; ++i) { new_counter_index = kmalloc(sizeof(struct counter_index), GFP_KERNEL); if (!new_counter_index) { err = -ENOMEM; goto err_counter; } new_counter_index->index = counter_index; new_counter_index->allocated = 0; list_add_tail(&new_counter_index->list, &ibdev->counters_table[i].counters_list); ibdev->counters_table[i].default_counter = counter_index; } mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) ib_num_ports++; spin_lock_init(&ibdev->sm_lock); mutex_init(&ibdev->cap_mask_mutex); INIT_LIST_HEAD(&ibdev->qp_list); spin_lock_init(&ibdev->reset_flow_resource_lock); if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED && ib_num_ports) { ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, MLX4_IB_UC_STEER_QPN_ALIGN, &ibdev->steer_qpn_base, 0, MLX4_RES_USAGE_DRIVER); if (err) goto err_counter; ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count, GFP_KERNEL); if (!ibdev->ib_uc_qpns_bitmap) { err = -ENOMEM; goto err_steer_qp_release; } if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) { bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count); err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE( dev, ibdev->steer_qpn_base, ibdev->steer_qpn_base + ibdev->steer_qpn_count - 1); if (err) goto err_steer_free_bitmap; } else { bitmap_fill(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count); } } for (j = 1; j <= ibdev->dev->caps.num_ports; j++) atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]); err = mlx4_ib_alloc_diag_counters(ibdev); if (err) goto err_steer_free_bitmap; err = ib_register_device(&ibdev->ib_dev, "mlx4_%d", &dev->persist->pdev->dev); if (err) goto err_diag_counters; err = mlx4_ib_mad_init(ibdev); if (err) goto err_reg; err = mlx4_ib_init_sriov(ibdev); if (err) goto err_mad; if (!iboe->nb.notifier_call) { iboe->nb.notifier_call = mlx4_ib_netdev_event; err = register_netdevice_notifier(&iboe->nb); if (err) { iboe->nb.notifier_call = NULL; goto err_notif; } } if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) { err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT); if (err) goto err_notif; } ibdev->ib_active = true; mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i), &ibdev->ib_dev); if (mlx4_is_mfunc(ibdev->dev)) init_pkeys(ibdev); /* create paravirt contexts for any VFs which are active */ if (mlx4_is_master(ibdev->dev)) { for (j = 0; j < MLX4_MFUNC_MAX; j++) { if (j 
== mlx4_master_func_num(ibdev->dev)) continue; if (mlx4_is_slave_active(ibdev->dev, j)) do_slave_init(ibdev, j, 1); } } /* register mlx4 core notifier */ ibdev->mlx_nb.notifier_call = mlx4_ib_event; err = mlx4_register_event_notifier(dev, &ibdev->mlx_nb); WARN(err, "failed to register mlx4 event notifier (%d)", err); auxiliary_set_drvdata(adev, ibdev); return 0; err_notif: if (ibdev->iboe.nb.notifier_call) { if (unregister_netdevice_notifier(&ibdev->iboe.nb)) pr_warn("failure unregistering notifier\n"); ibdev->iboe.nb.notifier_call = NULL; } flush_workqueue(wq); mlx4_ib_close_sriov(ibdev); err_mad: mlx4_ib_mad_cleanup(ibdev); err_reg: ib_unregister_device(&ibdev->ib_dev); err_diag_counters: mlx4_ib_diag_cleanup(ibdev); err_steer_free_bitmap: bitmap_free(ibdev->ib_uc_qpns_bitmap); err_steer_qp_release: mlx4_qp_release_range(dev, ibdev->steer_qpn_base, ibdev->steer_qpn_count); err_counter: for (i = 0; i < ibdev->num_ports; ++i) mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); err_map: mlx4_ib_free_eqs(dev, ibdev); iounmap(ibdev->uar_map); err_uar: mlx4_uar_free(dev, &ibdev->priv_uar); err_pd: mlx4_pd_free(dev, ibdev->priv_pdn); err_dealloc: ib_dealloc_device(&ibdev->ib_dev); return err; } int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn) { int offset; WARN_ON(!dev->ib_uc_qpns_bitmap); offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap, dev->steer_qpn_count, get_count_order(count)); if (offset < 0) return offset; *qpn = dev->steer_qpn_base + offset; return 0; } void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count) { if (!qpn || dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED) return; if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n", qpn, dev->steer_qpn_base)) /* not supposed to be here */ return; bitmap_release_region(dev->ib_uc_qpns_bitmap, qpn - dev->steer_qpn_base, get_count_order(count)); } int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, int is_attach) { int err; size_t flow_size; struct ib_flow_attr *flow; struct ib_flow_spec_ib *ib_spec; if (is_attach) { flow_size = sizeof(struct ib_flow_attr) + sizeof(struct ib_flow_spec_ib); flow = kzalloc(flow_size, GFP_KERNEL); if (!flow) return -ENOMEM; flow->port = mqp->port; flow->num_of_specs = 1; flow->size = flow_size; ib_spec = (struct ib_flow_spec_ib *)(flow + 1); ib_spec->type = IB_FLOW_SPEC_IB; ib_spec->size = sizeof(struct ib_flow_spec_ib); /* Add an empty rule for IB L2 */ memset(&ib_spec->mask, 0, sizeof(ib_spec->mask)); err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC, MLX4_FS_REGULAR, &mqp->reg_id); kfree(flow); return err; } return __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id); } static void mlx4_ib_remove(struct auxiliary_device *adev) { struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev); struct mlx4_dev *dev = madev->mdev; struct mlx4_ib_dev *ibdev = auxiliary_get_drvdata(adev); int p; int i; mlx4_unregister_event_notifier(dev, &ibdev->mlx_nb); mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) devlink_port_type_clear(mlx4_get_devlink_port(dev, i)); ibdev->ib_active = false; flush_workqueue(wq); if (ibdev->iboe.nb.notifier_call) { if (unregister_netdevice_notifier(&ibdev->iboe.nb)) pr_warn("failure unregistering notifier\n"); ibdev->iboe.nb.notifier_call = NULL; } mlx4_ib_close_sriov(ibdev); mlx4_ib_mad_cleanup(ibdev); ib_unregister_device(&ibdev->ib_dev); mlx4_ib_diag_cleanup(ibdev); mlx4_qp_release_range(dev, ibdev->steer_qpn_base, ibdev->steer_qpn_count); 
bitmap_free(ibdev->ib_uc_qpns_bitmap); iounmap(ibdev->uar_map); for (p = 0; p < ibdev->num_ports; ++p) mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]); mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB) mlx4_CLOSE_PORT(dev, p); mlx4_ib_free_eqs(dev, ibdev); mlx4_uar_free(dev, &ibdev->priv_uar); mlx4_pd_free(dev, ibdev->priv_pdn); ib_dealloc_device(&ibdev->ib_dev); } static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init) { struct mlx4_ib_demux_work **dm; struct mlx4_dev *dev = ibdev->dev; int i; unsigned long flags; struct mlx4_active_ports actv_ports; unsigned int ports; unsigned int first_port; if (!mlx4_is_master(dev)) return; actv_ports = mlx4_get_active_ports(dev, slave); ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports); first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports); dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC); if (!dm) return; for (i = 0; i < ports; i++) { dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC); if (!dm[i]) { while (--i >= 0) kfree(dm[i]); goto out; } INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work); dm[i]->port = first_port + i + 1; dm[i]->slave = slave; dm[i]->do_init = do_init; dm[i]->dev = ibdev; } /* initialize or tear down tunnel QPs for the slave */ spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); if (!ibdev->sriov.is_going_down) { for (i = 0; i < ports; i++) queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work); spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); } else { spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); for (i = 0; i < ports; i++) kfree(dm[i]); } out: kfree(dm); return; } static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev) { struct mlx4_ib_qp *mqp; unsigned long flags_qp; unsigned long flags_cq; struct mlx4_ib_cq *send_mcq, *recv_mcq; struct list_head cq_notify_list; struct mlx4_cq *mcq; unsigned long flags; pr_warn("mlx4_ib_handle_catas_error was started\n"); INIT_LIST_HEAD(&cq_notify_list); /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/ spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { spin_lock_irqsave(&mqp->sq.lock, flags_qp); if (mqp->sq.tail != mqp->sq.head) { send_mcq = to_mcq(mqp->ibqp.send_cq); spin_lock_irqsave(&send_mcq->lock, flags_cq); if (send_mcq->mcq.comp && mqp->ibqp.send_cq->comp_handler) { if (!send_mcq->mcq.reset_notify_added) { send_mcq->mcq.reset_notify_added = 1; list_add_tail(&send_mcq->mcq.reset_notify, &cq_notify_list); } } spin_unlock_irqrestore(&send_mcq->lock, flags_cq); } spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); /* Now, handle the QP's receive queue */ spin_lock_irqsave(&mqp->rq.lock, flags_qp); /* no handling is needed for SRQ */ if (!mqp->ibqp.srq) { if (mqp->rq.tail != mqp->rq.head) { recv_mcq = to_mcq(mqp->ibqp.recv_cq); spin_lock_irqsave(&recv_mcq->lock, flags_cq); if (recv_mcq->mcq.comp && mqp->ibqp.recv_cq->comp_handler) { if (!recv_mcq->mcq.reset_notify_added) { recv_mcq->mcq.reset_notify_added = 1; list_add_tail(&recv_mcq->mcq.reset_notify, &cq_notify_list); } } spin_unlock_irqrestore(&recv_mcq->lock, flags_cq); } } spin_unlock_irqrestore(&mqp->rq.lock, flags_qp); } list_for_each_entry(mcq, &cq_notify_list, reset_notify) { mcq->comp(mcq); } spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); pr_warn("mlx4_ib_handle_catas_error ended\n"); } static void handle_bonded_port_state_event(struct work_struct *work) { struct ib_event_work *ew = container_of(work, struct ib_event_work, 
work); struct mlx4_ib_dev *ibdev = ew->ib_dev; enum ib_port_state bonded_port_state = IB_PORT_NOP; int i; struct ib_event ibev; kfree(ew); spin_lock_bh(&ibdev->iboe.lock); for (i = 0; i < MLX4_MAX_PORTS; ++i) { struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; enum ib_port_state curr_port_state; if (!curr_netdev) continue; curr_port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? IB_PORT_ACTIVE : IB_PORT_DOWN; bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ? curr_port_state : IB_PORT_ACTIVE; } spin_unlock_bh(&ibdev->iboe.lock); ibev.device = &ibdev->ib_dev; ibev.element.port_num = 1; ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; ib_dispatch_event(&ibev); } void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port) { u64 sl2vl; int err; err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl); if (err) { pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n", port, err); sl2vl = 0; } atomic64_set(&mdev->sl2vl[port - 1], sl2vl); } static void ib_sl2vl_update_work(struct work_struct *work) { struct ib_event_work *ew = container_of(work, struct ib_event_work, work); struct mlx4_ib_dev *mdev = ew->ib_dev; int port = ew->port; mlx4_ib_sl2vl_update(mdev, port); kfree(ew); } void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev, int port) { struct ib_event_work *ew; ew = kmalloc(sizeof(*ew), GFP_ATOMIC); if (ew) { INIT_WORK(&ew->work, ib_sl2vl_update_work); ew->port = port; ew->ib_dev = ibdev; queue_work(wq, &ew->work); } } static int mlx4_ib_event(struct notifier_block *this, unsigned long event, void *param) { struct mlx4_ib_dev *ibdev = container_of(this, struct mlx4_ib_dev, mlx_nb); struct mlx4_dev *dev = ibdev->dev; struct ib_event ibev; struct mlx4_eqe *eqe = NULL; struct ib_event_work *ew; int p = 0; if (mlx4_is_bonded(dev) && ((event == MLX4_DEV_EVENT_PORT_UP) || (event == MLX4_DEV_EVENT_PORT_DOWN))) { ew = kmalloc(sizeof(*ew), GFP_ATOMIC); if (!ew) return NOTIFY_DONE; INIT_WORK(&ew->work, handle_bonded_port_state_event); ew->ib_dev = ibdev; queue_work(wq, &ew->work); return NOTIFY_DONE; } switch (event) { case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: break; case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: eqe = (struct mlx4_eqe *)param; break; default: p = *(int *)param; break; } switch (event) { case MLX4_DEV_EVENT_PORT_UP: if (p > ibdev->num_ports) return NOTIFY_DONE; if (!mlx4_is_slave(dev) && rdma_port_get_link_layer(&ibdev->ib_dev, p) == IB_LINK_LAYER_INFINIBAND) { if (mlx4_is_master(dev)) mlx4_ib_invalidate_all_guid_record(ibdev, p); if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST && !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) mlx4_sched_ib_sl2vl_update_work(ibdev, p); } ibev.event = IB_EVENT_PORT_ACTIVE; break; case MLX4_DEV_EVENT_PORT_DOWN: if (p > ibdev->num_ports) return NOTIFY_DONE; ibev.event = IB_EVENT_PORT_ERR; break; case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: ibdev->ib_active = false; ibev.event = IB_EVENT_DEVICE_FATAL; mlx4_ib_handle_catas_error(ibdev); break; case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: ew = kmalloc(sizeof *ew, GFP_ATOMIC); if (!ew) return NOTIFY_DONE; INIT_WORK(&ew->work, handle_port_mgmt_change_event); memcpy(&ew->ib_eqe, eqe, sizeof *eqe); ew->ib_dev = ibdev; /* need to queue only for port owner, which uses GEN_EQE */ if (mlx4_is_master(dev)) queue_work(wq, &ew->work); else handle_port_mgmt_change_event(&ew->work); return NOTIFY_DONE; case MLX4_DEV_EVENT_SLAVE_INIT: /* here, p is the slave id */ do_slave_init(ibdev, p, 1); if 
(mlx4_is_master(dev)) { int i; for (i = 1; i <= ibdev->num_ports; i++) { if (rdma_port_get_link_layer(&ibdev->ib_dev, i) == IB_LINK_LAYER_INFINIBAND) mlx4_ib_slave_alias_guid_event(ibdev, p, i, 1); } } return NOTIFY_DONE; case MLX4_DEV_EVENT_SLAVE_SHUTDOWN: if (mlx4_is_master(dev)) { int i; for (i = 1; i <= ibdev->num_ports; i++) { if (rdma_port_get_link_layer(&ibdev->ib_dev, i) == IB_LINK_LAYER_INFINIBAND) mlx4_ib_slave_alias_guid_event(ibdev, p, i, 0); } } /* here, p is the slave id */ do_slave_init(ibdev, p, 0); return NOTIFY_DONE; default: return NOTIFY_DONE; } ibev.device = &ibdev->ib_dev; ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p; ib_dispatch_event(&ibev); return NOTIFY_DONE; } static const struct auxiliary_device_id mlx4_ib_id_table[] = { { .name = MLX4_ADEV_NAME ".ib" }, {}, }; MODULE_DEVICE_TABLE(auxiliary, mlx4_ib_id_table); static struct mlx4_adrv mlx4_ib_adrv = { .adrv = { .name = "ib", .probe = mlx4_ib_probe, .remove = mlx4_ib_remove, .id_table = mlx4_ib_id_table, }, .protocol = MLX4_PROT_IB_IPV6, .flags = MLX4_INTFF_BONDING }; static int __init mlx4_ib_init(void) { int err; wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM); if (!wq) return -ENOMEM; err = mlx4_ib_qp_event_init(); if (err) goto clean_qp_event; err = mlx4_ib_cm_init(); if (err) goto clean_wq; err = mlx4_ib_mcg_init(); if (err) goto clean_cm; err = mlx4_register_auxiliary_driver(&mlx4_ib_adrv); if (err) goto clean_mcg; return 0; clean_mcg: mlx4_ib_mcg_destroy(); clean_cm: mlx4_ib_cm_destroy(); clean_wq: mlx4_ib_qp_event_cleanup(); clean_qp_event: destroy_workqueue(wq); return err; } static void __exit mlx4_ib_cleanup(void) { mlx4_unregister_auxiliary_driver(&mlx4_ib_adrv); mlx4_ib_mcg_destroy(); mlx4_ib_cm_destroy(); mlx4_ib_qp_event_cleanup(); destroy_workqueue(wq); } module_init(mlx4_ib_init); module_exit(mlx4_ib_cleanup);
linux-master
drivers/infiniband/hw/mlx4/main.c
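The flow-steering verbs in main.c above (mlx4_ib_create_flow()/mlx4_ib_destroy_flow()) are normally driven from user space through libibverbs. The sketch below is a minimal, hypothetical consumer of that path, assuming rdma-core's ibv_create_flow() API; the QP setup, error handling and the attach_dmac_rule() helper name are illustrative and not part of the driver.

#include <stdint.h>
#include <string.h>
#include <infiniband/verbs.h>

/* Attach an exact destination-MAC steering rule to an existing QP on the
 * given port.  In the kernel this request reaches mlx4_ib_create_flow()
 * with a single IB_FLOW_SPEC_ETH spec, i.e. the MLX4_FS_REGULAR case. */
static struct ibv_flow *attach_dmac_rule(struct ibv_qp *qp, uint8_t port,
					 const uint8_t dmac[6])
{
	struct {
		struct ibv_flow_attr		attr;
		struct ibv_flow_spec_eth	eth;
	} __attribute__((packed)) rule = {
		.attr = {
			.comp_mask	= 0,
			.type		= IBV_FLOW_ATTR_NORMAL,
			.size		= sizeof(rule),
			.priority	= 0,
			.num_of_specs	= 1,
			.port		= port,
			.flags		= 0,
		},
		.eth = {
			.type	= IBV_FLOW_SPEC_ETH,
			.size	= sizeof(struct ibv_flow_spec_eth),
		},
	};

	memcpy(rule.eth.val.dst_mac, dmac, 6);
	memset(rule.eth.mask.dst_mac, 0xff, 6);	/* exact match on dst MAC */

	return ibv_create_flow(qp, &rule.attr);	/* NULL on failure, errno set */
}

A matching ibv_destroy_flow() on the returned handle ends up in mlx4_ib_destroy_flow(), which tears down the primary rule and, on bonded devices, the mirror rule as well.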
/* * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/mlx4/cq.h> #include <linux/mlx4/qp.h> #include <linux/mlx4/srq.h> #include <linux/slab.h> #include "mlx4_ib.h" #include <rdma/mlx4-abi.h> #include <rdma/uverbs_ioctl.h> static void mlx4_ib_cq_comp(struct mlx4_cq *cq) { struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; ibcq->comp_handler(ibcq, ibcq->cq_context); } static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type) { struct ib_event event; struct ib_cq *ibcq; if (type != MLX4_EVENT_TYPE_CQ_ERROR) { pr_warn("Unexpected event type %d " "on CQ %06x\n", type, cq->cqn); return; } ibcq = &to_mibcq(cq)->ibcq; if (ibcq->event_handler) { event.device = ibcq->device; event.event = IB_EVENT_CQ_ERR; event.element.cq = ibcq; ibcq->event_handler(&event, ibcq->cq_context); } } static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n) { return mlx4_buf_offset(&buf->buf, n * buf->entry_size); } static void *get_cqe(struct mlx4_ib_cq *cq, int n) { return get_cqe_from_buf(&cq->buf, n); } static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n) { struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe); return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1))) ? 
NULL : cqe; } static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq) { return get_sw_cqe(cq, cq->mcq.cons_index); } int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) { struct mlx4_ib_cq *mcq = to_mcq(cq); struct mlx4_ib_dev *dev = to_mdev(cq->device); return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period); } static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent) { int err; err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size, PAGE_SIZE * 2, &buf->buf); if (err) goto out; buf->entry_size = dev->dev->caps.cqe_size; err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift, &buf->mtt); if (err) goto err_buf; err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf); if (err) goto err_mtt; return 0; err_mtt: mlx4_mtt_cleanup(dev->dev, &buf->mtt); err_buf: mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf); out: return err; } static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe) { mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf); } static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, struct ib_umem **umem, u64 buf_addr, int cqe) { int err; int cqe_size = dev->dev->caps.cqe_size; int shift; int n; *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(*umem)) return PTR_ERR(*umem); shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n); err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt); if (err) goto err_buf; err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem); if (err) goto err_mtt; return 0; err_mtt: mlx4_mtt_cleanup(dev->dev, &buf->mtt); err_buf: ib_umem_release(*umem); return err; } #define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) { struct ib_device *ibdev = ibcq->device; int entries = attr->cqe; int vector = attr->comp_vector; struct mlx4_ib_dev *dev = to_mdev(ibdev); struct mlx4_ib_cq *cq = to_mcq(ibcq); struct mlx4_uar *uar; void *buf_addr; int err; struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context( udata, struct mlx4_ib_ucontext, ibucontext); if (entries < 1 || entries > dev->dev->caps.max_cqes) return -EINVAL; if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED) return -EINVAL; entries = roundup_pow_of_two(entries + 1); cq->ibcq.cqe = entries - 1; mutex_init(&cq->resize_mutex); spin_lock_init(&cq->lock); cq->resize_buf = NULL; cq->resize_umem = NULL; cq->create_flags = attr->flags; INIT_LIST_HEAD(&cq->send_qp_list); INIT_LIST_HEAD(&cq->recv_qp_list); if (udata) { struct mlx4_ib_create_cq ucmd; if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { err = -EFAULT; goto err_cq; } buf_addr = (void *)(unsigned long)ucmd.buf_addr; err = mlx4_ib_get_cq_umem(dev, &cq->buf, &cq->umem, ucmd.buf_addr, entries); if (err) goto err_cq; err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db); if (err) goto err_mtt; uar = &context->uar; cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS; } else { err = mlx4_db_alloc(dev->dev, &cq->db, 1); if (err) goto err_cq; cq->mcq.set_ci_db = cq->db.db; cq->mcq.arm_db = cq->db.db + 1; *cq->mcq.set_ci_db = 0; *cq->mcq.arm_db = 0; err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries); if (err) goto err_db; buf_addr = &cq->buf.buf; uar = &dev->priv_uar; cq->mcq.usage = MLX4_RES_USAGE_DRIVER; } if (dev->eq_table) vector = dev->eq_table[vector % ibdev->num_comp_vectors]; err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, 
cq->db.dma, &cq->mcq, vector, 0, !!(cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION), buf_addr, !!udata); if (err) goto err_dbmap; if (udata) cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp; else cq->mcq.comp = mlx4_ib_cq_comp; cq->mcq.event = mlx4_ib_cq_event; if (udata) if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { err = -EFAULT; goto err_cq_free; } return 0; err_cq_free: mlx4_cq_free(dev->dev, &cq->mcq); err_dbmap: if (udata) mlx4_ib_db_unmap_user(context, &cq->db); err_mtt: mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt); ib_umem_release(cq->umem); if (!udata) mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); err_db: if (!udata) mlx4_db_free(dev->dev, &cq->db); err_cq: return err; } static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq, int entries) { int err; if (cq->resize_buf) return -EBUSY; cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL); if (!cq->resize_buf) return -ENOMEM; err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries); if (err) { kfree(cq->resize_buf); cq->resize_buf = NULL; return err; } cq->resize_buf->cqe = entries - 1; return 0; } static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq, int entries, struct ib_udata *udata) { struct mlx4_ib_resize_cq ucmd; int err; if (cq->resize_umem) return -EBUSY; if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) return -EFAULT; cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL); if (!cq->resize_buf) return -ENOMEM; err = mlx4_ib_get_cq_umem(dev, &cq->resize_buf->buf, &cq->resize_umem, ucmd.buf_addr, entries); if (err) { kfree(cq->resize_buf); cq->resize_buf = NULL; return err; } cq->resize_buf->cqe = entries - 1; return 0; } static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq) { u32 i; i = cq->mcq.cons_index; while (get_sw_cqe(cq, i)) ++i; return i - cq->mcq.cons_index; } static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq) { struct mlx4_cqe *cqe, *new_cqe; int i; int cqe_size = cq->buf.entry_size; int cqe_inc = cqe_size == 64 ? 1 : 0; i = cq->mcq.cons_index; cqe = get_cqe(cq, i & cq->ibcq.cqe); cqe += cqe_inc; while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) { new_cqe = get_cqe_from_buf(&cq->resize_buf->buf, (i + 1) & cq->resize_buf->cqe); memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size); new_cqe += cqe_inc; new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) | (((i + 1) & (cq->resize_buf->cqe + 1)) ? 
MLX4_CQE_OWNER_MASK : 0); cqe = get_cqe(cq, ++i & cq->ibcq.cqe); cqe += cqe_inc; } ++cq->mcq.cons_index; } int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(ibcq->device); struct mlx4_ib_cq *cq = to_mcq(ibcq); struct mlx4_mtt mtt; int outst_cqe; int err; mutex_lock(&cq->resize_mutex); if (entries < 1 || entries > dev->dev->caps.max_cqes) { err = -EINVAL; goto out; } entries = roundup_pow_of_two(entries + 1); if (entries == ibcq->cqe + 1) { err = 0; goto out; } if (entries > dev->dev->caps.max_cqes + 1) { err = -EINVAL; goto out; } if (ibcq->uobject) { err = mlx4_alloc_resize_umem(dev, cq, entries, udata); if (err) goto out; } else { /* Can't be smaller than the number of outstanding CQEs */ outst_cqe = mlx4_ib_get_outstanding_cqes(cq); if (entries < outst_cqe + 1) { err = -EINVAL; goto out; } err = mlx4_alloc_resize_buf(dev, cq, entries); if (err) goto out; } mtt = cq->buf.mtt; err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt); if (err) goto err_buf; mlx4_mtt_cleanup(dev->dev, &mtt); if (ibcq->uobject) { cq->buf = cq->resize_buf->buf; cq->ibcq.cqe = cq->resize_buf->cqe; ib_umem_release(cq->umem); cq->umem = cq->resize_umem; kfree(cq->resize_buf); cq->resize_buf = NULL; cq->resize_umem = NULL; } else { struct mlx4_ib_cq_buf tmp_buf; int tmp_cqe = 0; spin_lock_irq(&cq->lock); if (cq->resize_buf) { mlx4_ib_cq_resize_copy_cqes(cq); tmp_buf = cq->buf; tmp_cqe = cq->ibcq.cqe; cq->buf = cq->resize_buf->buf; cq->ibcq.cqe = cq->resize_buf->cqe; kfree(cq->resize_buf); cq->resize_buf = NULL; } spin_unlock_irq(&cq->lock); if (tmp_cqe) mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe); } goto out; err_buf: mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt); if (!ibcq->uobject) mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf, cq->resize_buf->cqe); kfree(cq->resize_buf); cq->resize_buf = NULL; ib_umem_release(cq->resize_umem); cq->resize_umem = NULL; out: mutex_unlock(&cq->resize_mutex); return err; } int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(cq->device); struct mlx4_ib_cq *mcq = to_mcq(cq); mlx4_cq_free(dev->dev, &mcq->mcq); mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt); if (udata) { mlx4_ib_db_unmap_user( rdma_udata_to_drv_context( udata, struct mlx4_ib_ucontext, ibucontext), &mcq->db); } else { mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe); mlx4_db_free(dev->dev, &mcq->db); } ib_umem_release(mcq->umem); return 0; } static void dump_cqe(void *cqe) { __be32 *buf = cqe; pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n", be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]), be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]), be32_to_cpu(buf[6]), be32_to_cpu(buf[7])); } static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe, struct ib_wc *wc) { if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) { pr_debug("local QP operation err " "(QPN %06x, WQE index %x, vendor syndrome %02x, " "opcode = %02x)\n", be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index), cqe->vendor_err_syndrome, cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK); dump_cqe(cqe); } switch (cqe->syndrome) { case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR: wc->status = IB_WC_LOC_LEN_ERR; break; case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR: wc->status = IB_WC_LOC_QP_OP_ERR; break; case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR: wc->status = IB_WC_LOC_PROT_ERR; break; case MLX4_CQE_SYNDROME_WR_FLUSH_ERR: wc->status = IB_WC_WR_FLUSH_ERR; break; case MLX4_CQE_SYNDROME_MW_BIND_ERR: wc->status 
= IB_WC_MW_BIND_ERR; break; case MLX4_CQE_SYNDROME_BAD_RESP_ERR: wc->status = IB_WC_BAD_RESP_ERR; break; case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR: wc->status = IB_WC_LOC_ACCESS_ERR; break; case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR: wc->status = IB_WC_REM_INV_REQ_ERR; break; case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR: wc->status = IB_WC_REM_ACCESS_ERR; break; case MLX4_CQE_SYNDROME_REMOTE_OP_ERR: wc->status = IB_WC_REM_OP_ERR; break; case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR: wc->status = IB_WC_RETRY_EXC_ERR; break; case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR: wc->status = IB_WC_RNR_RETRY_EXC_ERR; break; case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR: wc->status = IB_WC_REM_ABORT_ERR; break; default: wc->status = IB_WC_GENERAL_ERR; break; } wc->vendor_err = cqe->vendor_err_syndrome; } static int mlx4_ib_ipoib_csum_ok(__be16 status, u8 badfcs_enc, __be16 checksum) { return ((badfcs_enc & MLX4_CQE_STATUS_L4_CSUM) || ((status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && (status & cpu_to_be16(MLX4_CQE_STATUS_TCP | MLX4_CQE_STATUS_UDP)) && (checksum == cpu_to_be16(0xffff)))); } static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, unsigned tail, struct mlx4_cqe *cqe, int is_eth) { struct mlx4_ib_proxy_sqp_hdr *hdr; ib_dma_sync_single_for_cpu(qp->ibqp.device, qp->sqp_proxy_rcv[tail].map, sizeof (struct mlx4_ib_proxy_sqp_hdr), DMA_FROM_DEVICE); hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr); wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index); wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF; wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0; wc->dlid_path_bits = 0; if (is_eth) { wc->slid = 0; wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid); memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4); memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2); wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); } else { wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32); wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12); } } static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries, struct ib_wc *wc, int *npolled, int is_send) { struct mlx4_ib_wq *wq; unsigned cur; int i; wq = is_send ? 
&qp->sq : &qp->rq; cur = wq->head - wq->tail; if (cur == 0) return; for (i = 0; i < cur && *npolled < num_entries; i++) { wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; wc->status = IB_WC_WR_FLUSH_ERR; wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR; wq->tail++; (*npolled)++; wc->qp = &qp->ibqp; wc++; } } static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries, struct ib_wc *wc, int *npolled) { struct mlx4_ib_qp *qp; *npolled = 0; /* Find uncompleted WQEs belonging to that cq and return * simulated FLUSH_ERR completions */ list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) { mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1); if (*npolled >= num_entries) goto out; } list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) { mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0); if (*npolled >= num_entries) goto out; } out: return; } static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, struct mlx4_ib_qp **cur_qp, struct ib_wc *wc) { struct mlx4_cqe *cqe; struct mlx4_qp *mqp; struct mlx4_ib_wq *wq; struct mlx4_ib_srq *srq; struct mlx4_srq *msrq = NULL; int is_send; int is_error; int is_eth; u32 g_mlpath_rqpn; u16 wqe_ctr; unsigned tail = 0; repoll: cqe = next_cqe_sw(cq); if (!cqe) return -EAGAIN; if (cq->buf.entry_size == 64) cqe++; ++cq->mcq.cons_index; /* * Make sure we read CQ entry contents after we've checked the * ownership bit. */ rmb(); is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK; is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_ERROR; /* Resize CQ in progress */ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) { if (cq->resize_buf) { struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device); mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); cq->buf = cq->resize_buf->buf; cq->ibcq.cqe = cq->resize_buf->cqe; kfree(cq->resize_buf); cq->resize_buf = NULL; } goto repoll; } if (!*cur_qp || (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) { /* * We do not have to take the QP table lock here, * because CQs will be locked while QPs are removed * from the table. 
*/ mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, be32_to_cpu(cqe->vlan_my_qpn)); *cur_qp = to_mibqp(mqp); } wc->qp = &(*cur_qp)->ibqp; if (wc->qp->qp_type == IB_QPT_XRC_TGT) { u32 srq_num; g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); srq_num = g_mlpath_rqpn & 0xffffff; /* SRQ is also in the radix tree */ msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev, srq_num); } if (is_send) { wq = &(*cur_qp)->sq; if (!(*cur_qp)->sq_signal_bits) { wqe_ctr = be16_to_cpu(cqe->wqe_index); wq->tail += (u16) (wqe_ctr - (u16) wq->tail); } wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; ++wq->tail; } else if ((*cur_qp)->ibqp.srq) { srq = to_msrq((*cur_qp)->ibqp.srq); wqe_ctr = be16_to_cpu(cqe->wqe_index); wc->wr_id = srq->wrid[wqe_ctr]; mlx4_ib_free_srq_wqe(srq, wqe_ctr); } else if (msrq) { srq = to_mibsrq(msrq); wqe_ctr = be16_to_cpu(cqe->wqe_index); wc->wr_id = srq->wrid[wqe_ctr]; mlx4_ib_free_srq_wqe(srq, wqe_ctr); } else { wq = &(*cur_qp)->rq; tail = wq->tail & (wq->wqe_cnt - 1); wc->wr_id = wq->wrid[tail]; ++wq->tail; } if (unlikely(is_error)) { mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc); return 0; } wc->status = IB_WC_SUCCESS; if (is_send) { wc->wc_flags = 0; switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) { case MLX4_OPCODE_RDMA_WRITE_IMM: wc->wc_flags |= IB_WC_WITH_IMM; fallthrough; case MLX4_OPCODE_RDMA_WRITE: wc->opcode = IB_WC_RDMA_WRITE; break; case MLX4_OPCODE_SEND_IMM: wc->wc_flags |= IB_WC_WITH_IMM; fallthrough; case MLX4_OPCODE_SEND: case MLX4_OPCODE_SEND_INVAL: wc->opcode = IB_WC_SEND; break; case MLX4_OPCODE_RDMA_READ: wc->opcode = IB_WC_RDMA_READ; wc->byte_len = be32_to_cpu(cqe->byte_cnt); break; case MLX4_OPCODE_ATOMIC_CS: wc->opcode = IB_WC_COMP_SWAP; wc->byte_len = 8; break; case MLX4_OPCODE_ATOMIC_FA: wc->opcode = IB_WC_FETCH_ADD; wc->byte_len = 8; break; case MLX4_OPCODE_MASKED_ATOMIC_CS: wc->opcode = IB_WC_MASKED_COMP_SWAP; wc->byte_len = 8; break; case MLX4_OPCODE_MASKED_ATOMIC_FA: wc->opcode = IB_WC_MASKED_FETCH_ADD; wc->byte_len = 8; break; case MLX4_OPCODE_LSO: wc->opcode = IB_WC_LSO; break; case MLX4_OPCODE_FMR: wc->opcode = IB_WC_REG_MR; break; case MLX4_OPCODE_LOCAL_INVAL: wc->opcode = IB_WC_LOCAL_INV; break; } } else { wc->byte_len = be32_to_cpu(cqe->byte_cnt); switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) { case MLX4_RECV_OPCODE_RDMA_WRITE_IMM: wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; wc->wc_flags = IB_WC_WITH_IMM; wc->ex.imm_data = cqe->immed_rss_invalid; break; case MLX4_RECV_OPCODE_SEND_INVAL: wc->opcode = IB_WC_RECV; wc->wc_flags = IB_WC_WITH_INVALIDATE; wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid); break; case MLX4_RECV_OPCODE_SEND: wc->opcode = IB_WC_RECV; wc->wc_flags = 0; break; case MLX4_RECV_OPCODE_SEND_IMM: wc->opcode = IB_WC_RECV; wc->wc_flags = IB_WC_WITH_IMM; wc->ex.imm_data = cqe->immed_rss_invalid; break; } is_eth = (rdma_port_get_link_layer(wc->qp->device, (*cur_qp)->port) == IB_LINK_LAYER_ETHERNET); if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) { if ((*cur_qp)->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) { use_tunnel_data(*cur_qp, cq, wc, tail, cqe, is_eth); return 0; } } g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); wc->src_qp = g_mlpath_rqpn & 0xffffff; wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0; wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f; wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, cqe->badfcs_enc, cqe->checksum) ? 
IB_WC_IP_CSUM_OK : 0; if (is_eth) { wc->slid = 0; wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_CVLAN_PRESENT_MASK) { wc->vlan_id = be16_to_cpu(cqe->sl_vid) & MLX4_CQE_VID_MASK; } else { wc->vlan_id = 0xffff; } memcpy(wc->smac, cqe->smac, ETH_ALEN); wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); } else { wc->slid = be16_to_cpu(cqe->rlid); wc->sl = be16_to_cpu(cqe->sl_vid) >> 12; wc->vlan_id = 0xffff; } } return 0; } int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) { struct mlx4_ib_cq *cq = to_mcq(ibcq); struct mlx4_ib_qp *cur_qp = NULL; unsigned long flags; int npolled; struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device); spin_lock_irqsave(&cq->lock, flags); if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled); goto out; } for (npolled = 0; npolled < num_entries; ++npolled) { if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled)) break; } mlx4_cq_set_ci(&cq->mcq); out: spin_unlock_irqrestore(&cq->lock, flags); return npolled; } int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { mlx4_cq_arm(&to_mcq(ibcq)->mcq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT, to_mdev(ibcq->device)->uar_map, MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock)); return 0; } void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) { u32 prod_index; int nfreed = 0; struct mlx4_cqe *cqe, *dest; u8 owner_bit; int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0; /* * First we need to find the current producer index, so we * know where to start cleaning from. It doesn't matter if HW * adds new entries after this loop -- the QP we're worried * about is already in RESET, so the new entries won't come * from our QP and therefore don't need to be checked. */ for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index) if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) break; /* * Now sweep backwards through the CQ, removing CQ entries * that match our QP by copying older entries on top of them. */ while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); cqe += cqe_inc; if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) { if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK)) mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index)); ++nfreed; } else if (nfreed) { dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); dest += cqe_inc; owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK; memcpy(dest, cqe, sizeof *cqe); dest->owner_sr_opcode = owner_bit | (dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK); } } if (nfreed) { cq->mcq.cons_index += nfreed; /* * Make sure update of buffer contents is done before * updating consumer index. */ wmb(); mlx4_cq_set_ci(&cq->mcq); } } void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) { spin_lock_irq(&cq->lock); __mlx4_ib_cq_clean(cq, qpn, srq); spin_unlock_irq(&cq->lock); }
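
/*
 * Illustrative sketch, not driver code: the compaction idea behind
 * __mlx4_ib_cq_clean() above, restated on a plain array of QPNs. Ring
 * wrap-around, the 64-byte CQE stride and the ownership-bit handling are
 * omitted, and clean_example_ring() with its parameters is a made-up name.
 * Entries belonging to the QP being destroyed are dropped, each surviving
 * entry slides up by the number of dropped entries that were newer than it,
 * and the caller then advances its consumer index by the total dropped.
 */
static unsigned int clean_example_ring(u32 *qpn_of_entry, unsigned int cons,
                                       unsigned int prod, u32 dead_qpn)
{
        unsigned int idx, nfreed = 0;

        for (idx = prod; idx-- > cons; ) {      /* sweep backwards, newest first */
                if (qpn_of_entry[idx] == dead_qpn)
                        nfreed++;       /* drop this entry */
                else if (nfreed)
                        /* slide the survivor toward the producer end */
                        qpn_of_entry[idx + nfreed] = qpn_of_entry[idx];
        }

        return nfreed;  /* __mlx4_ib_cq_clean() then does cons_index += nfreed */
}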
linux-master
drivers/infiniband/hw/mlx4/cq.c
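A hedged usage sketch for the completion path in cq.c above: mlx4_ib_poll_cq() and mlx4_ib_arm_cq() are reached through the generic verbs wrappers ib_poll_cq() and ib_req_notify_cq(). Only those wrappers, struct ib_wc and ib_wc_status_msg() are taken from the verbs API; drain_example_cq() and the batch size of 16 are illustrative choices, not part of the driver.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Hypothetical consumer loop: drains the CQEs that mlx4_ib_poll_one()
 * translated into struct ib_wc entries, then re-arms the CQ.
 */
static void drain_example_cq(struct ib_cq *cq)
{
        struct ib_wc wc[16];
        int n, i;

        do {
                /* for mlx4 this ends up in mlx4_ib_poll_cq() above */
                n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
                for (i = 0; i < n; i++) {
                        if (wc[i].status != IB_WC_SUCCESS)
                                pr_warn("wr_id %llu failed: %s\n",
                                        (unsigned long long)wc[i].wr_id,
                                        ib_wc_status_msg(wc[i].status));
                        /* ... hand wc[i] back to the owner of wr_id ... */
                }
        } while (n > 0);

        /* ask for an interrupt on the next CQE; mlx4_ib_arm_cq() above
         * rings the doorbell that implements this request
         */
        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}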
/* * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/mlx4/qp.h> #include <linux/mlx4/srq.h> #include <linux/slab.h> #include "mlx4_ib.h" #include <rdma/mlx4-abi.h> #include <rdma/uverbs_ioctl.h> static void *get_wqe(struct mlx4_ib_srq *srq, int n) { return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); } static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type) { struct ib_event event; struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq; if (ibsrq->event_handler) { event.device = ibsrq->device; event.element.srq = ibsrq; switch (type) { case MLX4_EVENT_TYPE_SRQ_LIMIT: event.event = IB_EVENT_SRQ_LIMIT_REACHED; break; case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: event.event = IB_EVENT_SRQ_ERR; break; default: pr_warn("Unexpected event type %d " "on SRQ %06x\n", type, srq->srqn); return; } ibsrq->event_handler(&event, ibsrq->srq_context); } } int mlx4_ib_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *init_attr, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(ib_srq->device); struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context( udata, struct mlx4_ib_ucontext, ibucontext); struct mlx4_ib_srq *srq = to_msrq(ib_srq); struct mlx4_wqe_srq_next_seg *next; struct mlx4_wqe_data_seg *scatter; u32 cqn; u16 xrcdn; int desc_size; int buf_size; int err; int i; if (init_attr->srq_type != IB_SRQT_BASIC && init_attr->srq_type != IB_SRQT_XRC) return -EOPNOTSUPP; /* Sanity check SRQ size before proceeding */ if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes || init_attr->attr.max_sge > dev->dev->caps.max_srq_sge) return -EINVAL; mutex_init(&srq->mutex); spin_lock_init(&srq->lock); srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1); srq->msrq.max_gs = init_attr->attr.max_sge; desc_size = max(32UL, roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) + srq->msrq.max_gs * sizeof (struct mlx4_wqe_data_seg))); srq->msrq.wqe_shift = ilog2(desc_size); buf_size = srq->msrq.max * desc_size; if (udata) { struct mlx4_ib_create_srq ucmd; if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) return -EFAULT; srq->umem = ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0); if (IS_ERR(srq->umem)) return 
PTR_ERR(srq->umem); err = mlx4_mtt_init( dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE), PAGE_SHIFT, &srq->mtt); if (err) goto err_buf; err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem); if (err) goto err_mtt; err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db); if (err) goto err_mtt; } else { err = mlx4_db_alloc(dev->dev, &srq->db, 0); if (err) return err; *srq->db.db = 0; if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) { err = -ENOMEM; goto err_db; } srq->head = 0; srq->tail = srq->msrq.max - 1; srq->wqe_ctr = 0; for (i = 0; i < srq->msrq.max; ++i) { next = get_wqe(srq, i); next->next_wqe_index = cpu_to_be16((i + 1) & (srq->msrq.max - 1)); for (scatter = (void *) (next + 1); (void *) scatter < (void *) next + desc_size; ++scatter) scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY); } err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift, &srq->mtt); if (err) goto err_buf; err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf); if (err) goto err_mtt; srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL); if (!srq->wrid) { err = -ENOMEM; goto err_mtt; } } cqn = ib_srq_has_cq(init_attr->srq_type) ? to_mcq(init_attr->ext.cq)->mcq.cqn : 0; xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ? to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn : (u16) dev->dev->caps.reserved_xrcds; err = mlx4_srq_alloc(dev->dev, to_mpd(ib_srq->pd)->pdn, cqn, xrcdn, &srq->mtt, srq->db.dma, &srq->msrq); if (err) goto err_wrid; srq->msrq.event = mlx4_ib_srq_event; srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; if (udata) if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) { err = -EFAULT; goto err_wrid; } init_attr->attr.max_wr = srq->msrq.max - 1; return 0; err_wrid: if (udata) mlx4_ib_db_unmap_user(ucontext, &srq->db); else kvfree(srq->wrid); err_mtt: mlx4_mtt_cleanup(dev->dev, &srq->mtt); err_buf: if (!srq->umem) mlx4_buf_free(dev->dev, buf_size, &srq->buf); ib_umem_release(srq->umem); err_db: if (!udata) mlx4_db_free(dev->dev, &srq->db); return err; } int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(ibsrq->device); struct mlx4_ib_srq *srq = to_msrq(ibsrq); int ret; /* We don't support resizing SRQs (yet?) 
*/ if (attr_mask & IB_SRQ_MAX_WR) return -EINVAL; if (attr_mask & IB_SRQ_LIMIT) { if (attr->srq_limit >= srq->msrq.max) return -EINVAL; mutex_lock(&srq->mutex); ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit); mutex_unlock(&srq->mutex); if (ret) return ret; } return 0; } int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) { struct mlx4_ib_dev *dev = to_mdev(ibsrq->device); struct mlx4_ib_srq *srq = to_msrq(ibsrq); int ret; int limit_watermark; ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark); if (ret) return ret; srq_attr->srq_limit = limit_watermark; srq_attr->max_wr = srq->msrq.max - 1; srq_attr->max_sge = srq->msrq.max_gs; return 0; } int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(srq->device); struct mlx4_ib_srq *msrq = to_msrq(srq); mlx4_srq_free(dev->dev, &msrq->msrq); mlx4_mtt_cleanup(dev->dev, &msrq->mtt); if (udata) { mlx4_ib_db_unmap_user( rdma_udata_to_drv_context( udata, struct mlx4_ib_ucontext, ibucontext), &msrq->db); } else { kvfree(msrq->wrid); mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift, &msrq->buf); mlx4_db_free(dev->dev, &msrq->db); } ib_umem_release(msrq->umem); return 0; } void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index) { struct mlx4_wqe_srq_next_seg *next; /* always called with interrupts disabled. */ spin_lock(&srq->lock); next = get_wqe(srq, srq->tail); next->next_wqe_index = cpu_to_be16(wqe_index); srq->tail = wqe_index; spin_unlock(&srq->lock); } int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct mlx4_ib_srq *srq = to_msrq(ibsrq); struct mlx4_wqe_srq_next_seg *next; struct mlx4_wqe_data_seg *scat; unsigned long flags; int err = 0; int nreq; int i; struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device); spin_lock_irqsave(&srq->lock, flags); if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { err = -EIO; *bad_wr = wr; goto out; } for (nreq = 0; wr; ++nreq, wr = wr->next) { if (unlikely(wr->num_sge > srq->msrq.max_gs)) { err = -EINVAL; *bad_wr = wr; break; } if (unlikely(srq->head == srq->tail)) { err = -ENOMEM; *bad_wr = wr; break; } srq->wrid[srq->head] = wr->wr_id; next = get_wqe(srq, srq->head); srq->head = be16_to_cpu(next->next_wqe_index); scat = (struct mlx4_wqe_data_seg *) (next + 1); for (i = 0; i < wr->num_sge; ++i) { scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); scat[i].addr = cpu_to_be64(wr->sg_list[i].addr); } if (i < srq->msrq.max_gs) { scat[i].byte_count = 0; scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY); scat[i].addr = 0; } } if (likely(nreq)) { srq->wqe_ctr += nreq; /* * Make sure that descriptors are written before * doorbell record. */ wmb(); *srq->db.db = cpu_to_be32(srq->wqe_ctr); } out: spin_unlock_irqrestore(&srq->lock, flags); return err; }
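
/*
 * Illustrative sketch, not driver code: the free-list discipline used by
 * mlx4_ib_post_srq_recv() and mlx4_ib_free_srq_wqe() above. Free WQEs form a
 * singly linked chain threaded through next_wqe_index; posting takes from
 * head and a completed index is appended behind tail, with one entry kept as
 * a sentinel so that head == tail means "no WQE available". The names below
 * are made up and the u16 array stands in for struct mlx4_wqe_srq_next_seg.
 */
struct example_srq_freelist {
        u16 *next;      /* next[i] = index of the free WQE that follows i */
        u16 head, tail;
};

static int example_take_wqe(struct example_srq_freelist *fl, u16 *index)
{
        if (fl->head == fl->tail)
                return -ENOMEM;         /* same check as post_srq_recv() */
        *index = fl->head;
        fl->head = fl->next[fl->head];  /* unlink from the front */
        return 0;
}

static void example_put_wqe(struct example_srq_freelist *fl, u16 index)
{
        fl->next[fl->tail] = index;     /* append behind the current tail, */
        fl->tail = index;               /* exactly as mlx4_ib_free_srq_wqe() */
}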
linux-master
drivers/infiniband/hw/mlx4/srq.c
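For srq.c above, a hedged sketch of how a consumer reaches mlx4_ib_post_srq_recv(): a receive work request is posted through the generic ib_post_srq_recv() verb. repost_one_buffer() and its DMA address, length, lkey and wr_id parameters are placeholders for whatever buffer and MR the caller actually owns.

#include <rdma/ib_verbs.h>

/* Hypothetical helper: posts one receive buffer to an SRQ. */
static int repost_one_buffer(struct ib_srq *srq, u64 buf_dma, u32 buf_len,
                             u32 lkey, u64 wr_id)
{
        struct ib_sge sge = {
                .addr   = buf_dma,
                .length = buf_len,
                .lkey   = lkey,
        };
        struct ib_recv_wr wr = {
                .wr_id   = wr_id,       /* comes back in ib_wc.wr_id */
                .sg_list = &sge,
                .num_sge = 1,
        };
        const struct ib_recv_wr *bad_wr;

        /* for mlx4 this lands in mlx4_ib_post_srq_recv() above, which takes
         * a WQE off the free list and rings the SRQ doorbell
         */
        return ib_post_srq_recv(srq, &wr, &bad_wr);
}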
/* * Copyright (c) 2012 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /***********************************************************/ /*This file support the handling of the Alias GUID feature. */ /***********************************************************/ #include <rdma/ib_mad.h> #include <rdma/ib_smi.h> #include <rdma/ib_cache.h> #include <rdma/ib_sa.h> #include <rdma/ib_pack.h> #include <linux/mlx4/cmd.h> #include <linux/init.h> #include <linux/errno.h> #include <rdma/ib_user_verbs.h> #include <linux/delay.h> #include "mlx4_ib.h" /* The driver keeps the current state of all guids, as they are in the HW. Whenever we receive an smp mad GUIDInfo record, the data will be cached. */ struct mlx4_alias_guid_work_context { u8 port; struct mlx4_ib_dev *dev ; struct ib_sa_query *sa_query; struct completion done; int query_id; struct list_head list; int block_num; ib_sa_comp_mask guid_indexes; u8 method; }; struct mlx4_next_alias_guid_work { u8 port; u8 block_num; u8 method; struct mlx4_sriov_alias_guid_info_rec_det rec_det; }; static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port, int *resched_delay_sec); void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num, u32 port_num, u8 *p_data) { int i; u64 guid_indexes; int slave_id; u32 port_index = port_num - 1; if (!mlx4_is_master(dev->dev)) return; guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid. ports_guid[port_num - 1]. 
all_rec_per_port[block_num].guid_indexes); pr_debug("port: %u, guid_indexes: 0x%llx\n", port_num, guid_indexes); for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) { /* The location of the specific index starts from bit number 4 * until bit num 11 */ if (test_bit(i + 4, (unsigned long *)&guid_indexes)) { slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ; if (slave_id >= dev->dev->num_slaves) { pr_debug("The last slave: %d\n", slave_id); return; } /* cache the guid: */ memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id], &p_data[i * GUID_REC_SIZE], GUID_REC_SIZE); } else pr_debug("Guid number: %d in block: %d" " was not updated\n", i, block_num); } } static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index) { if (index >= NUM_ALIAS_GUID_PER_PORT) { pr_err("%s: ERROR: asked for index:%d\n", __func__, index); return (__force __be64) -1; } return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index]; } ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index) { return IB_SA_COMP_MASK(4 + index); } void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave, int port, int slave_init) { __be64 curr_guid, required_guid; int record_num = slave / 8; int index = slave % 8; int port_index = port - 1; unsigned long flags; int do_work = 0; spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); if (dev->sriov.alias_guid.ports_guid[port_index].state_flags & GUID_STATE_NEED_PORT_INIT) goto unlock; if (!slave_init) { curr_guid = *(__be64 *)&dev->sriov. alias_guid.ports_guid[port_index]. all_rec_per_port[record_num]. all_recs[GUID_REC_SIZE * index]; if (curr_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL) || !curr_guid) goto unlock; required_guid = cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL); } else { required_guid = mlx4_get_admin_guid(dev->dev, slave, port); if (required_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) goto unlock; } *(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index]. all_rec_per_port[record_num]. all_recs[GUID_REC_SIZE * index] = required_guid; dev->sriov.alias_guid.ports_guid[port_index]. all_rec_per_port[record_num].guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(index); dev->sriov.alias_guid.ports_guid[port_index]. all_rec_per_port[record_num].status = MLX4_GUID_INFO_STATUS_IDLE; /* set to run immediately */ dev->sriov.alias_guid.ports_guid[port_index]. all_rec_per_port[record_num].time_to_run = 0; dev->sriov.alias_guid.ports_guid[port_index]. all_rec_per_port[record_num]. guids_retry_schedule[index] = 0; do_work = 1; unlock: spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); if (do_work) mlx4_ib_init_alias_guid_work(dev, port_index); } /* * Whenever new GUID is set/unset (guid table change) create event and * notify the relevant slave (master also should be notified). * If the GUID value is not as we have in the cache the slave will not be * updated; in this case it waits for the smp_snoop or the port management * event to call the function and to update the slave. 
* block_number - the index of the block (16 blocks available) * port_number - 1 or 2 */ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev, int block_num, u32 port_num, u8 *p_data) { int i; u64 guid_indexes; int slave_id, slave_port; enum slave_port_state new_state; enum slave_port_state prev_state; __be64 tmp_cur_ag, form_cache_ag; enum slave_port_gen_event gen_event; struct mlx4_sriov_alias_guid_info_rec_det *rec; unsigned long flags; __be64 required_value; if (!mlx4_is_master(dev->dev)) return; rec = &dev->sriov.alias_guid.ports_guid[port_num - 1]. all_rec_per_port[block_num]; guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid. ports_guid[port_num - 1]. all_rec_per_port[block_num].guid_indexes); pr_debug("port: %u, guid_indexes: 0x%llx\n", port_num, guid_indexes); /*calculate the slaves and notify them*/ for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) { /* the location of the specific index runs from bits 4..11 */ if (!(test_bit(i + 4, (unsigned long *)&guid_indexes))) continue; slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ; if (slave_id >= dev->dev->persist->num_vfs + 1) return; slave_port = mlx4_phys_to_slave_port(dev->dev, slave_id, port_num); if (slave_port < 0) /* this port isn't available for the VF */ continue; tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE]; form_cache_ag = get_cached_alias_guid(dev, port_num, (NUM_ALIAS_GUID_IN_REC * block_num) + i); /* * Check if guid is not the same as in the cache, * If it is different, wait for the snoop_smp or the port mgmt * change event to update the slave on its port state change */ if (tmp_cur_ag != form_cache_ag) continue; spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]; if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) required_value = 0; if (tmp_cur_ag == required_value) { rec->guid_indexes = rec->guid_indexes & ~mlx4_ib_get_aguid_comp_mask_from_ix(i); } else { /* may notify port down if value is 0 */ if (tmp_cur_ag != MLX4_NOT_SET_GUID) { spin_unlock_irqrestore(&dev->sriov. 
alias_guid.ag_work_lock, flags); continue; } } spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num); /*2 cases: Valid GUID, and Invalid Guid*/ if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/ prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num); new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num, MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID, &gen_event); pr_debug("slave: %d, port: %u prev_port_state: %d," " new_port_state: %d, gen_event: %d\n", slave_id, port_num, prev_state, new_state, gen_event); if (gen_event == SLAVE_PORT_GEN_EVENT_UP) { pr_debug("sending PORT_UP event to slave: %d, port: %u\n", slave_id, port_num); mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE); } } else { /* request to invalidate GUID */ set_and_calc_slave_port_state(dev->dev, slave_id, port_num, MLX4_PORT_STATE_IB_EVENT_GID_INVALID, &gen_event); if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) { pr_debug("sending PORT DOWN event to slave: %d, port: %u\n", slave_id, port_num); mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num, MLX4_PORT_CHANGE_SUBTYPE_DOWN); } } } } static void aliasguid_query_handler(int status, struct ib_sa_guidinfo_rec *guid_rec, void *context) { struct mlx4_ib_dev *dev; struct mlx4_alias_guid_work_context *cb_ctx = context; u8 port_index ; int i; struct mlx4_sriov_alias_guid_info_rec_det *rec; unsigned long flags, flags1; ib_sa_comp_mask declined_guid_indexes = 0; ib_sa_comp_mask applied_guid_indexes = 0; unsigned int resched_delay_sec = 0; if (!context) return; dev = cb_ctx->dev; port_index = cb_ctx->port - 1; rec = &dev->sriov.alias_guid.ports_guid[port_index]. all_rec_per_port[cb_ctx->block_num]; if (status) { pr_debug("(port: %d) failed: status = %d\n", cb_ctx->port, status); rec->time_to_run = ktime_get_boottime_ns() + 1 * NSEC_PER_SEC; goto out; } if (guid_rec->block_num != cb_ctx->block_num) { pr_err("block num mismatch: %d != %d\n", cb_ctx->block_num, guid_rec->block_num); goto out; } pr_debug("lid/port: %d/%d, block_num: %d\n", be16_to_cpu(guid_rec->lid), cb_ctx->port, guid_rec->block_num); rec = &dev->sriov.alias_guid.ports_guid[port_index]. all_rec_per_port[guid_rec->block_num]; spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) { __be64 sm_response, required_val; if (!(cb_ctx->guid_indexes & mlx4_ib_get_aguid_comp_mask_from_ix(i))) continue; sm_response = *(__be64 *)&guid_rec->guid_info_list [i * GUID_REC_SIZE]; required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]; if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) { if (required_val == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) goto next_entry; /* A new value was set till we got the response */ pr_debug("need to set new value %llx, record num %d, block_num:%d\n", be64_to_cpu(required_val), i, guid_rec->block_num); goto entry_declined; } /* check if the SM didn't assign one of the records. * if it didn't, re-ask for. */ if (sm_response == MLX4_NOT_SET_GUID) { if (rec->guids_retry_schedule[i] == 0) mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in block_num: %d was declined by SM\n", __func__, i, guid_rec->block_num); goto entry_declined; } else { /* properly assigned record. 
*/ /* We save the GUID we just got from the SM in the * admin_guid in order to be persistent, and in the * request from the sm the process will ask for the same GUID */ if (required_val && sm_response != required_val) { /* Warn only on first retry */ if (rec->guids_retry_schedule[i] == 0) mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set" " admin guid after SysAdmin " "configuration. " "Record num %d in block_num:%d " "was declined by SM, " "new val(0x%llx) was kept, SM returned (0x%llx)\n", __func__, i, guid_rec->block_num, be64_to_cpu(required_val), be64_to_cpu(sm_response)); goto entry_declined; } else { *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] = sm_response; if (required_val == 0) mlx4_set_admin_guid(dev->dev, sm_response, (guid_rec->block_num * NUM_ALIAS_GUID_IN_REC) + i, cb_ctx->port); goto next_entry; } } entry_declined: declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i); rec->guids_retry_schedule[i] = (rec->guids_retry_schedule[i] == 0) ? 1 : min((unsigned int)60, rec->guids_retry_schedule[i] * 2); /* using the minimum value among all entries in that record */ resched_delay_sec = (resched_delay_sec == 0) ? rec->guids_retry_schedule[i] : min(resched_delay_sec, rec->guids_retry_schedule[i]); continue; next_entry: rec->guids_retry_schedule[i] = 0; } applied_guid_indexes = cb_ctx->guid_indexes & ~declined_guid_indexes; if (declined_guid_indexes || rec->guid_indexes & ~(applied_guid_indexes)) { pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n", guid_rec->block_num, be64_to_cpu((__force __be64)rec->guid_indexes), be64_to_cpu((__force __be64)applied_guid_indexes), be64_to_cpu((__force __be64)declined_guid_indexes)); rec->time_to_run = ktime_get_boottime_ns() + resched_delay_sec * NSEC_PER_SEC; } else { rec->status = MLX4_GUID_INFO_STATUS_SET; } spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); /* The func is call here to close the cases when the sm doesn't send smp, so in the sa response the driver notifies the slave. */ mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num, cb_ctx->port, guid_rec->guid_info_list); out: spin_lock_irqsave(&dev->sriov.going_down_lock, flags); spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); if (!dev->sriov.is_going_down) { get_low_record_time_index(dev, port_index, &resched_delay_sec); queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq, &dev->sriov.alias_guid.ports_guid[port_index]. alias_guid_work, msecs_to_jiffies(resched_delay_sec * 1000)); } if (cb_ctx->sa_query) { list_del(&cb_ctx->list); kfree(cb_ctx); } else complete(&cb_ctx->done); spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); } static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index) { int i; u64 cur_admin_val; ib_sa_comp_mask comp_mask = 0; dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_SET; /* calculate the comp_mask for that record.*/ for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) { cur_admin_val = *(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1]. all_rec_per_port[index].all_recs[GUID_REC_SIZE * i]; /* check the admin value: if it's for delete (~00LL) or it is the first guid of the first record (hw guid) or the records is not in ownership of the sysadmin and the sm doesn't need to assign GUIDs, then don't put it up for assignment. 
*/ if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val || (!index && !i)) continue; comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i); } dev->sriov.alias_guid.ports_guid[port - 1]. all_rec_per_port[index].guid_indexes |= comp_mask; if (dev->sriov.alias_guid.ports_guid[port - 1]. all_rec_per_port[index].guid_indexes) dev->sriov.alias_guid.ports_guid[port - 1]. all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_IDLE; } static int set_guid_rec(struct ib_device *ibdev, struct mlx4_next_alias_guid_work *rec) { int err; struct mlx4_ib_dev *dev = to_mdev(ibdev); struct ib_sa_guidinfo_rec guid_info_rec; ib_sa_comp_mask comp_mask; struct ib_port_attr attr; struct mlx4_alias_guid_work_context *callback_context; unsigned long resched_delay, flags, flags1; u8 port = rec->port + 1; int index = rec->block_num; struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det; struct list_head *head = &dev->sriov.alias_guid.ports_guid[port - 1].cb_list; memset(&attr, 0, sizeof(attr)); err = __mlx4_ib_query_port(ibdev, port, &attr, 1); if (err) { pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n", err, port); return err; } /*check the port was configured by the sm, otherwise no need to send */ if (attr.state != IB_PORT_ACTIVE) { pr_debug("port %d not active...rescheduling\n", port); resched_delay = 5 * HZ; err = -EAGAIN; goto new_schedule; } callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL); if (!callback_context) { err = -ENOMEM; resched_delay = HZ * 5; goto new_schedule; } callback_context->port = port; callback_context->dev = dev; callback_context->block_num = index; callback_context->guid_indexes = rec_det->guid_indexes; callback_context->method = rec->method; memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec)); guid_info_rec.lid = ib_lid_be16(attr.lid); guid_info_rec.block_num = index; memcpy(guid_info_rec.guid_info_list, rec_det->all_recs, GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC); comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM | rec_det->guid_indexes; init_completion(&callback_context->done); spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); list_add_tail(&callback_context->list, head); spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); callback_context->query_id = ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client, ibdev, port, &guid_info_rec, comp_mask, rec->method, 1000, GFP_KERNEL, aliasguid_query_handler, callback_context, &callback_context->sa_query); if (callback_context->query_id < 0) { pr_debug("ib_sa_guid_info_rec_query failed, query_id: " "%d. 
will reschedule to the next 1 sec.\n", callback_context->query_id); spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); list_del(&callback_context->list); kfree(callback_context); spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); resched_delay = 1 * HZ; err = -EAGAIN; goto new_schedule; } err = 0; goto out; new_schedule: spin_lock_irqsave(&dev->sriov.going_down_lock, flags); spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); invalidate_guid_record(dev, port, index); if (!dev->sriov.is_going_down) { queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work, resched_delay); } spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); out: return err; } static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port) { int j, k, entry; __be64 guid; /*Check if the SM doesn't need to assign the GUIDs*/ for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) { for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) { entry = j * NUM_ALIAS_GUID_IN_REC + k; /* no request for the 0 entry (hw guid) */ if (!entry || entry > dev->dev->persist->num_vfs || !mlx4_is_slave_active(dev->dev, entry)) continue; guid = mlx4_get_admin_guid(dev->dev, entry, port); *(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1]. all_rec_per_port[j].all_recs [GUID_REC_SIZE * k] = guid; pr_debug("guid was set, entry=%d, val=0x%llx, port=%d\n", entry, be64_to_cpu(guid), port); } } } void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port) { int i; unsigned long flags, flags1; pr_debug("port %d\n", port); spin_lock_irqsave(&dev->sriov.going_down_lock, flags); spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags & GUID_STATE_NEED_PORT_INIT) { mlx4_ib_guid_port_init(dev, port); dev->sriov.alias_guid.ports_guid[port - 1].state_flags &= (~GUID_STATE_NEED_PORT_INIT); } for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++) invalidate_guid_record(dev, port, i); if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) { /* make sure no work waits in the queue, if the work is already queued(not on the timer) the cancel will fail. That is not a problem because we just want the work started. */ cancel_delayed_work(&dev->sriov.alias_guid. ports_guid[port - 1].alias_guid_work); queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work, 0); } spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); } static void set_required_record(struct mlx4_ib_dev *dev, u8 port, struct mlx4_next_alias_guid_work *next_rec, int record_index) { int i; int lowset_time_entry = -1; int lowest_time = 0; ib_sa_comp_mask delete_guid_indexes = 0; ib_sa_comp_mask set_guid_indexes = 0; struct mlx4_sriov_alias_guid_info_rec_det *rec = &dev->sriov.alias_guid.ports_guid[port]. 
all_rec_per_port[record_index]; for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) { if (!(rec->guid_indexes & mlx4_ib_get_aguid_comp_mask_from_ix(i))) continue; if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) delete_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i); else set_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i); if (lowset_time_entry == -1 || rec->guids_retry_schedule[i] <= lowest_time) { lowset_time_entry = i; lowest_time = rec->guids_retry_schedule[i]; } } memcpy(&next_rec->rec_det, rec, sizeof(*rec)); next_rec->port = port; next_rec->block_num = record_index; if (*(__be64 *)&rec->all_recs[lowset_time_entry * GUID_REC_SIZE] == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) { next_rec->rec_det.guid_indexes = delete_guid_indexes; next_rec->method = MLX4_GUID_INFO_RECORD_DELETE; } else { next_rec->rec_det.guid_indexes = set_guid_indexes; next_rec->method = MLX4_GUID_INFO_RECORD_SET; } } /* return index of record that should be updated based on lowest * rescheduled time */ static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port, int *resched_delay_sec) { int record_index = -1; u64 low_record_time = 0; struct mlx4_sriov_alias_guid_info_rec_det rec; int j; for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) { rec = dev->sriov.alias_guid.ports_guid[port]. all_rec_per_port[j]; if (rec.status == MLX4_GUID_INFO_STATUS_IDLE && rec.guid_indexes) { if (record_index == -1 || rec.time_to_run < low_record_time) { record_index = j; low_record_time = rec.time_to_run; } } } if (resched_delay_sec) { u64 curr_time = ktime_get_boottime_ns(); *resched_delay_sec = (low_record_time < curr_time) ? 0 : div_u64((low_record_time - curr_time), NSEC_PER_SEC); } return record_index; } /* The function returns the next record that was * not configured (or failed to be configured) */ static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port, struct mlx4_next_alias_guid_work *rec) { unsigned long flags; int record_index; int ret = 0; spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); record_index = get_low_record_time_index(dev, port, NULL); if (record_index < 0) { ret = -ENOENT; goto out; } set_required_record(dev, port, rec, record_index); out: spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); return ret; } static void alias_guid_work(struct work_struct *work) { struct delayed_work *delay = to_delayed_work(work); int ret = 0; struct mlx4_next_alias_guid_work *rec; struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port = container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det, alias_guid_work); struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent; struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid, struct mlx4_ib_sriov, alias_guid); struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov); rec = kzalloc(sizeof *rec, GFP_KERNEL); if (!rec) return; pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1); ret = get_next_record_to_update(dev, sriov_alias_port->port, rec); if (ret) { pr_debug("No more records to update.\n"); goto out; } set_guid_rec(&dev->ib_dev, rec); out: kfree(rec); } void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port) { unsigned long flags, flags1; if (!mlx4_is_master(dev->dev)) return; spin_lock_irqsave(&dev->sriov.going_down_lock, flags); spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); if (!dev->sriov.is_going_down) { /* If there is pending one should cancel then run, otherwise * won't run till 
previous one is ended as same work * struct is used. */ cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port]. alias_guid_work); queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq, &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0); } spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); } void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev) { int i; struct mlx4_ib_sriov *sriov = &dev->sriov; struct mlx4_alias_guid_work_context *cb_ctx; struct mlx4_sriov_alias_guid_port_rec_det *det; struct ib_sa_query *sa_query; unsigned long flags; for (i = 0 ; i < dev->num_ports; i++) { det = &sriov->alias_guid.ports_guid[i]; cancel_delayed_work_sync(&det->alias_guid_work); spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags); while (!list_empty(&det->cb_list)) { cb_ctx = list_entry(det->cb_list.next, struct mlx4_alias_guid_work_context, list); sa_query = cb_ctx->sa_query; cb_ctx->sa_query = NULL; list_del(&cb_ctx->list); spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags); ib_sa_cancel_query(cb_ctx->query_id, sa_query); wait_for_completion(&cb_ctx->done); kfree(cb_ctx); spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags); } spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags); } for (i = 0 ; i < dev->num_ports; i++) destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); ib_sa_unregister_client(dev->sriov.alias_guid.sa_client); kfree(dev->sriov.alias_guid.sa_client); } int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev) { char alias_wq_name[15]; int ret = 0; int i, j; union ib_gid gid; if (!mlx4_is_master(dev->dev)) return 0; dev->sriov.alias_guid.sa_client = kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL); if (!dev->sriov.alias_guid.sa_client) return -ENOMEM; ib_sa_register_client(dev->sriov.alias_guid.sa_client); spin_lock_init(&dev->sriov.alias_guid.ag_work_lock); for (i = 1; i <= dev->num_ports; ++i) { if (dev->ib_dev.ops.query_gid(&dev->ib_dev, i, 0, &gid)) { ret = -EFAULT; goto err_unregister; } } for (i = 0 ; i < dev->num_ports; i++) { memset(&dev->sriov.alias_guid.ports_guid[i], 0, sizeof (struct mlx4_sriov_alias_guid_port_rec_det)); dev->sriov.alias_guid.ports_guid[i].state_flags |= GUID_STATE_NEED_PORT_INIT; for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) { /* mark each val as it was deleted */ memset(dev->sriov.alias_guid.ports_guid[i]. all_rec_per_port[j].all_recs, 0xFF, sizeof(dev->sriov.alias_guid.ports_guid[i]. 
all_rec_per_port[j].all_recs)); } INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list); /*prepare the records, set them to be allocated by sm*/ if (mlx4_ib_sm_guid_assign) for (j = 1; j < NUM_ALIAS_GUID_PER_PORT; j++) mlx4_set_admin_guid(dev->dev, 0, j, i + 1); for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) invalidate_guid_record(dev, i + 1, j); dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid; dev->sriov.alias_guid.ports_guid[i].port = i; snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i); dev->sriov.alias_guid.ports_guid[i].wq = alloc_ordered_workqueue(alias_wq_name, WQ_MEM_RECLAIM); if (!dev->sriov.alias_guid.ports_guid[i].wq) { ret = -ENOMEM; goto err_thread; } INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work, alias_guid_work); } return 0; err_thread: for (--i; i >= 0; i--) { destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); dev->sriov.alias_guid.ports_guid[i].wq = NULL; } err_unregister: ib_sa_unregister_client(dev->sriov.alias_guid.sa_client); kfree(dev->sriov.alias_guid.sa_client); dev->sriov.alias_guid.sa_client = NULL; pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret); return ret; }
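
/*
 * Illustrative sketch, not driver code: the scheduling idiom used by
 * mlx4_ib_init_alias_guid_work() and mlx4_ib_invalidate_all_guid_record()
 * above. Because one delayed_work per port is reused, a pending timer must
 * be cancelled before re-queueing with a shorter (here zero) delay; when the
 * work is already queued for execution the failed cancel is harmless, since
 * the work is about to run anyway. run_port_work_now() is a made-up name and
 * linux/workqueue.h is assumed to be available, as it is in this file.
 */
static void run_port_work_now(struct workqueue_struct *wq,
                              struct delayed_work *dwork)
{
        cancel_delayed_work(dwork);             /* drop a pending timer, if any */
        queue_delayed_work(wq, dwork, 0);       /* run at the first opportunity */
}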
linux-master
drivers/infiniband/hw/mlx4/alias_GUID.c
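Two small rules recur throughout alias_GUID.c above: slot i of the eight GUIDs in a GuidInfo record is tracked by SA component-mask bit 4 + i (see mlx4_ib_get_aguid_comp_mask_from_ix()), and a slot the SM declines is retried with an exponential back-off capped at 60 seconds (guids_retry_schedule[]). The sketch below restates both in isolation; the example_* names are illustrative and guid_indexes is assumed to already be in host byte order.

#include <linux/bits.h>
#include <linux/minmax.h>
#include <linux/types.h>

/* slot 0..7 of a GuidInfo record maps to component-mask bits 4..11 */
static bool example_guid_slot_pending(u64 guid_indexes, unsigned int slot)
{
        return guid_indexes & BIT_ULL(4 + slot);
}

/* decline back-off: 1s after the first decline, doubling, clamped to 60s */
static unsigned int example_next_retry_sec(unsigned int prev_sec)
{
        return prev_sec ? min(60u, prev_sec * 2) : 1;
}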
/* * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/slab.h> #include <rdma/ib_user_verbs.h> #include "mlx4_ib.h" static u32 convert_access(int acc) { return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC : 0) | (acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) | (acc & IB_ACCESS_REMOTE_READ ? MLX4_PERM_REMOTE_READ : 0) | (acc & IB_ACCESS_LOCAL_WRITE ? MLX4_PERM_LOCAL_WRITE : 0) | (acc & IB_ACCESS_MW_BIND ? MLX4_PERM_BIND_MW : 0) | MLX4_PERM_LOCAL_READ; } static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type) { switch (type) { case IB_MW_TYPE_1: return MLX4_MW_TYPE_1; case IB_MW_TYPE_2: return MLX4_MW_TYPE_2; default: return -1; } } struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc) { struct mlx4_ib_mr *mr; int err; mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0, ~0ull, convert_access(acc), 0, 0, &mr->mmr); if (err) goto err_free; err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr); if (err) goto err_mr; mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; mr->umem = NULL; return &mr->ibmr; err_mr: (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); err_free: kfree(mr); return ERR_PTR(err); } enum { MLX4_MAX_MTT_SHIFT = 31 }; static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, u64 mtt_size, u64 mtt_shift, u64 len, u64 cur_start_addr, u64 *pages, int *start_index, int *npages) { u64 cur_end_addr = cur_start_addr + len; u64 cur_end_addr_aligned = 0; u64 mtt_entries; int err = 0; int k; len += (cur_start_addr & (mtt_size - 1ULL)); cur_end_addr_aligned = round_up(cur_end_addr, mtt_size); len += (cur_end_addr_aligned - cur_end_addr); if (len & (mtt_size - 1ULL)) { pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n", len, mtt_size); return -EINVAL; } mtt_entries = (len >> mtt_shift); /* * Align the MTT start address to the mtt_size. * Required to handle cases when the MR starts in the middle of an MTT * record. Was not required in old code since the physical addresses * provided by the dma subsystem were page aligned, which was also the * MTT size. 
*/ cur_start_addr = round_down(cur_start_addr, mtt_size); /* A new block is started ... */ for (k = 0; k < mtt_entries; ++k) { pages[*npages] = cur_start_addr + (mtt_size * k); (*npages)++; /* * Be friendly to mlx4_write_mtt() and pass it chunks of * appropriate size. */ if (*npages == PAGE_SIZE / sizeof(u64)) { err = mlx4_write_mtt(dev->dev, mtt, *start_index, *npages, pages); if (err) return err; (*start_index) += *npages; *npages = 0; } } return 0; } static inline u64 alignment_of(u64 ptr) { return ilog2(ptr & (~(ptr - 1))); } static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start, u64 current_block_end, u64 block_shift) { /* Check whether the alignment of the new block is aligned as well as * the previous block. * Block address must start with zeros till size of entity_size. */ if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0) /* * It is not as well aligned as the previous block-reduce the * mtt size accordingly. Here we take the last right bit which * is 1. */ block_shift = alignment_of(next_block_start); /* * Check whether the alignment of the end of previous block - is it * aligned as well as the start of the block */ if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0) /* * It is not as well aligned as the start of the block - * reduce the mtt size accordingly. */ block_shift = alignment_of(current_block_end); return block_shift; } int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, struct ib_umem *umem) { u64 *pages; u64 len = 0; int err = 0; u64 mtt_size; u64 cur_start_addr = 0; u64 mtt_shift; int start_index = 0; int npages = 0; struct scatterlist *sg; int i; pages = (u64 *) __get_free_page(GFP_KERNEL); if (!pages) return -ENOMEM; mtt_shift = mtt->page_shift; mtt_size = 1ULL << mtt_shift; for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) { if (cur_start_addr + len == sg_dma_address(sg)) { /* still the same block */ len += sg_dma_len(sg); continue; } /* * A new block is started ... * If len is malaligned, write an extra mtt entry to cover the * misaligned area (round up the division) */ err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size, mtt_shift, len, cur_start_addr, pages, &start_index, &npages); if (err) goto out; cur_start_addr = sg_dma_address(sg); len = sg_dma_len(sg); } /* Handle the last block */ if (len > 0) { /* * If len is malaligned, write an extra mtt entry to cover * the misaligned area (round up the division) */ err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size, mtt_shift, len, cur_start_addr, pages, &start_index, &npages); if (err) goto out; } if (npages) err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages); out: free_page((unsigned long) pages); return err; } /* * Calculate optimal mtt size based on contiguous pages. * Function will return also the number of pages that are not aligned to the * calculated mtt_size to be added to total number of pages. For that we should * check the first chunk length & last chunk length and if not aligned to * mtt_size we should increment the non_aligned_pages number. All chunks in the * middle already handled as part of mtt shift calculation for both their start * & end addresses. 
*/ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va, int *num_of_mtts) { u64 block_shift = MLX4_MAX_MTT_SHIFT; u64 min_shift = PAGE_SHIFT; u64 last_block_aligned_end = 0; u64 current_block_start = 0; u64 first_block_start = 0; u64 current_block_len = 0; u64 last_block_end = 0; struct scatterlist *sg; u64 current_block_end; u64 misalignment_bits; u64 next_block_start; u64 total_len = 0; int i; *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE); for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) { /* * Initialization - save the first chunk start as the * current_block_start - block means contiguous pages. */ if (current_block_len == 0 && current_block_start == 0) { current_block_start = sg_dma_address(sg); first_block_start = current_block_start; /* * Find the bits that are different between the physical * address and the virtual address for the start of the * MR. * umem_get aligned the start_va to a page boundary. * Therefore, we need to align the start va to the same * boundary. * misalignment_bits is needed to handle the case of a * single memory region. In this case, the rest of the * logic will not reduce the block size. If we use a * block size which is bigger than the alignment of the * misalignment bits, we might use the virtual page * number instead of the physical page number, resulting * in access to the wrong data. */ misalignment_bits = (start_va & (~(((u64)(PAGE_SIZE)) - 1ULL))) ^ current_block_start; block_shift = min(alignment_of(misalignment_bits), block_shift); } /* * Go over the scatter entries and check if they continue the * previous scatter entry. */ next_block_start = sg_dma_address(sg); current_block_end = current_block_start + current_block_len; /* If we have a split (non-contig.) between two blocks */ if (current_block_end != next_block_start) { block_shift = mlx4_ib_umem_calc_block_mtt (next_block_start, current_block_end, block_shift); /* * If we reached the minimum shift for 4k page we stop * the loop. */ if (block_shift <= min_shift) goto end; /* * If not saved yet we are in first block - we save the * length of first block to calculate the * non_aligned_pages number at the end. */ total_len += current_block_len; /* Start a new block */ current_block_start = next_block_start; current_block_len = sg_dma_len(sg); continue; } /* The scatter entry is another part of the current block, * increase the block size. * An entry in the scatter can be larger than 4k (page) as of * dma mapping which merge some blocks together. */ current_block_len += sg_dma_len(sg); } /* Account for the last block in the total len */ total_len += current_block_len; /* Add to the first block the misalignment that it suffers from. */ total_len += (first_block_start & ((1ULL << block_shift) - 1ULL)); last_block_end = current_block_start + current_block_len; last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift); total_len += (last_block_aligned_end - last_block_end); if (total_len & ((1ULL << block_shift) - 1ULL)) pr_warn("misaligned total length detected (%llu, %llu)!", total_len, block_shift); *num_of_mtts = total_len >> block_shift; end: if (block_shift < min_shift) { /* * If shift is less than the min we set a warning and return the * min shift. 
*/ pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n", block_shift); block_shift = min_shift; } return block_shift; } static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start, u64 length, int access_flags) { /* * Force registering the memory as writable if the underlying pages * are writable. This is so rereg can change the access permissions * from readable to writable without having to run through ib_umem_get * again */ if (!ib_access_writable(access_flags)) { unsigned long untagged_start = untagged_addr(start); struct vm_area_struct *vma; mmap_read_lock(current->mm); /* * FIXME: Ideally this would iterate over all the vmas that * cover the memory, but for now it requires a single vma to * entirely cover the MR to support RO mappings. */ vma = find_vma(current->mm, untagged_start); if (vma && vma->vm_end >= untagged_start + length && vma->vm_start <= untagged_start) { if (vma->vm_flags & VM_WRITE) access_flags |= IB_ACCESS_LOCAL_WRITE; } else { access_flags |= IB_ACCESS_LOCAL_WRITE; } mmap_read_unlock(current->mm); } return ib_umem_get(device, start, length, access_flags); } struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(pd->device); struct mlx4_ib_mr *mr; int shift; int err; int n; mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags); if (IS_ERR(mr->umem)) { err = PTR_ERR(mr->umem); goto err_free; } shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n); err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length, convert_access(access_flags), n, shift, &mr->mmr); if (err) goto err_umem; err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem); if (err) goto err_mr; err = mlx4_mr_enable(dev->dev, &mr->mmr); if (err) goto err_mr; mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; mr->ibmr.page_size = 1U << shift; return &mr->ibmr; err_mr: (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); err_umem: ib_umem_release(mr->umem); err_free: kfree(mr); return ERR_PTR(err); } struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_pd *pd, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(mr->device); struct mlx4_ib_mr *mmr = to_mmr(mr); struct mlx4_mpt_entry *mpt_entry; struct mlx4_mpt_entry **pmpt_entry = &mpt_entry; int err; /* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs, * we assume that the calls can't run concurrently. Otherwise, a * race exists. 
*/ err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry); if (err) return ERR_PTR(err); if (flags & IB_MR_REREG_PD) { err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry, to_mpd(pd)->pdn); if (err) goto release_mpt_entry; } if (flags & IB_MR_REREG_ACCESS) { if (ib_access_writable(mr_access_flags) && !mmr->umem->writable) { err = -EPERM; goto release_mpt_entry; } err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry, convert_access(mr_access_flags)); if (err) goto release_mpt_entry; } if (flags & IB_MR_REREG_TRANS) { int shift; int n; mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr); ib_umem_release(mmr->umem); mmr->umem = mlx4_get_umem_mr(mr->device, start, length, mr_access_flags); if (IS_ERR(mmr->umem)) { err = PTR_ERR(mmr->umem); /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */ mmr->umem = NULL; goto release_mpt_entry; } n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE); shift = PAGE_SHIFT; err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr, virt_addr, length, n, shift, *pmpt_entry); if (err) { ib_umem_release(mmr->umem); goto release_mpt_entry; } mmr->mmr.iova = virt_addr; mmr->mmr.size = length; err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem); if (err) { mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr); ib_umem_release(mmr->umem); goto release_mpt_entry; } } /* If we couldn't transfer the MR to the HCA, just remember to * return a failure. But dereg_mr will free the resources. */ err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry); if (!err && flags & IB_MR_REREG_ACCESS) mmr->mmr.access = mr_access_flags; release_mpt_entry: mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry); if (err) return ERR_PTR(err); return NULL; } static int mlx4_alloc_priv_pages(struct ib_device *device, struct mlx4_ib_mr *mr, int max_pages) { int ret; /* Ensure that size is aligned to DMA cacheline * requirements. * max_pages is limited to MLX4_MAX_FAST_REG_PAGES * so page_map_size will never cross PAGE_SIZE. */ mr->page_map_size = roundup(max_pages * sizeof(u64), MLX4_MR_PAGES_ALIGN); /* Prevent cross page boundary allocation. 
*/ mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL); if (!mr->pages) return -ENOMEM; mr->page_map = dma_map_single(device->dev.parent, mr->pages, mr->page_map_size, DMA_TO_DEVICE); if (dma_mapping_error(device->dev.parent, mr->page_map)) { ret = -ENOMEM; goto err; } return 0; err: free_page((unsigned long)mr->pages); return ret; } static void mlx4_free_priv_pages(struct mlx4_ib_mr *mr) { if (mr->pages) { struct ib_device *device = mr->ibmr.device; dma_unmap_single(device->dev.parent, mr->page_map, mr->page_map_size, DMA_TO_DEVICE); free_page((unsigned long)mr->pages); mr->pages = NULL; } } int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) { struct mlx4_ib_mr *mr = to_mmr(ibmr); int ret; mlx4_free_priv_pages(mr); ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr); if (ret) return ret; if (mr->umem) ib_umem_release(mr->umem); kfree(mr); return 0; } int mlx4_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(ibmw->device); struct mlx4_ib_mw *mw = to_mmw(ibmw); int err; err = mlx4_mw_alloc(dev->dev, to_mpd(ibmw->pd)->pdn, to_mlx4_type(ibmw->type), &mw->mmw); if (err) return err; err = mlx4_mw_enable(dev->dev, &mw->mmw); if (err) goto err_mw; ibmw->rkey = mw->mmw.key; return 0; err_mw: mlx4_mw_free(dev->dev, &mw->mmw); return err; } int mlx4_ib_dealloc_mw(struct ib_mw *ibmw) { struct mlx4_ib_mw *mw = to_mmw(ibmw); mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw); return 0; } struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) { struct mlx4_ib_dev *dev = to_mdev(pd->device); struct mlx4_ib_mr *mr; int err; if (mr_type != IB_MR_TYPE_MEM_REG || max_num_sg > MLX4_MAX_FAST_REG_PAGES) return ERR_PTR(-EINVAL); mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0, max_num_sg, 0, &mr->mmr); if (err) goto err_free; err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg); if (err) goto err_free_mr; mr->max_pages = max_num_sg; err = mlx4_mr_enable(dev->dev, &mr->mmr); if (err) goto err_free_pl; mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; mr->umem = NULL; return &mr->ibmr; err_free_pl: mr->ibmr.device = pd->device; mlx4_free_priv_pages(mr); err_free_mr: (void) mlx4_mr_free(dev->dev, &mr->mmr); err_free: kfree(mr); return ERR_PTR(err); } static int mlx4_set_page(struct ib_mr *ibmr, u64 addr) { struct mlx4_ib_mr *mr = to_mmr(ibmr); if (unlikely(mr->npages == mr->max_pages)) return -ENOMEM; mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT); return 0; } int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset) { struct mlx4_ib_mr *mr = to_mmr(ibmr); int rc; mr->npages = 0; ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map, mr->page_map_size, DMA_TO_DEVICE); rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page); ib_dma_sync_single_for_device(ibmr->device, mr->page_map, mr->page_map_size, DMA_TO_DEVICE); return rc; }
linux-master
drivers/infiniband/hw/mlx4/mr.c
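The blob above is the tail of drivers/infiniband/hw/mlx4/mr.c: the MPT rewrite path used by memory-region re-registration, the fast-register page-list allocation and teardown (mlx4_alloc_priv_pages / mlx4_free_priv_pages), memory-window allocation, and mlx4_ib_map_mr_sg. As a minimal sketch of how these verbs are normally reached from userspace, the program below registers and deregisters one buffer through libibverbs. It is illustrative only: the device choice (first device found), the 4 KiB buffer, and the access flags are assumptions, and building it as gcc mr_demo.c -libverbs assumes the rdma-core headers are installed.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **dev_list;
	struct ibv_context *ctx;
	struct ibv_pd *pd;
	struct ibv_mr *mr;
	void *buf;
	size_t len = 4096;            /* illustrative buffer size */

	dev_list = ibv_get_device_list(NULL);
	if (!dev_list || !dev_list[0]) {
		fprintf(stderr, "no RDMA devices found\n");
		return 1;
	}

	ctx = ibv_open_device(dev_list[0]);   /* first device; illustrative */
	if (!ctx)
		return 1;

	pd = ibv_alloc_pd(ctx);               /* protection domain owning the MR */
	if (!pd)
		return 1;

	buf = aligned_alloc(4096, len);
	if (!buf)
		return 1;
	memset(buf, 0, len);

	/* Registration is dispatched to the provider's reg_user_mr hook;
	 * on mlx4 hardware that is implemented in the file above. */
	mr = ibv_reg_mr(pd, buf, len,
			IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);
	if (!mr) {
		perror("ibv_reg_mr");
		return 1;
	}
	printf("registered MR: lkey=0x%x rkey=0x%x\n", mr->lkey, mr->rkey);

	/* Deregistration reaches mlx4_ib_dereg_mr(), which frees the
	 * hardware MR and releases the umem, as shown in the blob above. */
	ibv_dereg_mr(mr);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(dev_list);
	free(buf);
	return 0;
}

Error paths in this sketch simply exit without unwinding; a real consumer would tear down the PD and context on every failure, mirroring the careful goto-based unwinding the driver code itself uses.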
/* * Copyright (c) 2012 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <rdma/ib_mad.h> #include <linux/mlx4/cmd.h> #include <linux/rbtree.h> #include <linux/idr.h> #include <rdma/ib_cm.h> #include "mlx4_ib.h" #define CM_CLEANUP_CACHE_TIMEOUT (30 * HZ) struct id_map_entry { struct rb_node node; u32 sl_cm_id; u32 pv_cm_id; int slave_id; int scheduled_delete; struct mlx4_ib_dev *dev; struct list_head list; struct delayed_work timeout; }; struct rej_tmout_entry { int slave; u32 rem_pv_cm_id; struct delayed_work timeout; struct xarray *xa_rej_tmout; }; struct cm_generic_msg { struct ib_mad_hdr hdr; __be32 local_comm_id; __be32 remote_comm_id; unsigned char unused[2]; __be16 rej_reason; }; struct cm_sidr_generic_msg { struct ib_mad_hdr hdr; __be32 request_id; }; struct cm_req_msg { unsigned char unused[0x60]; union ib_gid primary_path_sgid; }; static struct workqueue_struct *cm_wq; static void set_local_comm_id(struct ib_mad *mad, u32 cm_id) { if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { struct cm_sidr_generic_msg *msg = (struct cm_sidr_generic_msg *)mad; msg->request_id = cpu_to_be32(cm_id); } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { pr_err("trying to set local_comm_id in SIDR_REP\n"); return; } else { struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; msg->local_comm_id = cpu_to_be32(cm_id); } } static u32 get_local_comm_id(struct ib_mad *mad) { if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { struct cm_sidr_generic_msg *msg = (struct cm_sidr_generic_msg *)mad; return be32_to_cpu(msg->request_id); } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { pr_err("trying to set local_comm_id in SIDR_REP\n"); return -1; } else { struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; return be32_to_cpu(msg->local_comm_id); } } static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id) { if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { struct cm_sidr_generic_msg *msg = (struct cm_sidr_generic_msg *)mad; msg->request_id = cpu_to_be32(cm_id); } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { pr_err("trying to set remote_comm_id in SIDR_REQ\n"); return; } else { struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; msg->remote_comm_id = cpu_to_be32(cm_id); } } static u32 
get_remote_comm_id(struct ib_mad *mad) { if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { struct cm_sidr_generic_msg *msg = (struct cm_sidr_generic_msg *)mad; return be32_to_cpu(msg->request_id); } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { pr_err("trying to set remote_comm_id in SIDR_REQ\n"); return -1; } else { struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; return be32_to_cpu(msg->remote_comm_id); } } static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad) { struct cm_req_msg *msg = (struct cm_req_msg *)mad; return msg->primary_path_sgid; } /* Lock should be taken before called */ static struct id_map_entry * id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id) { struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map; struct rb_node *node = sl_id_map->rb_node; while (node) { struct id_map_entry *id_map_entry = rb_entry(node, struct id_map_entry, node); if (id_map_entry->sl_cm_id > sl_cm_id) node = node->rb_left; else if (id_map_entry->sl_cm_id < sl_cm_id) node = node->rb_right; else if (id_map_entry->slave_id > slave_id) node = node->rb_left; else if (id_map_entry->slave_id < slave_id) node = node->rb_right; else return id_map_entry; } return NULL; } static void id_map_ent_timeout(struct work_struct *work) { struct delayed_work *delay = to_delayed_work(work); struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout); struct id_map_entry *found_ent; struct mlx4_ib_dev *dev = ent->dev; struct mlx4_ib_sriov *sriov = &dev->sriov; struct rb_root *sl_id_map = &sriov->sl_id_map; spin_lock(&sriov->id_map_lock); if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id)) goto out; found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id); if (found_ent && found_ent == ent) rb_erase(&found_ent->node, sl_id_map); out: list_del(&ent->list); spin_unlock(&sriov->id_map_lock); kfree(ent); } static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new) { struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map; struct rb_node **link = &sl_id_map->rb_node, *parent = NULL; struct id_map_entry *ent; int slave_id = new->slave_id; int sl_cm_id = new->sl_cm_id; ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id); if (ent) { pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n", sl_cm_id); rb_replace_node(&ent->node, &new->node, sl_id_map); return; } /* Go to the bottom of the tree */ while (*link) { parent = *link; ent = rb_entry(parent, struct id_map_entry, node); if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id)) link = &(*link)->rb_left; else link = &(*link)->rb_right; } rb_link_node(&new->node, parent, link); rb_insert_color(&new->node, sl_id_map); } static struct id_map_entry * id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id) { int ret; struct id_map_entry *ent; struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL); if (!ent) return ERR_PTR(-ENOMEM); ent->sl_cm_id = sl_cm_id; ent->slave_id = slave_id; ent->scheduled_delete = 0; ent->dev = to_mdev(ibdev); INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout); ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent, xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL); if (ret >= 0) { spin_lock(&sriov->id_map_lock); sl_id_map_add(ibdev, ent); list_add_tail(&ent->list, &sriov->cm_list); spin_unlock(&sriov->id_map_lock); return ent; } /*error flow*/ kfree(ent); mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", 
ret); return ERR_PTR(-ENOMEM); } static struct id_map_entry * id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id) { struct id_map_entry *ent; struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; spin_lock(&sriov->id_map_lock); if (*pv_cm_id == -1) { ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id); if (ent) *pv_cm_id = (int) ent->pv_cm_id; } else ent = xa_load(&sriov->pv_id_table, *pv_cm_id); spin_unlock(&sriov->id_map_lock); return ent; } static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id) { struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; unsigned long flags; spin_lock(&sriov->id_map_lock); spin_lock_irqsave(&sriov->going_down_lock, flags); /*make sure that there is no schedule inside the scheduled work.*/ if (!sriov->is_going_down && !id->scheduled_delete) { id->scheduled_delete = 1; queue_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT); } else if (id->scheduled_delete) { /* Adjust timeout if already scheduled */ mod_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT); } spin_unlock_irqrestore(&sriov->going_down_lock, flags); spin_unlock(&sriov->id_map_lock); } #define REJ_REASON(m) be16_to_cpu(((struct cm_generic_msg *)(m))->rej_reason) int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id, struct ib_mad *mad) { struct id_map_entry *id; u32 sl_cm_id; int pv_cm_id = -1; if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID || mad->mad_hdr.attr_id == CM_REP_ATTR_ID || mad->mad_hdr.attr_id == CM_MRA_ATTR_ID || mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID || (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && REJ_REASON(mad) == IB_CM_REJ_TIMEOUT)) { sl_cm_id = get_local_comm_id(mad); id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id); if (id) goto cont; id = id_map_alloc(ibdev, slave_id, sl_cm_id); if (IS_ERR(id)) { mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n", __func__, slave_id, sl_cm_id); return PTR_ERR(id); } } else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID || mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { return 0; } else { sl_cm_id = get_local_comm_id(mad); id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id); } if (!id) { pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL! 
attr_id: 0x%x\n", slave_id, sl_cm_id, be16_to_cpu(mad->mad_hdr.attr_id)); return -EINVAL; } cont: set_local_comm_id(mad, id->pv_cm_id); if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID) schedule_delayed(ibdev, id); return 0; } static void rej_tmout_timeout(struct work_struct *work) { struct delayed_work *delay = to_delayed_work(work); struct rej_tmout_entry *item = container_of(delay, struct rej_tmout_entry, timeout); struct rej_tmout_entry *deleted; deleted = xa_cmpxchg(item->xa_rej_tmout, item->rem_pv_cm_id, item, NULL, 0); if (deleted != item) pr_debug("deleted(%p) != item(%p)\n", deleted, item); kfree(item); } static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int slave) { struct rej_tmout_entry *item; struct rej_tmout_entry *old; int ret = 0; xa_lock(&sriov->xa_rej_tmout); item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id); if (item) { if (xa_err(item)) ret = xa_err(item); else /* If a retry, adjust delayed work */ mod_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT); goto err_or_exists; } xa_unlock(&sriov->xa_rej_tmout); item = kmalloc(sizeof(*item), GFP_KERNEL); if (!item) return -ENOMEM; INIT_DELAYED_WORK(&item->timeout, rej_tmout_timeout); item->slave = slave; item->rem_pv_cm_id = rem_pv_cm_id; item->xa_rej_tmout = &sriov->xa_rej_tmout; old = xa_cmpxchg(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id, NULL, item, GFP_KERNEL); if (old) { pr_debug( "Non-null old entry (%p) or error (%d) when inserting\n", old, xa_err(old)); kfree(item); return xa_err(old); } queue_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT); return 0; err_or_exists: xa_unlock(&sriov->xa_rej_tmout); return ret; } static int lookup_rej_tmout_slave(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id) { struct rej_tmout_entry *item; int slave; xa_lock(&sriov->xa_rej_tmout); item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id); if (!item || xa_err(item)) { pr_debug("Could not find slave. rem_pv_cm_id 0x%x error: %d\n", rem_pv_cm_id, xa_err(item)); slave = !item ? -ENOENT : xa_err(item); } else { slave = item->slave; } xa_unlock(&sriov->xa_rej_tmout); return slave; } int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave, struct ib_mad *mad) { struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; u32 rem_pv_cm_id = get_local_comm_id(mad); u32 pv_cm_id; struct id_map_entry *id; int sts; if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID || mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { union ib_gid gid; if (!slave) return 0; gid = gid_from_req_msg(ibdev, mad); *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id); if (*slave < 0) { mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n", be64_to_cpu(gid.global.interface_id)); return -ENOENT; } sts = alloc_rej_tmout(sriov, rem_pv_cm_id, *slave); if (sts) /* Even if this fails, we pass on the REQ to the slave */ pr_debug("Could not allocate rej_tmout entry. rem_pv_cm_id 0x%x slave %d status %d\n", rem_pv_cm_id, *slave, sts); return 0; } pv_cm_id = get_remote_comm_id(mad); id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1); if (!id) { if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && REJ_REASON(mad) == IB_CM_REJ_TIMEOUT && slave) { *slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id); return (*slave < 0) ? 
*slave : 0; } pr_debug("Couldn't find an entry for pv_cm_id 0x%x, attr_id 0x%x\n", pv_cm_id, be16_to_cpu(mad->mad_hdr.attr_id)); return -ENOENT; } if (slave) *slave = id->slave_id; set_remote_comm_id(mad, id->sl_cm_id); if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID || mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) schedule_delayed(ibdev, id); return 0; } void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev) { spin_lock_init(&dev->sriov.id_map_lock); INIT_LIST_HEAD(&dev->sriov.cm_list); dev->sriov.sl_id_map = RB_ROOT; xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC); xa_init(&dev->sriov.xa_rej_tmout); } static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave) { struct rej_tmout_entry *item; bool flush_needed = false; unsigned long id; int cnt = 0; xa_lock(&sriov->xa_rej_tmout); xa_for_each(&sriov->xa_rej_tmout, id, item) { if (slave < 0 || slave == item->slave) { mod_delayed_work(cm_wq, &item->timeout, 0); flush_needed = true; ++cnt; } } xa_unlock(&sriov->xa_rej_tmout); if (flush_needed) { flush_workqueue(cm_wq); pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n", cnt, slave); } if (slave < 0) WARN_ON(!xa_empty(&sriov->xa_rej_tmout)); } /* slave = -1 ==> all slaves */ /* TBD -- call paravirt clean for single slave. Need for slave RESET event */ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave) { struct mlx4_ib_sriov *sriov = &dev->sriov; struct rb_root *sl_id_map = &sriov->sl_id_map; struct list_head lh; struct rb_node *nd; int need_flush = 0; struct id_map_entry *map, *tmp_map; /* cancel all delayed work queue entries */ INIT_LIST_HEAD(&lh); spin_lock(&sriov->id_map_lock); list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) { if (slave < 0 || slave == map->slave_id) { if (map->scheduled_delete) need_flush |= !cancel_delayed_work(&map->timeout); } } spin_unlock(&sriov->id_map_lock); if (need_flush) flush_workqueue(cm_wq); /* make sure all timers were flushed */ /* now, remove all leftover entries from databases*/ spin_lock(&sriov->id_map_lock); if (slave < 0) { while (rb_first(sl_id_map)) { struct id_map_entry *ent = rb_entry(rb_first(sl_id_map), struct id_map_entry, node); rb_erase(&ent->node, sl_id_map); xa_erase(&sriov->pv_id_table, ent->pv_cm_id); } list_splice_init(&dev->sriov.cm_list, &lh); } else { /* first, move nodes belonging to slave to db remove list */ nd = rb_first(sl_id_map); while (nd) { struct id_map_entry *ent = rb_entry(nd, struct id_map_entry, node); nd = rb_next(nd); if (ent->slave_id == slave) list_move_tail(&ent->list, &lh); } /* remove those nodes from databases */ list_for_each_entry_safe(map, tmp_map, &lh, list) { rb_erase(&map->node, sl_id_map); xa_erase(&sriov->pv_id_table, map->pv_cm_id); } /* add remaining nodes from cm_list */ list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) { if (slave == map->slave_id) list_move_tail(&map->list, &lh); } } spin_unlock(&sriov->id_map_lock); /* free any map entries left behind due to cancel_delayed_work above */ list_for_each_entry_safe(map, tmp_map, &lh, list) { list_del(&map->list); kfree(map); } rej_tmout_xa_cleanup(sriov, slave); } int mlx4_ib_cm_init(void) { cm_wq = alloc_workqueue("mlx4_ib_cm", 0, 0); if (!cm_wq) return -ENOMEM; return 0; } void mlx4_ib_cm_destroy(void) { destroy_workqueue(cm_wq); }
linux-master
drivers/infiniband/hw/mlx4/cm.c
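The cm.c blob above paravirtualizes CM MADs for SR-IOV: id_map_alloc() hands each (slave_id, sl_cm_id) pair a proxy pv_cm_id via xa_alloc_cyclic() and indexes the entry both in the pv_id_table xarray and in the sl_id_map red-black tree, so the multiplex and demux handlers can translate communication IDs in either direction. The out-of-tree kernel-module sketch below exercises only that xa_alloc_cyclic()/xa_load()/xa_erase() pattern in isolation; the demo_id_entry type, module name, and values are hypothetical and are not part of the mlx4 driver.

// SPDX-License-Identifier: GPL-2.0
/*
 * Illustrative sketch only: exercises the xa_alloc_cyclic() pattern that
 * id_map_alloc() uses to hand out paravirtual CM IDs. Hypothetical code,
 * not part of the mlx4 driver; build out-of-tree with obj-m.
 */
#include <linux/module.h>
#include <linux/xarray.h>
#include <linux/slab.h>

struct demo_id_entry {
	int slave_id;
	u32 sl_cm_id;
	u32 pv_cm_id;
};

static DEFINE_XARRAY_ALLOC(demo_id_table);
static u32 demo_id_next;

static int __init demo_init(void)
{
	struct demo_id_entry *ent;
	int ret;

	ent = kzalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;

	ent->slave_id = 1;		/* invented values for illustration */
	ent->sl_cm_id = 0x1234;

	/* Cyclically allocate a free 32-bit index and store the entry
	 * under it, the same call pattern id_map_alloc() uses. */
	ret = xa_alloc_cyclic(&demo_id_table, &ent->pv_cm_id, ent,
			      xa_limit_32b, &demo_id_next, GFP_KERNEL);
	if (ret < 0) {
		kfree(ent);
		return ret;
	}

	pr_info("demo: slave %d sl_cm_id 0x%x mapped to pv_cm_id 0x%x\n",
		ent->slave_id, ent->sl_cm_id, ent->pv_cm_id);

	/* Reverse lookup by the proxy ID, as id_map_get() does when the
	 * caller already knows pv_cm_id. */
	if (xa_load(&demo_id_table, ent->pv_cm_id) != ent)
		pr_warn("demo: lookup mismatch\n");

	return 0;
}

static void __exit demo_exit(void)
{
	struct demo_id_entry *ent;
	unsigned long idx;

	/* Drop every remaining entry on unload. */
	xa_for_each(&demo_id_table, idx, ent) {
		xa_erase(&demo_id_table, idx);
		kfree(ent);
	}
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Sketch of the cyclic ID allocation pattern used by mlx4 CM paravirtualization");

Cyclic allocation (tracked through pv_id_next in the driver, demo_id_next here) avoids immediately reusing a just-freed index, which is generally preferable when stale IDs may still be in flight.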