From fecea0ab3415bfab9a1964690e53b10c5d8f2e46 Mon Sep 17 00:00:00 2001
From: Stefan Roscher
Date: Fri, 31 Aug 2007 16:02:59 +0200
Subject: IB/ehca: Fix Small QP regressions

The new Small QP code had a few bugs that would also make it trigger for
non-Small QPs.  Fix them.

Signed-off-by: Joachim Fenkes
Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/ehca/ehca_qp.c   | 10 ++++++----
 drivers/infiniband/hw/ehca/ipz_pt_fn.c |  2 +-
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index b178cba96345..84d435a5ee11 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -600,10 +600,12 @@ static struct ehca_qp *internal_create_qp(
 
 	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)
 	    && !(context && udata)) { /* no small QP support in userspace ATM */
-		ehca_determine_small_queue(
-			&parms.squeue, max_send_sge, is_llqp);
-		ehca_determine_small_queue(
-			&parms.rqueue, max_recv_sge, is_llqp);
+		if (HAS_SQ(my_qp))
+			ehca_determine_small_queue(
+				&parms.squeue, max_send_sge, is_llqp);
+		if (HAS_RQ(my_qp))
+			ehca_determine_small_queue(
+				&parms.rqueue, max_recv_sge, is_llqp);
 		parms.qp_storage =
 			(parms.squeue.is_small || parms.rqueue.is_small);
 	}
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index a090c679c397..29bd476fbd54 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -172,7 +172,7 @@ static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
 	unsigned long bit;
 	int free_page = 0;
 
-	bit = ((unsigned long)queue->queue_pages[0] & PAGE_MASK)
+	bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
 		>> (order + 9);
 
 	mutex_lock(&pd->lock);
--
cgit v1.2.3
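The ipz_pt_fn.c one-liner above is easy to misread, so here is the arithmetic spelled out: PAGE_MASK keeps the page-aligned bits of an address and clears the in-page offset, so "addr & PAGE_MASK" yields the page base while "addr & ~PAGE_MASK" yields the offset within the page. The small-queue allocator packs several queues of 512 << order bytes into one kernel page and tracks them in a per-page bitmap, so freeing a queue needs the in-page offset divided by the queue size to find the bit to clear. A minimal standalone sketch of the corrected computation follows; it assumes 4 KiB pages, and the helper name small_queue_bit() plus the sample addresses are purely illustrative, not driver code.

#include <stdio.h>

#define PAGE_SHIFT 12                        /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Bit index of a small queue inside its page's allocation bitmap:
 * the in-page offset divided by the queue size, 512 << order bytes. */
static unsigned long small_queue_bit(unsigned long addr, int order)
{
	return (addr & ~PAGE_MASK) >> (order + 9);
}

int main(void)
{
	unsigned long page = 0x12345000UL;       /* hypothetical page base */
	unsigned long q    = page + 3 * 512;     /* fourth 512-byte queue  */

	/* Buggy form: masks *off* the offset, keeping the page address. */
	printf("(q & PAGE_MASK)  >> 9 = %lu (bogus)\n", (q & PAGE_MASK) >> 9);
	/* Fixed form: prints 3, the queue's slot in the page bitmap. */
	printf("(q & ~PAGE_MASK) >> 9 = %lu\n", small_queue_bit(q, 0));
	return 0;
}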
From 5ff70cac3e98af64f9a1eaec9e762ff4927c26d1 Mon Sep 17 00:00:00 2001
From: Joachim Fenkes
Date: Fri, 31 Aug 2007 16:03:37 +0200
Subject: IB/ehca: SRQ fixes to enable IPoIB CM

Fix ehca SRQ support so that IPoIB connected mode works:

 - Report max_srq > 0 if SRQ is supported
 - Report "last wqe reached" asynchronous event when base QP dies;
   this is required by the IB spec and IPoIB CM relies on receiving
   it when cleaning up.

Signed-off-by: Joachim Fenkes
Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/ehca/ehca_hca.c | 10 +++++---
 drivers/infiniband/hw/ehca/ehca_irq.c | 48 ++++++++++++++++++++++-------------
 2 files changed, 38 insertions(+), 20 deletions(-)

diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index fc19ef9fd963..cf22472d9414 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -93,9 +93,13 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
 	props->max_pd          = min_t(int, rblock->max_pd, INT_MAX);
 	props->max_ah          = min_t(int, rblock->max_ah, INT_MAX);
 	props->max_fmr         = min_t(int, rblock->max_mr, INT_MAX);
-	props->max_srq         = 0;
-	props->max_srq_wr      = 0;
-	props->max_srq_sge     = 0;
+
+	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
+		props->max_srq         = props->max_qp;
+		props->max_srq_wr      = props->max_qp_wr;
+		props->max_srq_sge     = 3;
+	}
+
 	props->max_pkeys       = 16;
 	props->local_ca_ack_delay
 		= rblock->local_ca_ack_delay;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index ee06d8bd7396..a925ea52443f 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -175,41 +175,55 @@ error_data1:
 
 }
 
-static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
-			      enum ib_event_type event_type, int fatal)
+static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
+			      enum ib_event_type event_type)
 {
 	struct ib_event event;
-	struct ehca_qp *qp;
-	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
-
-	read_lock(&ehca_qp_idr_lock);
-	qp = idr_find(&ehca_qp_idr, token);
-	read_unlock(&ehca_qp_idr_lock);
-
-
-	if (!qp)
-		return;
-
-	if (fatal)
-		ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
 
 	event.device = &shca->ib_device;
+	event.event = event_type;
 
 	if (qp->ext_type == EQPT_SRQ) {
 		if (!qp->ib_srq.event_handler)
 			return;
 
-		event.event = fatal ? IB_EVENT_SRQ_ERR : event_type;
 		event.element.srq = &qp->ib_srq;
 		qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
 	} else {
 		if (!qp->ib_qp.event_handler)
 			return;
 
-		event.event = event_type;
 		event.element.qp = &qp->ib_qp;
 		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
 	}
+}
+
+static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
+			      enum ib_event_type event_type, int fatal)
+{
+	struct ehca_qp *qp;
+	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
+
+	read_lock(&ehca_qp_idr_lock);
+	qp = idr_find(&ehca_qp_idr, token);
+	read_unlock(&ehca_qp_idr_lock);
+
+	if (!qp)
+		return;
+
+	if (fatal)
+		ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
+
+	dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
+			  IB_EVENT_SRQ_ERR : event_type);
+
+	/*
+	 * eHCA only processes one WQE at a time for SRQ base QPs,
+	 * so the last WQE has been processed as soon as the QP enters
+	 * error state.
+	 */
+	if (fatal && qp->ext_type == EQPT_SRQBASE)
+		dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
 
 	return;
 }
--
cgit v1.2.3
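The new comment in ehca_irq.c explains why the driver may signal IB_EVENT_QP_LAST_WQE_REACHED as soon as an SRQ base QP enters the error state. On the consumer side, a ULP that attaches its QPs to an SRQ (as IPoIB CM does) must not reclaim receive resources until that event arrives. The sketch below shows that pattern using only core ib_verbs calls; the names my_conn, my_qp_event_handler and my_conn_teardown are hypothetical, the handler is assumed to be registered through ib_qp_init_attr.event_handler with the struct as qp_context, and init_completion()/error handling are omitted for brevity.

#include <rdma/ib_verbs.h>
#include <linux/completion.h>

struct my_conn {
	struct ib_qp *qp;
	struct completion last_wqe_done;
};

/* Registered via ib_qp_init_attr.event_handler at QP creation time;
 * "context" is the qp_context supplied there. */
static void my_qp_event_handler(struct ib_event *event, void *context)
{
	struct my_conn *conn = context;

	if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
		complete(&conn->last_wqe_done);
}

/* Teardown: move the QP to the error state, then wait for the HCA to
 * report that the last WQE for this QP has been processed before any
 * receive buffers posted to the shared SRQ are reclaimed. */
static void my_conn_teardown(struct my_conn *conn)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };

	ib_modify_qp(conn->qp, &attr, IB_QP_STATE);
	wait_for_completion(&conn->last_wqe_done);
	/* ... now safe to drain this connection's SRQ buffers ... */
}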