IB/ipath: Fix port sharing on powerpc

diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 3c4c198a4514af1f870f28b98a9dd34fa85176c0..87462e0cb4d2f5e95bf41f12b9ec9910429eb24e 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -46,7 +46,7 @@
  */
 void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
 {
-       struct ipath_cq_wc *wc = cq->queue;
+       struct ipath_cq_wc *wc;
        unsigned long flags;
        u32 head;
        u32 next;
@@ -57,6 +57,7 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
         * Note that the head pointer might be writable by user processes.
         * Take care to verify it is a sane value.
         */
+       wc = cq->queue;
        head = wc->head;
        if (head >= (unsigned) cq->ibcq.cqe) {
                head = cq->ibcq.cqe;
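
The first two hunks make ipath_cq_enter() load cq->queue at the point of use rather than at declaration; the surrounding code then clamps the head index because, as the comment notes, the queue header may be writable by user processes and cannot be trusted to hold a sane value. A minimal userspace sketch of that read-once-and-clamp pattern, with illustrative names (user_ring, sane_head) that are not taken from the driver:

/* Sketch only: sanitize an index read from memory that an untrusted
 * process can also write, before it is used to address the ring. */
#include <stdint.h>

#define RING_CQE 64                     /* last valid index; the ring has RING_CQE + 1 slots */

struct user_ring {
        uint32_t head;                  /* shared with an untrusted writer */
        uint32_t tail;
        int      slots[RING_CQE + 1];
};

/* Read the shared head once into a local and clamp it, mirroring the
 * "if (head >= cqe) head = cqe;" check in ipath_cq_enter() above. */
static uint32_t sane_head(const struct user_ring *r)
{
        uint32_t head = r->head;

        if (head >= RING_CQE)
                head = RING_CQE;
        return head;
}
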
@@ -109,21 +110,27 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 {
        struct ipath_cq *cq = to_icq(ibcq);
-       struct ipath_cq_wc *wc = cq->queue;
+       struct ipath_cq_wc *wc;
        unsigned long flags;
        int npolled;
+       u32 tail;
 
        spin_lock_irqsave(&cq->lock, flags);
 
+       wc = cq->queue;
+       tail = wc->tail;
+       if (tail > (u32) cq->ibcq.cqe)
+               tail = (u32) cq->ibcq.cqe;
        for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
-               if (wc->tail == wc->head)
+               if (tail == wc->head)
                        break;
-               *entry = wc->queue[wc->tail];
-               if (wc->tail >= cq->ibcq.cqe)
-                       wc->tail = 0;
+               *entry = wc->queue[tail];
+               if (tail >= cq->ibcq.cqe)
+                       tail = 0;
                else
-                       wc->tail++;
+                       tail++;
        }
+       wc->tail = tail;
 
        spin_unlock_irqrestore(&cq->lock, flags);
 
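
In ipath_poll_cq() the rewritten hunk loads cq->queue only after cq->lock is held, clamps the user-writable tail once into a local copy, advances that copy while draining entries, and writes the sanitized value back a single time before unlocking. Dereferencing the pointer only under the lock also matters because ipath_resize_cq() replaces cq->queue under that same lock, so a pointer cached before locking could go stale. A userspace sketch of the load-the-pointer-under-the-lock idea, with illustrative names (struct resizable, read_entry, resize) and pthreads standing in for the kernel spinlock:

/* Sketch only: a buffer pointer that may be swapped by a concurrent
 * resize is re-read under the lock before every use.  Callers are assumed
 * to have set up r->lock with pthread_mutex_init() and r->cur with an
 * initial buffer. */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct buf {
        size_t nentries;
        int    data[];                  /* flexible array sized at allocation */
};

struct resizable {
        pthread_mutex_t lock;
        struct buf     *cur;            /* only swapped while lock is held */
};

/* Reader: take the lock first, then dereference the current buffer. */
static int read_entry(struct resizable *r, size_t i, int *out)
{
        int ok = 0;

        pthread_mutex_lock(&r->lock);
        struct buf *b = r->cur;         /* load the pointer under the lock */
        if (i < b->nentries) {
                *out = b->data[i];
                ok = 1;
        }
        pthread_mutex_unlock(&r->lock);
        return ok;
}

/* Resizer: allocate outside the lock, copy and swap inside it. */
static int resize(struct resizable *r, size_t nentries)
{
        struct buf *nb = malloc(sizeof(*nb) + nentries * sizeof(int));

        if (!nb)
                return -1;
        nb->nentries = nentries;

        pthread_mutex_lock(&r->lock);
        struct buf *old = r->cur;
        size_t copy = old->nentries < nentries ? old->nentries : nentries;
        memcpy(nb->data, old->data, copy * sizeof(int));
        r->cur = nb;                    /* readers now see the new buffer */
        pthread_mutex_unlock(&r->lock);

        free(old);
        return 0;
}
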
@@ -172,16 +179,11 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
        struct ipath_cq_wc *wc;
        struct ib_cq *ret;
 
-       if (entries > ib_ipath_max_cqes) {
+       if (entries < 1 || entries > ib_ipath_max_cqes) {
                ret = ERR_PTR(-EINVAL);
                goto done;
        }
 
-       if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
-               ret = ERR_PTR(-ENOMEM);
-               goto done;
-       }
-
        /* Allocate the completion queue structure. */
        cq = kmalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
@@ -237,6 +239,16 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
        } else
                cq->ip = NULL;
 
+       spin_lock(&dev->n_cqs_lock);
+       if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
+               spin_unlock(&dev->n_cqs_lock);
+               ret = ERR_PTR(-ENOMEM);
+               goto bail_wc;
+       }
+
+       dev->n_cqs_allocated++;
+       spin_unlock(&dev->n_cqs_lock);
+
        /*
         * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
         * The number of entries should be >= the number requested or return
@@ -253,7 +265,6 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 
        ret = &cq->ibcq;
 
-       dev->n_cqs_allocated++;
        goto done;
 
 bail_wc:
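
The ipath_create_cq() hunks above drop the early, unlocked check against ib_ipath_max_cqs and instead take a new dev->n_cqs_lock so the limit check and the increment of dev->n_cqs_allocated happen as a single step; the matching decrement in ipath_destroy_cq() below takes the same lock, so two concurrent creators cannot both pass the check before either has bumped the count. A userspace sketch of the check-and-count-under-one-lock pattern, with illustrative names and a hypothetical MAX_OBJECTS limit:

/* Sketch only: a bounded-allocation counter guarded by its own lock, so
 * the limit test and the increment cannot be separated by another thread. */
#include <pthread.h>

#define MAX_OBJECTS 128                 /* hypothetical limit */

static pthread_mutex_t n_objs_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int n_objs_allocated;

/* Returns 0 if a slot was reserved, -1 if the limit was already reached. */
static int reserve_object_slot(void)
{
        int ret = 0;

        pthread_mutex_lock(&n_objs_lock);
        if (n_objs_allocated == MAX_OBJECTS)
                ret = -1;
        else
                n_objs_allocated++;
        pthread_mutex_unlock(&n_objs_lock);

        return ret;
}

/* Matching release, analogous to the decrement in ipath_destroy_cq(). */
static void release_object_slot(void)
{
        pthread_mutex_lock(&n_objs_lock);
        n_objs_allocated--;
        pthread_mutex_unlock(&n_objs_lock);
}
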
@@ -280,7 +291,9 @@ int ipath_destroy_cq(struct ib_cq *ibcq)
        struct ipath_cq *cq = to_icq(ibcq);
 
        tasklet_kill(&cq->comptask);
+       spin_lock(&dev->n_cqs_lock);
        dev->n_cqs_allocated--;
+       spin_unlock(&dev->n_cqs_lock);
        if (cq->ip)
                kref_put(&cq->ip->ref, ipath_release_mmap_info);
        else
@@ -316,14 +329,25 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
        return 0;
 }
 
+/**
+ * ipath_resize_cq - change the size of the CQ
+ * @ibcq: the completion queue
+ *
+ * Returns 0 for success.
+ */
 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 {
        struct ipath_cq *cq = to_icq(ibcq);
-       struct ipath_cq_wc *old_wc = cq->queue;
+       struct ipath_cq_wc *old_wc;
        struct ipath_cq_wc *wc;
        u32 head, tail, n;
        int ret;
 
+       if (cqe < 1 || cqe > ib_ipath_max_cqes) {
+               ret = -EINVAL;
+               goto bail;
+       }
+
        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
@@ -350,6 +374,7 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
         * Make sure head and tail are sane since they
         * might be user writable.
         */
+       old_wc = cq->queue;
        head = old_wc->head;
        if (head > (u32) cq->ibcq.cqe)
                head = (u32) cq->ibcq.cqe;