        spin_lock_init(&my_cq->spinlock);
        spin_lock_init(&my_cq->cb_lock);
        spin_lock_init(&my_cq->task_lock);
+       atomic_set(&my_cq->nr_events, 0);
        init_waitqueue_head(&my_cq->wait_completion);
        my_cq->ownpid = current->tgid;
 
        return cq;
 }
 
-static int get_cq_nr_events(struct ehca_cq *my_cq)
-{
-       int ret;
-       unsigned long flags;
-       spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-       ret = my_cq->nr_events;
-       spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-       return ret;
-}
-
 int ehca_destroy_cq(struct ib_cq *cq)
 {
        u64 h_ret;
                }
        }
 
+       /*
+        * remove the CQ from the idr first to make sure
+        * no more interrupt tasklets will touch this CQ
+        */
        spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-       while (my_cq->nr_events) {
-               spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-               wait_event(my_cq->wait_completion, !get_cq_nr_events(my_cq));
-               spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-               /* recheck nr_events to assure no cqe has just arrived */
-       }
-
        idr_remove(&ehca_cq_idr, my_cq->token);
        spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
+       /* now wait until all pending events have completed */
+       wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
+
+       /* nobody's using our CQ any longer -- we can destroy it */
        h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
        if (h_ret == H_R_STATE) {
                /* cq in err: read err data and destroy it forcibly */
 
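The hunks above establish the new teardown ordering: unpublish, then drain.
The CQ is removed from the idr first, so no new event handler can look it
up, and only then does ehca_destroy_cq sleep until the in-flight handlers
have dropped nr_events to zero. Below is a minimal kernel-style sketch of
that pattern; the names (my_obj, obj_idr, obj_idr_lock, my_obj_destroy) are
hypothetical, not ehca code.

	#include <linux/idr.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>
	#include <linux/atomic.h>
	#include <linux/slab.h>

	static DEFINE_SPINLOCK(obj_idr_lock);
	static DEFINE_IDR(obj_idr);

	struct my_obj {
		atomic_t nr_events;		/* in-flight event handlers */
		wait_queue_head_t wait_completion;
		u32 token;			/* idr key */
	};

	static void my_obj_destroy(struct my_obj *obj)
	{
		unsigned long flags;

		/* 1) unpublish: handlers can no longer find the object */
		spin_lock_irqsave(&obj_idr_lock, flags);
		idr_remove(&obj_idr, obj->token);
		spin_unlock_irqrestore(&obj_idr_lock, flags);

		/* 2) drain: sleep until the last in-flight handler is done */
		wait_event(obj->wait_completion,
			   !atomic_read(&obj->nr_events));

		/* 3) nothing can reach the object any more; free it */
		kfree(obj);
	}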
  *
  *  Authors: Heiko J Schick <schickhj@de.ibm.com>
  *           Khadija Souissi <souissi@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Joachim Fenkes <fenkes@de.ibm.com>
  *
  *  Copyright (c) 2005 IBM Corporation
  *
 
        spin_lock_irqsave(&ehca_cq_idr_lock, flags);
        cq = idr_find(&ehca_cq_idr, token);
+       if (cq)
+               atomic_inc(&cq->nr_events);
        spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
        if (!cq)
 
        ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
 
+       if (atomic_dec_and_test(&cq->nr_events))
+               wake_up(&cq->wait_completion);
+
        return;
 }
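This hunk shows the handler half of the protocol, which every event path
below repeats: the atomic_inc must happen inside the same critical section
as the idr_find. If the reference were taken after ehca_cq_idr_lock is
dropped, ehca_destroy_cq could slip in between, remove the CQ, observe
nr_events == 0 and free it while the handler still holds the pointer. A
hedged sketch of the handler side, reusing the hypothetical names from the
sketch above (handle_event stands in for the real callback):

	static void my_obj_event(u32 token)
	{
		struct my_obj *obj;
		unsigned long flags;

		/* look up and pin the object in one critical section */
		spin_lock_irqsave(&obj_idr_lock, flags);
		obj = idr_find(&obj_idr, token);
		if (obj)
			atomic_inc(&obj->nr_events);
		spin_unlock_irqrestore(&obj_idr_lock, flags);

		if (!obj)
			return;

		handle_event(obj);

		/* drop the pin; wake a destroyer sleeping in wait_event() */
		if (atomic_dec_and_test(&obj->nr_events))
			wake_up(&obj->wait_completion);
	}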
 
                token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
                spin_lock_irqsave(&ehca_cq_idr_lock, flags);
                cq = idr_find(&ehca_cq_idr, token);
+               if (cq)
+                       atomic_inc(&cq->nr_events);
+               spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
                if (cq == NULL) {
-                       spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
                        ehca_err(&shca->ib_device,
                                 "Invalid eqe for non-existing cq token=%x",
                                 token);
                        return;
                }
                reset_eq_pending(cq);
-               cq->nr_events++;
-               spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
                if (ehca_scaling_code)
                        queue_comp_task(cq);
                else {
                        comp_event_callback(cq);
-                       spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-                       cq->nr_events--;
-                       if (!cq->nr_events)
+                       if (atomic_dec_and_test(&cq->nr_events))
                                wake_up(&cq->wait_completion);
-                       spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
                }
        } else {
                ehca_dbg(&shca->ib_device, "Got non completion event");
                        token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
                        spin_lock(&ehca_cq_idr_lock);
                        eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
+                       if (eqe_cache[eqe_cnt].cq)
+                               atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
+                       spin_unlock(&ehca_cq_idr_lock);
                        if (!eqe_cache[eqe_cnt].cq) {
-                               spin_unlock(&ehca_cq_idr_lock);
                                ehca_err(&shca->ib_device,
                                         "Invalid eqe for non-existing cq "
                                         "token=%x", token);
                                continue;
                        }
-                       eqe_cache[eqe_cnt].cq->nr_events++;
-                       spin_unlock(&ehca_cq_idr_lock);
                } else
                        eqe_cache[eqe_cnt].cq = NULL;
                eqe_cnt++;
                        else {
                                struct ehca_cq *cq = eq->eqe_cache[i].cq;
                                comp_event_callback(cq);
-                               spin_lock(&ehca_cq_idr_lock);
-                               cq->nr_events--;
-                               if (!cq->nr_events)
+                               if (atomic_dec_and_test(&cq->nr_events))
                                        wake_up(&cq->wait_completion);
-                               spin_unlock(&ehca_cq_idr_lock);
                        }
                } else {
                        ehca_dbg(&shca->ib_device, "Got non completion event");
        while (!list_empty(&cct->cq_list)) {
                cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
                spin_unlock_irqrestore(&cct->task_lock, flags);
-               comp_event_callback(cq);
 
-               spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-               cq->nr_events--;
-               if (!cq->nr_events)
+               comp_event_callback(cq);
+               if (atomic_dec_and_test(&cq->nr_events))
                        wake_up(&cq->wait_completion);
-               spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
                spin_lock_irqsave(&cct->task_lock, flags);
                spin_lock(&cq->task_lock);
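In the scaling-code path the reference taken in interrupt context is not
dropped there: it travels with the CQ onto the per-CPU task list, and the
worker loop above releases it only after comp_event_callback has run. A
hedged sketch of that handoff, again with hypothetical names, assuming
my_obj gains a struct work_struct event_work member (initialized with
INIT_WORK at setup) and with a plain workqueue standing in for ehca's
per-CPU comp tasks; a real driver would also have to cope with
re-queueing while the work is still pending, which ehca's own list
handling takes care of:

	#include <linux/workqueue.h>

	static void my_obj_work_fn(struct work_struct *work)
	{
		struct my_obj *obj = container_of(work, struct my_obj,
						  event_work);

		handle_event(obj);

		/* release the reference taken by the interrupt handler */
		if (atomic_dec_and_test(&obj->nr_events))
			wake_up(&obj->wait_completion);
	}

	/* interrupt side: pin the object, hand the reference to the worker */
	static void my_obj_irq(struct my_obj *obj)
	{
		atomic_inc(&obj->nr_events);	/* under obj_idr_lock in real code */
		schedule_work(&obj->event_work);
	}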