return err;
 }
 
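+/*
+ * When a QP's send and receive CQs differ, both CQ locks must be
+ * held while the QP is pulled out of the QPN table.  Take them in a
+ * fixed order (by pointer value) so that two threads locking the
+ * same pair of CQs in opposite send/recv roles cannot deadlock
+ * (AB-BA).  spin_lock_nested(..., SINGLE_DEPTH_NESTING) tells
+ * lockdep that taking a second lock of the same class is intentional.
+ */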
+static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+       if (send_cq == recv_cq)
+               spin_lock_irq(&send_cq->lock);
+       else if (send_cq > recv_cq) {
+               spin_lock_irq(&send_cq->lock);
+               spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+       } else {
+               spin_lock_irq(&recv_cq->lock);
+               spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+       }
+}
+
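+/*
+ * Release in the reverse order of c2_lock_cqs(): the lock taken with
+ * spin_lock_nested() is dropped with a plain spin_unlock(), and the
+ * outer spin_unlock_irq() re-enables interrupts last.
+ */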
+static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+       if (send_cq == recv_cq)
+               spin_unlock_irq(&send_cq->lock);
+       else if (send_cq > recv_cq) {
+               spin_unlock(&recv_cq->lock);
+               spin_unlock_irq(&send_cq->lock);
+       } else {
+               spin_unlock(&send_cq->lock);
+               spin_unlock_irq(&recv_cq->lock);
+       }
+}
+
 void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
 {
        struct c2_cq *send_cq;
        struct c2_cq *recv_cq;
 
        send_cq = to_c2cq(qp->ibqp.send_cq);
        recv_cq = to_c2cq(qp->ibqp.recv_cq);
 
        /*
         * Lock CQs here, so that CQ polling code can do QP lookup
         * without taking a lock.
         */
-       spin_lock_irq(&send_cq->lock);
-       if (send_cq != recv_cq)
-               spin_lock(&recv_cq->lock);
-
+       c2_lock_cqs(send_cq, recv_cq);
        c2_free_qpn(c2dev, qp->qpn);
-
-       if (send_cq != recv_cq)
-               spin_unlock(&recv_cq->lock);
-       spin_unlock_irq(&send_cq->lock);
+       c2_unlock_cqs(send_cq, recv_cq);
 
        /*
         * Destroy qp in the rnic...