unsigned cnt)
 {
        unsigned i, last = first + cnt;
-       u64 sendctrl, sendorig;
+       unsigned long flags;
 
        ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
-       sendorig = dd->ipath_sendctrl;
        for (i = first; i < last; i++) {
-               sendctrl = sendorig  | INFINIPATH_S_DISARM |
-                       (i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
+               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+               /*
+                * The disarm-related bits are write-only, so it
+                * is ok to OR them in with our copy of sendctrl
+                * while we hold the lock.
+                */
                ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                                sendctrl);
+                       dd->ipath_sendctrl | INFINIPATH_S_DISARM |
+                       (i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
+               /* can't disarm bufs back-to-back per iba7220 spec */
+               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
        }
 
        /*
-        * Write it again with current value, in case ipath_sendctrl changed
-        * while we were looping; no critical bits that would require
-        * locking.
-        *
-        * disable PIOAVAILUPD, then re-enable, reading scratch in
+        * Disable PIOAVAILUPD, then re-enable, reading scratch in
         * between.  This seems to avoid a chip timing race that causes
-        * pioavail updates to memory to stop.
+        * pioavail updates to memory to stop.  We XOR because we
+        * don't know the state of the bit when we're called.
         */
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                        sendorig & ~INFINIPATH_S_PIOBUFAVAILUPD);
-       sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+               dd->ipath_sendctrl ^ INFINIPATH_S_PIOBUFAVAILUPD);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 }
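
Every hunk in this patch applies the same recipe: take ipath_sendctrl_lock, update the dd->ipath_sendctrl shadow (some sendctrl bits, like the disarm bits, are write-only, so the driver must trust its shadow rather than read the register back), write the shadow to kr_sendctrl, and read kr_scratch to flush the posted write before dropping the lock. A minimal sketch of that recipe as a helper (ipath_set_sendctrl_bits is an illustrative name, not a function the driver defines):

	static void ipath_set_sendctrl_bits(struct ipath_devdata *dd, u64 bits)
	{
		unsigned long flags;

		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl |= bits;	/* update the shadow copy */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);	/* shadow to chip */
		/* flush the posted write before dropping the lock */
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}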
 
 /**
  * ipath_shutdown_device - shut down a device
  * @dd: the infinipath device
  */
 void ipath_shutdown_device(struct ipath_devdata *dd)
 {
+       unsigned long flags;
+
        ipath_dbg("Shutting down the device\n");
 
        dd->ipath_flags |= IPATH_LINKUNK;
        /*
         * gracefully stop all sends, allowing any in progress to trickle out
         * first.
         */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+       dd->ipath_sendctrl = 0;
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
        /* flush it */
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+
        /*
         * enough for anything that's going to trickle out to have actually
         * done so.
         */
 
 
 static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
 {
-       u64 reg = dd->ipath_sendctrl;
+       unsigned long flags;
 
-       clear_bit(IPATH_S_PIOBUFAVAILUPD, &reg);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, reg);
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+               dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
        return 0;
 }
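
The scratch reads here matter: MMIO writes to the HCA can be posted, so the readback is what guarantees the disable has actually reached the chip before the re-enable is issued. The idiom in isolation, as a sketch (mmio_write_flushed is a made-up name; writeq()/readq() are the stock kernel MMIO accessors on 64-bit):

	#include <linux/io.h>

	/*
	 * Sketch of the flush idiom: a readback from any harmless
	 * register forces a posted MMIO write to complete at the
	 * device before the CPU moves on.
	 */
	static inline void mmio_write_flushed(u64 val, void __iomem *reg,
					      void __iomem *scratch)
	{
		writeq(val, reg);	/* may be posted by the bus */
		(void) readq(scratch);	/* readback pushes it to the chip */
	}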
 
                       dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
 
        spin_lock_init(&dd->ipath_tid_lock);
-
+       spin_lock_init(&dd->ipath_sendctrl_lock);
        spin_lock_init(&dd->ipath_gpio_lock);
        spin_lock_init(&dd->ipath_eep_st_lock);
        mutex_init(&dd->ipath_eep_lock);
        *pdp = dd->ipath_pd[0];
        /* ensure chip does no sends or receives while we re-initialize */
        dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0);
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);
 
        rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
        if (dd->ipath_portcnt != rtmp)
                        struct ipath_portdata *pd, int reinit)
 {
        u32 val;
+       unsigned long flags;
        int i;
 
        if (!reinit)
        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                         dd->ipath_rcvctrl);
 
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
        /* Enable PIO send, and update of PIOavail regs to memory. */
        dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
                INFINIPATH_S_PIOBUFAVAILUPD;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                        dd->ipath_sendctrl);
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
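
INFINIPATH_S_PIOBUFAVAILUPD makes the chip copy its pioavail flags into host memory, so the send path can look for a free PIO buffer without an MMIO read. A simplified sketch of such a scan (find_free_piobuf and the one-bit-per-buffer layout are illustrative; the real chip packs more state per buffer into these words):

	/* sketch: scan the DMA'd pioavail bits for a free send buffer */
	static int find_free_piobuf(const __le64 *pioavail_dma, unsigned nbufs)
	{
		unsigned i;

		for (i = 0; i < nbufs; i++) {
			u64 w = le64_to_cpu(pioavail_dma[i / 64]);

			if (!(w & (1ULL << (i % 64))))
				return i;	/* buffer i looks free */
		}
		return -1;	/* none free; caller can use want_buffer() */
	}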
 
        /*
         * enable port 0 receive, and receive interrupt.  other ports
        u64 val;
        struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
        gfp_t gfp_flags = GFP_USER | __GFP_COMP;
+       unsigned long flags;
 
        ret = init_housekeeping(dd, &pd, reinit);
        if (ret)
        ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
                         ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                        INFINIPATH_S_PIOENABLE);
+
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+       dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE;
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
        /*
         * before error clears, since we expect serdes pll errors during
 
 {
        int i, im;
        __le64 val;
+       unsigned long flags;
 
        /* disable error interrupts, to avoid confusion */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
                         dd->ipath_control);
 
        /* ensure pio avail updates continue */
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                dd->ipath_sendctrl);
+                        dd->ipath_sendctrl);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
        /*
         * We just enabled pioavailupdate, so dma copy is almost certainly
 
 static void handle_layer_pioavail(struct ipath_devdata *dd)
 {
+       unsigned long flags;
        int ret;
 
        ret = ipath_ib_piobufavail(dd->verbs_dev);
 
        return;
 set:
-       set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+       dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 }
 
 /*
                handle_urcv(dd, istat);
 
        if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
-               clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+               unsigned long flags;
+
+               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+               dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
                ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                                 dd->ipath_sendctrl);
+               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
                handle_layer_pioavail(dd);
        }
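
This hunk also shows why the old atomic bitops were not enough: set_bit()/clear_bit() made each shadow update safe on its own, but the register write that follows uses a separate read of the shadow, and nothing ordered those writes against each other:

	/*
	 * Sketch of the lost-update race the new spinlock closes:
	 *
	 *   CPU0                          CPU1
	 *   set_bit(A, &shadow);
	 *   val = shadow;
	 *                                 clear_bit(B, &shadow);
	 *                                 write_kreg(sendctrl, shadow);
	 *   write_kreg(sendctrl, val);    <- re-asserts B on the chip
	 *
	 * The chip is left with B set while the shadow says it is
	 * clear.  Holding ipath_sendctrl_lock across "update shadow,
	 * write chip, flush" makes the whole sequence atomic.
	 */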
 
        dma_addr_t *ipath_physshadow;
        /* lock to work around chip bug 9437 */
        spinlock_t ipath_tid_lock;
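+       /* protects ipath_sendctrl (shadow of kr_sendctrl) and its writes */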
+       spinlock_t ipath_sendctrl_lock;
 
        /*
         * IPATH_STATUS_*,
 
 
 static void want_buffer(struct ipath_devdata *dd)
 {
-       set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+       unsigned long flags;
+
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+       dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 }
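
want_buffer() is the arming half of the handshake: it sets INFINIPATH_S_PIOINTBUFAVAIL so the chip will raise INFINIPATH_I_SPIOBUFAVAIL when a send buffer frees up, and the handler above disarms it and notifies the verbs layer. A sketch of how a send path might use it (get_pio_buffer() is a hypothetical helper, not the driver's real send path):

	/* sketch of a send path using want_buffer() for backoff */
	static int send_or_backoff(struct ipath_devdata *dd, const void *pkt,
				   u32 len)
	{
		u32 __iomem *piobuf = get_pio_buffer(dd);  /* hypothetical */

		if (!piobuf) {
			/* arm the bufavail interrupt; retry when it fires */
			want_buffer(dd);
			return -EBUSY;
		}
		/* ... copy pkt/len into piobuf and trigger the send ... */
		return 0;
	}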
 
 /**