return modes;
 }
 
+struct ata_exec_internal_arg {
+       unsigned int err_mask;
+       struct ata_taskfile *tf;
+       struct completion *waiting;
+};
+
+static int ata_qc_complete_internal(struct ata_queued_cmd *qc)
+{
+       struct ata_exec_internal_arg *arg = qc->private_data;
+       struct completion *waiting = arg->waiting;
+
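+	/* Read the result taskfile back into the caller-supplied @tf
+	 * unless a non-device error (anything other than AC_ERR_DEV)
+	 * occurred, so the waiter can inspect the status/error registers.
+	 */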
+       if (!(qc->err_mask & ~AC_ERR_DEV))
+               qc->ap->ops->tf_read(qc->ap, arg->tf);
+       arg->err_mask = qc->err_mask;
+       arg->waiting = NULL;
+       complete(waiting);
+
+       return 0;
+}
+
+/**
+ *     ata_exec_internal - execute libata internal command
+ *     @ap: Port to which the command is sent
+ *     @dev: Device to which the command is sent
+ *     @tf: Taskfile registers for the command and the result
+ *	@dma_dir: Data transfer direction of the command
+ *     @buf: Data buffer of the command
+ *     @buflen: Length of data buffer
+ *
+ *	Executes a libata internal command with timeout.  @tf contains
+ *	the command on entry and the result on return.  Timeout and
+ *	error conditions are reported via the return value.  No recovery
+ *	action is taken after a command times out.  It is the caller's
+ *	duty to clean up after a timeout.
+ *
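+ *	As an illustrative sketch (not part of this patch; @id is assumed
+ *	to be a caller-owned u16[ATA_ID_WORDS] buffer), a caller could
+ *	issue IDENTIFY DEVICE via PIO like this:
+ *
+ *		struct ata_taskfile tf;
+ *		unsigned int err_mask;
+ *
+ *		ata_tf_init(ap, &tf, dev->devno);
+ *		tf.command = ATA_CMD_ID_ATA;
+ *		tf.protocol = ATA_PROT_PIO;
+ *		err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
+ *					     id, ATA_ID_WORDS * sizeof(u16));
+ *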
+ *     LOCKING:
+ *     None.  Should be called with kernel context, might sleep.
+ */
+
+static unsigned
+ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
+                 struct ata_taskfile *tf,
+                 int dma_dir, void *buf, unsigned int buflen)
+{
+       u8 command = tf->command;
+       struct ata_queued_cmd *qc;
+       DECLARE_COMPLETION(wait);
+       unsigned long flags;
+       struct ata_exec_internal_arg arg;
+
+       spin_lock_irqsave(&ap->host_set->lock, flags);
+
+       qc = ata_qc_new_init(ap, dev);
+       BUG_ON(qc == NULL);
+
+       qc->tf = *tf;
+       qc->dma_dir = dma_dir;
+       if (dma_dir != DMA_NONE) {
+               ata_sg_init_one(qc, buf, buflen);
+               qc->nsect = buflen / ATA_SECT_SIZE;
+       }
+
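+	/* Hook up the private completion: ata_qc_complete_internal()
+	 * records the error mask and result taskfile, then wakes us up.
+	 */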
+       arg.waiting = &wait;
+       arg.tf = tf;
+       qc->private_data = &arg;
+       qc->complete_fn = ata_qc_complete_internal;
+
+       if (ata_qc_issue(qc))
+               goto issue_fail;
+
+       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+       if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
+               spin_lock_irqsave(&ap->host_set->lock, flags);
+
+		/* We're racing with the completion irq here.  If we
+		 * lose, the following test prevents us from completing
+		 * the qc twice.  If the completion irq fires after this
+		 * point but before the caller cleans up, it will result
+		 * in a spurious interrupt.  We can live with that.
+		 */
+               if (arg.waiting) {
+                       qc->err_mask = AC_ERR_OTHER;
+                       ata_qc_complete(qc);
+                       printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
+                              ap->id, command);
+               }
+
+               spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       }
+
+       return arg.err_mask;
+
+ issue_fail:
+       ata_qc_free(qc);
+       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       return AC_ERR_OTHER;
+}
+
 static int ata_qc_wait_err(struct ata_queued_cmd *qc,
                           struct completion *wait)
 {