]> pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - drivers/char/drm/drm_lock.c
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
[linux-2.6-omap-h63xx.git] / drivers / char / drm / drm_lock.c
index e9993ba461a2ba85e39229ebfc9d46dbe240d13a..bea2a7d5b2b22f695d3c6131d2068450ed55f074 100644 (file)
 
 #include "drmP.h"
 
-static int drm_lock_transfer(drm_device_t * dev,
-                            __volatile__ unsigned int *lock,
-                            unsigned int context);
 static int drm_notifier(void *priv);
 
 /**
  * Lock ioctl.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
+ * \param dev DRM device.
+ * \param data pointer to a struct drm_lock (the lock request).
+ * \param file_priv DRM file private.
  * \return zero on success or negative number on failure.
  *
  * Add the current task to the lock wait queue, and attempt to take to lock.
  */
-int drm_lock(struct inode *inode, struct file *filp,
-            unsigned int cmd, unsigned long arg)
+int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
-       drm_file_t *priv = filp->private_data;
-       drm_device_t *dev = priv->head->dev;
        DECLARE_WAITQUEUE(entry, current);
-       drm_lock_t lock;
+       struct drm_lock *lock = data;
        int ret = 0;
 
-       ++priv->lock_count;
+       ++file_priv->lock_count;
 
-       if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
-               return -EFAULT;
-
-       if (lock.context == DRM_KERNEL_CONTEXT) {
+       if (lock->context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
-                         current->pid, lock.context);
+                         task_pid_nr(current), lock->context);
                return -EINVAL;
        }
 
        DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
-                 lock.context, current->pid,
-                 dev->lock.hw_lock->lock, lock.flags);
+                 lock->context, task_pid_nr(current),
+                 dev->lock.hw_lock->lock, lock->flags);
 
        if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
-               if (lock.context < 0)
+               if (lock->context < 0)
                        return -EINVAL;
 
        add_wait_queue(&dev->lock.lock_queue, &entry);
+       spin_lock(&dev->lock.spinlock);
+       dev->lock.user_waiters++;
+       spin_unlock(&dev->lock.spinlock);
        for (;;) {
                __set_current_state(TASK_INTERRUPTIBLE);
                if (!dev->lock.hw_lock) {
@@ -87,8 +81,8 @@ int drm_lock(struct inode *inode, struct file *filp,
                        ret = -EINTR;
                        break;
                }
-               if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
-                       dev->lock.filp = filp;
+               if (drm_lock_take(&dev->lock, lock->context)) {
+                       dev->lock.file_priv = file_priv;
                        dev->lock.lock_time = jiffies;
                        atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
                        break;  /* Got lock */
@@ -101,40 +95,43 @@ int drm_lock(struct inode *inode, struct file *filp,
                        break;
                }
        }
+       spin_lock(&dev->lock.spinlock);
+       dev->lock.user_waiters--;
+       spin_unlock(&dev->lock.spinlock);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&dev->lock.lock_queue, &entry);
 
-       DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
-       if (ret)
-               return ret;
+       DRM_DEBUG("%d %s\n", lock->context,
+                 ret ? "interrupted" : "has lock");
+       if (ret) return ret;
 
        sigemptyset(&dev->sigmask);
        sigaddset(&dev->sigmask, SIGSTOP);
        sigaddset(&dev->sigmask, SIGTSTP);
        sigaddset(&dev->sigmask, SIGTTIN);
        sigaddset(&dev->sigmask, SIGTTOU);
-       dev->sigdata.context = lock.context;
+       dev->sigdata.context = lock->context;
        dev->sigdata.lock = dev->lock.hw_lock;
        block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
 
-       if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY))
+       if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
                dev->driver->dma_ready(dev);
 
-       if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) {
+       if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
+       {
                if (dev->driver->dma_quiescent(dev)) {
-                       DRM_DEBUG("%d waiting for DMA quiescent\n", lock.context);
-                       return DRM_ERR(EBUSY);
+                       DRM_DEBUG("%d waiting for DMA quiescent\n",
+                                 lock->context);
+                       return -EBUSY;
                }
        }
 
-       /* dev->driver->kernel_context_switch isn't used by any of the x86
-        *  drivers but is used by the Sparc driver.
-        */
        if (dev->driver->kernel_context_switch &&
-           dev->last_context != lock.context) {
+           dev->last_context != lock->context) {
                dev->driver->kernel_context_switch(dev, dev->last_context,
-                                                  lock.context);
+                                                  lock->context);
        }
+
        return 0;
 }
 
@@ -142,27 +139,21 @@ int drm_lock(struct inode *inode, struct file *filp,
  * Unlock ioctl.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
+ * \param dev DRM device.
+ * \param data pointer to a struct drm_lock (the lock to release).
+ * \param file_priv DRM file private.
  * \return zero on success or negative number on failure.
  *
  * Transfer and free the lock.
  */
-int drm_unlock(struct inode *inode, struct file *filp,
-              unsigned int cmd, unsigned long arg)
+int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
-       drm_file_t *priv = filp->private_data;
-       drm_device_t *dev = priv->head->dev;
-       drm_lock_t lock;
+       struct drm_lock *lock = data;
        unsigned long irqflags;
 
-       if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
-               return -EFAULT;
-
-       if (lock.context == DRM_KERNEL_CONTEXT) {
+       if (lock->context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
-                         current->pid, lock.context);
+                         task_pid_nr(current), lock->context);
                return -EINVAL;
        }
 
@@ -184,12 +175,8 @@ int drm_unlock(struct inode *inode, struct file *filp,
        if (dev->driver->kernel_context_switch_unlock)
                dev->driver->kernel_context_switch_unlock(dev);
        else {
-               drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
-                                 DRM_KERNEL_CONTEXT);
-
-               if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
-                                 DRM_KERNEL_CONTEXT)) {
-                       DRM_ERROR("\n");
+               if (drm_lock_free(&dev->lock,lock->context)) {
+                       /* FIXME: Should really bail out here. */
                }
        }
 
@@ -206,18 +193,26 @@ int drm_unlock(struct inode *inode, struct file *filp,
  *
  * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
  */
-int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_take(struct drm_lock_data *lock_data,
+                 unsigned int context)
 {
        unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
 
+       spin_lock(&lock_data->spinlock);
        do {
                old = *lock;
                if (old & _DRM_LOCK_HELD)
                        new = old | _DRM_LOCK_CONT;
-               else
-                       new = context | _DRM_LOCK_HELD;
+               else {
+                       new = context | _DRM_LOCK_HELD |
+                               ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+                                _DRM_LOCK_CONT : 0);
+               }
                prev = cmpxchg(lock, old, new);
        } while (prev != old);
+       spin_unlock(&lock_data->spinlock);
+
        if (_DRM_LOCKING_CONTEXT(old) == context) {
                if (old & _DRM_LOCK_HELD) {
                        if (context != DRM_KERNEL_CONTEXT) {
@@ -227,7 +222,8 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
                        return 0;
                }
        }
-       if (new == (context | _DRM_LOCK_HELD)) {
+
+       if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
                /* Have lock */
                return 1;
        }
@@ -246,13 +242,13 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
  * Resets the lock file pointer.
  * Marks the lock as held by the given context, via the \p cmpxchg instruction.
  */
-static int drm_lock_transfer(drm_device_t * dev,
-                            __volatile__ unsigned int *lock,
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
                             unsigned int context)
 {
        unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
 
-       dev->lock.filp = NULL;
+       lock_data->file_priv = NULL;
        do {
                old = *lock;
                new = context | _DRM_LOCK_HELD;
@@ -272,23 +268,32 @@ static int drm_lock_transfer(drm_device_t * dev,
  * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
  * waiting on the lock queue.
  */
-int drm_lock_free(drm_device_t * dev,
-                 __volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
 {
        unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock(&lock_data->spinlock);
+       if (lock_data->kernel_waiters != 0) {
+               /* A kernel ("idlelock") waiter is registered: hand the lock
+                * over instead of freeing it, and record that the idle path
+                * now owns it so drm_idlelock_take() can skip re-taking. */
+               drm_lock_transfer(lock_data, 0);
+               lock_data->idle_has_lock = 1;
+               spin_unlock(&lock_data->spinlock);
+               return 1;
+       }
+       spin_unlock(&lock_data->spinlock);
 
-       dev->lock.filp = NULL;
        do {
                old = *lock;
-               new = 0;
+               /* Keep the context bits, clear the HELD/CONT flags; the
+                * cmpxchg loop retries if another CPU raced an update. */
+               new = _DRM_LOCKING_CONTEXT(old);
                prev = cmpxchg(lock, old, new);
        } while (prev != old);
+
        if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
                DRM_ERROR("%d freed heavyweight lock held by %d\n",
                          context, _DRM_LOCKING_CONTEXT(old));
                return 1;
        }
-       wake_up_interruptible(&dev->lock.lock_queue);
+       wake_up_interruptible(&lock_data->lock_queue);
        return 0;
 }
 
@@ -305,7 +310,7 @@ int drm_lock_free(drm_device_t * dev,
  */
 static int drm_notifier(void *priv)
 {
-       drm_sigdata_t *s = (drm_sigdata_t *) priv;
+       struct drm_sigdata *s = (struct drm_sigdata *) priv;
        unsigned int old, new, prev;
 
        /* Allow signal delivery if lock isn't held */
@@ -322,3 +327,65 @@ static int drm_notifier(void *priv)
        } while (prev != old);
        return 0;
 }
+
+/**
+ * This function returns immediately and takes the hw lock
+ * with the kernel context if it is free, otherwise it gets the highest priority when and if
+ * it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
+ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
+ * a deadlock, which is why the "idlelock" was invented).
+ *
+ * This should be sufficient to wait for GPU idle without
+ * having to worry about starvation.
+ */
+
+void drm_idlelock_take(struct drm_lock_data *lock_data)
+{
+       int ret = 0;
+
+       spin_lock(&lock_data->spinlock);
+       /* Register as a kernel waiter first, so a concurrent drm_lock_free()
+        * transfers the lock to the idle path instead of releasing it. */
+       lock_data->kernel_waiters++;
+       if (!lock_data->idle_has_lock) {
+
+               /* Drop the spinlock around drm_lock_take(), which acquires it
+                * itself; kernel_waiters > 0 keeps our claim registered. */
+               spin_unlock(&lock_data->spinlock);
+               ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+               spin_lock(&lock_data->spinlock);
+
+               if (ret == 1)
+                       lock_data->idle_has_lock = 1;
+       }
+       spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_take);
+
+/**
+ * Release the "idlelock" taken by drm_idlelock_take().
+ *
+ * When the last kernel waiter leaves and the idle path actually holds the
+ * hardware lock, hand the lock word back to the kernel context and wake any
+ * tasks sleeping on the lock queue.
+ */
+void drm_idlelock_release(struct drm_lock_data *lock_data)
+{
+       unsigned int old, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock(&lock_data->spinlock);
+       if (--lock_data->kernel_waiters == 0) {
+               if (lock_data->idle_has_lock) {
+                       /* cmpxchg loop: atomically reset the lock word to the
+                        * kernel context, retrying on concurrent updates. */
+                       do {
+                               old = *lock;
+                               prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+                       } while (prev != old);
+                       wake_up_interruptible(&lock_data->lock_queue);
+                       lock_data->idle_has_lock = 0;
+               }
+       }
+       spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_release);
+
+
+/**
+ * Check whether \p file_priv currently holds the hardware lock.
+ *
+ * True only if this file has taken the lock at least once (lock_count),
+ * a hardware lock exists, its HELD bit is set, and ownership is recorded
+ * for exactly this file.
+ */
+int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
+{
+       return (file_priv->lock_count && dev->lock.hw_lock &&
+               _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+               dev->lock.file_priv == file_priv);
+}
+
+EXPORT_SYMBOL(drm_i_have_hw_lock);