 #include <linux/smp_lock.h>    /* For (un)lock_kernel */
 #include <linux/mm.h>
 #include <linux/cdev.h>
+#include <linux/mutex.h>
 #if defined(__alpha__) || defined(__powerpc__)
 #include <asm/pgtable.h>       /* For pte_wrprotect */
 #endif
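
For context, the conversion follows the standard kernel pattern for turning a binary semaphore into a mutex: the struct semaphore field becomes a struct mutex, sema_init(&sem, 1) becomes mutex_init(&mutex), and down()/up() pairs become mutex_lock()/mutex_unlock(). The sketch below shows that shape with hypothetical names; it is illustrative only and not part of the patch.

	/* Illustrative sketch of the semaphore -> mutex conversion; names are hypothetical. */
	#include <linux/mutex.h>

	struct example_dev {
		struct mutex struct_mutex;	/* was: struct semaphore struct_sem; */
	};

	static void example_dev_init(struct example_dev *dev)
	{
		mutex_init(&dev->struct_mutex);	/* was: sema_init(&dev->struct_sem, 1); */
	}

	static void example_dev_touch(struct example_dev *dev)
	{
		mutex_lock(&dev->struct_mutex);		/* was: down(&dev->struct_sem); */
		/* ... access fields guarded by struct_mutex ... */
		mutex_unlock(&dev->struct_mutex);	/* was: up(&dev->struct_sem); */
	}

In this patch every down()/up() pair maps one-to-one onto mutex_lock()/mutex_unlock(), so the conversion is mechanical; the behavioural difference is that a mutex must be released by the task that acquired it and cannot be used from interrupt context.
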
        /** \name Locks */
        /*@{ */
        spinlock_t count_lock;          /**< For inuse, drm_device::open_count, drm_device::buf_use */
-       struct semaphore struct_sem;    /**< For others */
+       struct mutex struct_mutex;      /**< For others */
        /*@} */
 
        /** \name Usage Counters */
        /*@{ */
        drm_ctx_list_t *ctxlist;        /**< Linked list of context handles */
        int ctx_count;                  /**< Number of context handles */
-       struct semaphore ctxlist_sem;   /**< For ctxlist */
+       struct mutex ctxlist_mutex;     /**< For ctxlist */
 
        drm_map_t **context_sareas;         /**< per-context SAREA's */
        int max_context;
 
  * \param magic magic number.
  *
  * Searches in drm_device::magiclist within all files with the same hash key
- * the one with matching magic number, while holding the drm_device::struct_sem
+ * the one with matching magic number, while holding the drm_device::struct_mutex
  * lock.
  */
 static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
        drm_magic_entry_t *pt;
        int hash = drm_hash_magic(magic);
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
                if (pt->magic == magic) {
                        retval = pt->priv;
                        break;
                }
        }
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
        return retval;
 }
 
  *
  * Creates a drm_magic_entry structure and appends to the linked list
  * associated the magic number hash key in drm_device::magiclist, while holding
- * the drm_device::struct_sem lock.
+ * the drm_device::struct_mutex lock.
  */
 static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
                         drm_magic_t magic)
        entry->priv = priv;
        entry->next = NULL;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        if (dev->magiclist[hash].tail) {
                dev->magiclist[hash].tail->next = entry;
                dev->magiclist[hash].tail = entry;
                dev->magiclist[hash].head = entry;
                dev->magiclist[hash].tail = entry;
        }
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
  * \param magic magic number.
  *
  * Searches and unlinks the entry in drm_device::magiclist with the magic
- * number hash key, while holding the drm_device::struct_sem lock.
+ * number hash key, while holding the drm_device::struct_mutex lock.
  */
 static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
 {
        DRM_DEBUG("%d\n", magic);
        hash = drm_hash_magic(magic);
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
                if (pt->magic == magic) {
                        if (dev->magiclist[hash].head == pt) {
                        if (prev) {
                                prev->next = pt->next;
                        }
-                       up(&dev->struct_sem);
+                       mutex_unlock(&dev->struct_mutex);
                        return 0;
                }
        }
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
 
 
        memset(list, 0, sizeof(*list));
        list->map = map;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        list_add(&list->head, &dev->maplist->head);
        /* Assign a 32-bit handle */
-       /* We do it here so that dev->struct_sem protects the increment */
+       /* We do it here so that dev->struct_mutex protects the increment */
        list->user_token = HandleID(map->type == _DRM_SHM
                                    ? (unsigned long)map->handle
                                    : map->offset, dev);
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        *maplist = list;
        return 0;
 {
        int ret;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        ret = drm_rmmap_locked(dev, map);
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        return ret;
 }
                return -EFAULT;
        }
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        list_for_each(list, &dev->maplist->head) {
                drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
 
         * find anything.
         */
        if (list == (&dev->maplist->head)) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
 
 
        /* Register and framebuffer maps are permanent */
        if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                return 0;
        }
 
        ret = drm_rmmap_locked(dev, map);
 
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        return ret;
 }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }
 
        if (count < 0 || count > 4096) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }
        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
-                       up(&dev->struct_sem);
+                       mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        request->count = entry->buf_count;
        request->size = size;
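
The drm_addbufs_* error paths above (and in the later variants below) all follow the same unwind shape: buf_alloc is incremented before count_lock is dropped, and every early return taken while holding struct_mutex first drops the mutex and then decrements buf_alloc. A condensed, self-contained sketch of that shape, using a hypothetical device struct rather than the real drm_device_t:

	/* Illustrative only: the recurring allocate/unwind pattern, hypothetical names. */
	#include <asm/atomic.h>
	#include <linux/errno.h>
	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	struct example_dev {
		spinlock_t count_lock;		/* guards the buf_alloc bump */
		atomic_t buf_alloc;		/* outstanding allocation requests */
		struct mutex struct_mutex;	/* guards the buffer bookkeeping */
	};

	static int example_addbufs(struct example_dev *dev, int count)
	{
		spin_lock(&dev->count_lock);
		atomic_inc(&dev->buf_alloc);
		spin_unlock(&dev->count_lock);

		mutex_lock(&dev->struct_mutex);
		if (count < 0 || count > 4096) {
			mutex_unlock(&dev->struct_mutex);	/* drop the mutex first ... */
			atomic_dec(&dev->buf_alloc);		/* ... then release the request ref */
			return -EINVAL;
		}
		/* ... allocate buffer bookkeeping, unwinding the same way on failure ... */
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return 0;
	}
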
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }
 
        if (count < 0 || count > 4096) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }
        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        if (!entry->seglist) {
                drm_free(entry->buflist,
                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
                drm_free(entry->seglist,
                         count * sizeof(*entry->seglist), DRM_MEM_SEGS);
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
                        drm_free(temp_pagelist,
                                 (dma->page_count + (count << page_order))
                                 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
-                       up(&dev->struct_sem);
+                       mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                                          (count << page_order))
                                         * sizeof(*dma->pagelist),
                                         DRM_MEM_PAGES);
-                               up(&dev->struct_sem);
+                               mutex_unlock(&dev->struct_mutex);
                                atomic_dec(&dev->buf_alloc);
                                return -ENOMEM;
                        }
                drm_free(temp_pagelist,
                         (dma->page_count + (count << page_order))
                         * sizeof(*dma->pagelist), DRM_MEM_PAGES);
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
 
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        request->count = entry->buf_count;
        request->size = size;
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }
 
        if (count < 0 || count > 4096) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }
        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
-                       up(&dev->struct_sem);
+                       mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        request->count = entry->buf_count;
        request->size = size;
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }
 
        if (count < 0 || count > 4096) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }
        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
-                       up(&dev->struct_sem);
+                       mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        request->count = entry->buf_count;
        request->size = size;
 
  * \param ctx_handle context handle.
  *
  * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
- * in drm_device::context_sareas, while holding the drm_device::struct_sem
+ * in drm_device::context_sareas, while holding the drm_device::struct_mutex
  * lock.
  */
 void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
                goto failed;
 
        if (ctx_handle < DRM_MAX_CTXBITMAP) {
-               down(&dev->struct_sem);
+               mutex_lock(&dev->struct_mutex);
                clear_bit(ctx_handle, dev->ctx_bitmap);
                dev->context_sareas[ctx_handle] = NULL;
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                return;
        }
       failed:
  *
  * Find the first zero bit in drm_device::ctx_bitmap and (re)allocates
  * drm_device::context_sareas to accommodate the new entry while holding the
- * drm_device::struct_sem lock.
+ * drm_device::struct_mutex lock.
  */
 static int drm_ctxbitmap_next(drm_device_t * dev)
 {
        if (!dev->ctx_bitmap)
                return -1;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
        if (bit < DRM_MAX_CTXBITMAP) {
                set_bit(bit, dev->ctx_bitmap);
                                                         DRM_MEM_MAPS);
                                if (!ctx_sareas) {
                                        clear_bit(bit, dev->ctx_bitmap);
-                                       up(&dev->struct_sem);
+                                       mutex_unlock(&dev->struct_mutex);
                                        return -1;
                                }
                                dev->context_sareas = ctx_sareas;
                                              DRM_MEM_MAPS);
                                if (!dev->context_sareas) {
                                        clear_bit(bit, dev->ctx_bitmap);
-                                       up(&dev->struct_sem);
+                                       mutex_unlock(&dev->struct_mutex);
                                        return -1;
                                }
                                dev->context_sareas[bit] = NULL;
                        }
                }
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                return bit;
        }
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
        return -1;
 }
 
  * \param dev DRM device.
  *
  * Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding
- * the drm_device::struct_sem lock.
+ * the drm_device::struct_mutex lock.
  */
 int drm_ctxbitmap_init(drm_device_t * dev)
 {
        int i;
        int temp;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE,
                                                     DRM_MEM_CTXBITMAP);
        if (dev->ctx_bitmap == NULL) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                return -ENOMEM;
        }
        memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE);
        dev->context_sareas = NULL;
        dev->max_context = -1;
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
                temp = drm_ctxbitmap_next(dev);
  * \param dev DRM device.
  *
  * Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
- * the drm_device::struct_sem lock.
+ * the drm_device::struct_mutex lock.
  */
 void drm_ctxbitmap_cleanup(drm_device_t * dev)
 {
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        if (dev->context_sareas)
                drm_free(dev->context_sareas,
                         sizeof(*dev->context_sareas) *
                         dev->max_context, DRM_MEM_MAPS);
        drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP);
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 }
 
 /*@}*/
        if (copy_from_user(&request, argp, sizeof(request)))
                return -EFAULT;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        if (dev->max_context < 0
            || request.ctx_id >= (unsigned)dev->max_context) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
 
        map = dev->context_sareas[request.ctx_id];
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        request.handle = NULL;
        list_for_each_entry(_entry, &dev->maplist->head, head) {
                           (drm_ctx_priv_map_t __user *) arg, sizeof(request)))
                return -EFAULT;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);
                if (r_list->map
                        goto found;
        }
       bad:
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
        return -EINVAL;
 
       found:
        if (request.ctx_id >= (unsigned)dev->max_context)
                goto bad;
        dev->context_sareas[request.ctx_id] = map;
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
        ctx_entry->handle = ctx.handle;
        ctx_entry->tag = priv;
 
-       down(&dev->ctxlist_sem);
+       mutex_lock(&dev->ctxlist_mutex);
        list_add(&ctx_entry->head, &dev->ctxlist->head);
        ++dev->ctx_count;
-       up(&dev->ctxlist_sem);
+       mutex_unlock(&dev->ctxlist_mutex);
 
        if (copy_to_user(argp, &ctx, sizeof(ctx)))
                return -EFAULT;
                drm_ctxbitmap_free(dev, ctx.handle);
        }
 
-       down(&dev->ctxlist_sem);
+       mutex_lock(&dev->ctxlist_mutex);
        if (!list_empty(&dev->ctxlist->head)) {
                drm_ctx_list_t *pos, *n;
 
                        }
                }
        }
-       up(&dev->ctxlist_sem);
+       mutex_unlock(&dev->ctxlist_mutex);
 
        return 0;
 }
 
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        del_timer(&dev->timer);
 
        /* Clear pid list */
                dev->lock.filp = NULL;
                wake_up_interruptible(&dev->lock.lock_queue);
        }
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        DRM_DEBUG("lastclose completed\n");
        return 0;
 
                        goto out_free;
        }
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        if (!dev->file_last) {
                priv->next = NULL;
                priv->prev = NULL;
                dev->file_last->next = priv;
                dev->file_last = priv;
        }
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
 #ifdef __alpha__
        /*
 
        drm_fasync(-1, filp, 0);
 
-       down(&dev->ctxlist_sem);
+       mutex_lock(&dev->ctxlist_mutex);
        if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
                drm_ctx_list_t *pos, *n;
 
                        }
                }
        }
-       up(&dev->ctxlist_sem);
+       mutex_unlock(&dev->ctxlist_mutex);
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        if (priv->remove_auth_on_close == 1) {
                drm_file_t *temp = dev->file_first;
                while (temp) {
        } else {
                dev->file_last = priv->prev;
        }
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        if (dev->driver->postclose)
                dev->driver->postclose(dev, priv);
 
                return -EFAULT;
        idx = map.offset;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        if (idx < 0) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
 
                i++;
        }
        if (!r_list || !r_list->map) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
 
        map.flags = r_list->map->flags;
        map.handle = (void *)(unsigned long)r_list->user_token;
        map.mtrr = r_list->map->mtrr;
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        if (copy_to_user(argp, &map, sizeof(map)))
                return -EFAULT;
        if (copy_from_user(&client, argp, sizeof(client)))
                return -EFAULT;
        idx = client.idx;
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ;
 
        if (!pt) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
        client.auth = pt->authenticated;
        client.uid = pt->uid;
        client.magic = pt->magic;
        client.iocs = pt->ioctl_count;
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        if (copy_to_user(argp, &client, sizeof(client)))
                return -EFAULT;
 
        memset(&stats, 0, sizeof(stats));
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
 
        for (i = 0; i < dev->counters; i++) {
                if (dev->types[i] == _DRM_STAT_LOCK)
 
        stats.count = dev->counters;
 
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats)))
                return -EFAULT;
 
        if (dev->irq == 0)
                return -EINVAL;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
 
        /* Driver must have been initialized */
        if (!dev->dev_private) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
 
        if (dev->irq_enabled) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                return -EBUSY;
        }
        dev->irq_enabled = 1;
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
 
        ret = request_irq(dev->irq, dev->driver->irq_handler,
                          sh_flags, dev->devname, dev);
        if (ret < 0) {
-               down(&dev->struct_sem);
+               mutex_lock(&dev->struct_mutex);
                dev->irq_enabled = 0;
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                return ret;
        }
 
        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                return -EINVAL;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        irq_enabled = dev->irq_enabled;
        dev->irq_enabled = 0;
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        if (!irq_enabled)
                return -EINVAL;
 
 }
 
 /**
- * Simply calls _vm_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
  */
 static int drm_vm_info(char *buf, char **start, off_t offset, int request,
                       int *eof, void *data)
        drm_device_t *dev = (drm_device_t *) data;
        int ret;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        ret = drm__vm_info(buf, start, offset, request, eof, data);
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
        return ret;
 }
 
 }
 
 /**
- * Simply calls _queues_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
  */
 static int drm_queues_info(char *buf, char **start, off_t offset, int request,
                           int *eof, void *data)
        drm_device_t *dev = (drm_device_t *) data;
        int ret;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        ret = drm__queues_info(buf, start, offset, request, eof, data);
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
        return ret;
 }
 
 }
 
 /**
- * Simply calls _bufs_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
  */
 static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
                         int *eof, void *data)
        drm_device_t *dev = (drm_device_t *) data;
        int ret;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        ret = drm__bufs_info(buf, start, offset, request, eof, data);
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
        return ret;
 }
 
 }
 
 /**
- * Simply calls _clients_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
  */
 static int drm_clients_info(char *buf, char **start, off_t offset,
                            int request, int *eof, void *data)
        drm_device_t *dev = (drm_device_t *) data;
        int ret;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        ret = drm__clients_info(buf, start, offset, request, eof, data);
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
        return ret;
 }
 
        drm_device_t *dev = (drm_device_t *) data;
        int ret;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        ret = drm__vma_info(buf, start, offset, request, eof, data);
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
        return ret;
 }
 #endif
 
 
        spin_lock_init(&dev->count_lock);
        init_timer(&dev->timer);
-       sema_init(&dev->struct_sem, 1);
-       sema_init(&dev->ctxlist_sem, 1);
+       mutex_init(&dev->struct_mutex);
+       mutex_init(&dev->ctxlist_mutex);
 
        dev->pdev = pdev;
 
 
 
        map = vma->vm_private_data;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
                next = pt->next;
                if (pt->vma->vm_private_data == map)
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 }
 
 /**
 
        vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
-               down(&dev->struct_sem);
+               mutex_lock(&dev->struct_mutex);
                vma_entry->vma = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid = current->pid;
                dev->vmalist = vma_entry;
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
        }
 }
 
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                        break;
                }
        }
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 }
 
 /**