/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);

unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);

static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
					     drm_map_t *map)
{
	struct list_head *list;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
		if (entry->map && map->type == entry->map->type &&
		    entry->map->offset == map->offset)
			return entry;
	}
	return NULL;
}

/*
 * Used to allocate 32-bit handles for mappings.
 */
#define START_RANGE 0x10000000
#define END_RANGE 0x40000000

#ifdef _LP64
static __inline__ unsigned int HandleID(unsigned long lhandle,
					drm_device_t *dev)
{
	static unsigned int map32_handle = START_RANGE;
	unsigned int hash;

	if (lhandle & 0xffffffff00000000) {
		hash = map32_handle;
		map32_handle += PAGE_SIZE;
		if (map32_handle > END_RANGE)
			map32_handle = START_RANGE;
	} else
		hash = lhandle;

	while (1) {
		drm_map_list_t *_entry;
		list_for_each_entry(_entry, &dev->maplist->head, head) {
			if (_entry->user_token == hash)
				break;
		}
		if (&_entry->head == &dev->maplist->head)
			return hash;

		hash += PAGE_SIZE;
		map32_handle += PAGE_SIZE;
	}
}
#else
# define HandleID(x,dev) (unsigned int)(x)
#endif

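/*
 * Illustrative note (not part of the original file): on 64-bit kernels a
 * map's kernel address no longer fits in the 32-bit handle the ioctl ABI
 * returns, so HandleID() above hands out a synthetic, page-aligned token
 * in [START_RANGE, END_RANGE) and linearly probes the map list until the
 * token is unique.  User space then passes that token back as the mmap()
 * offset.  A hypothetical user-space sequence (fd, map_size and user_token
 * are placeholder names):
 */
#if 0
	void *regs = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, user_token);
#endif
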
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
			   unsigned int size, drm_map_type_t type,
			   drm_map_flags_t flags, drm_map_list_t ** maplist)
{
	drm_map_t *map;
	drm_map_list_t *list;
	drm_dma_handle_t *dmah;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		  map->offset, map->size, map->type);
	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + (map->size-1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = drm_ioremap(map->offset, map->size, dev);
		break;

	case _DRM_SHM:
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP:
		if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
			map->offset += dev->hose->mem_space->start;
#endif
			map->offset += dev->agp->base;
			map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
		}
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist->head);
	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	list->user_token = HandleID(map->type == _DRM_SHM
				    ? (unsigned long)map->handle
				    : map->offset, dev);
	mutex_unlock(&dev->struct_mutex);

	*maplist = list;
	return 0;
}

int drm_addmap(drm_device_t * dev, unsigned int offset,
	       unsigned int size, drm_map_type_t type,
	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
	drm_map_list_t *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);

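/*
 * Illustrative sketch (not from the original source): a driver would
 * typically call drm_addmap() at load time to publish a register BAR
 * without any ioctl round trip.  The mmio name and BAR index below are
 * hypothetical.
 */
#if 0
	drm_local_map_t *mmio;
	int err = drm_addmap(dev,
			     drm_get_resource_start(dev, 0),
			     drm_get_resource_len(dev, 0),
			     _DRM_REGISTERS, _DRM_READ_ONLY, &mmio);
	if (err)
		return err;
#endif
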
int drm_addmap_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t map;
	drm_map_list_t *maplist;
	drm_map_t __user *argp = (void __user *)arg;
	int err;

	if (!(filp->f_mode & 3))
		return -EACCES;	/* Require read/write */

	if (copy_from_user(&map, argp, sizeof(map))) {
		return -EFAULT;
	}

	if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
		return -EPERM;

	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
			      &maplist);
	if (err)
		return err;

	if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
		return -EFAULT;

	/* Avoid a warning on 64-bit: this cast isn't pretty, but the ioctl
	 * API is already fixed, so it's too late to change it. */
	if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
		return -EFAULT;
	return 0;
}

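/*
 * Illustrative sketch (not from the original source): user space drives
 * this ioctl with a drm_map_t and then mmap()s the returned handle.  fd,
 * fb_base and fb_size are hypothetical names.
 */
#if 0
	drm_map_t map = {
		.offset = fb_base,	/* hypothetical physical address */
		.size   = fb_size,
		.type   = _DRM_FRAME_BUFFER,
		.flags  = 0,
	};
	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) == 0)
		ptr = mmap(NULL, map.size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, (unsigned long)map.handle);
#endif
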
/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it's being used, and frees any associated resources (such
 * as MTRRs) if it's not.
 */
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_dma_handle_t dmah;

	/* Find the list entry for the map and remove it */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map == map) {
			list_del(list);
			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		return -EINVAL;
	}

	switch (map->type) {
	case _DRM_REGISTERS:
		drm_ioremapfree(map->handle, map->size, dev);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}

int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This
 * seems unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t request;
	drm_local_map_t *map = NULL;
	struct list_head *list;
	int ret;

	if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
		return -EFAULT;
	}

	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map &&
		    r_list->user_token == (unsigned long)request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (!map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */

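/*
 * Worked example (added for clarity, not in the original): a request of
 * count = 32, size = 65536 with _DRM_PAGE_ALIGN on a 4 KiB-page machine
 * gives order = drm_order(65536) = 16, size = 1 << 16, alignment = 65536
 * (already page aligned), page_order = 16 - 12 = 4, so each buffer covers
 * PAGE_SIZE << 4 = 64 KiB of the aperture starting at
 * dev->agp->base + request->agp_start.
 */
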
int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_dma_handle_t *dmah;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
			    = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);

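/*
 * Design note (added for clarity, not in the original): drm_addbufs_pci()
 * stages page addresses in temp_pagelist and only swaps it into
 * dma->pagelist once every drm_pci_alloc() has succeeded, so a mid-loop
 * allocation failure can roll everything back via drm_cleanup_buf_error()
 * without ever leaving dma->pagelist half updated.
 */
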
static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
			   sizeof(request)))
		return -EFAULT;

#if __OS_HAS_AGP
	if (request.flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if (request.flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, &request);
	else if (request.flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret == 0) {
		if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
			ret = -EFAULT;
		}
	}
	return ret;
}

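/*
 * Illustrative sketch (not from the original source): user space asks for
 * DMA buffers with a drm_buf_desc_t; on return, count and size report what
 * was actually allocated.  fd is a hypothetical open DRM device node.
 */
#if 0
	drm_buf_desc_t req = {
		.count = 32,
		.size  = 65536,
		.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
		.agp_start = 0,	/* offset within the AGP aperture */
	};
	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &req) == 0)
		printf("got %d buffers of %d bytes\n", req.count, req.size);
#endif
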
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or by a sophisticated
 * client library to determine how best to use the available buffers (e.g.,
 * large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				drm_buf_desc_t __user *to =
				    &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_desc_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}

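/*
 * Illustrative sketch (not from the original source): setting watermarks
 * for the 64 KiB bucket from user space.  fd is a hypothetical open DRM
 * device node.
 */
#if 0
	drm_buf_desc_t mark = {
		.size      = 65536,	/* selects dma->bufs[drm_order(65536)] */
		.low_mark  = 4,
		.high_mark = 24,
	};
	ioctl(fd, DRM_IOCTL_MARK_BUFS, &mark);
#endif
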
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_free_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  current->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space.  The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	if (request.count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}

			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request.list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return retcode;
}

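/*
 * Illustrative sketch (not from the original source): the caller supplies
 * a drm_buf_map_t whose list array has room for at least dma->buf_count
 * entries; on success each entry's address points into the single mmap()
 * region established above.  fd, list size and use_buffer() are
 * hypothetical.
 */
#if 0
	drm_buf_pub_t list[256];
	drm_buf_map_t bufs = { .count = 256, .list = list };
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs) == 0)
		/* bufs.virtual is the base; list[i].address = base + offset */
		use_buffer(list[0].address, list[0].total);
#endif
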
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
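
/*
 * Worked examples (added for clarity, not in the original):
 * drm_order(1) == 0, drm_order(2) == 1, drm_order(3) == 2,
 * drm_order(4096) == 12, drm_order(4097) == 13; i.e. the result is
 * ceil(log2(size)), matching the power-of-two bucket index used for
 * dma->bufs[] above.
 */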