2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
32 #include <linux/swap.h>
35 i915_gem_object_set_domain(struct drm_gem_object *obj,
36 uint32_t read_domains,
37 uint32_t write_domain);
39 i915_gem_object_set_domain_range(struct drm_gem_object *obj,
42 uint32_t read_domains,
43 uint32_t write_domain);
45 i915_gem_set_domain(struct drm_gem_object *obj,
46 struct drm_file *file_priv,
47 uint32_t read_domains,
48 uint32_t write_domain);
49 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
50 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
51 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
54 i915_gem_cleanup_ringbuffer(struct drm_device *dev);
57 i915_gem_init_ioctl(struct drm_device *dev, void *data,
58 struct drm_file *file_priv)
60 drm_i915_private_t *dev_priv = dev->dev_private;
61 struct drm_i915_gem_init *args = data;
63 mutex_lock(&dev->struct_mutex);
65 if (args->gtt_start >= args->gtt_end ||
66 (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
67 (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
68 mutex_unlock(&dev->struct_mutex);
72 drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
73 args->gtt_end - args->gtt_start);
75 dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
77 mutex_unlock(&dev->struct_mutex);
84 * Creates a new mm object and returns a handle to it.
87 i915_gem_create_ioctl(struct drm_device *dev, void *data,
88 struct drm_file *file_priv)
90 struct drm_i915_gem_create *args = data;
91 struct drm_gem_object *obj;
94 args->size = roundup(args->size, PAGE_SIZE);
96 /* Allocate the new object */
97 obj = drm_gem_object_alloc(dev, args->size);
101 ret = drm_gem_handle_create(file_priv, obj, &handle);
102 mutex_lock(&dev->struct_mutex);
103 drm_gem_object_handle_unreference(obj);
104 mutex_unlock(&dev->struct_mutex);
109 args->handle = handle;
115 * Reads data from the object referenced by handle.
117 * On error, the contents of *data are undefined.
120 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
121 struct drm_file *file_priv)
123 struct drm_i915_gem_pread *args = data;
124 struct drm_gem_object *obj;
125 struct drm_i915_gem_object *obj_priv;
130 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
133 obj_priv = obj->driver_private;
135 /* Bounds check source.
137 * XXX: This could use review for overflow issues...
139 if (args->offset > obj->size || args->size > obj->size ||
140 args->offset + args->size > obj->size) {
141 drm_gem_object_unreference(obj);
145 mutex_lock(&dev->struct_mutex);
147 ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
148 I915_GEM_DOMAIN_CPU, 0);
150 drm_gem_object_unreference(obj);
151 mutex_unlock(&dev->struct_mutex);
155 offset = args->offset;
157 read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
158 args->size, &offset);
159 if (read != args->size) {
160 drm_gem_object_unreference(obj);
161 mutex_unlock(&dev->struct_mutex);
168 drm_gem_object_unreference(obj);
169 mutex_unlock(&dev->struct_mutex);
175 * Try to write quickly with an atomic kmap. Return true on success.
177 * If this fails (which includes a partial write), we'll redo the whole
178 * thing with the slow version.
180 * This is a workaround for the low performance of iounmap (approximately
181 * 10% CPU cost on normal 3D workloads). kmap_atomic on HIGHMEM kernels
182 * happens to let us map card memory without taking IPIs. When the vmap
183 * rework lands we should be able to dump this hack.
185 static inline int fast_user_write(unsigned long pfn, char __user *user_data,
188 #ifdef CONFIG_HIGHMEM
189 unsigned long unwritten;
192 vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
194 DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
195 i, o, l, pfn, vaddr_atomic);
197 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o, user_data, l);
198 kunmap_atomic(vaddr_atomic, KM_USER0);
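/* A nonzero return from __copy_from_user_inatomic_nocache() means the copy
 * was cut short (for instance because the user page was not resident); the
 * caller below then falls back to an ioremap_wc() mapping and a plain
 * __copy_from_user() for that chunk.
 */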
206 i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
207 struct drm_i915_gem_pwrite *args,
208 struct drm_file *file_priv)
210 struct drm_i915_gem_object *obj_priv = obj->driver_private;
213 char __user *user_data;
216 user_data = (char __user *) (uintptr_t) args->data_ptr;
218 if (!access_ok(VERIFY_READ, user_data, remain))
222 mutex_lock(&dev->struct_mutex);
223 ret = i915_gem_object_pin(obj, 0);
225 mutex_unlock(&dev->struct_mutex);
228 ret = i915_gem_set_domain(obj, file_priv,
229 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
233 obj_priv = obj->driver_private;
234 offset = obj_priv->gtt_offset + args->offset;
241 /* Operation in this page
244 * o = offset within page
247 i = offset >> PAGE_SHIFT;
248 o = offset & (PAGE_SIZE-1);
250 if ((o + l) > PAGE_SIZE)
253 pfn = (dev->agp->base >> PAGE_SHIFT) + i;
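/* Worked example of the per-page split (assuming 4 KiB pages): a GTT offset
 * of 0x3080 gives page index i = 3 and in-page offset o = 0x80, and l is
 * clamped so that o + l never crosses the page boundary.
 */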
255 if (!fast_user_write(pfn, user_data, l, o)) {
256 unsigned long unwritten;
259 vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
261 DRM_INFO("pwrite slow i %d o %d l %d "
262 "pfn %ld vaddr %p\n",
263 i, o, l, pfn, vaddr);
269 unwritten = __copy_from_user(vaddr + o, user_data, l);
271 DRM_INFO("unwritten %ld\n", unwritten);
284 #if WATCH_PWRITE && 1
285 i915_gem_clflush_object(obj);
286 i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
287 i915_gem_clflush_object(obj);
291 i915_gem_object_unpin(obj);
292 mutex_unlock(&dev->struct_mutex);
298 i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
299 struct drm_i915_gem_pwrite *args,
300 struct drm_file *file_priv)
306 mutex_lock(&dev->struct_mutex);
308 ret = i915_gem_set_domain(obj, file_priv,
309 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
311 mutex_unlock(&dev->struct_mutex);
315 offset = args->offset;
317 written = vfs_write(obj->filp,
318 (char __user *)(uintptr_t) args->data_ptr,
319 args->size, &offset);
320 if (written != args->size) {
321 mutex_unlock(&dev->struct_mutex);
328 mutex_unlock(&dev->struct_mutex);
334 * Writes data to the object referenced by handle.
336 * On error, the contents of the buffer that were to be modified are undefined.
339 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
340 struct drm_file *file_priv)
342 struct drm_i915_gem_pwrite *args = data;
343 struct drm_gem_object *obj;
344 struct drm_i915_gem_object *obj_priv;
347 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
350 obj_priv = obj->driver_private;
352 /* Bounds check destination.
354 * XXX: This could use review for overflow issues...
356 if (args->offset > obj->size || args->size > obj->size ||
357 args->offset + args->size > obj->size) {
358 drm_gem_object_unreference(obj);
362 /* We can only do the GTT pwrite on untiled buffers, as otherwise
363 * it would end up going through the fenced access, and we'll get
364 * different detiling behavior between reading and writing.
365 * pread/pwrite currently are reading and writing from the CPU
366 * perspective, requiring manual detiling by the client.
368 if (obj_priv->tiling_mode == I915_TILING_NONE &&
370 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
372 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
376 DRM_INFO("pwrite failed %d\n", ret);
379 drm_gem_object_unreference(obj);
385 * Called when user space prepares to use an object
388 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
389 struct drm_file *file_priv)
391 struct drm_i915_gem_set_domain *args = data;
392 struct drm_gem_object *obj;
395 if (!(dev->driver->driver_features & DRIVER_GEM))
398 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
402 mutex_lock(&dev->struct_mutex);
404 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
405 obj, obj->size, args->read_domains, args->write_domain);
407 ret = i915_gem_set_domain(obj, file_priv,
408 args->read_domains, args->write_domain);
409 drm_gem_object_unreference(obj);
410 mutex_unlock(&dev->struct_mutex);
415 * Called when user space has done writes to this buffer
418 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
419 struct drm_file *file_priv)
421 struct drm_i915_gem_sw_finish *args = data;
422 struct drm_gem_object *obj;
423 struct drm_i915_gem_object *obj_priv;
426 if (!(dev->driver->driver_features & DRIVER_GEM))
429 mutex_lock(&dev->struct_mutex);
430 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
432 mutex_unlock(&dev->struct_mutex);
437 DRM_INFO("%s: sw_finish %d (%p %d)\n",
438 __func__, args->handle, obj, obj->size);
440 obj_priv = obj->driver_private;
442 /* Pinned buffers may be scanout, so flush the cache */
443 if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
444 i915_gem_clflush_object(obj);
445 drm_agp_chipset_flush(dev);
447 drm_gem_object_unreference(obj);
448 mutex_unlock(&dev->struct_mutex);
453 * Maps the contents of an object, returning the address it is mapped into.
456 * While the mapping holds a reference on the contents of the object, it doesn't
457 * imply a ref on the object itself.
460 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
461 struct drm_file *file_priv)
463 struct drm_i915_gem_mmap *args = data;
464 struct drm_gem_object *obj;
468 if (!(dev->driver->driver_features & DRIVER_GEM))
471 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
475 offset = args->offset;
477 down_write(&current->mm->mmap_sem);
478 addr = do_mmap(obj->filp, 0, args->size,
479 PROT_READ | PROT_WRITE, MAP_SHARED,
481 up_write(&current->mm->mmap_sem);
482 mutex_lock(&dev->struct_mutex);
483 drm_gem_object_unreference(obj);
484 mutex_unlock(&dev->struct_mutex);
485 if (IS_ERR((void *)addr))
488 args->addr_ptr = (uint64_t) addr;
494 i915_gem_object_free_page_list(struct drm_gem_object *obj)
496 struct drm_i915_gem_object *obj_priv = obj->driver_private;
497 int page_count = obj->size / PAGE_SIZE;
500 if (obj_priv->page_list == NULL)
504 for (i = 0; i < page_count; i++)
505 if (obj_priv->page_list[i] != NULL) {
507 set_page_dirty(obj_priv->page_list[i]);
508 mark_page_accessed(obj_priv->page_list[i]);
509 page_cache_release(obj_priv->page_list[i]);
513 drm_free(obj_priv->page_list,
514 page_count * sizeof(struct page *),
516 obj_priv->page_list = NULL;
520 i915_gem_object_move_to_active(struct drm_gem_object *obj)
522 struct drm_device *dev = obj->dev;
523 drm_i915_private_t *dev_priv = dev->dev_private;
524 struct drm_i915_gem_object *obj_priv = obj->driver_private;
526 /* Add a reference if we're newly entering the active list. */
527 if (!obj_priv->active) {
528 drm_gem_object_reference(obj);
529 obj_priv->active = 1;
531 /* Move from whatever list we were on to the tail of execution. */
532 list_move_tail(&obj_priv->list,
533 &dev_priv->mm.active_list);
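/* The reference taken above is dropped again in
 * i915_gem_object_move_to_inactive() once the GPU is done with the buffer.
 */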
538 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
540 struct drm_device *dev = obj->dev;
541 drm_i915_private_t *dev_priv = dev->dev_private;
542 struct drm_i915_gem_object *obj_priv = obj->driver_private;
544 i915_verify_inactive(dev, __FILE__, __LINE__);
545 if (obj_priv->pin_count != 0)
546 list_del_init(&obj_priv->list);
548 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
550 if (obj_priv->active) {
551 obj_priv->active = 0;
552 drm_gem_object_unreference(obj);
554 i915_verify_inactive(dev, __FILE__, __LINE__);
558 * Creates a new sequence number, emitting a write of it to the status page
559 * plus an interrupt, which will trigger i915_user_interrupt_handler.
561 * Must be called with struct_mutex held.
563 * Returned sequence numbers are nonzero on success.
566 i915_add_request(struct drm_device *dev, uint32_t flush_domains)
568 drm_i915_private_t *dev_priv = dev->dev_private;
569 struct drm_i915_gem_request *request;
574 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
578 /* Grab the seqno we're going to make this request be, and bump the
579 * next (skipping 0 so it can be the reserved no-seqno value).
581 seqno = dev_priv->mm.next_gem_seqno;
582 dev_priv->mm.next_gem_seqno++;
583 if (dev_priv->mm.next_gem_seqno == 0)
584 dev_priv->mm.next_gem_seqno++;
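/* Keeping 0 reserved matters because a seqno of 0 is used as the "no
 * outstanding rendering" value; see the BUG_ON(last_rendering_seqno == 0)
 * in i915_gem_object_wait_rendering().
 */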
587 OUT_RING(MI_STORE_DWORD_INDEX);
588 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
591 OUT_RING(MI_USER_INTERRUPT);
594 DRM_DEBUG("%d\n", seqno);
596 request->seqno = seqno;
597 request->emitted_jiffies = jiffies;
598 request->flush_domains = flush_domains;
599 was_empty = list_empty(&dev_priv->mm.request_list);
600 list_add_tail(&request->list, &dev_priv->mm.request_list);
602 if (was_empty && !dev_priv->mm.suspended)
603 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
608 * Command execution barrier
610 * Ensures that all commands in the ring are finished
611 * before signalling the CPU
614 i915_retire_commands(struct drm_device *dev)
616 drm_i915_private_t *dev_priv = dev->dev_private;
617 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
618 uint32_t flush_domains = 0;
621 /* The sampler always gets flushed on i965 (sigh) */
623 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
626 OUT_RING(0); /* noop */
628 return flush_domains;
632 * Moves buffers associated only with the given active seqno from the active
633 * to inactive list, potentially freeing them.
636 i915_gem_retire_request(struct drm_device *dev,
637 struct drm_i915_gem_request *request)
639 drm_i915_private_t *dev_priv = dev->dev_private;
641 /* Move any buffers on the active list that are no longer referenced
642 * by the ringbuffer to the flushing/inactive lists as appropriate.
644 while (!list_empty(&dev_priv->mm.active_list)) {
645 struct drm_gem_object *obj;
646 struct drm_i915_gem_object *obj_priv;
648 obj_priv = list_first_entry(&dev_priv->mm.active_list,
649 struct drm_i915_gem_object,
653 /* If the seqno being retired doesn't match the oldest in the
654 * list, then the oldest in the list must still be newer than
657 if (obj_priv->last_rendering_seqno != request->seqno)
660 DRM_INFO("%s: retire %d moves to inactive list %p\n",
661 __func__, request->seqno, obj);
664 if (obj->write_domain != 0) {
665 list_move_tail(&obj_priv->list,
666 &dev_priv->mm.flushing_list);
668 i915_gem_object_move_to_inactive(obj);
672 if (request->flush_domains != 0) {
673 struct drm_i915_gem_object *obj_priv, *next;
675 /* Clear the write domain and activity from any buffers
676 * that are just waiting for a flush matching the one retired.
678 list_for_each_entry_safe(obj_priv, next,
679 &dev_priv->mm.flushing_list, list) {
680 struct drm_gem_object *obj = obj_priv->obj;
682 if (obj->write_domain & request->flush_domains) {
683 obj->write_domain = 0;
684 i915_gem_object_move_to_inactive(obj);
692 * Returns true if seq1 is at or after seq2 (i.e. seq2 has passed).
695 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
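/* The signed 32-bit subtraction makes the comparison wraparound-safe: for
 * example, seq1 = 2 and seq2 = 0xfffffffe give a difference of 4, so a
 * sequence number that has wrapped past zero still counts as having passed.
 */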
697 return (int32_t)(seq1 - seq2) >= 0;
701 i915_get_gem_seqno(struct drm_device *dev)
703 drm_i915_private_t *dev_priv = dev->dev_private;
705 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
709 * This function clears the request list as sequence numbers are passed.
712 i915_gem_retire_requests(struct drm_device *dev)
714 drm_i915_private_t *dev_priv = dev->dev_private;
717 seqno = i915_get_gem_seqno(dev);
719 while (!list_empty(&dev_priv->mm.request_list)) {
720 struct drm_i915_gem_request *request;
721 uint32_t retiring_seqno;
723 request = list_first_entry(&dev_priv->mm.request_list,
724 struct drm_i915_gem_request,
726 retiring_seqno = request->seqno;
728 if (i915_seqno_passed(seqno, retiring_seqno) ||
729 dev_priv->mm.wedged) {
730 i915_gem_retire_request(dev, request);
732 list_del(&request->list);
733 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
740 i915_gem_retire_work_handler(struct work_struct *work)
742 drm_i915_private_t *dev_priv;
743 struct drm_device *dev;
745 dev_priv = container_of(work, drm_i915_private_t,
746 mm.retire_work.work);
749 mutex_lock(&dev->struct_mutex);
750 i915_gem_retire_requests(dev);
751 if (!dev_priv->mm.suspended &&
752 !list_empty(&dev_priv->mm.request_list))
753 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
754 mutex_unlock(&dev->struct_mutex);
758 * Waits for a sequence number to be signaled, and cleans up the
759 * request and object lists appropriately for that event.
762 i915_wait_request(struct drm_device *dev, uint32_t seqno)
764 drm_i915_private_t *dev_priv = dev->dev_private;
769 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
770 dev_priv->mm.waiting_gem_seqno = seqno;
771 i915_user_irq_get(dev);
772 ret = wait_event_interruptible(dev_priv->irq_queue,
773 i915_seqno_passed(i915_get_gem_seqno(dev),
775 dev_priv->mm.wedged);
776 i915_user_irq_put(dev);
777 dev_priv->mm.waiting_gem_seqno = 0;
779 if (dev_priv->mm.wedged)
782 if (ret && ret != -ERESTARTSYS)
783 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
784 __func__, ret, seqno, i915_get_gem_seqno(dev));
786 /* Directly dispatch request retiring. While we have the work queue
787 * to handle this, the waiter on a request often wants an associated
788 * buffer to have made it to the inactive list, and we would need
789 * a separate wait queue to handle that.
792 i915_gem_retire_requests(dev);
798 i915_gem_flush(struct drm_device *dev,
799 uint32_t invalidate_domains,
800 uint32_t flush_domains)
802 drm_i915_private_t *dev_priv = dev->dev_private;
807 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
808 invalidate_domains, flush_domains);
811 if (flush_domains & I915_GEM_DOMAIN_CPU)
812 drm_agp_chipset_flush(dev);
814 if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
815 I915_GEM_DOMAIN_GTT)) {
819 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
820 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
821 * also flushed at 2d versus 3d pipeline switches.
825 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
826 * MI_READ_FLUSH is set, and is always flushed on 965.
828 * I915_GEM_DOMAIN_COMMAND may not exist?
830 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
831 * invalidated when MI_EXE_FLUSH is set.
833 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
834 * invalidated with every MI_FLUSH.
838 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
839 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
840 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
841 * are flushed at any MI_FLUSH.
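* For example, on pre-965 parts an invalidate of I915_GEM_DOMAIN_SAMPLER
* ends up as MI_FLUSH | MI_READ_FLUSH below, while a RENDER invalidate or
* flush clears MI_NO_WRITE_FLUSH so render writes are actually written back.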
844 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
845 if ((invalidate_domains|flush_domains) &
846 I915_GEM_DOMAIN_RENDER)
847 cmd &= ~MI_NO_WRITE_FLUSH;
848 if (!IS_I965G(dev)) {
850 * On the 965, the sampler cache always gets flushed
851 * and this bit is reserved.
853 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
854 cmd |= MI_READ_FLUSH;
856 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
860 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
864 OUT_RING(0); /* noop */
870 * Ensures that all rendering to the object has completed and the object is
871 * safe to unbind from the GTT or access from the CPU.
874 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
876 struct drm_device *dev = obj->dev;
877 struct drm_i915_gem_object *obj_priv = obj->driver_private;
880 /* If there are writes queued to the buffer, flush and
881 * create a new seqno to wait for.
883 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
884 uint32_t write_domain = obj->write_domain;
886 DRM_INFO("%s: flushing object %p from write domain %08x\n",
887 __func__, obj, write_domain);
889 i915_gem_flush(dev, 0, write_domain);
891 i915_gem_object_move_to_active(obj);
892 obj_priv->last_rendering_seqno = i915_add_request(dev,
894 BUG_ON(obj_priv->last_rendering_seqno == 0);
896 DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
900 /* If there is rendering queued on the buffer being evicted, wait for
903 if (obj_priv->active) {
905 DRM_INFO("%s: object %p wait for seqno %08x\n",
906 __func__, obj, obj_priv->last_rendering_seqno);
908 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
917 * Unbinds an object from the GTT aperture.
920 i915_gem_object_unbind(struct drm_gem_object *obj)
922 struct drm_device *dev = obj->dev;
923 struct drm_i915_gem_object *obj_priv = obj->driver_private;
927 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
928 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
930 if (obj_priv->gtt_space == NULL)
933 if (obj_priv->pin_count != 0) {
934 DRM_ERROR("Attempting to unbind pinned buffer\n");
938 /* Wait for any rendering to complete
940 ret = i915_gem_object_wait_rendering(obj);
942 DRM_ERROR("wait_rendering failed: %d\n", ret);
946 /* Move the object to the CPU domain to ensure that
947 * any possible CPU writes while it's not in the GTT
948 * are flushed when we go to remap it. This will
949 * also ensure that all pending GPU writes are finished
952 ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
953 I915_GEM_DOMAIN_CPU);
955 DRM_ERROR("set_domain failed: %d\n", ret);
959 if (obj_priv->agp_mem != NULL) {
960 drm_unbind_agp(obj_priv->agp_mem);
961 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
962 obj_priv->agp_mem = NULL;
965 BUG_ON(obj_priv->active);
967 i915_gem_object_free_page_list(obj);
969 if (obj_priv->gtt_space) {
970 atomic_dec(&dev->gtt_count);
971 atomic_sub(obj->size, &dev->gtt_memory);
973 drm_mm_put_block(obj_priv->gtt_space);
974 obj_priv->gtt_space = NULL;
977 /* Remove ourselves from the LRU list if present. */
978 if (!list_empty(&obj_priv->list))
979 list_del_init(&obj_priv->list);
985 i915_gem_evict_something(struct drm_device *dev)
987 drm_i915_private_t *dev_priv = dev->dev_private;
988 struct drm_gem_object *obj;
989 struct drm_i915_gem_object *obj_priv;
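/* Eviction strategy, in order of preference: grab an already-inactive
 * buffer, otherwise wait on the oldest outstanding request so something
 * becomes inactive, otherwise emit a flush so the flushing list can drain;
 * only when all three lists are empty is the eviction given up on.
 */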
993 /* If there's an inactive buffer available now, grab it
996 if (!list_empty(&dev_priv->mm.inactive_list)) {
997 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
998 struct drm_i915_gem_object,
1000 obj = obj_priv->obj;
1001 BUG_ON(obj_priv->pin_count != 0);
1003 DRM_INFO("%s: evicting %p\n", __func__, obj);
1005 BUG_ON(obj_priv->active);
1007 /* Wait on the rendering and unbind the buffer. */
1008 ret = i915_gem_object_unbind(obj);
1012 /* If we didn't get anything, but the ring is still processing
1013 * things, wait for one of those things to finish and hopefully
1014 * leave us a buffer to evict.
1016 if (!list_empty(&dev_priv->mm.request_list)) {
1017 struct drm_i915_gem_request *request;
1019 request = list_first_entry(&dev_priv->mm.request_list,
1020 struct drm_i915_gem_request,
1023 ret = i915_wait_request(dev, request->seqno);
1027 /* if waiting caused an object to become inactive,
1028 * then loop around and wait for it. Otherwise, we
1029 * assume that waiting freed and unbound something,
1030 * so there should now be some space in the GTT
1032 if (!list_empty(&dev_priv->mm.inactive_list))
1037 /* If we didn't have anything on the request list but there
1038 * are buffers awaiting a flush, emit one and try again.
1039 * When we wait on it, those buffers waiting for that flush
1040 * will get moved to inactive.
1042 if (!list_empty(&dev_priv->mm.flushing_list)) {
1043 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1044 struct drm_i915_gem_object,
1046 obj = obj_priv->obj;
1051 i915_add_request(dev, obj->write_domain);
1057 DRM_ERROR("inactive empty %d request empty %d "
1058 "flushing empty %d\n",
1059 list_empty(&dev_priv->mm.inactive_list),
1060 list_empty(&dev_priv->mm.request_list),
1061 list_empty(&dev_priv->mm.flushing_list));
1062 /* If we didn't do any of the above, there's nothing to be done
1063 * and we just can't fit it in.
1071 i915_gem_object_get_page_list(struct drm_gem_object *obj)
1073 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1075 struct address_space *mapping;
1076 struct inode *inode;
1080 if (obj_priv->page_list)
1083 /* Get the list of pages out of our struct file. They'll be pinned
1084 * at this point until we release them.
1086 page_count = obj->size / PAGE_SIZE;
1087 BUG_ON(obj_priv->page_list != NULL);
1088 obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
1090 if (obj_priv->page_list == NULL) {
1091 DRM_ERROR("Faled to allocate page list\n");
1095 inode = obj->filp->f_path.dentry->d_inode;
1096 mapping = inode->i_mapping;
1097 for (i = 0; i < page_count; i++) {
1098 page = read_mapping_page(mapping, i, NULL);
1100 ret = PTR_ERR(page);
1101 DRM_ERROR("read_mapping_page failed: %d\n", ret);
1102 i915_gem_object_free_page_list(obj);
1105 obj_priv->page_list[i] = page;
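/* read_mapping_page() returns the page with an elevated reference count;
 * the matching page_cache_release() happens in
 * i915_gem_object_free_page_list() when the object is unbound or freed.
 */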
1111 * Finds free space in the GTT aperture and binds the object there.
1114 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1116 struct drm_device *dev = obj->dev;
1117 drm_i915_private_t *dev_priv = dev->dev_private;
1118 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1119 struct drm_mm_node *free_space;
1120 int page_count, ret;
1123 alignment = PAGE_SIZE;
1124 if (alignment & (PAGE_SIZE - 1)) {
1125 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1130 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
1131 obj->size, alignment, 0);
1132 if (free_space != NULL) {
1133 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
1135 if (obj_priv->gtt_space != NULL) {
1136 obj_priv->gtt_space->private = obj;
1137 obj_priv->gtt_offset = obj_priv->gtt_space->start;
1140 if (obj_priv->gtt_space == NULL) {
1141 /* If the gtt is empty and we're still having trouble
1142 * fitting our object in, we're out of memory.
1145 DRM_INFO("%s: GTT full, evicting something\n", __func__);
1147 if (list_empty(&dev_priv->mm.inactive_list) &&
1148 list_empty(&dev_priv->mm.flushing_list) &&
1149 list_empty(&dev_priv->mm.active_list)) {
1150 DRM_ERROR("GTT full, but LRU list empty\n");
1154 ret = i915_gem_evict_something(dev);
1156 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1163 DRM_INFO("Binding object of size %d at 0x%08x\n",
1164 obj->size, obj_priv->gtt_offset);
1166 ret = i915_gem_object_get_page_list(obj);
1168 drm_mm_put_block(obj_priv->gtt_space);
1169 obj_priv->gtt_space = NULL;
1173 page_count = obj->size / PAGE_SIZE;
1174 /* Create an AGP memory structure pointing at our pages, and bind it
1177 obj_priv->agp_mem = drm_agp_bind_pages(dev,
1178 obj_priv->page_list,
1180 obj_priv->gtt_offset,
1181 obj_priv->agp_type);
1182 if (obj_priv->agp_mem == NULL) {
1183 i915_gem_object_free_page_list(obj);
1184 drm_mm_put_block(obj_priv->gtt_space);
1185 obj_priv->gtt_space = NULL;
1188 atomic_inc(&dev->gtt_count);
1189 atomic_add(obj->size, &dev->gtt_memory);
1191 /* Assert that the object is not currently in any GPU domain. As it
1192 * wasn't in the GTT, there shouldn't be any way it could have been in
1195 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1196 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1202 i915_gem_clflush_object(struct drm_gem_object *obj)
1204 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1206 /* If we don't have a page list set up, then we're not pinned
1207 * to GPU, and we can ignore the cache flush because it'll happen
1208 * again at bind time.
1210 if (obj_priv->page_list == NULL)
1213 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
1217 * Set the next domain for the specified object. This
1218 * may not actually perform the necessary flushing/invalidating though,
1219 * as that may want to be batched with other set_domain operations
1221 * This is (we hope) the only really tricky part of gem. The goal
1222 * is fairly simple -- track which caches hold bits of the object
1223 * and make sure they remain coherent. A few concrete examples may
1224 * help to explain how it works. For shorthand, we use the notation
1225 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
1226 * a pair of read and write domain masks.
1228 * Case 1: the batch buffer
1234 * 5. Unmapped from GTT
1237 * Let's take these a step at a time
1240 * Pages allocated from the kernel may still have
1241 * cache contents, so we set them to (CPU, CPU) always.
1242 * 2. Written by CPU (using pwrite)
1243 * The pwrite function calls set_domain (CPU, CPU) and
1244 * this function does nothing (as nothing changes)
1246 * This function asserts that the object is not
1247 * currently in any GPU-based read or write domains
1249 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
1250 * As write_domain is zero, this function adds in the
1251 * current read domains (CPU+COMMAND, 0).
1252 * flush_domains is set to CPU.
1253 * invalidate_domains is set to COMMAND
1254 * clflush is run to get data out of the CPU caches
1255 * then i915_dev_set_domain calls i915_gem_flush to
1256 * emit an MI_FLUSH and drm_agp_chipset_flush
1257 * 5. Unmapped from GTT
1258 * i915_gem_object_unbind calls set_domain (CPU, CPU)
1259 * flush_domains and invalidate_domains end up both zero
1260 * so no flushing/invalidating happens
1264 * Case 2: The shared render buffer
1268 * 3. Read/written by GPU
1269 * 4. set_domain to (CPU,CPU)
1270 * 5. Read/written by CPU
1271 * 6. Read/written by GPU
1274 * Same as last example, (CPU, CPU)
1276 * Nothing changes (assertions find that it is not in the GPU)
1277 * 3. Read/written by GPU
1278 * execbuffer calls set_domain (RENDER, RENDER)
1279 * flush_domains gets CPU
1280 * invalidate_domains gets GPU
1282 * MI_FLUSH and drm_agp_chipset_flush
1283 * 4. set_domain (CPU, CPU)
1284 * flush_domains gets GPU
1285 * invalidate_domains gets CPU
1286 * wait_rendering (obj) to make sure all drawing is complete.
1287 * This will include an MI_FLUSH to get the data from GPU
1289 * clflush (obj) to invalidate the CPU cache
1290 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
1291 * 5. Read/written by CPU
1292 * cache lines are loaded and dirtied
1293 * 6. Read/written by GPU
1294 * Same as last GPU access
1296 * Case 3: The constant buffer
1301 * 4. Updated (written) by CPU again
1310 * flush_domains = CPU
1311 * invalidate_domains = RENDER
1314 * drm_agp_chipset_flush
1315 * 4. Updated (written) by CPU again
1317 * flush_domains = 0 (no previous write domain)
1318 * invalidate_domains = 0 (no new read domains)
1321 * flush_domains = CPU
1322 * invalidate_domains = RENDER
1325 * drm_agp_chipset_flush
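/*
 * A concrete instance of the notation above: a CPU access to a buffer the
 * GPU last wrote (Case 2, step 4) would use
 *
 *	i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
 *				   I915_GEM_DOMAIN_CPU);
 *
 * which waits for rendering, clflushes the object, and leaves the remaining
 * flush/invalidate work accumulated in dev->flush_domains and
 * dev->invalidate_domains for i915_gem_dev_set_domain() to emit in one go.
 */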
1328 i915_gem_object_set_domain(struct drm_gem_object *obj,
1329 uint32_t read_domains,
1330 uint32_t write_domain)
1332 struct drm_device *dev = obj->dev;
1333 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1334 uint32_t invalidate_domains = 0;
1335 uint32_t flush_domains = 0;
1339 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
1341 obj->read_domains, read_domains,
1342 obj->write_domain, write_domain);
1345 * If the object isn't moving to a new write domain,
1346 * let the object stay in multiple read domains
1348 if (write_domain == 0)
1349 read_domains |= obj->read_domains;
1351 obj_priv->dirty = 1;
1354 * Flush the current write domain if
1355 * the new read domains don't match. Invalidate
1356 * any read domains which differ from the old
1359 if (obj->write_domain && obj->write_domain != read_domains) {
1360 flush_domains |= obj->write_domain;
1361 invalidate_domains |= read_domains & ~obj->write_domain;
1364 * Invalidate any read caches which may have
1365 * stale data. That is, any new read domains.
1367 invalidate_domains |= read_domains & ~obj->read_domains;
1368 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
1370 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1371 __func__, flush_domains, invalidate_domains);
1374 * If we're invalidating the CPU cache and flushing a GPU cache,
1375 * then pause for rendering so that the GPU caches will be
1376 * flushed before the CPU cache is invalidated.
1378 if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
1379 (flush_domains & ~(I915_GEM_DOMAIN_CPU |
1380 I915_GEM_DOMAIN_GTT))) {
1381 ret = i915_gem_object_wait_rendering(obj);
1385 i915_gem_clflush_object(obj);
1388 if ((write_domain | flush_domains) != 0)
1389 obj->write_domain = write_domain;
1391 /* If we're invalidating the CPU domain, clear the per-page CPU
1392 * domain list as well.
1394 if (obj_priv->page_cpu_valid != NULL &&
1395 (write_domain != 0 ||
1396 read_domains & I915_GEM_DOMAIN_CPU)) {
1397 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1399 obj_priv->page_cpu_valid = NULL;
1401 obj->read_domains = read_domains;
1403 dev->invalidate_domains |= invalidate_domains;
1404 dev->flush_domains |= flush_domains;
1406 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
1408 obj->read_domains, obj->write_domain,
1409 dev->invalidate_domains, dev->flush_domains);
1415 * Set the read/write domain on a range of the object.
1417 * Currently only implemented for CPU reads, otherwise drops to normal
1418 * i915_gem_object_set_domain().
1421 i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1424 uint32_t read_domains,
1425 uint32_t write_domain)
1427 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1430 if (obj->read_domains & I915_GEM_DOMAIN_CPU)
1433 if (read_domains != I915_GEM_DOMAIN_CPU ||
1435 return i915_gem_object_set_domain(obj,
1436 read_domains, write_domain);
1438 /* Wait on any GPU rendering to the object to be flushed. */
1439 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
1440 ret = i915_gem_object_wait_rendering(obj);
1445 if (obj_priv->page_cpu_valid == NULL) {
1446 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1450 /* Flush the cache on any pages that are still invalid from the CPU's
1453 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
1454 if (obj_priv->page_cpu_valid[i])
1457 drm_clflush_pages(obj_priv->page_list + i, 1);
1459 obj_priv->page_cpu_valid[i] = 1;
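/* page_cpu_valid[] remembers, per page, which pages have already been
 * clflushed for CPU access, so overlapping pread ranges skip pages that are
 * already valid; the array is thrown away again whenever the whole object
 * changes domain (see i915_gem_object_set_domain()).
 */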
1466 * Once all of the objects have been set in the proper domain,
1467 * perform the necessary flush and invalidate operations.
1469 * Returns the write domains flushed, for use in flush tracking.
1472 i915_gem_dev_set_domain(struct drm_device *dev)
1474 uint32_t flush_domains = dev->flush_domains;
1477 * Now that all the buffers are synced to the proper domains,
1478 * flush and invalidate the collected domains
1480 if (dev->invalidate_domains | dev->flush_domains) {
1482 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1484 dev->invalidate_domains,
1485 dev->flush_domains);
1488 dev->invalidate_domains,
1489 dev->flush_domains);
1490 dev->invalidate_domains = 0;
1491 dev->flush_domains = 0;
1494 return flush_domains;
1498 * Pin an object to the GTT and evaluate the relocations landing in it.
1501 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1502 struct drm_file *file_priv,
1503 struct drm_i915_gem_exec_object *entry)
1505 struct drm_device *dev = obj->dev;
1506 struct drm_i915_gem_relocation_entry reloc;
1507 struct drm_i915_gem_relocation_entry __user *relocs;
1508 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1510 uint32_t last_reloc_offset = -1;
1511 void __iomem *reloc_page = NULL;
1513 /* Choose the GTT offset for our buffer and put it there. */
1514 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
1518 entry->offset = obj_priv->gtt_offset;
1520 relocs = (struct drm_i915_gem_relocation_entry __user *)
1521 (uintptr_t) entry->relocs_ptr;
1522 /* Apply the relocations, using the GTT aperture to avoid cache
1523 * flushing requirements.
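/* Each relocation entry names a target buffer handle and an offset within
 * this object; the dword at that offset is rewritten below to
 * target_obj_priv->gtt_offset + reloc.delta once the target's GTT placement
 * is known.
 */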
1525 for (i = 0; i < entry->relocation_count; i++) {
1526 struct drm_gem_object *target_obj;
1527 struct drm_i915_gem_object *target_obj_priv;
1528 uint32_t reloc_val, reloc_offset;
1529 uint32_t __iomem *reloc_entry;
1531 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
1533 i915_gem_object_unpin(obj);
1537 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
1538 reloc.target_handle);
1539 if (target_obj == NULL) {
1540 i915_gem_object_unpin(obj);
1543 target_obj_priv = target_obj->driver_private;
1545 /* The target buffer should have appeared before us in the
1546 * exec_object list, so it should have a GTT space bound by now.
1548 if (target_obj_priv->gtt_space == NULL) {
1549 DRM_ERROR("No GTT space found for object %d\n",
1550 reloc.target_handle);
1551 drm_gem_object_unreference(target_obj);
1552 i915_gem_object_unpin(obj);
1556 if (reloc.offset > obj->size - 4) {
1557 DRM_ERROR("Relocation beyond object bounds: "
1558 "obj %p target %d offset %d size %d.\n",
1559 obj, reloc.target_handle,
1560 (int) reloc.offset, (int) obj->size);
1561 drm_gem_object_unreference(target_obj);
1562 i915_gem_object_unpin(obj);
1565 if (reloc.offset & 3) {
1566 DRM_ERROR("Relocation not 4-byte aligned: "
1567 "obj %p target %d offset %d.\n",
1568 obj, reloc.target_handle,
1569 (int) reloc.offset);
1570 drm_gem_object_unreference(target_obj);
1571 i915_gem_object_unpin(obj);
1575 if (reloc.write_domain && target_obj->pending_write_domain &&
1576 reloc.write_domain != target_obj->pending_write_domain) {
1577 DRM_ERROR("Write domain conflict: "
1578 "obj %p target %d offset %d "
1579 "new %08x old %08x\n",
1580 obj, reloc.target_handle,
1583 target_obj->pending_write_domain);
1584 drm_gem_object_unreference(target_obj);
1585 i915_gem_object_unpin(obj);
1590 DRM_INFO("%s: obj %p offset %08x target %d "
1591 "read %08x write %08x gtt %08x "
1592 "presumed %08x delta %08x\n",
1596 (int) reloc.target_handle,
1597 (int) reloc.read_domains,
1598 (int) reloc.write_domain,
1599 (int) target_obj_priv->gtt_offset,
1600 (int) reloc.presumed_offset,
1604 target_obj->pending_read_domains |= reloc.read_domains;
1605 target_obj->pending_write_domain |= reloc.write_domain;
1607 /* If the relocation already has the right value in it, no
1608 * more work needs to be done.
1610 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
1611 drm_gem_object_unreference(target_obj);
1615 /* Now that we're going to actually write some data in,
1616 * make sure that any rendering using this buffer's contents
1619 i915_gem_object_wait_rendering(obj);
1621 /* As we're writing through the gtt, flush
1622 * any CPU writes before we write the relocations
1624 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1625 i915_gem_clflush_object(obj);
1626 drm_agp_chipset_flush(dev);
1627 obj->write_domain = 0;
1630 /* Map the page containing the relocation we're going to
1633 reloc_offset = obj_priv->gtt_offset + reloc.offset;
1634 if (reloc_page == NULL ||
1635 (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
1636 (reloc_offset & ~(PAGE_SIZE - 1))) {
1637 if (reloc_page != NULL)
1638 iounmap(reloc_page);
1640 reloc_page = ioremap_wc(dev->agp->base +
1644 last_reloc_offset = reloc_offset;
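/* Caching the mapping this way lets consecutive relocations that land in
 * the same GTT page reuse reloc_page rather than paying for another
 * ioremap_wc()/iounmap() round trip.
 */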
1645 if (reloc_page == NULL) {
1646 drm_gem_object_unreference(target_obj);
1647 i915_gem_object_unpin(obj);
1652 reloc_entry = (uint32_t __iomem *)(reloc_page +
1653 (reloc_offset & (PAGE_SIZE - 1)));
1654 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
1657 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
1658 obj, (unsigned int) reloc.offset,
1659 readl(reloc_entry), reloc_val);
1661 writel(reloc_val, reloc_entry);
1663 /* Write the updated presumed offset for this entry back out
1666 reloc.presumed_offset = target_obj_priv->gtt_offset;
1667 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
1669 drm_gem_object_unreference(target_obj);
1670 i915_gem_object_unpin(obj);
1674 drm_gem_object_unreference(target_obj);
1677 if (reloc_page != NULL)
1678 iounmap(reloc_page);
1682 i915_gem_dump_object(obj, 128, __func__, ~0);
1687 /** Dispatch a batchbuffer to the ring
1690 i915_dispatch_gem_execbuffer(struct drm_device *dev,
1691 struct drm_i915_gem_execbuffer *exec,
1692 uint64_t exec_offset)
1694 drm_i915_private_t *dev_priv = dev->dev_private;
1695 struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
1696 (uintptr_t) exec->cliprects_ptr;
1697 int nbox = exec->num_cliprects;
1699 uint32_t exec_start, exec_len;
1702 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
1703 exec_len = (uint32_t) exec->batch_len;
1705 if ((exec_start | exec_len) & 0x7) {
1706 DRM_ERROR("alignment\n");
1713 count = nbox ? nbox : 1;
1715 for (i = 0; i < count; i++) {
1717 int ret = i915_emit_box(dev, boxes, i,
1718 exec->DR1, exec->DR4);
1723 if (IS_I830(dev) || IS_845G(dev)) {
1725 OUT_RING(MI_BATCH_BUFFER);
1726 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1727 OUT_RING(exec_start + exec_len - 4);
1732 if (IS_I965G(dev)) {
1733 OUT_RING(MI_BATCH_BUFFER_START |
1735 MI_BATCH_NON_SECURE_I965);
1736 OUT_RING(exec_start);
1738 OUT_RING(MI_BATCH_BUFFER_START |
1740 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1746 /* XXX breadcrumb */
1750 /* Throttle our rendering by waiting until the ring has completed our requests
1751 * emitted over 20 msec ago.
1753 * This should get us reasonable parallelism between CPU and GPU but also
1754 * relatively low latency when blocking on a particular request to finish.
1757 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
1759 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1763 mutex_lock(&dev->struct_mutex);
1764 seqno = i915_file_priv->mm.last_gem_throttle_seqno;
1765 i915_file_priv->mm.last_gem_throttle_seqno =
1766 i915_file_priv->mm.last_gem_seqno;
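/* last_gem_throttle_seqno is the last_gem_seqno that was current at the
 * previous throttle ioctl, so each caller blocks on the rendering it had
 * outstanding as of its last throttle rather than on the newest request.
 */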
1768 ret = i915_wait_request(dev, seqno);
1769 mutex_unlock(&dev->struct_mutex);
1774 i915_gem_execbuffer(struct drm_device *dev, void *data,
1775 struct drm_file *file_priv)
1777 drm_i915_private_t *dev_priv = dev->dev_private;
1778 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1779 struct drm_i915_gem_execbuffer *args = data;
1780 struct drm_i915_gem_exec_object *exec_list = NULL;
1781 struct drm_gem_object **object_list = NULL;
1782 struct drm_gem_object *batch_obj;
1783 int ret, i, pinned = 0;
1784 uint64_t exec_offset;
1785 uint32_t seqno, flush_domains;
1788 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1789 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1792 if (args->buffer_count < 1) {
1793 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
1796 /* Copy in the exec list from userland */
1797 exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
1799 object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
1801 if (exec_list == NULL || object_list == NULL) {
1802 DRM_ERROR("Failed to allocate exec or object list "
1804 args->buffer_count);
1808 ret = copy_from_user(exec_list,
1809 (struct drm_i915_relocation_entry __user *)
1810 (uintptr_t) args->buffers_ptr,
1811 sizeof(*exec_list) * args->buffer_count);
1813 DRM_ERROR("copy %d exec entries failed %d\n",
1814 args->buffer_count, ret);
1818 mutex_lock(&dev->struct_mutex);
1820 i915_verify_inactive(dev, __FILE__, __LINE__);
1822 if (dev_priv->mm.wedged) {
1823 DRM_ERROR("Execbuf while wedged\n");
1824 mutex_unlock(&dev->struct_mutex);
1828 if (dev_priv->mm.suspended) {
1829 DRM_ERROR("Execbuf while VT-switched.\n");
1830 mutex_unlock(&dev->struct_mutex);
1834 /* Zero the global flush/invalidate flags. These
1835 * will be modified as each object is bound to the
1838 dev->invalidate_domains = 0;
1839 dev->flush_domains = 0;
1841 /* Look up object handles and perform the relocations */
1842 for (i = 0; i < args->buffer_count; i++) {
1843 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1844 exec_list[i].handle);
1845 if (object_list[i] == NULL) {
1846 DRM_ERROR("Invalid object handle %d at index %d\n",
1847 exec_list[i].handle, i);
1852 object_list[i]->pending_read_domains = 0;
1853 object_list[i]->pending_write_domain = 0;
1854 ret = i915_gem_object_pin_and_relocate(object_list[i],
1858 DRM_ERROR("object bind and relocate failed %d\n", ret);
1864 /* Set the pending read domains for the batch buffer to COMMAND */
1865 batch_obj = object_list[args->buffer_count-1];
1866 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1867 batch_obj->pending_write_domain = 0;
1869 i915_verify_inactive(dev, __FILE__, __LINE__);
1871 for (i = 0; i < args->buffer_count; i++) {
1872 struct drm_gem_object *obj = object_list[i];
1873 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1875 if (obj_priv->gtt_space == NULL) {
1876 /* We evicted the buffer in the process of validating
1877 * our set of buffers. We could try to recover by
1878 * kicking everything out and trying again from
1885 /* make sure all previous memory operations have passed */
1886 ret = i915_gem_object_set_domain(obj,
1887 obj->pending_read_domains,
1888 obj->pending_write_domain);
1893 i915_verify_inactive(dev, __FILE__, __LINE__);
1895 /* Flush/invalidate caches and chipset buffer */
1896 flush_domains = i915_gem_dev_set_domain(dev);
1898 i915_verify_inactive(dev, __FILE__, __LINE__);
1901 for (i = 0; i < args->buffer_count; i++) {
1902 i915_gem_object_check_coherency(object_list[i],
1903 exec_list[i].handle);
1907 exec_offset = exec_list[args->buffer_count - 1].offset;
1910 i915_gem_dump_object(object_list[args->buffer_count - 1],
1916 (void)i915_add_request(dev, flush_domains);
1918 /* Exec the batchbuffer */
1919 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1921 DRM_ERROR("dispatch failed %d\n", ret);
1926 * Ensure that the commands in the batch buffer are
1927 * finished before the interrupt fires
1929 flush_domains = i915_retire_commands(dev);
1931 i915_verify_inactive(dev, __FILE__, __LINE__);
1934 * Get a seqno representing the execution of the current buffer,
1935 * which we can wait on. We would like to mitigate these interrupts,
1936 * likely by only creating seqnos occasionally (so that we have
1937 * *some* interrupts representing completion of buffers that we can
1938 * wait on when trying to clear up gtt space).
1940 seqno = i915_add_request(dev, flush_domains);
1942 i915_file_priv->mm.last_gem_seqno = seqno;
1943 for (i = 0; i < args->buffer_count; i++) {
1944 struct drm_gem_object *obj = object_list[i];
1945 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1947 i915_gem_object_move_to_active(obj);
1948 obj_priv->last_rendering_seqno = seqno;
1950 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1954 i915_dump_lru(dev, __func__);
1957 i915_verify_inactive(dev, __FILE__, __LINE__);
1959 /* Copy the new buffer offsets back to the user's exec list. */
1960 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1961 (uintptr_t) args->buffers_ptr,
1963 sizeof(*exec_list) * args->buffer_count);
1965 DRM_ERROR("failed to copy %d exec entries "
1966 "back to user (%d)\n",
1967 args->buffer_count, ret);
1969 if (object_list != NULL) {
1970 for (i = 0; i < pinned; i++)
1971 i915_gem_object_unpin(object_list[i]);
1973 for (i = 0; i < args->buffer_count; i++)
1974 drm_gem_object_unreference(object_list[i]);
1976 mutex_unlock(&dev->struct_mutex);
1979 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
1981 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
1988 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
1990 struct drm_device *dev = obj->dev;
1991 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1994 i915_verify_inactive(dev, __FILE__, __LINE__);
1995 if (obj_priv->gtt_space == NULL) {
1996 ret = i915_gem_object_bind_to_gtt(obj, alignment);
1998 DRM_ERROR("Failure to bind: %d", ret);
2002 obj_priv->pin_count++;
2004 /* If the object is not active and not pending a flush,
2005 * remove it from the inactive list
2007 if (obj_priv->pin_count == 1) {
2008 atomic_inc(&dev->pin_count);
2009 atomic_add(obj->size, &dev->pin_memory);
2010 if (!obj_priv->active &&
2011 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2012 I915_GEM_DOMAIN_GTT)) == 0 &&
2013 !list_empty(&obj_priv->list))
2014 list_del_init(&obj_priv->list);
2016 i915_verify_inactive(dev, __FILE__, __LINE__);
2022 i915_gem_object_unpin(struct drm_gem_object *obj)
2024 struct drm_device *dev = obj->dev;
2025 drm_i915_private_t *dev_priv = dev->dev_private;
2026 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2028 i915_verify_inactive(dev, __FILE__, __LINE__);
2029 obj_priv->pin_count--;
2030 BUG_ON(obj_priv->pin_count < 0);
2031 BUG_ON(obj_priv->gtt_space == NULL);
2033 /* If the object is no longer pinned, and is
2034 * neither active nor being flushed, then stick it on
2037 if (obj_priv->pin_count == 0) {
2038 if (!obj_priv->active &&
2039 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2040 I915_GEM_DOMAIN_GTT)) == 0)
2041 list_move_tail(&obj_priv->list,
2042 &dev_priv->mm.inactive_list);
2043 atomic_dec(&dev->pin_count);
2044 atomic_sub(obj->size, &dev->pin_memory);
2046 i915_verify_inactive(dev, __FILE__, __LINE__);
2050 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2051 struct drm_file *file_priv)
2053 struct drm_i915_gem_pin *args = data;
2054 struct drm_gem_object *obj;
2055 struct drm_i915_gem_object *obj_priv;
2058 mutex_lock(&dev->struct_mutex);
2060 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2062 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2064 mutex_unlock(&dev->struct_mutex);
2067 obj_priv = obj->driver_private;
2069 ret = i915_gem_object_pin(obj, args->alignment);
2071 drm_gem_object_unreference(obj);
2072 mutex_unlock(&dev->struct_mutex);
2076 /* XXX - flush the CPU caches for pinned objects
2077 * as the X server doesn't manage domains yet
2079 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
2080 i915_gem_clflush_object(obj);
2081 drm_agp_chipset_flush(dev);
2082 obj->write_domain = 0;
2084 args->offset = obj_priv->gtt_offset;
2085 drm_gem_object_unreference(obj);
2086 mutex_unlock(&dev->struct_mutex);
2092 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2093 struct drm_file *file_priv)
2095 struct drm_i915_gem_pin *args = data;
2096 struct drm_gem_object *obj;
2098 mutex_lock(&dev->struct_mutex);
2100 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2102 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2104 mutex_unlock(&dev->struct_mutex);
2108 i915_gem_object_unpin(obj);
2110 drm_gem_object_unreference(obj);
2111 mutex_unlock(&dev->struct_mutex);
2116 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2117 struct drm_file *file_priv)
2119 struct drm_i915_gem_busy *args = data;
2120 struct drm_gem_object *obj;
2121 struct drm_i915_gem_object *obj_priv;
2123 mutex_lock(&dev->struct_mutex);
2124 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2126 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2128 mutex_unlock(&dev->struct_mutex);
2132 obj_priv = obj->driver_private;
2133 args->busy = obj_priv->active;
2135 drm_gem_object_unreference(obj);
2136 mutex_unlock(&dev->struct_mutex);
2141 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2142 struct drm_file *file_priv)
2144 return i915_gem_ring_throttle(dev, file_priv);
2147 int i915_gem_init_object(struct drm_gem_object *obj)
2149 struct drm_i915_gem_object *obj_priv;
2151 obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
2152 if (obj_priv == NULL)
2156 * We've just allocated pages from the kernel,
2157 * so they've just been written by the CPU with
2158 * zeros. They'll need to be clflushed before we
2159 * use them with the GPU.
2161 obj->write_domain = I915_GEM_DOMAIN_CPU;
2162 obj->read_domains = I915_GEM_DOMAIN_CPU;
2164 obj_priv->agp_type = AGP_USER_MEMORY;
2166 obj->driver_private = obj_priv;
2167 obj_priv->obj = obj;
2168 INIT_LIST_HEAD(&obj_priv->list);
2172 void i915_gem_free_object(struct drm_gem_object *obj)
2174 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2176 while (obj_priv->pin_count > 0)
2177 i915_gem_object_unpin(obj);
2179 i915_gem_object_unbind(obj);
2181 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
2182 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2186 i915_gem_set_domain(struct drm_gem_object *obj,
2187 struct drm_file *file_priv,
2188 uint32_t read_domains,
2189 uint32_t write_domain)
2191 struct drm_device *dev = obj->dev;
2193 uint32_t flush_domains;
2195 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2197 ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2200 flush_domains = i915_gem_dev_set_domain(obj->dev);
2202 if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2203 (void) i915_add_request(dev, flush_domains);
2208 /** Unbinds all objects that are on the given buffer list. */
2210 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2212 struct drm_gem_object *obj;
2213 struct drm_i915_gem_object *obj_priv;
2216 while (!list_empty(head)) {
2217 obj_priv = list_first_entry(head,
2218 struct drm_i915_gem_object,
2220 obj = obj_priv->obj;
2222 if (obj_priv->pin_count != 0) {
2223 DRM_ERROR("Pinned object in unbind list\n");
2224 mutex_unlock(&dev->struct_mutex);
2228 ret = i915_gem_object_unbind(obj);
2230 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
2232 mutex_unlock(&dev->struct_mutex);
2242 i915_gem_idle(struct drm_device *dev)
2244 drm_i915_private_t *dev_priv = dev->dev_private;
2245 uint32_t seqno, cur_seqno, last_seqno;
2248 mutex_lock(&dev->struct_mutex);
2250 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
2251 mutex_unlock(&dev->struct_mutex);
2255 /* Hack! Don't let anybody do execbuf while we don't control the chip.
2256 * We need to replace this with a semaphore, or something.
2258 dev_priv->mm.suspended = 1;
2260 /* Cancel the retire work handler, wait for it to finish if running
2262 mutex_unlock(&dev->struct_mutex);
2263 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
2264 mutex_lock(&dev->struct_mutex);
2266 i915_kernel_lost_context(dev);
2268 /* Flush the GPU along with all non-CPU write domains
2270 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
2271 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2272 seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
2273 I915_GEM_DOMAIN_GTT));
2276 mutex_unlock(&dev->struct_mutex);
2280 dev_priv->mm.waiting_gem_seqno = seqno;
2284 cur_seqno = i915_get_gem_seqno(dev);
2285 if (i915_seqno_passed(cur_seqno, seqno))
2287 if (last_seqno == cur_seqno) {
2288 if (stuck++ > 100) {
2289 DRM_ERROR("hardware wedged\n");
2290 dev_priv->mm.wedged = 1;
2291 DRM_WAKEUP(&dev_priv->irq_queue);
2296 last_seqno = cur_seqno;
2298 dev_priv->mm.waiting_gem_seqno = 0;
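/* The loop above polls the hardware status page; if the seqno stops
 * advancing for over a hundred polls the chip is assumed wedged, the wedged
 * flag is set, and anybody sleeping on irq_queue is woken so they can bail
 * out.
 */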
2300 i915_gem_retire_requests(dev);
2302 /* Active and flushing should now be empty as we've
2303 * waited for a sequence higher than any pending execbuffer
2305 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2306 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2308 /* Request should now be empty as we've also waited
2309 * for the last request in the list
2311 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2313 /* Move all buffers out of the GTT. */
2314 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2316 mutex_unlock(&dev->struct_mutex);
2320 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2321 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2322 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2323 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2325 i915_gem_cleanup_ringbuffer(dev);
2326 mutex_unlock(&dev->struct_mutex);
2332 i915_gem_init_hws(struct drm_device *dev)
2334 drm_i915_private_t *dev_priv = dev->dev_private;
2335 struct drm_gem_object *obj;
2336 struct drm_i915_gem_object *obj_priv;
2339 /* If we need a physical address for the status page, it's already
2340 * initialized at driver load time.
2342 if (!I915_NEED_GFX_HWS(dev))
2345 obj = drm_gem_object_alloc(dev, 4096);
2347 DRM_ERROR("Failed to allocate status page\n");
2350 obj_priv = obj->driver_private;
2351 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
2353 ret = i915_gem_object_pin(obj, 4096);
2355 drm_gem_object_unreference(obj);
2359 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
2361 dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
2362 if (dev_priv->hw_status_page == NULL) {
2363 DRM_ERROR("Failed to map status page.\n");
2364 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2365 drm_gem_object_unreference(obj);
2368 dev_priv->hws_obj = obj;
2369 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
2370 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
2371 I915_READ(HWS_PGA); /* posting read */
2372 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
2378 i915_gem_init_ringbuffer(struct drm_device *dev)
2380 drm_i915_private_t *dev_priv = dev->dev_private;
2381 struct drm_gem_object *obj;
2382 struct drm_i915_gem_object *obj_priv;
2386 ret = i915_gem_init_hws(dev);
2390 obj = drm_gem_object_alloc(dev, 128 * 1024);
2392 DRM_ERROR("Failed to allocate ringbuffer\n");
2395 obj_priv = obj->driver_private;
2397 ret = i915_gem_object_pin(obj, 4096);
2399 drm_gem_object_unreference(obj);
2403 /* Set up the kernel mapping for the ring. */
2404 dev_priv->ring.Size = obj->size;
2405 dev_priv->ring.tail_mask = obj->size - 1;
2407 dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
2408 dev_priv->ring.map.size = obj->size;
2409 dev_priv->ring.map.type = 0;
2410 dev_priv->ring.map.flags = 0;
2411 dev_priv->ring.map.mtrr = 0;
2413 drm_core_ioremap_wc(&dev_priv->ring.map, dev);
2414 if (dev_priv->ring.map.handle == NULL) {
2415 DRM_ERROR("Failed to map ringbuffer.\n");
2416 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2417 drm_gem_object_unreference(obj);
2420 dev_priv->ring.ring_obj = obj;
2421 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
2423 /* Stop the ring if it's running. */
2424 I915_WRITE(PRB0_CTL, 0);
2425 I915_WRITE(PRB0_TAIL, 0);
2426 I915_WRITE(PRB0_HEAD, 0);
2428 /* Initialize the ring. */
2429 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
2430 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
2432 /* G45 ring initialization fails to reset head to zero */
2434 DRM_ERROR("Ring head not reset to zero "
2435 "ctl %08x head %08x tail %08x start %08x\n",
2436 I915_READ(PRB0_CTL),
2437 I915_READ(PRB0_HEAD),
2438 I915_READ(PRB0_TAIL),
2439 I915_READ(PRB0_START));
2440 I915_WRITE(PRB0_HEAD, 0);
2442 DRM_ERROR("Ring head forced to zero "
2443 "ctl %08x head %08x tail %08x start %08x\n",
2444 I915_READ(PRB0_CTL),
2445 I915_READ(PRB0_HEAD),
2446 I915_READ(PRB0_TAIL),
2447 I915_READ(PRB0_START));
2450 I915_WRITE(PRB0_CTL,
2451 ((obj->size - 4096) & RING_NR_PAGES) |
2455 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
2457 /* If the head is still not zero, the ring is dead */
2459 DRM_ERROR("Ring initialization failed "
2460 "ctl %08x head %08x tail %08x start %08x\n",
2461 I915_READ(PRB0_CTL),
2462 I915_READ(PRB0_HEAD),
2463 I915_READ(PRB0_TAIL),
2464 I915_READ(PRB0_START));
2468 /* Update our cache of the ring state */
2469 i915_kernel_lost_context(dev);
2475 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
2477 drm_i915_private_t *dev_priv = dev->dev_private;
2479 if (dev_priv->ring.ring_obj == NULL)
2482 drm_core_ioremapfree(&dev_priv->ring.map, dev);
2484 i915_gem_object_unpin(dev_priv->ring.ring_obj);
2485 drm_gem_object_unreference(dev_priv->ring.ring_obj);
2486 dev_priv->ring.ring_obj = NULL;
2487 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2489 if (dev_priv->hws_obj != NULL) {
2490 struct drm_gem_object *obj = dev_priv->hws_obj;
2491 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2493 kunmap(obj_priv->page_list[0]);
2494 i915_gem_object_unpin(obj);
2495 drm_gem_object_unreference(obj);
2496 dev_priv->hws_obj = NULL;
2497 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2498 dev_priv->hw_status_page = NULL;
2500 /* Write high address into HWS_PGA when disabling. */
2501 I915_WRITE(HWS_PGA, 0x1ffff000);
2506 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2507 struct drm_file *file_priv)
2509 drm_i915_private_t *dev_priv = dev->dev_private;
2512 if (dev_priv->mm.wedged) {
2513 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2514 dev_priv->mm.wedged = 0;
2517 ret = i915_gem_init_ringbuffer(dev);
2521 mutex_lock(&dev->struct_mutex);
2522 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2523 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2524 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2525 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2526 dev_priv->mm.suspended = 0;
2527 mutex_unlock(&dev->struct_mutex);
2529 drm_irq_install(dev);
2535 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2536 struct drm_file *file_priv)
2540 ret = i915_gem_idle(dev);
2541 drm_irq_uninstall(dev);
2547 i915_gem_lastclose(struct drm_device *dev)
2551 ret = i915_gem_idle(dev);
2553 DRM_ERROR("failed to idle hardware: %d\n", ret);
2557 i915_gem_load(struct drm_device *dev)
2559 drm_i915_private_t *dev_priv = dev->dev_private;
2561 INIT_LIST_HEAD(&dev_priv->mm.active_list);
2562 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
2563 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
2564 INIT_LIST_HEAD(&dev_priv->mm.request_list);
2565 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
2566 i915_gem_retire_work_handler);
2567 dev_priv->mm.next_gem_seqno = 1;
2569 i915_gem_detect_bit_6_swizzle(dev);