[XFS] Added quota targets and removed dmapi directory
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index b0f0e58866de6059bcd4b0bc454e886ddde03cbc..302273f8e2a92399b27b13faf9d7ddf72359f518 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -187,6 +187,19 @@ free_address(
 {
        a_list_t        *aentry;
 
+#ifdef CONFIG_XEN
+       /*
+        * Xen needs to be able to make sure it can get an exclusive
+        * RO mapping of pages it wants to turn into a pagetable.  If
+        * a newly allocated page is also still being vmap()ed by xfs,
+        * it will cause pagetable construction to fail.  This is a
+        * quick workaround to always eagerly unmap pages so that Xen
+        * is happy.
+        */
+       vunmap(addr);
+       return;
+#endif
+
        aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
        if (likely(aentry)) {
                spin_lock(&as_lock);
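
The non-Xen path above batches addresses on a locked list so the expensive
vunmap() calls can be done later in bulk (purge_addresses() elsewhere in this
file); the CONFIG_XEN path unmaps eagerly so no stale read-only alias of the
page survives to block pagetable pinning.  Below is a minimal userspace model
of the same defer-or-eager pattern, assuming nothing from the kernel:
malloc/free stand in for vmap/vunmap, and the names (deferred_t,
purge_addresses) are hypothetical stand-ins for a_list_t and friends.

    #include <stdlib.h>
    #include <pthread.h>

    typedef struct deferred {
            void            *addr;
            struct deferred *next;
    } deferred_t;

    static deferred_t *deferred_list;
    static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;

    static void free_address(void *addr)
    {
    #ifdef EAGER_FREE       /* analogue of the CONFIG_XEN branch */
            free(addr);     /* release now; nothing stays mapped */
            return;
    #endif
            deferred_t *entry = malloc(sizeof(*entry));

            if (entry) {    /* queue cheaply; a worker purges in bulk */
                    pthread_mutex_lock(&deferred_lock);
                    entry->addr = addr;
                    entry->next = deferred_list;
                    deferred_list = entry;
                    pthread_mutex_unlock(&deferred_lock);
            } else {        /* no memory for the node: free synchronously */
                    free(addr);
            }
    }

    static void purge_addresses(void)   /* the batch worker's side */
    {
            deferred_t *entry, *old;

            pthread_mutex_lock(&deferred_lock);
            entry = deferred_list;
            deferred_list = NULL;
            pthread_mutex_unlock(&deferred_lock);

            while (entry) {
                    free(entry->addr);
                    old = entry;
                    entry = entry->next;
                    free(old);
            }
    }

    int main(void)
    {
            free_address(malloc(64));
            free_address(malloc(64));
            purge_addresses();
            return 0;
    }
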
@@ -374,8 +387,6 @@ _xfs_buf_lookup_pages(
                if (unlikely(page == NULL)) {
                        if (flags & XBF_READ_AHEAD) {
                                bp->b_page_count = i;
-                               for (i = 0; i < bp->b_page_count; i++)
-                                       unlock_page(bp->b_pages[i]);
                                return -ENOMEM;
                        }
 
@@ -405,24 +416,17 @@ _xfs_buf_lookup_pages(
                ASSERT(!PagePrivate(page));
                if (!PageUptodate(page)) {
                        page_count--;
-                       if (blocksize >= PAGE_CACHE_SIZE) {
-                               if (flags & XBF_READ)
-                                       bp->b_locked = 1;
-                       } else if (!PagePrivate(page)) {
+                       if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) {
                                if (test_page_region(page, offset, nbytes))
                                        page_count++;
                        }
                }
 
+               unlock_page(page);
                bp->b_pages[i] = page;
                offset = 0;
        }
 
-       if (!bp->b_locked) {
-               for (i = 0; i < bp->b_page_count; i++)
-                       unlock_page(bp->b_pages[i]);
-       }
-
        if (page_count == bp->b_page_count)
                bp->b_flags |= XBF_DONE;
 
@@ -712,15 +716,15 @@ xfs_buf_associate_memory(
 {
        int                     rval;
        int                     i = 0;
-       size_t                  ptr;
-       size_t                  end, end_cur;
-       off_t                   offset;
+       unsigned long           pageaddr;
+       unsigned long           offset;
+       size_t                  buflen;
        int                     page_count;
 
-       page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
-       offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
-       if (offset && (len > PAGE_CACHE_SIZE))
-               page_count++;
+       pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
+       offset = (unsigned long)mem - pageaddr;
+       buflen = PAGE_CACHE_ALIGN(len + offset);
+       page_count = buflen >> PAGE_CACHE_SHIFT;
 
        /* Free any previous set of page pointers */
        if (bp->b_pages)
@@ -734,22 +738,14 @@ xfs_buf_associate_memory(
                return rval;
 
        bp->b_offset = offset;
-       ptr = (size_t) mem & PAGE_CACHE_MASK;
-       end = PAGE_CACHE_ALIGN((size_t) mem + len);
-       end_cur = end;
-       /* set up first page */
-       bp->b_pages[0] = mem_to_page(mem);
-
-       ptr += PAGE_CACHE_SIZE;
-       bp->b_page_count = ++i;
-       while (ptr < end) {
-               bp->b_pages[i] = mem_to_page((void *)ptr);
-               bp->b_page_count = ++i;
-               ptr += PAGE_CACHE_SIZE;
+
+       for (i = 0; i < bp->b_page_count; i++) {
+               bp->b_pages[i] = mem_to_page((void *)pageaddr);
+               pageaddr += PAGE_CACHE_SIZE;
        }
-       bp->b_locked = 0;
 
-       bp->b_count_desired = bp->b_buffer_length = len;
+       bp->b_count_desired = len;
+       bp->b_buffer_length = buflen;
        bp->b_flags |= XBF_MAPPED;
 
        return 0;
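
The rewritten xfs_buf_associate_memory() rounds the start of the caller's
buffer down to a page boundary and the combined offset + length up, so the
page count comes out right even when a short buffer straddles a page
boundary; the old code only bumped the count when len itself exceeded a page,
and so came up one page short in exactly that case.  A standalone sketch of
the arithmetic, with PAGE_SZ standing in for PAGE_CACHE_SIZE and a made-up
address:

    #include <stdio.h>

    #define PAGE_SZ         4096UL
    #define PAGE_MASK_      (~(PAGE_SZ - 1))
    #define PAGE_ALIGN_(x)  (((x) + PAGE_SZ - 1) & PAGE_MASK_)

    int main(void)
    {
            unsigned long mem = 0x10000e00UL;  /* 0xe00 bytes into a page */
            unsigned long len = 1024;          /* ends in the next page */
            unsigned long i;

            /* new computation: align offset and length together */
            unsigned long pageaddr = mem & PAGE_MASK_;
            unsigned long offset = mem - pageaddr;
            unsigned long buflen = PAGE_ALIGN_(len + offset);
            unsigned long page_count = buflen / PAGE_SZ;

            /* old computation: misses the short straddling buffer */
            unsigned long old_count = PAGE_ALIGN_(len) / PAGE_SZ;
            if (offset && len > PAGE_SZ)
                    old_count++;

            printf("old=%lu new=%lu\n", old_count, page_count); /* old=1 new=2 */

            /* the simplified loop: the first page is no longer special */
            for (i = 0; i < page_count; i++, pageaddr += PAGE_SZ)
                    printf("page %lu at %#lx\n", i, pageaddr);
            return 0;
    }
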
@@ -997,7 +993,18 @@ xfs_buf_iodone_work(
        xfs_buf_t               *bp =
                container_of(work, xfs_buf_t, b_iodone_work);
 
-       if (bp->b_iodone)
+       /*
+        * We can get an EOPNOTSUPP in response to ordered writes.  Here we
+        * clear the ordered flag and reissue them.  We can't tell the higher
+        * layers directly that they should not issue ordered I/O anymore, so
+        * they need to check if the ordered flag was cleared during I/O completion.
+        */
+       if ((bp->b_error == EOPNOTSUPP) &&
+           (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
+               XB_TRACE(bp, "ordered_retry", bp->b_iodone);
+               bp->b_flags &= ~XBF_ORDERED;
+               xfs_buf_iorequest(bp);
+       } else if (bp->b_iodone)
                (*(bp->b_iodone))(bp);
        else if (bp->b_flags & XBF_ASYNC)
                xfs_buf_relse(bp);
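
The new branch above turns a device's "barriers not supported" error into a
silent downgrade: the ordered (barrier) flag is dropped and the write is
resubmitted as a plain async write, and callers later notice the cleared flag
and stop requesting barriers.  A userspace model of that retry logic, where
buf_t, the flag values, and submit_write() are all hypothetical stand-ins:

    #include <stdio.h>
    #include <errno.h>

    #define BUF_ORDERED     (1 << 0)
    #define BUF_ASYNC       (1 << 1)

    typedef struct {
            int     flags;
            int     error;
    } buf_t;

    static int submit_write(buf_t *bp)
    {
            /* pretend the device rejects barrier writes */
            return (bp->flags & BUF_ORDERED) ? EOPNOTSUPP : 0;
    }

    static void iodone(buf_t *bp)
    {
            if (bp->error == EOPNOTSUPP &&
                (bp->flags & (BUF_ORDERED | BUF_ASYNC)) ==
                             (BUF_ORDERED | BUF_ASYNC)) {
                    bp->flags &= ~BUF_ORDERED;      /* retry unordered */
                    bp->error = submit_write(bp);
                    return;
            }
            /* normal completion path would run here */
    }

    int main(void)
    {
            buf_t bp = { .flags = BUF_ORDERED | BUF_ASYNC };

            bp.error = submit_write(&bp);
            iodone(&bp);
            printf("error=%d ordered=%d\n",
                   bp.error, !!(bp.flags & BUF_ORDERED));  /* 0 and 0 */
            return 0;
    }
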
@@ -1008,7 +1015,7 @@ xfs_buf_ioend(
        xfs_buf_t               *bp,
        int                     schedule)
 {
-       bp->b_flags &= ~(XBF_READ | XBF_WRITE);
+       bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
        if (bp->b_error == 0)
                bp->b_flags |= XBF_DONE;
 
@@ -1082,40 +1089,24 @@ xfs_buf_iostart(
        return status;
 }
 
-STATIC_INLINE int
-_xfs_buf_iolocked(
-       xfs_buf_t               *bp)
-{
-       ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
-       if (bp->b_flags & XBF_READ)
-               return bp->b_locked;
-       return 0;
-}
-
 STATIC_INLINE void
 _xfs_buf_ioend(
        xfs_buf_t               *bp,
        int                     schedule)
 {
-       if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
-               bp->b_locked = 0;
+       if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
                xfs_buf_ioend(bp, schedule);
-       }
 }
 
-STATIC int
+STATIC void
 xfs_buf_bio_end_io(
        struct bio              *bio,
-       unsigned int            bytes_done,
        int                     error)
 {
        xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
        unsigned int            blocksize = bp->b_target->bt_bsize;
        struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
 
-       if (bio->bi_size)
-               return 1;
-
        if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                bp->b_error = EIO;
 
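With the b_locked bookkeeping gone, _xfs_buf_ioend() reduces to pure
reference counting on b_io_remaining: the submitter holds one count, each
in-flight bio holds one, and whichever side drops the counter to zero runs
the final completion exactly once.  The same pattern in portable C11 atomics
(buf_t and the function names are hypothetical; atomic_fetch_sub() returning
1 corresponds to the kernel's atomic_dec_and_test() reporting that the
counter reached zero):

    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct {
            atomic_int      io_remaining;
            int             done;
    } buf_t;

    static void buf_ioend(buf_t *bp)
    {
            bp->done = 1;           /* final completion: runs once */
    }

    static void chunk_ioend(buf_t *bp)
    {
            if (atomic_fetch_sub(&bp->io_remaining, 1) == 1)
                    buf_ioend(bp);  /* we dropped the last reference */
    }

    int main(void)
    {
            buf_t bp = { .io_remaining = 1, .done = 0 }; /* submitter's ref */
            int i;

            for (i = 0; i < 3; i++)         /* issue three chunks */
                    atomic_fetch_add(&bp.io_remaining, 1);
            for (i = 0; i < 3; i++)         /* each chunk completes */
                    chunk_ioend(&bp);
            chunk_ioend(&bp);               /* submitter drops its ref */
            printf("done=%d\n", bp.done);   /* done=1 */
            return 0;
    }
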
@@ -1135,28 +1126,22 @@ xfs_buf_bio_end_io(
 
                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
-
-               if (_xfs_buf_iolocked(bp)) {
-                       unlock_page(page);
-               }
        } while (bvec >= bio->bi_io_vec);
 
        _xfs_buf_ioend(bp, 1);
        bio_put(bio);
-       return 0;
 }
 
 STATIC void
 _xfs_buf_ioapply(
        xfs_buf_t               *bp)
 {
-       int                     i, rw, map_i, total_nr_pages, nr_pages;
+       int                     rw, map_i, total_nr_pages, nr_pages;
        struct bio              *bio;
        int                     offset = bp->b_offset;
        int                     size = bp->b_count_desired;
        sector_t                sector = bp->b_bn;
        unsigned int            blocksize = bp->b_target->bt_bsize;
-       int                     locking = _xfs_buf_iolocked(bp);
 
        total_nr_pages = bp->b_page_count;
        map_i = 0;
@@ -1179,7 +1164,7 @@ _xfs_buf_ioapply(
         * filesystem block size is not smaller than the page size.
         */
        if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
-           (bp->b_flags & XBF_READ) && locking &&
+           (bp->b_flags & XBF_READ) &&
            (blocksize >= PAGE_CACHE_SIZE)) {
                bio = bio_alloc(GFP_NOIO, 1);
 
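The special case above reads the whole page with a single bio.  For context,
building and submitting such a one-page read with the bio API this tree uses
looks roughly like the sketch below; this is an illustration of the API shape
only, assuming bp, sector, and offset from the surrounding function, not a
quote of the lines the diff omits.

    bio = bio_alloc(GFP_NOIO, 1);                   /* room for one vec */

    bio->bi_bdev = bp->b_target->bt_bdev;
    bio->bi_sector = sector - (offset >> BBSHIFT);  /* page-aligned start */
    bio->bi_end_io = xfs_buf_bio_end_io;
    bio->bi_private = bp;

    bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);

    atomic_inc(&bp->b_io_remaining);                /* bio's completion ref */
    submit_bio(READ, bio);
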
@@ -1196,24 +1181,6 @@ _xfs_buf_ioapply(
                goto submit_io;
        }
 
-       /* Lock down the pages which we need to for the request */
-       if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
-               for (i = 0; size; i++) {
-                       int             nbytes = PAGE_CACHE_SIZE - offset;
-                       struct page     *page = bp->b_pages[i];
-
-                       if (nbytes > size)
-                               nbytes = size;
-
-                       lock_page(page);
-
-                       size -= nbytes;
-                       offset = 0;
-               }
-               offset = bp->b_offset;
-               size = bp->b_count_desired;
-       }
-
 next_chunk:
        atomic_inc(&bp->b_io_remaining);
        nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
@@ -1560,7 +1527,7 @@ xfs_alloc_delwrite_queue(
 
        INIT_LIST_HEAD(&btp->bt_list);
        INIT_LIST_HEAD(&btp->bt_delwrite_queue);
-       spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
+       spin_lock_init(&btp->bt_delwrite_lock);
        btp->bt_flags = 0;
        btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
        if (IS_ERR(btp->bt_task)) {
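
The fix above replaces XFS's legacy spinlock_init() wrapper, which took an
extra name argument, with the kernel's native initializer.  For reference,
the two native initialization styles (struct demo_target is hypothetical):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_static_lock);       /* static init */

    struct demo_target {
            spinlock_t      lock;
    };

    static void demo_setup(struct demo_target *t)
    {
            spin_lock_init(&t->lock);       /* dynamic init, no name */
    }
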
@@ -1731,6 +1698,8 @@ xfsbufd(
 
        current->flags |= PF_MEMALLOC;
 
+       set_freezable();
+
        do {
                if (unlikely(freezing(current))) {
                        set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
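
set_freezable() matters here because kernel threads are skipped by the
freezer unless they opt in: without it, the freezing(current) check below
would never fire and xfsbufd could keep issuing I/O across suspend.  A
minimal sketch of the freezable-daemon pattern this hunk completes, as
hypothetical module code (demo_daemon is made up; set_freezable(),
try_to_freeze(), and the kthread calls are the real APIs):

    #include <linux/kthread.h>
    #include <linux/freezer.h>
    #include <linux/sched.h>

    static int demo_daemon(void *data)
    {
            set_freezable();                /* opt in to the freezer */

            while (!kthread_should_stop()) {
                    try_to_freeze();        /* park here across suspend */
                    /* ... flush delayed-write buffers ... */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }
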