Btrfs: Add delayed allocation to the extent based page tree code

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/version.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"

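/*
 * Helper for the write path: copy write_bytes of data from the user
 * buffer into the prepared (locked) pages.  The first page is filled
 * starting at the in-page offset of pos; later pages start at offset 0.
 * Returns -EFAULT if any part of the user buffer faults.
 */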
static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
                                struct page **prepared_pages,
                                const char __user * buf)
{
        long page_fault = 0;
        int i;
        int offset = pos & (PAGE_CACHE_SIZE - 1);

        for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[i];
                fault_in_pages_readable(buf, count);

                /* Copy data from userspace to the current page */
                kmap(page);
                page_fault = __copy_from_user(page_address(page) + offset,
                                              buf, count);
                /* Flush processor's dcache for this page */
                flush_dcache_page(page);
                kunmap(page);
                buf += count;
                write_bytes -= count;

                if (page_fault)
                        break;
        }
        return page_fault ? -EFAULT : 0;
}

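/*
 * Unlock and release the page references taken by prepare_pages().
 * The array may have empty slots if page setup failed part way
 * through, so stop at the first NULL entry.
 */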
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                if (!pages[i])
                        break;
                unlock_page(pages[i]);
                mark_page_accessed(pages[i]);
                page_cache_release(pages[i]);
        }
}

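/*
 * Write a small amount of file data directly into the btree as an
 * inline extent item.  The data is copied out of the supplied page,
 * so the size must stay below PAGE_CACHE_SIZE (enforced by the BUG_ON
 * below) and within the inline limit checked by the caller.
 */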
static int insert_inline_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, struct inode *inode,
                                u64 offset, ssize_t size,
                                struct page *page, size_t page_offset)
{
        struct btrfs_key key;
        struct btrfs_path *path;
        char *ptr, *kaddr;
        struct btrfs_file_extent_item *ei;
        u32 datasize;
        int err = 0;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        btrfs_set_trans_block_group(trans, inode);

        key.objectid = inode->i_ino;
        key.offset = offset;
        key.flags = 0;
        btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
        BUG_ON(size >= PAGE_CACHE_SIZE);
        datasize = btrfs_file_extent_calc_inline_size(size);

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      datasize);
        if (ret) {
                err = ret;
                goto fail;
        }
        ei = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
               path->slots[0], struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(ei, trans->transid);
        btrfs_set_file_extent_type(ei,
                                   BTRFS_FILE_EXTENT_INLINE);
        ptr = btrfs_file_extent_inline_start(ei);

        kaddr = kmap_atomic(page, KM_USER0);
        btrfs_memcpy(root, path->nodes[0]->b_data,
                     ptr, kaddr + page_offset, size);
        kunmap_atomic(kaddr, KM_USER0);
        btrfs_mark_buffer_dirty(path->nodes[0]);
fail:
        btrfs_free_path(path);
        return err;
}

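/*
 * After the user data has been copied into the pages, record the new
 * bytes in the extent map tree.  Writes that cannot live inline (the
 * file is already at least a page, the write does not reach the
 * current end of file, or it is larger than the inline limit) are
 * tagged as delayed allocation and the pages are simply marked dirty;
 * otherwise an inline extent is inserted right away.  Holes are
 * filled in and i_size is updated if the write extended the file.
 */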
static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct file *file,
                                   struct page **pages,
                                   size_t num_pages,
                                   loff_t pos,
                                   size_t write_bytes)
{
        int err = 0;
        int i;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct extent_map *em;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 hint_block;
        u64 num_blocks;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

        em = alloc_extent_map(GFP_NOFS);
        if (!em)
                return -ENOMEM;

        em->bdev = inode->i_sb->s_bdev;

        start_pos = pos & ~((u64)root->blocksize - 1);
        num_blocks = (write_bytes + pos - start_pos + root->blocksize - 1) >>
                        inode->i_blkbits;

        end_of_last_block = start_pos + (num_blocks << inode->i_blkbits) - 1;
        lock_extent(em_tree, start_pos, end_of_last_block, GFP_NOFS);
        mutex_lock(&root->fs_info->fs_mutex);
        trans = btrfs_start_transaction(root, 1);
        if (!trans) {
                err = -ENOMEM;
                goto out_unlock;
        }
        btrfs_set_trans_block_group(trans, inode);
        inode->i_blocks += num_blocks << 3;
        hint_block = 0;

        if ((end_of_last_block & 4095) == 0) {
                printk("strange end of last %Lu %lu %Lu\n",
                       start_pos, write_bytes, end_of_last_block);
        }
        set_extent_uptodate(em_tree, start_pos, end_of_last_block, GFP_NOFS);

        /* FIXME...EIEIO, ENOSPC and more */

        /* insert any holes we need to create */
        if (inode->i_size < start_pos) {
                u64 last_pos_in_file;
                u64 hole_size;
                u64 mask = root->blocksize - 1;
                last_pos_in_file = (isize + mask) & ~mask;
                hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
                hole_size >>= inode->i_blkbits;
                if (last_pos_in_file < start_pos) {
                        err = btrfs_insert_file_extent(trans, root,
                                                       inode->i_ino,
                                                       last_pos_in_file,
                                                       0, 0, hole_size);
                }
                if (err)
                        goto failed;
        }

        /*
         * either mark the range for delayed allocation or, for small
         * files, insert the new bytes as an inline data extent
         */
        if (isize >= PAGE_CACHE_SIZE || pos + write_bytes < inode->i_size ||
            pos + write_bytes - start_pos > BTRFS_MAX_INLINE_DATA_SIZE(root)) {
                u64 last_end;
                for (i = 0; i < num_pages; i++) {
                        struct page *p = pages[i];
                        SetPageUptodate(p);
                        set_page_dirty(p);
                }
                last_end = pages[num_pages - 1]->index << PAGE_CACHE_SHIFT;
                last_end += PAGE_CACHE_SIZE - 1;
                set_extent_delalloc(em_tree, start_pos, end_of_last_block,
                                    GFP_NOFS);
        } else {
                struct page *p = pages[0];
                /* step one, delete the existing extents in this range */
                /* FIXME blocksize != pagesize */
                if (start_pos < inode->i_size) {
                        err = btrfs_drop_extents(trans, root, inode, start_pos,
                                 (pos + write_bytes + root->blocksize - 1) &
                                 ~((u64)root->blocksize - 1), &hint_block);
                        if (err)
                                goto failed;
                }

                err = insert_inline_extent(trans, root, inode, start_pos,
                                           end_pos - start_pos, p, 0);
                BUG_ON(err);
                em->start = start_pos;
                em->end = end_pos;
                em->block_start = EXTENT_MAP_INLINE;
                em->block_end = EXTENT_MAP_INLINE;
                add_extent_mapping(em_tree, em);
        }
        if (end_pos > isize) {
                i_size_write(inode, end_pos);
                btrfs_update_inode(trans, root, inode);
        }
failed:
        err = btrfs_end_transaction(trans, root);
out_unlock:
        mutex_unlock(&root->fs_info->fs_mutex);
        unlock_extent(em_tree, start_pos, end_of_last_block, GFP_NOFS);
        free_extent_map(em);
        return err;
}

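/*
 * Drop any cached extent mappings that overlap [start, end] so later
 * lookups go back to the btree.  Each mapping is freed twice: once
 * for the reference returned by the lookup and once for the tree's
 * own reference.
 */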
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
{
        struct extent_map *em;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;

        while (1) {
                em = lookup_extent_mapping(em_tree, start, end);
                if (!em)
                        break;
                remove_extent_mapping(em_tree, em);
                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range [start, end).  hint_block is filled in with a block
 * number that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode,
                       u64 start, u64 end, u64 *hint_block)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_leaf *leaf;
        int slot;
        struct btrfs_file_extent_item *extent;
        u64 extent_end = 0;
        int keep;
        struct btrfs_file_extent_item old;
        struct btrfs_path *path;
        u64 search_start = start;
        int bookend;
        int found_type;
        int found_extent;
        int found_inline;
        int recow;

        btrfs_drop_extent_cache(inode, start, end - 1);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        while (1) {
                recow = 0;
                btrfs_release_path(root, path);
                ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
                                               search_start, -1);
                if (ret < 0)
                        goto out;
                if (ret > 0) {
                        if (path->slots[0] == 0) {
                                ret = 0;
                                goto out;
                        }
                        path->slots[0]--;
                }
next_slot:
                keep = 0;
                bookend = 0;
                found_extent = 0;
                found_inline = 0;
                extent = NULL;
                leaf = btrfs_buffer_leaf(path->nodes[0]);
                slot = path->slots[0];
                ret = 0;
                btrfs_disk_key_to_cpu(&key, &leaf->items[slot].key);
                if (key.offset >= end || key.objectid != inode->i_ino) {
                        goto out;
                }
                if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY) {
                        goto out;
                }
                if (recow) {
                        search_start = key.offset;
                        continue;
                }
                if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
                        extent = btrfs_item_ptr(leaf, slot,
                                                struct btrfs_file_extent_item);
                        found_type = btrfs_file_extent_type(extent);
                        if (found_type == BTRFS_FILE_EXTENT_REG) {
                                extent_end = key.offset +
                                        (btrfs_file_extent_num_blocks(extent) <<
                                         inode->i_blkbits);
                                found_extent = 1;
                        } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
                                found_inline = 1;
                                extent_end = key.offset +
                                     btrfs_file_extent_inline_len(leaf->items +
                                                                  slot);
                        }
                } else {
                        extent_end = search_start;
                }

                /* we found nothing we can drop */
                if ((!found_extent && !found_inline) ||
                    search_start >= extent_end) {
                        int nextret;
                        u32 nritems;
                        nritems = btrfs_header_nritems(
                                        btrfs_buffer_header(path->nodes[0]));
                        if (slot >= nritems - 1) {
                                nextret = btrfs_next_leaf(root, path);
                                if (nextret)
                                        goto out;
                                recow = 1;
                        } else {
                                path->slots[0]++;
                        }
                        goto next_slot;
                }

                /* FIXME, there's only one inline extent allowed right now */
                if (found_inline) {
                        u64 mask = root->blocksize - 1;
                        search_start = (extent_end + mask) & ~mask;
                } else
                        search_start = extent_end;

                if (end < extent_end && end >= key.offset) {
                        if (found_extent) {
                                u64 disk_blocknr =
                                        btrfs_file_extent_disk_blocknr(extent);
                                u64 disk_num_blocks =
                                      btrfs_file_extent_disk_num_blocks(extent);
                                memcpy(&old, extent, sizeof(old));
                                if (disk_blocknr != 0) {
                                        ret = btrfs_inc_extent_ref(trans, root,
                                                 disk_blocknr, disk_num_blocks);
                                        BUG_ON(ret);
                                }
                        }
                        WARN_ON(found_inline);
                        bookend = 1;
                }
                /* truncate existing extent */
                if (start > key.offset) {
                        u64 new_num;
                        u64 old_num;
                        keep = 1;
                        WARN_ON(start & (root->blocksize - 1));
                        if (found_extent) {
                                new_num = (start - key.offset) >>
                                        inode->i_blkbits;
                                old_num = btrfs_file_extent_num_blocks(extent);
                                *hint_block =
                                        btrfs_file_extent_disk_blocknr(extent);
                                if (btrfs_file_extent_disk_blocknr(extent)) {
                                        inode->i_blocks -=
                                                (old_num - new_num) << 3;
                                }
                                btrfs_set_file_extent_num_blocks(extent,
                                                                 new_num);
                                btrfs_mark_buffer_dirty(path->nodes[0]);
                        } else {
                                WARN_ON(1);
                        }
                }
                /* delete the entire extent */
                if (!keep) {
                        u64 disk_blocknr = 0;
                        u64 disk_num_blocks = 0;
                        u64 extent_num_blocks = 0;
                        if (found_extent) {
                                disk_blocknr =
                                      btrfs_file_extent_disk_blocknr(extent);
                                disk_num_blocks =
                                      btrfs_file_extent_disk_num_blocks(extent);
                                extent_num_blocks =
                                      btrfs_file_extent_num_blocks(extent);
                                *hint_block =
                                        btrfs_file_extent_disk_blocknr(extent);
                        }
                        ret = btrfs_del_item(trans, root, path);
                        /* TODO update progress marker and return */
                        BUG_ON(ret);
                        btrfs_release_path(root, path);
                        extent = NULL;
                        if (found_extent && disk_blocknr != 0) {
                                inode->i_blocks -= extent_num_blocks << 3;
                                ret = btrfs_free_extent(trans, root,
                                                        disk_blocknr,
                                                        disk_num_blocks, 0);
                        }

                        BUG_ON(ret);
                        if (!bookend && search_start >= end) {
                                ret = 0;
                                goto out;
                        }
                        if (!bookend)
                                continue;
                }
                /* create bookend, splitting the extent in two */
                if (bookend && found_extent) {
                        struct btrfs_key ins;
                        ins.objectid = inode->i_ino;
                        ins.offset = end;
                        ins.flags = 0;
                        btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
                        btrfs_release_path(root, path);
                        ret = btrfs_insert_empty_item(trans, root, path, &ins,
                                                      sizeof(*extent));

                        if (ret) {
                                btrfs_print_leaf(root,
                                        btrfs_buffer_leaf(path->nodes[0]));
                                printk("got %d on inserting %Lu %u %Lu start %Lu end %Lu found %Lu %Lu\n",
                                       ret, ins.objectid, ins.flags, ins.offset,
                                       start, end, key.offset, extent_end);
                        }
                        BUG_ON(ret);
                        extent = btrfs_item_ptr(
                                    btrfs_buffer_leaf(path->nodes[0]),
                                    path->slots[0],
                                    struct btrfs_file_extent_item);
                        btrfs_set_file_extent_disk_blocknr(extent,
                                    btrfs_file_extent_disk_blocknr(&old));
                        btrfs_set_file_extent_disk_num_blocks(extent,
                                    btrfs_file_extent_disk_num_blocks(&old));

                        btrfs_set_file_extent_offset(extent,
                                    btrfs_file_extent_offset(&old) +
                                    ((end - key.offset) >> inode->i_blkbits));
                        WARN_ON(btrfs_file_extent_num_blocks(&old) <
                                (extent_end - end) >> inode->i_blkbits);
                        btrfs_set_file_extent_num_blocks(extent,
                                    (extent_end - end) >> inode->i_blkbits);

                        btrfs_set_file_extent_type(extent,
                                                   BTRFS_FILE_EXTENT_REG);
                        btrfs_set_file_extent_generation(extent,
                                    btrfs_file_extent_generation(&old));
                        btrfs_mark_buffer_dirty(path->nodes[0]);
                        if (btrfs_file_extent_disk_blocknr(&old) != 0) {
                                inode->i_blocks +=
                                      btrfs_file_extent_num_blocks(extent) << 3;
                        }
                        ret = 0;
                        goto out;
                }
        }
out:
        btrfs_free_path(path);
        return ret;
}

/*
 * this gets pages into the page cache and locks them down
 */
static int prepare_pages(struct btrfs_root *root,
                         struct file *file,
                         struct page **pages,
                         size_t num_pages,
                         loff_t pos,
                         unsigned long first_index,
                         unsigned long last_index,
                         size_t write_bytes)
{
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = file->f_path.dentry->d_inode;
        int err = 0;
        u64 num_blocks;
        u64 start_pos;

        start_pos = pos & ~((u64)root->blocksize - 1);
        num_blocks = (write_bytes + pos - start_pos + root->blocksize - 1) >>
                        inode->i_blkbits;

        memset(pages, 0, num_pages * sizeof(struct page *));

        for (i = 0; i < num_pages; i++) {
                pages[i] = grab_cache_page(inode->i_mapping, index + i);
                if (!pages[i]) {
                        err = -ENOMEM;
                        BUG_ON(1);
                }
                cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
                wait_on_page_writeback(pages[i]);
                if (!PagePrivate(pages[i])) {
                        SetPagePrivate(pages[i]);
                        set_page_private(pages[i], 1);
                        WARN_ON(!pages[i]->mapping->a_ops->invalidatepage);
                        page_cache_get(pages[i]);
                }
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
}

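/*
 * The main buffered write path.  Roughly: pin the first and last
 * partially covered pages so they are up to date, then loop copying
 * user data into batches of up to nrptrs pages, handing each batch to
 * dirty_and_release_pages() to be recorded as delayed allocation (or
 * an inline extent) before the pages are unlocked and released.
 * O_DIRECT writes are not supported here and return -EINVAL.
 */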
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        loff_t pos;
        size_t num_written = 0;
        int err = 0;
        int ret = 0;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page **pages = NULL;
        int nrptrs;
        struct page *pinned[2];
        unsigned long first_index;
        unsigned long last_index;

        nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
                     PAGE_CACHE_SIZE / (sizeof(struct page *)));
        pinned[0] = NULL;
        pinned[1] = NULL;
        if (file->f_flags & O_DIRECT)
                return -EINVAL;
        pos = *ppos;
        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
        current->backing_dev_info = inode->i_mapping->backing_dev_info;
        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err)
                goto out;
        if (count == 0)
                goto out;
        err = remove_suid(file->f_path.dentry);
        if (err)
                goto out;
        file_update_time(file);

        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto out;
        }

        mutex_lock(&inode->i_mutex);
        first_index = pos >> PAGE_CACHE_SHIFT;
        last_index = (pos + count) >> PAGE_CACHE_SHIFT;

        /*
         * there are lots of better ways to do this, but this code
         * makes sure the first and last page in the file range are
         * up to date and ready for cow
         */
        if ((pos & (PAGE_CACHE_SIZE - 1))) {
                pinned[0] = grab_cache_page(inode->i_mapping, first_index);
                if (!PageUptodate(pinned[0])) {
                        ret = btrfs_readpage(NULL, pinned[0]);
                        BUG_ON(ret);
                        wait_on_page_locked(pinned[0]);
                } else {
                        unlock_page(pinned[0]);
                }
        }
        if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
                pinned[1] = grab_cache_page(inode->i_mapping, last_index);
                if (!PageUptodate(pinned[1])) {
                        ret = btrfs_readpage(NULL, pinned[1]);
                        BUG_ON(ret);
                        wait_on_page_locked(pinned[1]);
                } else {
                        unlock_page(pinned[1]);
                }
        }

        while (count > 0) {
                size_t offset = pos & (PAGE_CACHE_SIZE - 1);
                size_t write_bytes = min(count, nrptrs *
                                        (size_t)PAGE_CACHE_SIZE -
                                         offset);
                size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
                                        PAGE_CACHE_SHIFT;

                WARN_ON(num_pages > nrptrs);
                memset(pages, 0, num_pages * sizeof(struct page *));
                ret = prepare_pages(root, file, pages, num_pages,
                                    pos, first_index, last_index,
                                    write_bytes);
                if (ret)
                        goto out_unlock;

                ret = btrfs_copy_from_user(pos, num_pages,
                                           write_bytes, pages, buf);
                if (ret) {
                        btrfs_drop_pages(pages, num_pages);
                        goto out_unlock;
                }

                ret = dirty_and_release_pages(NULL, root, file, pages,
                                              num_pages, pos, write_bytes);
                btrfs_drop_pages(pages, num_pages);
                if (ret)
                        goto out_unlock;

                buf += write_bytes;
                count -= write_bytes;
                pos += write_bytes;
                num_written += write_bytes;

                balance_dirty_pages_ratelimited_nr(inode->i_mapping, num_pages);
                btrfs_btree_balance_dirty(root);
                cond_resched();
        }
out_unlock:
        mutex_unlock(&inode->i_mutex);
out:
        kfree(pages);
        if (pinned[0])
                page_cache_release(pinned[0]);
        if (pinned[1])
                page_cache_release(pinned[1]);
        *ppos = pos;
        current->backing_dev_info = NULL;
        return num_written ? num_written : err;
}

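/*
 * fsync: if the transaction that last touched this inode has already
 * been committed there is nothing to do; otherwise start and commit a
 * transaction so the inode's metadata reaches disk.
 */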
static int btrfs_sync_file(struct file *file,
                           struct dentry *dentry, int datasync)
{
        struct inode *inode = dentry->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;
        struct btrfs_trans_handle *trans;

        /*
         * check the transaction that last modified this inode
         * and see if it's already been committed
         */
        mutex_lock(&root->fs_info->fs_mutex);
        if (!BTRFS_I(inode)->last_trans)
                goto out;
        mutex_lock(&root->fs_info->trans_mutex);
        if (BTRFS_I(inode)->last_trans <=
            root->fs_info->last_trans_committed) {
                BTRFS_I(inode)->last_trans = 0;
                mutex_unlock(&root->fs_info->trans_mutex);
                goto out;
        }
        mutex_unlock(&root->fs_info->trans_mutex);

        /*
         * ok we haven't committed the transaction yet, let's do a commit
         */
        trans = btrfs_start_transaction(root, 1);
        if (!trans) {
                ret = -ENOMEM;
                goto out;
        }
        ret = btrfs_commit_transaction(trans, root);
out:
        mutex_unlock(&root->fs_info->fs_mutex);
        return ret > 0 ? -EIO : ret;
}

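/*
 * mmap support: use the generic filemap fault handlers (nopage and
 * populate on kernels older than 2.6.23) and hook page_mkwrite so
 * btrfs can prepare a page before it is written through a mapping.
 */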
static struct vm_operations_struct btrfs_file_vm_ops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
        .nopage         = filemap_nopage,
        .populate       = filemap_populate,
#else
        .fault          = filemap_fault,
#endif
        .page_mkwrite   = btrfs_page_mkwrite,
};

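/*
 * mmap entry point: install btrfs_file_vm_ops and update atime via
 * file_accessed(), much like generic_file_mmap().
 */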
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
        vma->vm_ops = &btrfs_file_vm_ops;
        file_accessed(filp);
        return 0;
}

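/*
 * file_operations for regular btrfs files: reads use the generic page
 * cache paths, writes go through btrfs_file_write above, and fsync is
 * handled by btrfs_sync_file.
 */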
struct file_operations btrfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
        .write          = btrfs_file_write,
        .mmap           = btrfs_file_mmap,
        .open           = generic_file_open,
        .ioctl          = btrfs_ioctl,
        .fsync          = btrfs_sync_file,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = btrfs_compat_ioctl,
#endif
};