/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"

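/**
 * gfs2_page_add_databufs - Add the buffers in a page to a transaction
 * @ip: The GFS2 inode
 * @page: The page
 * @from: Offset of the start of the range within the page
 * @to: Offset of the end of the range within the page
 *
 * Each buffer head of @page which overlaps the byte range [@from, @to)
 * is added to the current transaction. For journaled data inodes the
 * buffers are also marked uptodate.
 */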
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
                                   unsigned int from, unsigned int to)
{
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        unsigned int start, end;

        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from || start >= to)
                        continue;
                if (gfs2_is_jdata(ip))
                        set_buffer_uptodate(bh);
                gfs2_trans_add_bh(ip->i_gl, bh, 0);
        }
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, bh_result, 0);
        if (error)
                return error;
        if (!buffer_mapped(bh_result))
                return -EIO;
        return 0;
}

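/**
 * gfs2_get_block_direct - Fills in a buffer head for direct I/O
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file (no allocation is done here)
 *
 * Returns: errno
 */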
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
                                 struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if the page should go on to be written, zero if it was
 *          dealt with here without error, or a negative error code.
 */

static int gfs2_writepage_common(struct page *page,
                                 struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                goto out;
        if (current->journal_info)
                goto redirty;
        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
        if (page->index > end_index || (page->index == end_index && !offset)) {
                page->mapping->a_ops->invalidatepage(page, 0);
                goto out;
        }
        return 1;
redirty:
        redirty_page_for_writepage(wbc, page);
out:
        unlock_page(page);
        return 0;
}

/**
 * gfs2_writeback_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_writeback_writepage(struct page *page,
                                    struct writeback_control *wbc)
{
        int ret;

        ret = gfs2_writepage_common(page, wbc);
        if (ret <= 0)
                return ret;

        ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc);
        if (ret == -EAGAIN)
                ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
        return ret;
}

/**
 * gfs2_ordered_writepage - Write page for ordered data files
 * @page: The page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_ordered_writepage(struct page *page,
                                  struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        int ret;

        ret = gfs2_writepage_common(page, wbc);
        if (ret <= 0)
                return ret;

        if (!page_has_buffers(page)) {
                create_empty_buffers(page, inode->i_sb->s_blocksize,
                                     (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
        gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1);
        return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (PageChecked(page)) {
                ClearPageChecked(page);
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, inode->i_sb->s_blocksize,
                                             (1 << BH_Dirty)|(1 << BH_Uptodate));
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
        }
        return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int error;
        int done_trans = 0;

        error = gfs2_writepage_common(page, wbc);
        if (error <= 0)
                return error;

        if (PageChecked(page)) {
                if (wbc->sync_mode != WB_SYNC_ALL)
                        goto out_ignore;
                error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
                if (error)
                        goto out_ignore;
                done_trans = 1;
        }
        error = __gfs2_jdata_writepage(page, wbc);
        if (done_trans)
                gfs2_trans_end(sdp);
        return error;

out_ignore:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}

/**
 * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For the data=writeback case we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the
 * number of I/O requests we send and the bmap calls we make in this case.
 */
static int gfs2_writeback_writepages(struct address_space *mapping,
                                     struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: The last page index to consider
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
                                    struct writeback_control *wbc,
                                    struct pagevec *pvec,
                                    int nr_pages, pgoff_t end)
{
        struct inode *inode = mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
        unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        int i;
        int ret;

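        /*
         * One transaction covers the whole pagevec: enough blocks are
         * reserved to allow every block of every page to be journaled.
         */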
        ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
        if (ret < 0)
                return ret;

        for(i = 0; i < nr_pages; i++) {
                struct page *page = pvec->pages[i];

                lock_page(page);

                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        continue;
                }

                if (!wbc->range_cyclic && page->index > end) {
                        ret = 1;
                        unlock_page(page);
                        continue;
                }

                if (wbc->sync_mode != WB_SYNC_NONE)
                        wait_on_page_writeback(page);

                if (PageWriteback(page) ||
                    !clear_page_dirty_for_io(page)) {
                        unlock_page(page);
                        continue;
                }

                /* Is the page fully outside i_size? (truncate in progress) */
                if (page->index > end_index || (page->index == end_index && !offset)) {
                        page->mapping->a_ops->invalidatepage(page, 0);
                        unlock_page(page);
                        continue;
                }

                ret = __gfs2_jdata_writepage(page, wbc);

                if (ret || (--(wbc->nr_to_write) <= 0))
                        ret = 1;
                if (wbc->nonblocking && bdi_write_congested(bdi)) {
                        wbc->encountered_congestion = 1;
                        ret = 1;
                }

        }
        gfs2_trans_end(sdp);
        return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
                                  struct writeback_control *wbc)
{
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t index;
        pgoff_t end;
        int scanned = 0;
        int range_whole = 0;

        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                wbc->encountered_congestion = 1;
                return 0;
        }

        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                scanned = 1;
        }

retry:
        while (!done && (index <= end) &&
               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                              PAGECACHE_TAG_DIRTY,
                                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
                scanned = 1;
                ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
                if (ret)
                        done = 1;
                if (ret > 0)
                        ret = 0;

                pagevec_release(&pvec);
                cond_resched();
        }

        if (!scanned && !done) {
                /*
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                scanned = 1;
                index = 0;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;
        return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
                                 struct writeback_control *wbc)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        int ret;

        ret = gfs2_write_cache_jdata(mapping, wbc);
        if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
                gfs2_log_flush(sdp, ip->i_gl);
                ret = gfs2_write_cache_jdata(mapping, wbc);
        }
        return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        void *kaddr;
        int error;

        /*
         * Due to the order of unstuffing files and ->nopage(), we can be
         * asked for a zero page in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
                zero_user(page, 0, PAGE_CACHE_SIZE);
                return 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
               ip->i_di.di_size);
        memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
        kunmap_atomic(kaddr, KM_USER0);
        flush_dcache_page(page);
        brelse(dibh);
        SetPageUptodate(page);

        return 0;
}

/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. It's also
 * called by gfs2_readpage() once the required lock has been granted.
 *
 * Returns: errno
 */

static int __gfs2_readpage(void *file, struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        int error;

        if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, page);
                unlock_page(page);
        } else {
                error = mpage_readpage(page, gfs2_block_map);
        }

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We use a trylock in order to
 * avoid the page lock / glock ordering problems, returning
 * AOP_TRUNCATED_PAGE in the event that we are unable to get the lock.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_holder *gh;
        int error;

        gh = gfs2_glock_is_locked_by_me(ip->i_gl);
        if (!gh) {
                gh = kmalloc(sizeof(struct gfs2_holder), GFP_NOFS);
                if (!gh)
                        return -ENOBUFS;
                gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, gh);
                unlock_page(page);
                error = gfs2_glock_nq_atime(gh);
                if (likely(error != 0))
                        goto out;
                return AOP_TRUNCATED_PAGE;
        }
        error = __gfs2_readpage(file, page);
        gfs2_glock_dq(gh);
out:
        gfs2_holder_uninit(gh);
        kfree(gh);
        return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @ra_state: The readahead state (or NULL for no readahead)
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */
int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
                       char *buf, loff_t *pos, unsigned size)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        unsigned long index = *pos / PAGE_CACHE_SIZE;
        unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
        unsigned copied = 0;
        unsigned amt;
        struct page *page;
        void *p;

        do {
                amt = size - copied;
                if (offset + size > PAGE_CACHE_SIZE)
                        amt = PAGE_CACHE_SIZE - offset;
                page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
                if (IS_ERR(page))
                        return PTR_ERR(page);
                p = kmap_atomic(page, KM_USER0);
                memcpy(buf + copied, p + offset, amt);
                kunmap_atomic(p, KM_USER0);
                mark_page_accessed(page);
                page_cache_release(page);
                copied += amt;
                index++;
                offset = 0;
        } while(copied < size);
        (*pos) += size;
        return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
        int ret;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
        ret = gfs2_glock_nq_atime(&gh);
        if (unlikely(ret))
                goto out_uninit;
        if (!gfs2_is_stuffed(ip))
                ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = -EIO;
        return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        unsigned int data_blocks, ind_blocks, rblocks;
        int alloc_required;
        int error = 0;
        struct gfs2_alloc *al;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        unsigned to = from + len;
        struct page *page;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &ip->i_gh);
        error = gfs2_glock_nq_atime(&ip->i_gh);
        if (unlikely(error))
                goto out_uninit;

        gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
        error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
        if (error)
                goto out_unlock;

        if (alloc_required) {
                al = gfs2_alloc_get(ip);
                if (!al) {
                        error = -ENOMEM;
                        goto out_unlock;
                }

                error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
                if (error)
                        goto out_alloc_put;

                error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
                if (error)
                        goto out_qunlock;

                al->al_requested = data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip);
                if (error)
                        goto out_qunlock;
        }

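        /*
         * Work out how much journal space to reserve: the dinode plus any
         * indirect blocks, the data blocks themselves if we are journaling
         * data, and statfs/quota changes if blocks are being allocated.
         */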
        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks)
                rblocks += RES_STATFS + RES_QUOTA;

        error = gfs2_trans_begin(sdp, rblocks,
                                 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
        if (error)
                goto out_trans_fail;

        error = -ENOMEM;
        page = __grab_cache_page(mapping, index);
        *pagep = page;
        if (unlikely(!page))
                goto out_endtrans;

        if (gfs2_is_stuffed(ip)) {
                error = 0;
                if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
                        error = gfs2_unstuff_dinode(ip, page);
                        if (error == 0)
                                goto prepare_write;
                } else if (!PageUptodate(page)) {
                        error = stuffed_readpage(ip, page);
                }
                goto out;
        }

prepare_write:
        error = block_prepare_write(page, from, to, gfs2_block_map);
out:
        if (error == 0)
                return 0;

        page_cache_release(page);
        if (pos + len > ip->i_inode.i_size)
                vmtruncate(&ip->i_inode, ip->i_inode.i_size);
out_endtrans:
        gfs2_trans_end(sdp);
out_trans_fail:
        if (alloc_required) {
                gfs2_inplace_release(ip);
out_qunlock:
                gfs2_quota_unlock(ip);
out_alloc_put:
                gfs2_alloc_put(ip);
        }
out_unlock:
        gfs2_glock_dq(&ip->i_gh);
out_uninit:
        gfs2_holder_uninit(&ip->i_gh);
        return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        u64 fs_total, new_free;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);

        spin_lock(&sdp->sd_statfs_spin);
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: the number of bytes copied
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
                                  loff_t pos, unsigned len, unsigned copied,
                                  struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        u64 to = pos + copied;
        void *kaddr;
        unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
        struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;

        BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(buf + pos, kaddr + pos, copied);
        memset(kaddr + pos + copied, 0, len - copied);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);

        if (!PageUptodate(page))
                SetPageUptodate(page);
        unlock_page(page);
        page_cache_release(page);

        if (inode->i_size < to) {
                i_size_write(inode, to);
                ip->i_di.di_size = inode->i_size;
                di->di_size = cpu_to_be64(inode->i_size);
                mark_inode_dirty(inode);
        }

        if (inode == sdp->sd_rindex)
                adjust_fs_space(inode);

        brelse(dibh);
        gfs2_trans_end(sdp);
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct buffer_head *dibh;
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_dinode *di;
        unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
        unsigned int to = from + len;
        int ret;

        BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

        ret = gfs2_meta_inode_buffer(ip, &dibh);
        if (unlikely(ret)) {
                unlock_page(page);
                page_cache_release(page);
                goto failed;
        }

        gfs2_trans_add_bh(ip->i_gl, dibh, 1);

        if (gfs2_is_stuffed(ip))
                return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

        if (!gfs2_is_writeback(ip))
                gfs2_page_add_databufs(ip, page, from, to);

        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);

        if (likely(ret >= 0) && (inode->i_size > ip->i_di.di_size)) {
                di = (struct gfs2_dinode *)dibh->b_data;
                ip->i_di.di_size = inode->i_size;
                di->di_size = cpu_to_be64(inode->i_size);
                mark_inode_dirty(inode);
        }

        if (inode == sdp->sd_rindex)
                adjust_fs_space(inode);

        brelse(dibh);
        gfs2_trans_end(sdp);
failed:
        if (al) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
                gfs2_alloc_put(ip);
        }
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
        SetPageChecked(page);
        return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

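/**
 * gfs2_discard - Forget about a buffer which is being invalidated
 * @sdp: The superblock
 * @bh: The buffer head
 *
 * Clears the dirty state of the buffer, drops any association it has
 * with the log/journal and then unmaps it, so that it will not be
 * written back.
 */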
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        lock_buffer(bh);
        gfs2_log_lock(sdp);
        clear_buffer_dirty(bh);
        bd = bh->b_private;
        if (bd) {
                if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
                        list_del_init(&bd->bd_le.le_list);
                else
                        gfs2_remove_from_journal(bh, current->journal_info, 0);
        }
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        gfs2_log_unlock(sdp);
        unlock_buffer(bh);
}

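/**
 * gfs2_invalidatepage - Invalidate (part of) a page
 * @page: The page being invalidated
 * @offset: The offset within the page from which to invalidate
 *
 * Any buffers which start at or after @offset are discarded from the
 * journal. When the whole page is invalidated (@offset == 0) the
 * PageChecked flag is cleared and we also try to release the page.
 */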
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        struct buffer_head *bh, *head;
        unsigned long pos = 0;

        BUG_ON(!PageLocked(page));
        if (offset == 0)
                ClearPageChecked(page);
        if (!page_has_buffers(page))
                goto out;

        bh = head = page_buffers(page);
        do {
                if (offset <= pos)
                        gfs2_discard(sdp, bh);
                pos += bh->b_size;
                bh = bh->b_this_page;
        } while (bh != head);
out:
        if (offset == 0)
                try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
        /*
         * Should we return an error here? I can't see that O_DIRECT for
         * a stuffed file makes any sense. For now we'll silently fall
         * back to buffered I/O
         */
        if (gfs2_is_stuffed(ip))
                return 0;

        if (offset > i_size_read(&ip->i_inode))
                return 0;
        return 1;
}

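/**
 * gfs2_direct_IO - Read/write directly to/from a file without caching
 * @rw: READ or WRITE
 * @iocb: The I/O control block
 * @iov: The I/O vector
 * @offset: The file offset
 * @nr_segs: The number of segments in @iov
 *
 * Returns: the number of bytes transferred, zero if we fell back to
 *          buffered I/O, or an errno
 */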
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
                              const struct iovec *iov, loff_t offset,
                              unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int rv;

        /*
         * Deferred lock, even if it's a write, since we do no allocation
         * on this path. All we need to change is the atime, and this lock
         * mode ensures that other nodes have flushed their buffered read
         * caches (i.e. their page cache entries for this inode). We do not,
         * unfortunately, have the option of only flushing a range like
         * the VFS does.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
        rv = gfs2_glock_nq_atime(&gh);
        if (rv)
                return rv;
        rv = gfs2_ok_for_dio(ip, rw, offset);
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */

        rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
                                           iov, offset, nr_segs,
                                           gfs2_get_block_direct, NULL);
out:
        gfs2_glock_dq_m(1, &gh);
        gfs2_holder_uninit(&gh);
        return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: non-zero if the buffers were released, zero otherwise
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct inode *aspace = page->mapping->host;
        struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;

        if (!page_has_buffers(page))
                return 0;

        gfs2_log_lock(sdp);
        head = bh = page_buffers(page);
        do {
                if (atomic_read(&bh->b_count))
                        goto cannot_release;
                bd = bh->b_private;
                if (bd && bd->bd_ail)
                        goto cannot_release;
                gfs2_assert_warn(sdp, !buffer_pinned(bh));
                gfs2_assert_warn(sdp, !buffer_dirty(bh));
                bh = bh->b_this_page;
        } while(bh != head);
        gfs2_log_unlock(sdp);

        head = bh = page_buffers(page);
        do {
                gfs2_log_lock(sdp);
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
                        if (!list_empty(&bd->bd_le.le_list)) {
                                if (!buffer_pinned(bh))
                                        list_del_init(&bd->bd_le.le_list);
                                else
                                        bd = NULL;
                        }
                        if (bd)
                                bd->bd_bh = NULL;
                        bh->b_private = NULL;
                }
                gfs2_log_unlock(sdp);
                if (bd)
                        kmem_cache_free(gfs2_bufdata_cachep, bd);

                bh = bh->b_this_page;
        } while (bh != head);

        return try_to_free_buffers(page);
cannot_release:
        gfs2_log_unlock(sdp);
        return 0;
}

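/*
 * One set of address space operations exists for each of the three
 * data journaling modes (writeback, ordered and jdata), since the
 * writepage paths and page dirtying behaviour differ between them.
 */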
static const struct address_space_operations gfs2_writeback_aops = {
        .writepage = gfs2_writeback_writepage,
        .writepages = gfs2_writeback_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .sync_page = block_sync_page,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
        .writepage = gfs2_ordered_writepage,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .sync_page = block_sync_page,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
        .writepage = gfs2_jdata_writepage,
        .writepages = gfs2_jdata_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .sync_page = block_sync_page,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
};

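/**
 * gfs2_set_aops - Set the address space operations for an inode
 * @inode: The inode
 *
 * Selects the set of address space operations which matches the
 * inode's data journaling mode.
 */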
void gfs2_set_aops(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        if (gfs2_is_writeback(ip))
                inode->i_mapping->a_ops = &gfs2_writeback_aops;
        else if (gfs2_is_ordered(ip))
                inode->i_mapping->a_ops = &gfs2_ordered_aops;
        else if (gfs2_is_jdata(ip))
                inode->i_mapping->a_ops = &gfs2_jdata_aops;
        else
                BUG();
}