/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/pagemap.h>
16 #include <linux/mpage.h>
17 #include <asm/semaphore.h>
26 #include "ops_address.h"
32 * gfs2_get_block - Fills in a buffer head with details about a block
34 * @lblock: The block number to look up
35 * @bh_result: The buffer head to return the result in
36 * @create: Non-zero if we may add block to the file
41 int gfs2_get_block(struct inode *inode, sector_t lblock,
42 struct buffer_head *bh_result, int create)
44 struct gfs2_inode *ip = get_v2ip(inode);
49 error = gfs2_block_map(ip, lblock, &new, &dblock, NULL);
56 map_bh(bh_result, inode->i_sb, dblock);
58 set_buffer_new(bh_result);
64 * get_block_noalloc - Fills in a buffer head with details about a block
66 * @lblock: The block number to look up
67 * @bh_result: The buffer head to return the result in
68 * @create: Non-zero if we may add block to the file
73 static int get_block_noalloc(struct inode *inode, sector_t lblock,
74 struct buffer_head *bh_result, int create)
76 struct gfs2_inode *ip = get_v2ip(inode);
81 error = gfs2_block_map(ip, lblock, &new, &dblock, NULL);
86 map_bh(bh_result, inode->i_sb, dblock);
87 else if (gfs2_assert_withdraw(ip->i_sbd, !create))
93 static int get_blocks(struct inode *inode, sector_t lblock,
94 unsigned long max_blocks, struct buffer_head *bh_result,
97 struct gfs2_inode *ip = get_v2ip(inode);
103 error = gfs2_block_map(ip, lblock, &new, &dblock, &extlen);
110 map_bh(bh_result, inode->i_sb, dblock);
112 set_buffer_new(bh_result);
114 if (extlen > max_blocks)
116 bh_result->b_size = extlen << inode->i_blkbits;
121 static int get_blocks_noalloc(struct inode *inode, sector_t lblock,
122 unsigned long max_blocks,
123 struct buffer_head *bh_result, int create)
125 struct gfs2_inode *ip = get_v2ip(inode);
131 error = gfs2_block_map(ip, lblock, &new, &dblock, &extlen);
136 map_bh(bh_result, inode->i_sb, dblock);
137 if (extlen > max_blocks)
139 bh_result->b_size = extlen << inode->i_blkbits;
140 } else if (gfs2_assert_withdraw(ip->i_sbd, !create))
147 * gfs2_writepage - Write complete page
148 * @page: Page to write
152 * Use Linux VFS block_write_full_page() to write one page,
153 * using GFS2's get_block_noalloc to find which blocks to write.
156 static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
158 struct gfs2_inode *ip = get_v2ip(page->mapping->host);
159 struct gfs2_sbd *sdp = ip->i_sbd;
162 atomic_inc(&sdp->sd_ops_address);
164 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
168 if (get_transaction) {
169 redirty_page_for_writepage(wbc, page);
174 error = block_write_full_page(page, get_block_noalloc, wbc);
176 gfs2_meta_cache_flush(ip);
182 * stuffed_readpage - Fill in a Linux page with stuffed file data
189 static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
191 struct buffer_head *dibh;
195 error = gfs2_meta_inode_buffer(ip, &dibh);
200 memcpy((char *)kaddr,
201 dibh->b_data + sizeof(struct gfs2_dinode),
203 memset((char *)kaddr + ip->i_di.di_size,
205 PAGE_CACHE_SIZE - ip->i_di.di_size);
210 SetPageUptodate(page);
215 static int zero_readpage(struct page *page)
220 memset(kaddr, 0, PAGE_CACHE_SIZE);
223 SetPageUptodate(page);
230 * jdata_readpage - readpage that goes through gfs2_jdata_read_mem()
232 * @page: The page to read
237 static int jdata_readpage(struct gfs2_inode *ip, struct page *page)
244 ret = gfs2_jdata_read_mem(ip, kaddr,
245 (uint64_t)page->index << PAGE_CACHE_SHIFT,
248 if (ret < PAGE_CACHE_SIZE)
249 memset(kaddr + ret, 0, PAGE_CACHE_SIZE - ret);
250 SetPageUptodate(page);
262 * gfs2_readpage - readpage with locking
263 * @file: The file to read a page for
264 * @page: The page to read
269 static int gfs2_readpage(struct file *file, struct page *page)
271 struct gfs2_inode *ip = get_v2ip(page->mapping->host);
272 struct gfs2_sbd *sdp = ip->i_sbd;
275 atomic_inc(&sdp->sd_ops_address);
277 if (gfs2_assert_warn(sdp, gfs2_glock_is_locked_by_me(ip->i_gl))) {
282 if (!gfs2_is_jdata(ip)) {
283 if (gfs2_is_stuffed(ip)) {
285 error = stuffed_readpage(ip, page);
288 error = zero_readpage(page);
290 error = mpage_readpage(page, gfs2_get_block);
292 error = jdata_readpage(ip, page);
294 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
301 * gfs2_prepare_write - Prepare to write a page to a file
302 * @file: The file to write to
303 * @page: The page which is to be prepared for writing
304 * @from: From (byte range within page)
305 * @to: To (byte range within page)
310 static int gfs2_prepare_write(struct file *file, struct page *page,
311 unsigned from, unsigned to)
313 struct gfs2_inode *ip = get_v2ip(page->mapping->host);
314 struct gfs2_sbd *sdp = ip->i_sbd;
317 atomic_inc(&sdp->sd_ops_address);
319 if (gfs2_assert_warn(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
322 if (gfs2_is_stuffed(ip)) {
324 file_size = ((uint64_t)page->index << PAGE_CACHE_SHIFT) + to;
326 if (file_size > sdp->sd_sb.sb_bsize -
327 sizeof(struct gfs2_dinode)) {
328 error = gfs2_unstuff_dinode(ip, gfs2_unstuffer_page,
331 error = block_prepare_write(page, from, to,
333 } else if (!PageUptodate(page))
334 error = stuffed_readpage(ip, page);
336 error = block_prepare_write(page, from, to, gfs2_get_block);
342 * gfs2_commit_write - Commit write to a file
343 * @file: The file to write to
344 * @page: The page containing the data
345 * @from: From (byte range within page)
346 * @to: To (byte range within page)
351 static int gfs2_commit_write(struct file *file, struct page *page,
352 unsigned from, unsigned to)
354 struct inode *inode = page->mapping->host;
355 struct gfs2_inode *ip = get_v2ip(inode);
356 struct gfs2_sbd *sdp = ip->i_sbd;
359 atomic_inc(&sdp->sd_ops_address);
361 if (gfs2_is_stuffed(ip)) {
362 struct buffer_head *dibh;
366 file_size = ((uint64_t)page->index << PAGE_CACHE_SHIFT) + to;
368 error = gfs2_meta_inode_buffer(ip, &dibh);
372 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
375 memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
376 (char *)kaddr + from,
382 SetPageUptodate(page);
384 if (inode->i_size < file_size)
385 i_size_write(inode, file_size);
387 if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED)
388 gfs2_page_add_databufs(ip, page, from, to);
389 error = generic_commit_write(file, page, from, to);
397 ClearPageUptodate(page);
403 * gfs2_bmap - Block map function
404 * @mapping: Address space info
405 * @lblock: The block to map
407 * Returns: The disk address for the block or 0 on hole or error
410 static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
412 struct gfs2_inode *ip = get_v2ip(mapping->host);
413 struct gfs2_holder i_gh;
417 atomic_inc(&ip->i_sbd->sd_ops_address);
419 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
423 if (!gfs2_is_stuffed(ip))
424 dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);
426 gfs2_glock_dq_uninit(&i_gh);
431 static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
433 struct gfs2_bufdata *bd;
440 gfs2_log_unlock(sdp);
443 gfs2_log_unlock(sdp);
446 clear_buffer_dirty(bh);
448 clear_buffer_mapped(bh);
449 clear_buffer_req(bh);
450 clear_buffer_new(bh);
451 clear_buffer_delay(bh);
455 static int gfs2_invalidatepage(struct page *page, unsigned long offset)
457 struct gfs2_sbd *sdp = get_v2sdp(page->mapping->host->i_sb);
458 struct buffer_head *head, *bh, *next;
459 unsigned int curr_off = 0;
462 BUG_ON(!PageLocked(page));
463 if (!page_has_buffers(page))
466 bh = head = page_buffers(page);
468 unsigned int next_off = curr_off + bh->b_size;
469 next = bh->b_this_page;
471 if (offset <= curr_off)
472 discard_buffer(sdp, bh);
476 } while (bh != head);
479 ret = try_to_release_page(page, 0);
484 static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
485 loff_t offset, unsigned long nr_segs)
487 struct file *file = iocb->ki_filp;
488 struct inode *inode = file->f_mapping->host;
489 struct gfs2_inode *ip = get_v2ip(inode);
490 struct gfs2_sbd *sdp = ip->i_sbd;
491 get_blocks_t *gb = get_blocks;
493 atomic_inc(&sdp->sd_ops_address);
495 if (gfs2_assert_warn(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)) ||
496 gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip)))
499 if (rw == WRITE && !get_transaction)
500 gb = get_blocks_noalloc;
502 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
503 offset, nr_segs, gb, NULL);
506 struct address_space_operations gfs2_file_aops = {
507 .writepage = gfs2_writepage,
508 .readpage = gfs2_readpage,
509 .sync_page = block_sync_page,
510 .prepare_write = gfs2_prepare_write,
511 .commit_write = gfs2_commit_write,
513 .invalidatepage = gfs2_invalidatepage,
514 .direct_IO = gfs2_direct_IO,