/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/capability.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>

#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>

#include "extent_map.h"

#include "buffer_head_io.h"
static int ocfs2_sync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	return sync_mapping_buffers(inode->i_mapping);
}
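/*
 * Track opens under ip_lock: an unlink processed on another node sets
 * OCFS2_INODE_DELETED, and holding the spinlock from that check
 * through the open count increment keeps a new open from racing with
 * the wipe. O_DIRECT openers also set OCFS2_INODE_OPEN_DIRECT so the
 * rest of the file code can tell the two kinds of users apart.
 */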
static int ocfs2_file_open(struct inode *inode, struct file *file)
	int mode = file->f_flags;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
		   file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name);

	spin_lock(&oi->ip_lock);

	/* Check that the inode hasn't been wiped from disk by another
	 * node. If it hasn't then we're safe as long as we hold the
	 * spin lock until our increment of open count. */
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&oi->ip_lock);

	if (mode & O_DIRECT)
		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

	oi->ip_open_count++;
	spin_unlock(&oi->ip_lock);
static int ocfs2_file_release(struct inode *inode, struct file *file)
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
		   file->f_path.dentry->d_name.len,
		   file->f_path.dentry->d_name.name);

	spin_lock(&oi->ip_lock);
	if (!--oi->ip_open_count)
		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
	spin_unlock(&oi->ip_lock);
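/*
 * fsync/fdatasync: push dirty pages and buffers for this inode, then
 * force a commit of the journal so any metadata still sitting in the
 * log reaches disk. Any failure is collapsed to -EIO for the caller.
 */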
static int ocfs2_sync_file(struct file *file,
			   struct dentry *dentry,
			   int datasync)
	int err = 0;
	journal_t *journal;
	struct inode *inode = dentry->d_inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync,
		   dentry->d_name.len, dentry->d_name.name);

	err = ocfs2_sync_inode(dentry->d_inode);

	journal = osb->journal->j_journal;
	err = journal_force_commit(journal);

	return (err < 0) ? -EIO : 0;
int ocfs2_should_update_atime(struct inode *inode,
			      struct vfsmount *vfsmnt)

	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return 0;

	if ((inode->i_flags & S_NOATIME) ||
	    ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	/*
	 * We can be called with no vfsmnt structure - NFSD will
	 * sometimes do this.
	 *
	 * Note that our action here is different from touch_atime() -
	 * if we can't tell whether this is a noatime mount, then we
	 * don't know whether to trust the value of s_atime_quantum.
	 */

	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	if (vfsmnt->mnt_flags & MNT_RELATIME) {
		if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
		    (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
			return 1;

	if (now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum)
		return 0;
	else
		return 1;
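/*
 * Example: with ocfs2's default atime_quantum of 60 seconds, a file
 * read ten times within a minute causes at most one on-disk atime
 * update; the check above only asks for an update once the stored
 * atime has fallen more than s_atime_quantum seconds behind.
 */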
int ocfs2_update_inode_atime(struct inode *inode,
			     struct buffer_head *bh)

	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);

	inode->i_atime = CURRENT_TIME;
	ret = ocfs2_mark_inode_dirty(handle, inode, bh);

	ocfs2_commit_trans(osb, handle);
static int ocfs2_set_inode_size(handle_t *handle,
				struct inode *inode,
				struct buffer_head *fe_bh,
				u64 new_i_size)

	i_size_write(inode, new_i_size);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
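/*
 * Size-only update (no allocation change): wraps ocfs2_set_inode_size()
 * in a single OCFS2_INODE_UPDATE_CREDITS transaction.
 */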
static int ocfs2_simple_size_update(struct inode *inode,
				    struct buffer_head *di_bh,
				    u64 new_i_size)

	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);

	ret = ocfs2_set_inode_size(handle, inode, di_bh,
				   new_i_size);

	ocfs2_commit_trans(osb, handle);
static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
				     struct inode *inode,
				     struct buffer_head *fe_bh,
				     u64 new_i_size)

	struct ocfs2_dinode *di;

	/* TODO: This needs to actually orphan the inode in this
	 * transaction. */

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);

	status = ocfs2_journal_access(handle, inode, fe_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);

	/*
	 * Do this before setting i_size.
	 */
	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
					       cluster_bytes);

	i_size_write(inode, new_i_size);
	inode->i_blocks = ocfs2_align_bytes_to_sectors(new_i_size);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	di = (struct ocfs2_dinode *) fe_bh->b_data;
	di->i_size = cpu_to_le64(new_i_size);
	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);

	status = ocfs2_journal_dirty(handle, fe_bh);

	ocfs2_commit_trans(osb, handle);
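/*
 * Truncate proper. The partial tail cluster is zeroed and the new size
 * is pushed to both the in-memory inode and the on-disk dinode inside
 * one transaction, so recovery always sees a consistent picture if we
 * crash partway through.
 */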
static int ocfs2_truncate_file(struct inode *inode,
			       struct buffer_head *di_bh,
			       u64 new_i_size)

	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_truncate_context *tc = NULL;

	mlog_entry("(inode = %llu, new_i_size = %llu)\n",
		   (unsigned long long)OCFS2_I(inode)->ip_blkno,
		   (unsigned long long)new_i_size);

	fe = (struct ocfs2_dinode *) di_bh->b_data;
	if (!OCFS2_IS_VALID_DINODE(fe)) {
		OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);

	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
			"Inode %llu, inode i_size = %lld != di "
			"i_size = %llu, i_flags = 0x%x\n",
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			i_size_read(inode),
			(unsigned long long)le64_to_cpu(fe->i_size),
			le32_to_cpu(fe->i_flags));

	if (new_i_size > le64_to_cpu(fe->i_size)) {
		mlog(0, "asked to truncate file with size (%llu) to larger size (%llu)!\n",
		     (unsigned long long)le64_to_cpu(fe->i_size),
		     (unsigned long long)new_i_size);

	mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
	     (unsigned long long)le64_to_cpu(fe->i_blkno),
	     (unsigned long long)le64_to_cpu(fe->i_size),
	     (unsigned long long)new_i_size);

	/* let's handle the simple truncate cases before doing any more
	 * cluster locking. */
	if (new_i_size == le64_to_cpu(fe->i_size))

	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	/* This forces other nodes to sync and drop their pages. Do
	 * this even if we have a truncate without allocation change -
	 * ocfs2 cluster sizes can be much greater than page size, so
	 * we have to truncate them anyway. */
	status = ocfs2_data_lock(inode, 1);
	if (status < 0) {
		up_write(&OCFS2_I(inode)->ip_alloc_sem);

	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(inode->i_mapping, new_i_size);

	/* alright, we're going to need to do a full blown alloc size
	 * change. Orphan the inode so that recovery can complete the
	 * truncate if necessary. */
	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_data;

	status = ocfs2_prepare_truncate(osb, inode, di_bh, &tc);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_data;

	status = ocfs2_commit_truncate(osb, inode, di_bh, tc);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_data;

	/* TODO: orphan dir cleanup here. */

	ocfs2_data_unlock(inode, 1);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);
/*
 * Extend allocation only here.
 * We'll update all the disk stuff, and oip->alloc_size.
 *
 * Expects things to be locked, a transaction to be started, and enough
 * data / metadata reservations in the passed-in contexts.
 *
 * Will return -EAGAIN, and a reason if a restart is needed.
 * If passed in, *reason will always be set, even on error.
 */
int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
			       struct inode *inode,
			       u32 *logical_offset,
			       u32 clusters_to_add,
			       int mark_unwritten,
			       struct buffer_head *fe_bh,
			       handle_t *handle,
			       struct ocfs2_alloc_context *data_ac,
			       struct ocfs2_alloc_context *meta_ac,
			       enum ocfs2_alloc_restarted *reason_ret)

	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
	enum ocfs2_alloc_restarted reason = RESTART_NONE;
	u32 bit_off, num_bits;

	BUG_ON(!clusters_to_add);

	if (mark_unwritten)
		flags = OCFS2_EXT_UNWRITTEN;

	free_extents = ocfs2_num_free_extents(osb, inode, fe);
	if (free_extents < 0) {
		status = free_extents;

	/* there are two cases which could cause us to EAGAIN in the
	 * we-need-more-metadata case:
	 * 1) we haven't reserved *any*
	 * 2) we are so fragmented, we've needed to add metadata too
	 *    many times. */
	if (!free_extents && !meta_ac) {
		mlog(0, "we haven't reserved any metadata!\n");
		status = -EAGAIN;
		reason = RESTART_META;
	} else if ((!free_extents)
		   && (ocfs2_alloc_context_bits_left(meta_ac)
		       < ocfs2_extend_meta_needed(fe))) {
		mlog(0, "filesystem is really fragmented...\n");
		status = -EAGAIN;
		reason = RESTART_META;

	status = ocfs2_claim_clusters(osb, handle, data_ac, 1,
				      &bit_off, &num_bits);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);

	BUG_ON(num_bits > clusters_to_add);

	/* reserve our write early -- insert_extent may update the inode */
	status = ocfs2_journal_access(handle, inode, fe_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);

	block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
	mlog(0, "Allocating %u clusters at block %u for inode %llu\n",
	     num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
	status = ocfs2_insert_extent(osb, handle, inode, fe_bh,
				     *logical_offset, block, num_bits,
				     flags, meta_ac);

	status = ocfs2_journal_dirty(handle, fe_bh);

	clusters_to_add -= num_bits;
	*logical_offset += num_bits;

	if (clusters_to_add) {
		mlog(0, "need to alloc once more, clusters = %u, wanted = "
		     "%u\n", fe->i_clusters, clusters_to_add);
		status = -EAGAIN;
		reason = RESTART_TRANS;

	if (reason_ret)
		*reason_ret = reason;
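/*
 * Note the restart contract above: when the transaction or metadata
 * reservation runs out mid-extend, we return -EAGAIN with *reason_ret
 * set to RESTART_TRANS or RESTART_META, and the caller (see
 * __ocfs2_extend_allocation() below) either extends the handle's
 * credits or restarts the whole operation.
 */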
/*
 * For a given allocation, determine which allocators will need to be
 * accessed, and lock them, reserving the appropriate number of bits.
 *
 * Sparse file systems call this from ocfs2_write_begin_nolock()
 * and ocfs2_allocate_unwritten_extents().
 *
 * File systems which don't support holes call this from
 * ocfs2_extend_allocation().
 */
int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_dinode *di,
			  u32 clusters_to_add, u32 extents_to_split,
			  struct ocfs2_alloc_context **data_ac,
			  struct ocfs2_alloc_context **meta_ac)

	int ret, num_free_extents;
	unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
	     "clusters_to_add = %u, extents_to_split = %u\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno, i_size_read(inode),
	     le32_to_cpu(di->i_clusters), clusters_to_add, extents_to_split);

	num_free_extents = ocfs2_num_free_extents(osb, inode, di);
	if (num_free_extents < 0) {
		ret = num_free_extents;

	/*
	 * Sparse allocation file systems need to be more conservative
	 * with reserving room for expansion - the actual allocation
	 * happens while we've got a journal handle open so re-taking
	 * a cluster lock (because we ran out of room for another
	 * extent) will violate ordering rules.
	 *
	 * Most of the time we'll only be seeing this 1 cluster at a time
	 * anyway.
	 *
	 * Always lock for any unwritten extents - we might want to
	 * add blocks during a split.
	 */
	if (!num_free_extents ||
	    (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) {
		ret = ocfs2_reserve_new_metadata(osb, di, meta_ac);

	ret = ocfs2_reserve_clusters(osb, clusters_to_add, data_ac);

		ocfs2_free_alloc_context(*meta_ac);

	/*
	 * We cannot have an error and a non-null *data_ac.
	 */
static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
				     u32 clusters_to_add, int mark_unwritten)

	int restart_func = 0;

	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *fe = NULL;
	handle_t *handle = NULL;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	enum ocfs2_alloc_restarted why;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);

	/*
	 * This function only exists for file systems which don't
	 * support holes.
	 */
	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));

	status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
				  OCFS2_BH_CACHED, inode);

	fe = (struct ocfs2_dinode *) bh->b_data;
	if (!OCFS2_IS_VALID_DINODE(fe)) {
		OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);

	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

	status = ocfs2_lock_allocators(inode, fe, clusters_to_add, 0, &data_ac,
				       &meta_ac);

	credits = ocfs2_calc_extend_credits(osb->sb, fe, clusters_to_add);
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);

restarted_transaction:
	/* reserve a write to the file entry early on - that way if we
	 * run out of credits in the allocation path, we can still
	 * update i_size. */
	status = ocfs2_journal_access(handle, inode, bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);

	prev_clusters = OCFS2_I(inode)->ip_clusters;

	status = ocfs2_do_extend_allocation(osb,
					    inode,
					    &logical_start,
					    clusters_to_add,
					    mark_unwritten,
					    bh,
					    handle,
					    data_ac,
					    meta_ac,
					    &why);
	if ((status < 0) && (status != -EAGAIN)) {
		if (status != -ENOSPC)
			mlog_errno(status);

	status = ocfs2_journal_dirty(handle, bh);

	spin_lock(&OCFS2_I(inode)->ip_lock);
	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	if (why != RESTART_NONE && clusters_to_add) {
		if (why == RESTART_META) {
			mlog(0, "restarting function.\n");
			restart_func = 1;
		} else {
			BUG_ON(why != RESTART_TRANS);

			mlog(0, "restarting transaction.\n");
			/* TODO: This can be more intelligent. */
			credits = ocfs2_calc_extend_credits(osb->sb,
							    fe,
							    clusters_to_add);
			status = ocfs2_extend_trans(handle, credits);
			if (status < 0) {
				/* handle still has to be committed at
				 * this point. */

			goto restarted_transaction;

	mlog(0, "fe: i_clusters = %u, i_size=%llu\n",
	     le32_to_cpu(fe->i_clusters),
	     (unsigned long long)le64_to_cpu(fe->i_size));
	mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
	     OCFS2_I(inode)->ip_clusters, i_size_read(inode));

	ocfs2_commit_trans(osb, handle);

	ocfs2_free_alloc_context(data_ac);

	ocfs2_free_alloc_context(meta_ac);

	if ((!status) && restart_func) {
static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
				   u32 clusters_to_add, int mark_unwritten)

	/*
	 * The alloc sem blocks people in read/write from reading our
	 * allocation until we're done changing it. We depend on
	 * i_mutex to block other extend/truncate calls while we're
	 * working on the allocation.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);
	ret = __ocfs2_extend_allocation(inode, logical_start, clusters_to_add,
					mark_unwritten);
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->prepare_write() and
 * ->commit_write(). */
static int ocfs2_write_zero_page(struct inode *inode,
				 u64 size)

	struct address_space *mapping = inode->i_mapping;

	handle_t *handle = NULL;

	offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
	/* ugh. in prepare/commit_write, if from==to==start of block, we
	 * skip the prepare. make sure we never send an offset for the start
	 * of a block. */
	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {

	index = size >> PAGE_CACHE_SHIFT;

	page = grab_cache_page(mapping, index);

	ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);

	if (ocfs2_should_order_data(inode)) {
		handle = ocfs2_start_walk_page_trans(inode, page, offset,
						     offset);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);

	/* must not update i_size! */
	ret = block_commit_write(page, offset, offset);

	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);

	page_cache_release(page);
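/*
 * Zero every block from the current i_size up to zero_to_size, one
 * page write at a time. Used after an extend so the region between
 * the old and new end of file never exposes stale data.
 */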
static int ocfs2_zero_extend(struct inode *inode,
			     u64 zero_to_size)

	struct super_block *sb = inode->i_sb;

	start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
	while (start_off < zero_to_size) {
		ret = ocfs2_write_zero_page(inode, start_off);

		start_off += sb->s_blocksize;

		/*
		 * Very large extends have the potential to lock up
		 * the cpu for extended periods of time.
		 */
		cond_resched();
/*
 * A tail_to_skip value > 0 indicates that we're being called from
 * ocfs2_file_aio_write(). This has the following implications:
 *
 * - we don't want to update i_size
 * - di_bh will be NULL, which is fine because it's only used in the
 *   case where we want to update i_size.
 * - ocfs2_zero_extend() will then only be filling the hole created
 *   between i_size and the start of the write.
 */
static int ocfs2_extend_file(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size,
			     size_t tail_to_skip)

	u32 clusters_to_add = 0;

	BUG_ON(!tail_to_skip && !di_bh);

	/* setattr sometimes calls us like this. */
	if (new_i_size == 0)

	if (i_size_read(inode) == new_i_size)

	BUG_ON(new_i_size < i_size_read(inode));

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
		BUG_ON(tail_to_skip != 0);
		goto out_update_size;
	}

	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size) -
			  OCFS2_I(inode)->ip_clusters;
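	/*
	 * Example: with 4KB clusters, growing a 5KB file (ip_clusters = 2)
	 * to 1MB gives clusters_to_add = 256 - 2 = 254 new clusters.
	 */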
	/*
	 * protect the pages that ocfs2_zero_extend is going to be
	 * pulling into the page cache. we do this before the
	 * metadata extend so that we don't get into the situation
	 * where we've extended the metadata but can't get the data
	 * lock to zero.
	 */
	ret = ocfs2_data_lock(inode, 1);

	if (clusters_to_add) {
		ret = ocfs2_extend_allocation(inode,
					      OCFS2_I(inode)->ip_clusters,
					      clusters_to_add, 0);

	/*
	 * Call this even if we don't add any clusters to the tree. We
	 * still need to zero the area between the old i_size and the
	 * new i_size.
	 */
	ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip);

out_update_size:
	if (!tail_to_skip) {
		/* We're being called from ocfs2_setattr() which wants
		 * us to update i_size */
		ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
	}

	if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		ocfs2_data_unlock(inode, 1);
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)

	int status = 0, size_change;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct buffer_head *bh = NULL;
	handle_t *handle = NULL;

	mlog_entry("(0x%p, '%.*s')\n", dentry,
		   dentry->d_name.len, dentry->d_name.name);

	if (attr->ia_valid & ATTR_MODE)
		mlog(0, "mode change: %d\n", attr->ia_mode);
	if (attr->ia_valid & ATTR_UID)
		mlog(0, "uid change: %d\n", attr->ia_uid);
	if (attr->ia_valid & ATTR_GID)
		mlog(0, "gid change: %d\n", attr->ia_gid);
	if (attr->ia_valid & ATTR_SIZE)
		mlog(0, "size change...\n");
	if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
		mlog(0, "time change...\n");

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
			   | ATTR_GID | ATTR_UID | ATTR_MODE)
	if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
		mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);

	status = inode_change_ok(inode, attr);

	size_change = S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE);

	if (size_change) {
		status = ocfs2_rw_lock(inode, 1);

	status = ocfs2_meta_lock(inode, &bh, 1);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		goto bail_unlock_rw;
	}

	if (size_change && attr->ia_size != i_size_read(inode)) {
		if (i_size_read(inode) > attr->ia_size)
			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
		else
			status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);

	/*
	 * This will intentionally not wind up calling vmtruncate(),
	 * since all the work for a size change has been done above.
	 * Otherwise, we could get into problems with truncate as
	 * ip_alloc_sem is used there to protect against i_size
	 * changes.
	 */
	status = inode_setattr(inode, attr);

	status = ocfs2_mark_inode_dirty(handle, inode, bh);

	ocfs2_commit_trans(osb, handle);

	ocfs2_meta_unlock(inode, 1);

	ocfs2_rw_unlock(inode, 1);
int ocfs2_getattr(struct vfsmount *mnt,
		  struct dentry *dentry,
		  struct kstat *stat)

	struct inode *inode = dentry->d_inode;
	struct super_block *sb = dentry->d_inode->i_sb;
	struct ocfs2_super *osb = sb->s_fs_info;

	err = ocfs2_inode_revalidate(dentry);

	generic_fillattr(inode, stat);

	/* We set the blksize from the cluster size for performance */
	stat->blksize = osb->s_clustersize;
int ocfs2_permission(struct inode *inode, int mask, struct nameidata *nd)

	ret = ocfs2_meta_lock(inode, NULL, 0);

	ret = generic_permission(inode, mask, NULL);

	ocfs2_meta_unlock(inode, 0);
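/*
 * Clear the suid/sgid bits in a transaction of our own. The write path
 * calls this directly instead of going through remove_suid() and
 * ocfs2_setattr(), because ocfs2_setattr() would try to retake cluster
 * locks the write path already holds.
 */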
static int ocfs2_write_remove_suid(struct inode *inode)

	struct buffer_head *bh = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;

	mlog_entry("(Inode %llu, mode 0%o)\n",
		   (unsigned long long)oi->ip_blkno, inode->i_mode);

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);

	ret = ocfs2_read_block(osb, oi->ip_blkno, &bh, OCFS2_BH_CACHED, inode);

	ret = ocfs2_journal_access(handle, inode, bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);

	inode->i_mode &= ~S_ISUID;
	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
		inode->i_mode &= ~S_ISGID;

	di = (struct ocfs2_dinode *) bh->b_data;
	di->i_mode = cpu_to_le16(inode->i_mode);

	ret = ocfs2_journal_dirty(handle, bh);

	ocfs2_commit_trans(osb, handle);
/*
 * Will look for holes and unwritten extents in the range starting at
 * pos for count bytes (inclusive).
 */
static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
				       size_t count)

	unsigned int extent_flags;
	u32 cpos, clusters, extent_len, phys_cpos;
	struct super_block *sb = inode->i_sb;

	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
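	/*
	 * Example: with 4KB clusters, pos = 6000 and count = 10000 give
	 * cpos = 1 and clusters = ocfs2_clusters_for_bytes(16000) - 1 = 3,
	 * so the loop below inspects clusters 1 through 3.
	 */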
	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
					 &extent_flags);

		if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
			ret = 1;
			break;
		}

		if (extent_len > clusters)
			extent_len = clusters;

		clusters -= extent_len;
		cpos += extent_len;
/*
 * Allocate enough extents to cover the region starting at byte offset
 * start for len bytes. Existing extents are skipped, any extents
 * added are marked as "unwritten".
 */
static int ocfs2_allocate_unwritten_extents(struct inode *inode,
					    u64 start, u64 len)
	u32 cpos, phys_cpos, clusters, alloc_size;

	/*
	 * We consider both start and len to be inclusive.
	 */
	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len) - cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
					 &alloc_size, NULL);

		/*
		 * Hole or existing extent len can be arbitrary, so
		 * cap it to our own allocation request.
		 */
		if (alloc_size > clusters)
			alloc_size = clusters;

		if (phys_cpos) {
			/*
			 * We already have an allocation at this
			 * region so we can safely skip it.
			 */

		ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);

		cpos += alloc_size;
		clusters -= alloc_size;
static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
					 loff_t *ppos,
					 size_t count,
					 int appending,
					 int *direct_io)

	int ret = 0, meta_level = appending;
	struct inode *inode = dentry->d_inode;

	loff_t newsize, saved_pos;

	/*
	 * We sample i_size under a read level meta lock to see if our write
	 * is extending the file, if it is we back off and get a write level
	 * lock.
	 */
	for(;;) {
		ret = ocfs2_meta_lock(inode, NULL, meta_level);

		/* Clear suid / sgid if necessary. We do this here
		 * instead of later in the write path because
		 * remove_suid() calls ->setattr without any hint that
		 * we may have already done our cluster locking. Since
		 * ocfs2_setattr() *must* take cluster locks to
		 * proceed, this will lead us to recursively lock the
		 * inode. There's also the dinode i_size state which
		 * can be lost via setattr during extending writes (we
		 * set inode->i_size at the end of a write.) */
		if (should_remove_suid(dentry)) {
			if (meta_level == 0) {
				ocfs2_meta_unlock(inode, meta_level);
				meta_level = 1;
				continue;
			}

			ret = ocfs2_write_remove_suid(inode);

		/* work on a copy of ppos until we're sure that we won't have
		 * to recalculate it due to relocking. */
		if (appending) {
			saved_pos = i_size_read(inode);
			mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
		} else {
			saved_pos = *ppos;
		}

		if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
			loff_t end = saved_pos + count;

			/*
			 * Skip the O_DIRECT checks if we don't need
			 * them.
			 */
			if (!direct_io || !(*direct_io))
				break;

			/*
			 * Allowing concurrent direct writes means
			 * i_size changes wouldn't be synchronized, so
			 * one node could wind up truncating another
			 * node's writes.
			 */
			if (end > i_size_read(inode)) {
				*direct_io = 0;
				break;
			}

			/*
			 * We don't fill holes during direct io, so
			 * check for them here. If any are found, the
			 * caller will have to retake some cluster
			 * locks and initiate the io as buffered.
			 */
			ret = ocfs2_check_range_for_holes(inode, saved_pos,
							  count);

		/*
		 * The rest of this loop is concerned with legacy file
		 * systems which don't support sparse files.
		 */

		newsize = count + saved_pos;

		mlog(0, "pos=%lld newsize=%lld cursize=%lld\n",
		     (long long) saved_pos, (long long) newsize,
		     (long long) i_size_read(inode));

		/* No need for a higher level metadata lock if we're
		 * never going past i_size. */
		if (newsize <= i_size_read(inode))
			break;

		if (meta_level == 0) {
			ocfs2_meta_unlock(inode, meta_level);
			meta_level = 1;
			continue;
		}

		spin_lock(&OCFS2_I(inode)->ip_lock);
		clusters = ocfs2_clusters_for_bytes(inode->i_sb, newsize) -
			   OCFS2_I(inode)->ip_clusters;
		spin_unlock(&OCFS2_I(inode)->ip_lock);

		mlog(0, "Writing at EOF, may need more allocation: "
		     "i_size = %lld, newsize = %lld, need %u clusters\n",
		     (long long) i_size_read(inode), (long long) newsize,
		     clusters);

		/* We only want to continue the rest of this loop if
		 * our extend will actually require more
		 * allocation. */
		if (!clusters)
			break;

		ret = ocfs2_extend_file(inode, NULL, newsize, count);

	ocfs2_meta_unlock(inode, meta_level);
static void
ocfs2_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)

	const struct iovec *iov = *iovp;
	size_t base = *basep;

	while (bytes) {
		int copy = min(bytes, iov->iov_len - base);

		bytes -= copy;
		base += copy;
		if (iov->iov_len == base) {
			iov++;
			base = 0;
		}
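	/*
	 * Example: advancing 5 bytes through iovecs of length 3 and 8
	 * consumes the first vector entirely and leaves *iovp at the
	 * second one with *basep = 2; this is how a partially completed
	 * O_DIRECT write is resumed in ocfs2_file_buffered_write().
	 */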
static struct page * ocfs2_get_write_source(char **ret_src_buf,
					    const struct iovec *cur_iov,
					    size_t iov_offset)

	char *buf = cur_iov->iov_base + iov_offset;
	struct page *src_page = NULL;

	off = (unsigned long)(buf) & ~PAGE_CACHE_MASK;

	if (!segment_eq(get_fs(), KERNEL_DS)) {
		/*
		 * Pull in the user page. We want to do this outside
		 * of the meta data locks in order to preserve locking
		 * order in case of page fault.
		 */
		ret = get_user_pages(current, current->mm,
				     (unsigned long)buf & PAGE_CACHE_MASK, 1,
				     0, 0, &src_page, NULL);
		if (ret == 1)
			*ret_src_buf = kmap(src_page) + off;
		else
			src_page = ERR_PTR(-EFAULT);

static void ocfs2_put_write_source(struct page *page)

		page_cache_release(page);
static ssize_t ocfs2_file_buffered_write(struct file *file, loff_t *ppos,
					 const struct iovec *iov,
					 unsigned long nr_segs,
					 size_t count,
					 ssize_t o_direct_written)

	ssize_t copied, total = 0;
	size_t iov_offset = 0, bytes;

	const struct iovec *cur_iov = iov;
	struct page *user_page, *page;

	/*
	 * handle partial DIO write. Adjust cur_iov if needed.
	 */
	ocfs2_set_next_iovec(&cur_iov, &iov_offset, o_direct_written);

		user_page = ocfs2_get_write_source(&buf, cur_iov, iov_offset);
		if (IS_ERR(user_page)) {
			ret = PTR_ERR(user_page);

		/* Stay within our page boundaries */
		bytes = min((PAGE_CACHE_SIZE - ((unsigned long)pos & ~PAGE_CACHE_MASK)),
			    (PAGE_CACHE_SIZE - ((unsigned long)buf & ~PAGE_CACHE_MASK)));
		/* Stay within the vector boundary */
		bytes = min_t(size_t, bytes, cur_iov->iov_len - iov_offset);
		/* Stay within count */
		bytes = min(bytes, count);

		ret = ocfs2_write_begin(file, file->f_mapping, pos, bytes, 0,
					&page, &fsdata);

		dst = kmap_atomic(page, KM_USER0);
		memcpy(dst + (pos & (PAGE_CACHE_SIZE - 1)), buf, bytes);
		kunmap_atomic(dst, KM_USER0);
		flush_dcache_page(page);
		ocfs2_put_write_source(user_page);

		copied = ocfs2_write_end(file, file->f_mapping, pos, bytes,
					 bytes, page, fsdata);

		*ppos = pos + copied;

		ocfs2_set_next_iovec(&cur_iov, &iov_offset, copied);

	return total ? total : ret;
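/*
 * Write path locking order, taken below: i_mutex, then i_alloc_sem
 * (O_DIRECT only, matching setattr's i_mutex -> i_alloc_sem -> rw_lock
 * ordering), then the cluster rw lock. A direct write that turns out
 * to need allocation falls back to the buffered path.
 */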
static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs,
				    loff_t pos)

	int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
	int can_do_direct, sync = 0;
	ssize_t written = 0;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	loff_t *ppos = &iocb->ki_pos;
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode;

	mlog_entry("(0x%p, %u, '%.*s')\n", file,
		   (unsigned int)nr_segs,
		   file->f_path.dentry->d_name.len,
		   file->f_path.dentry->d_name.name);

	if (iocb->ki_left == 0)
		return 0;

	ret = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	appending = file->f_flags & O_APPEND ? 1 : 0;
	direct_io = file->f_flags & O_DIRECT ? 1 : 0;

	mutex_lock(&inode->i_mutex);

	/* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
	if (direct_io) {
		down_read(&inode->i_alloc_sem);
		have_alloc_sem = 1;
	}

	/* concurrent O_DIRECT writes are allowed */
	rw_level = !direct_io;
	ret = ocfs2_rw_lock(inode, rw_level);

	can_do_direct = direct_io;
	ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos,
					    iocb->ki_left, appending,
					    &can_do_direct);

	/*
	 * We can't complete the direct I/O as requested, fall back to
	 * buffered I/O.
	 */
	if (direct_io && !can_do_direct) {
		ocfs2_rw_unlock(inode, rw_level);
		up_read(&inode->i_alloc_sem);

	if (!sync && ((file->f_flags & O_SYNC) || IS_SYNC(inode)))
		sync = 1;

	/*
	 * XXX: Is it ok to execute these checks a second time?
	 */
	ret = generic_write_checks(file, ppos, &count, S_ISBLK(inode->i_mode));

	/*
	 * Set pos so that sync_page_range_nolock() below understands
	 * where to start from. We might've moved it around via the
	 * calls above. The range we want to actually sync starts from
	 * 'pos' here.
	 */
	pos = *ppos;

	/* communicate with ocfs2_dio_end_io */
	ocfs2_iocb_set_rw_locked(iocb, rw_level);

	if (direct_io) {
		written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
						    ppos, count, ocount);
	} else {
		written = ocfs2_file_buffered_write(file, ppos, iov, nr_segs,
						    count, written);
		if (written < 0) {
			ret = written;
			if (ret != -EFAULT && ret != -ENOSPC)
				mlog_errno(ret);

	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));

	/*
	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
	 * function pointer which is called when o_direct io completes so that
	 * it can unlock our rw lock. (it's the clustered equivalent of
	 * i_alloc_sem; protects truncate from racing with pending ios).
	 * Unfortunately there are error cases which call end_io and others
	 * that don't. so we don't have to unlock the rw_lock if either an
	 * async dio is going to do it in the future or an end_io after an
	 * error has already done it.
	 */
	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		have_alloc_sem = 0;
		rw_level = -1;
	}

	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

	if (have_alloc_sem)
		up_read(&inode->i_alloc_sem);

	if (written > 0 && sync) {

		err = sync_page_range_nolock(inode, file->f_mapping, pos, count);

	mutex_unlock(&inode->i_mutex);

	return written ? written : ret;
static int ocfs2_splice_write_actor(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf,
				    struct splice_desc *sd)

	struct file *file = sd->u.file;
	unsigned int offset;
	struct page *page = NULL;

	ret = buf->ops->confirm(pipe, buf);

	offset = sd->pos & ~PAGE_CACHE_MASK;
	count = sd->len;
	if (count + offset > PAGE_CACHE_SIZE)
		count = PAGE_CACHE_SIZE - offset;

	ret = ocfs2_write_begin(file, file->f_mapping, sd->pos, count, 0,
				&page, &fsdata);

	src = buf->ops->map(pipe, buf, 1);
	dst = kmap_atomic(page, KM_USER1);
	memcpy(dst + offset, src + buf->offset, count);
	kunmap_atomic(dst, KM_USER1);
	buf->ops->unmap(pipe, buf, src);

	copied = ocfs2_write_end(file, file->f_mapping, sd->pos, count, count,
				 page, fsdata);

	return copied ? copied : ret;
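/*
 * Drive the actor above across the pipe's buffers. By the time we get
 * here the caller has taken the cluster rw lock and run
 * ocfs2_prepare_inode_for_write(), so all that's left is the copy plus
 * an O_SYNC-style flush when the file requires one.
 */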
static ssize_t __ocfs2_file_splice_write(struct pipe_inode_info *pipe,
					 struct file *out,
					 loff_t *ppos,
					 size_t len,
					 unsigned int flags)

	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {

	ret = __splice_from_pipe(pipe, &sd, ocfs2_splice_write_actor);

	if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
		err = generic_osync_inode(inode, mapping,
					  OSYNC_METADATA|OSYNC_DATA);

static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
				       struct file *out,
				       loff_t *ppos,
				       size_t len,
				       unsigned int flags)

	struct inode *inode = out->f_path.dentry->d_inode;

	mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
		   (unsigned int)len,
		   out->f_path.dentry->d_name.len,
		   out->f_path.dentry->d_name.name);

	inode_double_lock(inode, pipe->inode);

	ret = ocfs2_rw_lock(inode, 1);

	ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, ppos, len, 0,
					    NULL);

	/* ok, we're done with i_size and alloc work */
	ret = __ocfs2_file_splice_write(pipe, out, ppos, len, flags);

	ocfs2_rw_unlock(inode, 1);

	inode_double_unlock(inode, pipe->inode);
static ssize_t ocfs2_file_splice_read(struct file *in,
				      loff_t *ppos,
				      struct pipe_inode_info *pipe,
				      size_t len,
				      unsigned int flags)

	struct inode *inode = in->f_path.dentry->d_inode;

	mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
		   (unsigned int)len,
		   in->f_path.dentry->d_name.len,
		   in->f_path.dentry->d_name.name);

	/*
	 * See the comment in ocfs2_file_aio_read()
	 */
	ret = ocfs2_meta_lock(inode, NULL, 0);

	ocfs2_meta_unlock(inode, 0);

	ret = generic_file_splice_read(in, ppos, pipe, len, flags);
static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
				   const struct iovec *iov,
				   unsigned long nr_segs,
				   loff_t pos)

	int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;

	mlog_entry("(0x%p, %u, '%.*s')\n", filp,
		   (unsigned int)nr_segs,
		   filp->f_path.dentry->d_name.len,
		   filp->f_path.dentry->d_name.name);

	/*
	 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
	 * need locks to protect pending reads from racing with truncate.
	 */
	if (filp->f_flags & O_DIRECT) {
		down_read(&inode->i_alloc_sem);
		have_alloc_sem = 1;

		ret = ocfs2_rw_lock(inode, 0);

		rw_level = 0;
		/* communicate with ocfs2_dio_end_io */
		ocfs2_iocb_set_rw_locked(iocb, rw_level);
	}

	/*
	 * We're fine letting folks race truncates and extending
	 * writes with read across the cluster, just like they can
	 * locally. Hence no rw_lock during read.
	 *
	 * Take and drop the meta data lock to update inode fields
	 * like i_size. This gives the checks down in
	 * generic_file_aio_read() a chance of actually working.
	 */
	ret = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level);

	ocfs2_meta_unlock(inode, lock_level);

	ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
	if (ret == -EINVAL)
		mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");

	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));

	/* see ocfs2_file_aio_write */
	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		have_alloc_sem = 0;
		rw_level = -1;
	}

	if (have_alloc_sem)
		up_read(&inode->i_alloc_sem);

	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);
const struct inode_operations ocfs2_file_iops = {
	.setattr = ocfs2_setattr,
	.getattr = ocfs2_getattr,
	.permission = ocfs2_permission,
};

const struct inode_operations ocfs2_special_file_iops = {
	.setattr = ocfs2_setattr,
	.getattr = ocfs2_getattr,
	.permission = ocfs2_permission,
};

const struct file_operations ocfs2_fops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.mmap = ocfs2_mmap,
	.fsync = ocfs2_sync_file,
	.release = ocfs2_file_release,
	.open = ocfs2_file_open,
	.aio_read = ocfs2_file_aio_read,
	.aio_write = ocfs2_file_aio_write,
	.ioctl = ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ocfs2_compat_ioctl,
#endif
	.splice_read = ocfs2_file_splice_read,
	.splice_write = ocfs2_file_splice_write,
};

const struct file_operations ocfs2_dops = {
	.read = generic_read_dir,
	.readdir = ocfs2_readdir,
	.fsync = ocfs2_sync_file,
	.ioctl = ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ocfs2_compat_ioctl,
#endif
};