2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/posix_acl.h>
16 #include <linux/sort.h>
17 #include <linux/gfs2_ondisk.h>
18 #include <linux/crc32.h>
19 #include <linux/lm_interface.h>
20 #include <linux/security.h>
33 #include "ops_address.h"
35 #include "ops_inode.h"
42 * gfs2_inode_attr_in - Copy attributes from the dinode into the VFS inode
43 * @ip: The GFS2 inode (with embedded disk inode data)
44 * @inode: The Linux VFS inode
48 void gfs2_inode_attr_in(struct gfs2_inode *ip)
50 struct inode *inode = &ip->i_inode;
51 struct gfs2_dinode_host *di = &ip->i_di;
/* Mirror the on-disk inode number and size into the VFS inode. */
53 inode->i_ino = ip->i_num.no_addr;
54 i_size_write(inode, di->di_size);
/* di_blocks counts filesystem blocks; shift converts them to the
 * 512-byte "basic" blocks that i_blocks is expressed in.
 * NOTE(review): intermediate lines are elided in this excerpt. */
55 inode->i_blocks = di->di_blocks <<
56 (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
/*
 * iget_test - match callback for iget5_locked()/ilookup5()
 * @inode: candidate inode from the hash chain
 * @opaque: the struct gfs2_inum_host being looked up
 *
 * Matches on the disk block address of the dinode (no_addr).
 * Return statements are elided in this excerpt.
 */
59 static int iget_test(struct inode *inode, void *opaque)
61 struct gfs2_inode *ip = GFS2_I(inode);
62 struct gfs2_inum_host *inum = opaque;
64 if (ip && ip->i_num.no_addr == inum->no_addr)
/*
 * iget_set - init callback for iget5_locked() on a freshly allocated inode
 * @inode: the new VFS inode
 * @opaque: the struct gfs2_inum_host to stamp into it
 *
 * Body is elided in this excerpt; only the local setup is visible.
 */
70 static int iget_set(struct inode *inode, void *opaque)
72 struct gfs2_inode *ip = GFS2_I(inode);
73 struct gfs2_inum_host *inum = opaque;
/*
 * gfs2_ilookup - find an already-cached VFS inode for a GFS2 inum
 * @sb: the super block
 * @inum: the inode number pair (formal ino used as the hash key)
 *
 * Returns the inode if cached, otherwise NULL; never allocates.
 * The iget_test argument line is elided in this excerpt.
 */
79 struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum)
81 return ilookup5(sb, (unsigned long)inum->no_formal_ino,
/*
 * gfs2_iget - get (or allocate) the VFS inode for a GFS2 inum
 * @sb: the super block
 * @inum: the inode number pair; no_formal_ino is the hash key
 *
 * A newly allocated inode comes back locked with I_NEW set; the caller
 * must fill it in and call unlock_new_inode().
 */
85 static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum_host *inum)
87 return iget5_locked(sb, (unsigned long)inum->no_formal_ino,
88 iget_test, iget_set, inum);
92 * gfs2_inode_lookup - Lookup an inode
93 * @sb: The super block
94 * @inum: The inode number
95 * @type: The type of the inode (a DT_* directory-entry type)
97 * Returns: A VFS inode, or an error
/* NOTE(review): many lines (braces, error checks, labels) are elided in
 * this excerpt; only the visible statements are documented. */
100 struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum_host *inum, unsigned int type)
102 struct inode *inode = gfs2_iget(sb, inum);
103 struct gfs2_inode *ip = GFS2_I(inode);
104 struct gfs2_glock *io_gl;
/* Presumably reached when gfs2_iget() fails — TODO confirm from full source. */
108 return ERR_PTR(-ENOBUFS);
/* Freshly allocated inode: initialize it before unlocking I_NEW. */
110 if (inode->i_state & I_NEW) {
111 struct gfs2_sbd *sdp = GFS2_SB(inode);
112 umode_t mode = DT2IF(type);
113 inode->i_private = ip;
114 inode->i_mode = mode;
/* Select inode/file/address-space operations by file type. */
117 inode->i_op = &gfs2_file_iops;
118 inode->i_fop = &gfs2_file_fops;
119 inode->i_mapping->a_ops = &gfs2_file_aops;
120 } else if (S_ISDIR(mode)) {
121 inode->i_op = &gfs2_dir_iops;
122 inode->i_fop = &gfs2_dir_fops;
123 } else if (S_ISLNK(mode)) {
124 inode->i_op = &gfs2_symlink_iops;
/* Fallback: special files (device nodes, fifos, sockets). */
126 inode->i_op = &gfs2_dev_iops;
/* Create the main inode glock and point it back at this inode. */
129 error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
132 ip->i_gl->gl_object = ip;
/* The iopen glock tracks whether any node still has the inode open. */
134 error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
/* Contents not read yet; gfs2_inode_refresh() clears this later. */
138 set_bit(GIF_INVALID, &ip->i_flags);
139 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
/* Holder keeps its reference; drop our temporary one and publish. */
143 gfs2_glock_put(io_gl);
144 unlock_new_inode(inode);
/* Error unwinding (labels elided): release glocks taken above. */
149 gfs2_glock_put(io_gl);
151 ip->i_gl->gl_object = NULL;
152 gfs2_glock_put(ip->i_gl);
155 return ERR_PTR(error);
/*
 * gfs2_dinode_in - decode an on-disk (big-endian) dinode into the in-core inode
 * @ip: the GFS2 inode to fill in
 * @buf: buffer holding the raw struct gfs2_dinode
 *
 * Sanity-checks that the dinode really belongs to this inode, then
 * byte-swaps each field into ip->i_inode / ip->i_di.
 * NOTE(review): some lines (returns, case labels) are elided in this excerpt.
 */
158 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
160 struct gfs2_dinode_host *di = &ip->i_di;
161 const struct gfs2_dinode *str = buf;
/* Block address mismatch means on-disk corruption. */
163 if (ip->i_num.no_addr != be64_to_cpu(str->di_num.no_addr)) {
164 if (gfs2_consist_inode(ip))
165 gfs2_dinode_print(ip);
168 if (ip->i_num.no_formal_ino != be64_to_cpu(str->di_num.no_formal_ino))
171 ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
172 ip->i_inode.i_rdev = 0;
/* Device numbers are only meaningful for char/block special files. */
173 switch (ip->i_inode.i_mode & S_IFMT) {
176 ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
177 be32_to_cpu(str->di_minor));
181 ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
182 ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
184 * We will need to review setting the nlink count here in the
185 * light of the forthcoming ro bind mount work. This is a reminder
188 ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
189 di->di_size = be64_to_cpu(str->di_size);
190 di->di_blocks = be64_to_cpu(str->di_blocks);
/* GFS2 stores only second-resolution timestamps; zero the nanoseconds. */
191 ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
192 ip->i_inode.i_atime.tv_nsec = 0;
193 ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
194 ip->i_inode.i_mtime.tv_nsec = 0;
195 ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
196 ip->i_inode.i_ctime.tv_nsec = 0;
/* Allocation goals, generation and flags live in the host dinode copy. */
198 di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
199 di->di_goal_data = be64_to_cpu(str->di_goal_data);
200 di->di_generation = be64_to_cpu(str->di_generation);
202 di->di_flags = be32_to_cpu(str->di_flags);
203 gfs2_set_inode_flags(&ip->i_inode);
204 di->di_height = be16_to_cpu(str->di_height);
/* Directory-specific fields: hash-tree depth and entry count. */
206 di->di_depth = be16_to_cpu(str->di_depth);
207 di->di_entries = be32_to_cpu(str->di_entries);
209 di->di_eattr = be64_to_cpu(str->di_eattr);
214 * gfs2_inode_refresh - Refresh the incore copy of the dinode
215 * @ip: The GFS2 inode
/* Reads the dinode block, verifies its metadata type, decodes it, and
 * clears GIF_INVALID so the in-core copy is trusted again.
 * NOTE(review): error-return lines are elided in this excerpt. */
220 int gfs2_inode_refresh(struct gfs2_inode *ip)
222 struct buffer_head *dibh;
225 error = gfs2_meta_inode_buffer(ip, &dibh);
/* Reject blocks that are not marked as dinodes on disk. */
229 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
234 error = gfs2_dinode_in(ip, dibh->b_data);
236 clear_bit(GIF_INVALID, &ip->i_flags);
/*
 * gfs2_dinode_dealloc - free the on-disk dinode block itself
 * @ip: the inode to deallocate (all data/metadata must already be freed)
 *
 * Holds quota, the rindex, and the owning resource group exclusively,
 * then frees the dinode inside a transaction.
 * NOTE(review): error checks and unwind labels are elided in this excerpt.
 */
241 int gfs2_dinode_dealloc(struct gfs2_inode *ip)
243 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
244 struct gfs2_alloc *al;
245 struct gfs2_rgrpd *rgd;
/* Only the dinode block itself may remain at this point. */
248 if (ip->i_di.di_blocks != 1) {
249 if (gfs2_consist_inode(ip))
250 gfs2_dinode_print(ip);
254 al = gfs2_alloc_get(ip);
256 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
/* Lock the resource-group index to map the block to its rgrp. */
260 error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
264 rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
266 gfs2_consist_inode(ip);
268 goto out_rindex_relse;
/* Exclusive lock on the rgrp that owns the dinode block. */
271 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
274 goto out_rindex_relse;
276 error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
280 gfs2_trans_add_gl(ip->i_gl);
/* Actually return the dinode block to the resource group. */
282 gfs2_free_di(rgd, ip);
285 clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);
/* Unwind in reverse order of acquisition. */
288 gfs2_glock_dq_uninit(&al->al_rgd_gh);
290 gfs2_glock_dq_uninit(&al->al_ri_gh);
292 gfs2_quota_unhold(ip);
299 * gfs2_change_nlink - Change nlink count on inode
300 * @ip: The GFS2 inode
301 * @diff: The change in the nlink count required (+1 or -1 only)
/* Updates the link count in both the VFS inode and the on-disk dinode;
 * when the count reaches zero the inode is marked unlinked on disk.
 * NOTE(review): error checks are elided in this excerpt. */
306 int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
308 struct gfs2_sbd *sdp = ip->i_inode.i_sb->s_fs_info;
309 struct buffer_head *dibh;
313 BUG_ON(diff != 1 && diff != -1);
314 nlink = ip->i_inode.i_nlink + diff;
316 /* If we are reducing the nlink count, but the new value ends up being
317 bigger than the old one, we must have underflowed. */
318 if (diff < 0 && nlink > ip->i_inode.i_nlink) {
319 if (gfs2_consist_inode(ip))
320 gfs2_dinode_print(ip);
324 error = gfs2_meta_inode_buffer(ip, &dibh);
/* Apply the change through the VFS helpers (branch lines elided). */
329 inc_nlink(&ip->i_inode);
331 drop_nlink(&ip->i_inode);
333 ip->i_inode.i_ctime.tv_sec = get_seconds();
/* Journal the dinode buffer and write the updated fields back. */
335 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
336 gfs2_dinode_out(ip, dibh->b_data);
338 mark_inode_dirty(&ip->i_inode);
/* Last link gone: flag the dinode as unlinked under the rgrp lock
 * so another node's recovery can later deallocate it. */
340 if (ip->i_inode.i_nlink == 0) {
341 struct gfs2_rgrpd *rgd;
342 struct gfs2_holder ri_gh, rg_gh;
344 error = gfs2_rindex_hold(sdp, &ri_gh);
348 rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
351 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
355 gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
356 gfs2_glock_dq_uninit(&rg_gh);
358 gfs2_glock_dq_uninit(&ri_gh);
/*
 * gfs2_lookup_simple - look up a name in a directory, ignoring permissions
 * @dip: the directory inode to search
 * @name: NUL-terminated entry name
 *
 * Convenience wrapper: builds the qstr and calls gfs2_lookupi() with
 * is_root == 1 (no permission check) and no nameidata.
 */
364 struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
367 gfs2_str2qstr(&qstr, name);
368 return gfs2_lookupi(dip, &qstr, 1, NULL);
373 * gfs2_lookupi - Look up a filename in a directory and return its inode
374 * @dir: The directory inode to search
375 * @name: The name of the inode to look for
376 * @is_root: If 1, ignore the caller's permissions
377 * @nd: lookup nameidata (may be NULL)
379 * There will always be a vnode (Linux VFS inode) for the d_gh inode unless
/* NOTE(review): several lines (". "/".." handling results, error checks)
 * are elided in this excerpt. */
385 struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
386 int is_root, struct nameidata *nd)
388 struct super_block *sb = dir->i_sb;
389 struct gfs2_inode *dip = GFS2_I(dir);
390 struct gfs2_holder d_gh;
391 struct gfs2_inum_host inum;
394 struct inode *inode = NULL;
396 if (!name->len || name->len > GFS2_FNAMESIZE)
397 return ERR_PTR(-ENAMETOOLONG);
/* "." always, and ".." at the filesystem root, resolve trivially. */
399 if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
400 (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
401 dir == sb->s_root->d_inode)) {
/* Shared glock on the directory for the duration of the search. */
406 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
408 return ERR_PTR(error);
/* Permission check is skipped when is_root (branch line elided). */
411 error = permission(dir, MAY_EXEC, NULL);
416 error = gfs2_dir_search(dir, name, &inum, &type);
420 inode = gfs2_inode_lookup(sb, &inum, type);
423 gfs2_glock_dq_uninit(&d_gh);
424 if (error == -ENOENT)
/*
 * pick_formal_ino_1 - take a formal inode number from the local ir_inode range
 * @sdp: the filesystem
 * @formal_ino: filled with the allocated formal inode number
 *
 * Fast path: consumes one number from this node's private inum range,
 * journalling the updated range. Falls back (in the caller) to
 * pick_formal_ino_2() when the local range is exhausted.
 * NOTE(review): some lines (range-empty check, trans_end) are elided.
 */
429 static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
431 struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
432 struct buffer_head *bh;
433 struct gfs2_inum_range_host ir;
436 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
/* sd_inum_mutex serializes local consumers of the inum range. */
439 mutex_lock(&sdp->sd_inum_mutex);
441 error = gfs2_meta_inode_buffer(ip, &bh);
443 mutex_unlock(&sdp->sd_inum_mutex);
/* Decode the range that lives just past the dinode header. */
448 gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));
451 *formal_ino = ir.ir_start++;
/* Write the shrunken range back under the journal. */
453 gfs2_trans_add_bh(ip->i_gl, bh, 1);
454 gfs2_inum_range_out(&ir,
455 bh->b_data + sizeof(struct gfs2_dinode));
457 mutex_unlock(&sdp->sd_inum_mutex);
464 mutex_unlock(&sdp->sd_inum_mutex);
/*
 * pick_formal_ino_2 - refill the local inum range from the cluster-wide pool
 * @sdp: the filesystem
 * @formal_ino: filled with the allocated formal inode number
 *
 * Slow path: takes the shared inum inode's glock exclusively, carves a
 * new GFS2_INUM_QUANTUM-sized range out of the global counter, then
 * allocates one number from it as pick_formal_ino_1() does.
 * NOTE(review): error checks and unwind labels are elided in this excerpt.
 */
470 static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
472 struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
473 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
474 struct gfs2_holder gh;
475 struct buffer_head *bh;
476 struct gfs2_inum_range_host ir;
/* Exclusive cluster lock on the master inum inode. */
479 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
483 error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
486 mutex_lock(&sdp->sd_inum_mutex);
488 error = gfs2_meta_inode_buffer(ip, &bh);
492 gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));
/* Local range empty: grab a fresh quantum from the global counter. */
495 struct buffer_head *m_bh;
499 error = gfs2_meta_inode_buffer(m_ip, &m_bh);
/* The global counter is a raw big-endian u64 after the dinode header. */
503 z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
504 x = y = be64_to_cpu(z);
506 ir.ir_length = GFS2_INUM_QUANTUM;
507 x += GFS2_INUM_QUANTUM;
/* x < y would mean the 64-bit counter wrapped — fs corruption. */
509 gfs2_consist_inode(m_ip);
511 gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
512 *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;
517 *formal_ino = ir.ir_start++;
/* Journal the updated local range. */
520 gfs2_trans_add_bh(ip->i_gl, bh, 1);
521 gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));
526 mutex_unlock(&sdp->sd_inum_mutex);
529 gfs2_glock_dq_uninit(&gh);
/*
 * pick_formal_ino - allocate a formal inode number
 * @sdp: the filesystem
 * @inum: filled with the allocated number
 *
 * Tries the lock-free local range first, then falls back to the
 * cluster-wide allocator (intervening check elided in this excerpt).
 */
533 static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
537 error = pick_formal_ino_1(sdp, inum);
541 error = pick_formal_ino_2(sdp, inum);
547 * create_ok - OK to create a new on-disk inode here?
548 * @dip: Directory in which dinode is to be created
549 * @name: Name of new dinode
/* Checks write permission, that the directory is still linked, that the
 * name does not already exist, and that counters will not overflow.
 * NOTE(review): return lines are elided in this excerpt. */
555 static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
560 error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
564 /* Don't create entries in an unlinked directory */
565 if (!dip->i_inode.i_nlink)
568 error = gfs2_dir_search(&dip->i_inode, name, NULL, NULL);
/* Guard against overflowing the 32-bit entry / nlink counters. */
579 if (dip->i_di.di_entries == (u32)-1)
581 if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
/*
 * munge_mode_uid_gid - choose mode/uid/gid for a new inode
 * @dip: the parent directory
 * @mode: in/out: requested mode, possibly adjusted
 * @uid: out: owner uid for the new inode
 * @gid: out: group gid for the new inode
 *
 * Implements the "suiddir" mount option (inherit the directory owner's
 * uid) and setgid-directory group inheritance.
 * NOTE(review): some branch lines are elided in this excerpt.
 */
587 static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
588 unsigned int *uid, unsigned int *gid)
/* suiddir: setuid directory owned by non-root inherits its uid. */
590 if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
591 (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
594 else if (dip->i_inode.i_uid != current->fsuid)
596 *uid = dip->i_inode.i_uid;
598 *uid = current->fsuid;
/* setgid directory: new inode takes the directory's group. */
600 if (dip->i_inode.i_mode & S_ISGID) {
603 *gid = dip->i_inode.i_gid;
605 *gid = current->fsgid;
/*
 * alloc_dinode - reserve and allocate the disk block for a new dinode
 * @dip: the directory the inode is created in (supplies the allocation context)
 * @inum: out: no_addr receives the new dinode's block address
 * @generation: out (presumably) — written by gfs2_alloc_di; confirm in full source
 *
 * NOTE(review): error checks, trans_end and unwind are elided in this excerpt.
 */
608 static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_inum_host *inum,
611 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
/* Only one block needed: the dinode itself. */
616 dip->i_alloc.al_requested = RES_DINODE;
617 error = gfs2_inplace_reserve(dip);
621 error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
625 inum->no_addr = gfs2_alloc_di(dip, generation);
630 gfs2_inplace_release(dip);
637 * init_dinode - Fill in a new dinode structure
638 * @dip: the directory this inode is being created in
639 * @gl: The glock covering the new inode
640 * @inum: the inode number
641 * @mode: the file permissions
/* Also takes @uid, @gid, @generation and @dev (see signature). Writes a
 * complete on-disk dinode into a freshly journalled metadata buffer.
 * NOTE(review): a few field assignments are elided in this excerpt. */
647 static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
648 const struct gfs2_inum_host *inum, unsigned int mode,
649 unsigned int uid, unsigned int gid,
650 const u64 *generation, dev_t dev)
652 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
653 struct gfs2_dinode *di;
654 struct buffer_head *dibh;
/* Get a new buffer for the dinode block and journal it. */
656 dibh = gfs2_meta_new(gl, inum->no_addr);
657 gfs2_trans_add_bh(gl, dibh, 1);
658 gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
659 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
660 di = (struct gfs2_dinode *)dibh->b_data;
/* All on-disk fields are big-endian. */
662 di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
663 di->di_num.no_addr = cpu_to_be64(inum->no_addr);
664 di->di_mode = cpu_to_be32(mode);
665 di->di_uid = cpu_to_be32(uid);
666 di->di_gid = cpu_to_be32(gid);
/* The new inode occupies exactly its own dinode block. */
669 di->di_blocks = cpu_to_be64(1);
670 di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(get_seconds());
671 di->di_major = cpu_to_be32(MAJOR(dev));
672 di->di_minor = cpu_to_be32(MINOR(dev));
/* Start allocation goals at the inode's own block. */
673 di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
674 di->di_generation = cpu_to_be64(*generation);
/* Regular files inherit jdata/directio flags from the parent or the
 * filesystem tunables; directories propagate the inherit bits. */
678 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
679 gfs2_tune_get(sdp, gt_new_files_jdata))
680 di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
681 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
682 gfs2_tune_get(sdp, gt_new_files_directio))
683 di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
684 } else if (S_ISDIR(mode)) {
685 di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
686 GFS2_DIF_INHERIT_DIRECTIO);
687 di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
688 GFS2_DIF_INHERIT_JDATA);
692 di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
/* Zero the reserved/padding areas so the block is fully initialized. */
698 memset(&di->__pad4, 0, sizeof(di->__pad4));
700 memset(&di->di_reserved, 0, sizeof(di->di_reserved));
/*
 * make_dinode - create the on-disk dinode for a new inode
 * @dip: the parent directory
 * @gl: glock covering the new inode
 * @mode: requested file mode (adjusted by munge_mode_uid_gid)
 * @inum: the allocated inode numbers
 * @generation: the dinode generation number
 * @dev: device number for special files
 *
 * Chooses ownership, checks/charges quota, and writes the dinode
 * inside a transaction.
 * NOTE(review): error checks, trans_end and unwind are elided.
 */
705 static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
706 unsigned int mode, const struct gfs2_inum_host *inum,
707 const u64 *generation, dev_t dev)
709 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
710 unsigned int uid, gid;
713 munge_mode_uid_gid(dip, &mode, &uid, &gid);
/* Lock and verify quota for the chosen owner before creating. */
716 error = gfs2_quota_lock(dip, uid, gid);
720 error = gfs2_quota_check(dip, uid, gid);
724 error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
728 init_dinode(dip, gl, inum, mode, uid, gid, generation, dev);
/* Charge one inode to the owner's quota. */
729 gfs2_quota_change(dip, +1, uid, gid);
733 gfs2_quota_unlock(dip);
/*
 * link_dinode - add a directory entry pointing at a newly made inode
 * @dip: the parent directory
 * @name: the entry name
 * @ip: the new inode
 *
 * May need to allocate directory blocks; in that case it reserves space
 * and sizes the transaction for a directory grow, otherwise it starts a
 * small leaf-update transaction.
 * NOTE(review): error checks and unwind labels are elided in this excerpt.
 */
739 static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
740 struct gfs2_inode *ip)
742 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
743 struct gfs2_alloc *al;
745 struct buffer_head *dibh;
748 al = gfs2_alloc_get(dip);
750 error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
/* Negative return is an error; positive means new blocks are needed. */
754 error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
755 if (alloc_required < 0)
757 if (alloc_required) {
758 error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
760 goto fail_quota_locks;
/* Reserve the worst-case number of directory blocks. */
762 al->al_requested = sdp->sd_max_dirres;
764 error = gfs2_inplace_reserve(dip);
766 goto fail_quota_locks;
768 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
769 al->al_rgd->rd_ri.ri_length +
771 RES_STATFS + RES_QUOTA, 0);
/* No allocation needed: a much smaller transaction suffices. */
775 error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
777 goto fail_quota_locks;
780 error = gfs2_dir_add(&dip->i_inode, name, &ip->i_num, IF2DT(ip->i_inode.i_mode));
/* The new inode now has its first (and only) link. */
784 error = gfs2_meta_inode_buffer(ip, &dibh);
787 ip->i_inode.i_nlink = 1;
788 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
789 gfs2_dinode_out(ip, dibh->b_data);
/* Unwind: release the reservation only if one was actually taken. */
797 if (dip->i_alloc.al_rgd)
798 gfs2_inplace_release(dip);
801 gfs2_quota_unlock(dip);
/*
 * gfs2_security_init - attach the LSM security xattr to a new inode
 * @dip: the parent directory
 * @ip: the new inode
 *
 * Asks the security module for the initial xattr and stores it as a
 * GFS2_EATYPE_SECURITY extended attribute. -EOPNOTSUPP from the LSM
 * (no label wanted) is treated as success (return lines elided).
 */
808 static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
814 struct gfs2_ea_request er;
816 err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
817 &name, &value, &len);
820 if (err == -EOPNOTSUPP)
825 memset(&er, 0, sizeof(struct gfs2_ea_request));
827 er.er_type = GFS2_EATYPE_SECURITY;
830 er.er_name_len = strlen(name);
831 er.er_data_len = len;
/* Write the xattr; name/value buffers are freed after (lines elided). */
833 err = gfs2_ea_set_i(ip, &er);
842 * gfs2_createi - Create a new inode
843 * @ghs: An array of two holders
844 * @name: The name of the new file
845 * @mode: the permissions on the new inode
847 * @ghs[0] is an initialized holder for the directory
848 * @ghs[1] is the holder for the inode lock
850 * If the return value is not NULL, the glocks on both the directory and the new
851 * file are held. A transaction has been started and an inplace reservation
/* NOTE(review): many error checks and labels are elided in this excerpt;
 * the lock-ordering dance below is documented from the visible lines only. */
857 struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
858 unsigned int mode, dev_t dev)
861 struct gfs2_inode *dip = ghs->gh_gl->gl_object;
862 struct inode *dir = &dip->i_inode;
863 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
864 struct gfs2_inum_host inum;
868 if (!name->len || name->len > GFS2_FNAMESIZE)
869 return ERR_PTR(-ENAMETOOLONG);
/* Take the directory glock exclusively via the caller's holder. */
871 gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
872 error = gfs2_glock_nq(ghs);
876 error = create_ok(dip, name, mode);
880 error = pick_formal_ino(sdp, &inum.no_formal_ino);
884 error = alloc_dinode(dip, &inum, &generation);
/* Glock ordering: locks must be taken in ascending block-number order.
 * If the new inode's address sorts below the directory's, drop the
 * directory lock, take the inode lock first, then retake the directory
 * lock and re-validate with create_ok(). */
888 if (inum.no_addr < dip->i_num.no_addr) {
891 error = gfs2_glock_nq_num(sdp, inum.no_addr,
892 &gfs2_inode_glops, LM_ST_EXCLUSIVE,
895 return ERR_PTR(error);
898 gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
899 error = gfs2_glock_nq(ghs);
901 gfs2_glock_dq_uninit(ghs + 1);
902 return ERR_PTR(error);
/* Conditions may have changed while the directory lock was dropped. */
905 error = create_ok(dip, name, mode);
909 error = gfs2_glock_nq_num(sdp, inum.no_addr,
910 &gfs2_inode_glops, LM_ST_EXCLUSIVE,
/* Build the on-disk dinode, then instantiate and finish the inode. */
916 error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev);
920 inode = gfs2_inode_lookup(dir->i_sb, &inum, IF2DT(mode));
924 error = gfs2_inode_refresh(GFS2_I(inode));
928 error = gfs2_acl_create(dip, GFS2_I(inode));
932 error = gfs2_security_init(dip, GFS2_I(inode));
936 error = link_dinode(dip, name, GFS2_I(inode));
941 return ERR_PTR(-ENOMEM);
/* Error unwinding (labels elided): drop the inode holder. */
947 gfs2_glock_dq_uninit(ghs + 1);
951 return ERR_PTR(error);
955 * gfs2_rmdiri - Remove a directory
956 * @dip: The parent directory of the directory to be removed
957 * @name: The name of the directory to be removed
958 * @ip: The GFS2 inode of the directory to be removed
960 * Assumes Glocks on dip and ip are held
/* NOTE(review): error-check lines are elided in this excerpt. */
965 int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
966 struct gfs2_inode *ip)
/* An empty directory has exactly two entries: "." and "..". */
971 if (ip->i_di.di_entries != 2) {
972 if (gfs2_consist_inode(ip))
973 gfs2_dinode_print(ip);
/* Remove the entry from the parent, dropping the parent's link
 * (which the child's ".." contributed). */
977 error = gfs2_dir_del(dip, name);
981 error = gfs2_change_nlink(dip, -1);
985 gfs2_str2qstr(&dotname, ".");
986 error = gfs2_dir_del(ip, &dotname);
990 gfs2_str2qstr(&dotname, "..");
991 error = gfs2_dir_del(ip, &dotname);
995 /* It looks odd, but it really should be done twice */
996 error = gfs2_change_nlink(ip, -1);
1000 error = gfs2_change_nlink(ip, -1);
1008 * gfs2_unlink_ok - check to see that a inode is still in a directory
1009 * @dip: the directory
1010 * @name: the name of the file
1011 * @ip: the inode being unlinked
1013 * Assumes that the lock on (at least) @dip is held.
1015 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
/* NOTE(review): return lines are elided in this excerpt. */
1018 int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
1019 struct gfs2_inode *ip)
1021 struct gfs2_inum_host inum;
1025 if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
/* Sticky directory: only the owner of the dir or of the file (or
 * CAP_FOWNER) may unlink. */
1028 if ((dip->i_inode.i_mode & S_ISVTX) &&
1029 dip->i_inode.i_uid != current->fsuid &&
1030 ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
1033 if (IS_APPEND(&dip->i_inode))
1036 error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
/* Re-verify the entry still exists and still points at @ip. */
1040 error = gfs2_dir_search(&dip->i_inode, name, &inum, &type);
1044 if (!gfs2_inum_equal(&inum, &ip->i_num))
/* Directory entry type disagreeing with the inode mode is corruption. */
1047 if (IF2DT(ip->i_inode.i_mode) != type) {
1048 gfs2_consist_inode(dip);
1056 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
1057 * @this: the directory being moved
1058 * @to: the proposed destination directory
1060 * Follow @to back to the root and make sure we don't encounter @this
1061 * Assumes we already hold the rename lock.
/* Walks "..": from @to upward; loop structure is partly elided here. */
1066 int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
1068 struct inode *dir = &to->i_inode;
1069 struct super_block *sb = dir->i_sb;
1074 gfs2_str2qstr(&dotdot, "..");
/* Moving a directory beneath itself would create a cycle. */
1079 if (dir == &this->i_inode) {
/* Reached the root without meeting @this: the move is safe. */
1083 if (dir == sb->s_root->d_inode) {
1088 tmp = gfs2_lookupi(dir, &dotdot, 1, NULL);
1090 error = PTR_ERR(tmp);
1104 * gfs2_readlinki - return the contents of a symlink
1105 * @ip: the symlink's inode
1106 * @buf: a pointer to the buffer to be filled
1107 * @len: a pointer to the length of @buf
1109 * If @buf is too small, a piece of memory is kmalloc()ed and needs
1110 * to be freed by the caller.
/* NOTE(review): error checks and the brelse/unwind lines are elided. */
1115 int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
1117 struct gfs2_holder i_gh;
1118 struct buffer_head *dibh;
/* Shared glock with atime handling for the read. */
1122 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
1123 error = gfs2_glock_nq_atime(&i_gh);
1125 gfs2_holder_uninit(&i_gh);
/* A zero-length symlink is on-disk corruption. */
1129 if (!ip->i_di.di_size) {
1130 gfs2_consist_inode(ip);
1135 error = gfs2_meta_inode_buffer(ip, &dibh);
/* +1 for the terminating NUL. */
1139 x = ip->i_di.di_size + 1;
1141 *buf = kmalloc(x, GFP_KERNEL);
/* Symlink target is stuffed directly after the dinode header. */
1148 memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
1154 gfs2_glock_dq_uninit(&i_gh);
1159 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
1160 * conditionally update the inode's atime
1161 * @gh: the holder to acquire
1163 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap
1164 * Update if the difference between the current time and the inode's current
1165 * atime is greater than an interval specified at mount.
/* NOTE(review): several lines (returns, fail labels) are elided in this
 * excerpt; only the visible logic is annotated. */
1170 int gfs2_glock_nq_atime(struct gfs2_holder *gh)
1172 struct gfs2_glock *gl = gh->gh_gl;
1173 struct gfs2_sbd *sdp = gl->gl_sbd;
1174 struct gfs2_inode *ip = gl->gl_object;
1175 s64 curtime, quantum = gfs2_tune_get(sdp, gt_atime_quantum);
/* Misuse guards: GL_ATIME required, GL_ASYNC forbidden, inode glock only. */
1180 if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
1181 gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
1182 gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
/* Remember the caller's requested state so it can be restored later. */
1185 state = gh->gh_state;
1186 flags = gh->gh_flags;
1188 error = gfs2_glock_nq(gh);
/* No atime updates on noatime or read-only mounts. */
1192 if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
1193 (sdp->sd_vfs->s_flags & MS_RDONLY))
1196 curtime = get_seconds();
1197 if (curtime - ip->i_inode.i_atime.tv_sec >= quantum) {
/* Need EX to write atime: drop and re-acquire exclusively. */
1199 gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
1201 error = gfs2_glock_nq(gh);
1205 /* Verify that atime hasn't been updated while we were
1206 trying to get exclusive lock. */
1208 curtime = get_seconds();
1209 if (curtime - ip->i_inode.i_atime.tv_sec >= quantum) {
1210 struct buffer_head *dibh;
1211 struct gfs2_dinode *di;
1213 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1214 if (error == -EROFS)
1219 error = gfs2_meta_inode_buffer(ip, &dibh);
1221 goto fail_end_trans;
/* Write the new atime to both the VFS inode and the dinode. */
1223 ip->i_inode.i_atime.tv_sec = curtime;
1225 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1226 di = (struct gfs2_dinode *)dibh->b_data;
1227 di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
1230 gfs2_trans_end(sdp);
1233 /* If someone else has asked for the glock,
1234 unlock and let them have it. Then reacquire
1235 in the original state. */
1236 if (gfs2_glock_is_blocking(gl)) {
1238 gfs2_holder_reinit(state, flags, gh);
1239 return gfs2_glock_nq(gh);
/* Error path (label elided): close the transaction before unwinding. */
1246 gfs2_trans_end(sdp);
1253 * glock_compare_atime - Compare two struct gfs2_glock structures for sort
1254 * @arg_a: the first structure
1255 * @arg_b: the second structure
1257 * Returns: 1 if A > B
/* Orders holders primarily by lock number; for equal numbers, state and
 * GL_ATIME break ties so EX/atime requests sort consistently.
 * NOTE(review): return lines are elided in this excerpt. */
1262 static int glock_compare_atime(const void *arg_a, const void *arg_b)
1264 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1265 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1266 const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1267 const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1269 if (a->ln_number > b->ln_number)
1271 if (a->ln_number < b->ln_number)
1273 if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE)
1275 if (gh_a->gh_state == LM_ST_SHARED && (gh_b->gh_flags & GL_ATIME))
1282 * gfs2_glock_nq_m_atime - acquire multiple glocks where one may need an
1283 * atime update
1284 * @num_gh: the number of structures
1285 * @ghs: an array of struct gfs2_holder structures
1287 * Returns: 0 on success (all glocks acquired),
1288 * errno on failure (no glocks acquired)
/* Acquires the holders in sorted lock order to avoid deadlock; a single
 * holder takes the fast path. NOTE(review): early-return and cleanup
 * lines are elided in this excerpt. */
1291 int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs)
1293 struct gfs2_holder **p;
/* Single holder: no sorting needed; strip try/async flags first. */
1301 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1302 if (ghs->gh_flags & GL_ATIME)
1303 error = gfs2_glock_nq_atime(ghs);
1305 error = gfs2_glock_nq(ghs);
/* Multiple holders: sort pointers into deadlock-safe order. */
1309 p = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
1313 for (x = 0; x < num_gh; x++)
1316 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare_atime,NULL);
1318 for (x = 0; x < num_gh; x++) {
1319 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1321 if (p[x]->gh_flags & GL_ATIME)
1322 error = gfs2_glock_nq_atime(p[x]);
1324 error = gfs2_glock_nq(p[x]);
/* On failure, release every glock acquired so far (loop lines elided). */
1328 gfs2_glock_dq(p[x]);
/*
 * __gfs2_setattr_simple - apply an iattr change and journal the dinode
 * @ip: the inode to change
 * @attr: the attribute changes to apply
 *
 * Caller must already be inside a transaction (see gfs2_setattr_simple).
 * NOTE(review): error-check and brelse lines are elided in this excerpt.
 */
1339 __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1341 struct buffer_head *dibh;
1344 error = gfs2_meta_inode_buffer(ip, &dibh);
/* inode_setattr on a size-unchanged attr set is not expected to fail. */
1346 error = inode_setattr(&ip->i_inode, attr);
1347 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1348 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1349 gfs2_dinode_out(ip, dibh->b_data);
1356 * gfs2_setattr_simple -
1360 * Called with a reference on the vnode.
/* Wraps __gfs2_setattr_simple() in a transaction unless one is already
 * active (current->journal_info non-NULL). The tail of this function
 * runs past this excerpt. */
1365 int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1369 if (current->journal_info)
1370 return __gfs2_setattr_simple(ip, attr);
1372 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
1376 error = __gfs2_setattr_simple(ip, attr);
1377 gfs2_trans_end(GFS2_SB(&ip->i_inode));