/*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
typedef void (*glock_examiner) (struct gfs2_glock * gl);
static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
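+/* forward declaration: dump_glock() prints a glock's state for debugging */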
+static int dump_glock(struct gfs2_glock *gl);
/**
* relaxed_state_ok - is a requested lock compatible with the current lock mode?
spin_lock_init(&gl->gl_spin);
gl->gl_state = LM_ST_UNLOCKED;
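+ /* gl_owner and gl_ip record which task took the glmutex, and from where */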
+ gl->gl_owner = NULL;
+ gl->gl_ip = 0;
INIT_LIST_HEAD(&gl->gl_holders);
INIT_LIST_HEAD(&gl->gl_waiters1);
INIT_LIST_HEAD(&gl->gl_waiters2);
/* If this glock protects actual on-disk data or metadata blocks,
create a VFS inode to manage the pages/buffers holding them. */
if (glops == &gfs2_inode_glops ||
- glops == &gfs2_rgrp_glops ||
- glops == &gfs2_meta_glops) {
+ glops == &gfs2_rgrp_glops) {
gl->gl_aspace = gfs2_aspace_get(sdp);
if (!gl->gl_aspace) {
error = -ENOMEM;
* @gl: the glock
* @state: the state we're requesting
* @flags: the modifier flags
- * @gfp_flags: __GFP_NOFAIL
+ * @gfp_flags: memory allocation flags
*
* Figure out how big an impact this function has. Either:
* 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
* Gives caller exclusive access to manipulate a glock structure.
*/
-void gfs2_glmutex_lock(struct gfs2_glock *gl)
+static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
struct gfs2_holder gh;
spin_lock(&gl->gl_spin);
if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
list_add_tail(&gh.gh_list, &gl->gl_waiters1);
- else
+ else {
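+ /* glmutex acquired immediately: note the owner and call site */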
+ gl->gl_owner = current;
+ gl->gl_ip = (unsigned long)__builtin_return_address(0);
complete(&gh.gh_wait);
+ }
spin_unlock(&gl->gl_spin);
wait_for_completion(&gh.gh_wait);
spin_lock(&gl->gl_spin);
if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
acquired = 0;
+ else {
+ gl->gl_owner = current;
+ gl->gl_ip = (unsigned long)__builtin_return_address(0);
+ }
spin_unlock(&gl->gl_spin);
return acquired;
*
*/
-void gfs2_glmutex_unlock(struct gfs2_glock *gl)
+static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
spin_lock(&gl->gl_spin);
clear_bit(GLF_LOCK, &gl->gl_flags);
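+ /* the glmutex is being released, so clear the owner bookkeeping */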
+ gl->gl_owner = NULL;
+ gl->gl_ip = 0;
run_queue(gl);
BUG_ON(!spin_is_locked(&gl->gl_spin));
spin_unlock(&gl->gl_spin);
* @gl: the glock
* @state: the state the caller wants us to change to
*
+ * Note: This may fail silently if we are out of memory.
*/
static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
struct gfs2_holder *gh, *new_gh = NULL;
- restart:
+restart:
spin_lock(&gl->gl_spin);
list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
} else {
spin_unlock(&gl->gl_spin);
- new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY,
- GFP_KERNEL | __GFP_NOFAIL),
+ new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_KERNEL);
+ if (!new_gh)
+ return;
set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
goto restart;
}
- out:
+out:
spin_unlock(&gl->gl_spin);
if (new_gh)
gfs2_holder_put(new_gh);
}
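+/**
+ * gfs2_glock_inode_squish - demote an inode's glock to the unlocked state
+ * @inode: the inode whose glock should be demoted
+ *
+ * Queues a demote request and waits for it to complete. The glock is
+ * expected to have no remaining holders.
+ */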
+void gfs2_glock_inode_squish(struct inode *inode)
+{
+ struct gfs2_holder gh;
+ struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
+
+ gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
+ set_bit(HIF_DEMOTE, &gh.gh_iflags);
+ spin_lock(&gl->gl_spin);
+ gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders));
+ list_add_tail(&gh.gh_list, &gl->gl_waiters2);
+ run_queue(gl);
+ spin_unlock(&gl->gl_spin);
+ wait_for_completion(&gh.gh_wait);
+ gfs2_holder_uninit(&gh);
+}
+
/**
* state_change - record that the glock is now in a different state
* @gl: the glock
struct gfs2_sbd *sdp = gl->gl_sbd;
int error = 0;
- restart:
+restart:
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
set_bit(HIF_ABORTED, &gh->gh_iflags);
return -EIO;
clear_bit(GLF_PREFETCH, &gl->gl_flags);
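+ /* a failed "try" request dumps the glock when the caller set GL_DUMP */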
+ if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
+ dump_glock(gl);
+
return error;
}
struct greedy *gr;
struct gfs2_holder *gh;
- if (!time ||
- gl->gl_sbd->sd_args.ar_localcaching ||
+ if (!time || gl->gl_sbd->sd_args.ar_localcaching ||
test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
return 1;
}
}
-/**
- * gfs2_try_toss_inode - try to remove a particular inode struct from cache
- * sdp: the filesystem
- * inum: the inode number
- *
- */
-
-void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum)
-{
- struct gfs2_glock *gl;
- struct gfs2_inode *ip;
- int error;
-
- error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops,
- NO_CREATE, &gl);
- if (error || !gl)
- return;
-
- if (!gfs2_glmutex_trylock(gl))
- goto out;
-
- ip = gl->gl_object;
- if (!ip)
- goto out_unlock;
-
- if (atomic_read(&ip->i_count))
- goto out_unlock;
-
- gfs2_inode_destroy(ip, 1);
-
- out_unlock:
- gfs2_glmutex_unlock(gl);
-
- out:
- gfs2_glock_put(gl);
-}
-
/**
* gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
* iopen glock from memory
void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
{
- struct gfs2_glock *i_gl;
if (state != LM_ST_UNLOCKED)
return;
-
- spin_lock(&io_gl->gl_spin);
- i_gl = io_gl->gl_object;
- if (i_gl) {
- gfs2_glock_hold(i_gl);
- spin_unlock(&io_gl->gl_spin);
- } else {
- spin_unlock(&io_gl->gl_spin);
- return;
- }
-
- if (gfs2_glmutex_trylock(i_gl)) {
- struct gfs2_inode *ip = i_gl->gl_object;
- if (ip) {
- gfs2_try_toss_vnode(ip);
- gfs2_glmutex_unlock(i_gl);
- gfs2_glock_schedule_for_reclaim(i_gl);
- goto out;
- }
- gfs2_glmutex_unlock(i_gl);
- }
-
- out:
- gfs2_glock_put(i_gl);
+ /* FIXME: remove this? */
}
/**
atomic_inc(&sdp->sd_reclaimed);
if (gfs2_glmutex_trylock(gl)) {
- if (gl->gl_ops == &gfs2_inode_glops) {
- struct gfs2_inode *ip = gl->gl_object;
- if (ip && !atomic_read(&ip->i_count))
- gfs2_inode_destroy(ip, 1);
- }
if (queue_empty(gl, &gl->gl_holders) &&
gl->gl_state != LM_ST_UNLOCKED &&
demote_ok(gl))
static void scan_glock(struct gfs2_glock *gl)
{
if (gfs2_glmutex_trylock(gl)) {
- if (gl->gl_ops == &gfs2_inode_glops) {
- struct gfs2_inode *ip = gl->gl_object;
- if (ip && !atomic_read(&ip->i_count))
- goto out_schedule;
- }
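+ /* inode glocks are no longer reclaimed here */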
+ if (gl->gl_ops == &gfs2_inode_glops)
+ goto out;
if (queue_empty(gl, &gl->gl_holders) &&
gl->gl_state != LM_ST_UNLOCKED &&
demote_ok(gl))
goto out_schedule;
-
+out:
gfs2_glmutex_unlock(gl);
}
return;
- out_schedule:
+out_schedule:
gfs2_glmutex_unlock(gl);
gfs2_glock_schedule_for_reclaim(gl);
gfs2_glock_put(gl);
}
if (gfs2_glmutex_trylock(gl)) {
- if (gl->gl_ops == &gfs2_inode_glops) {
- struct gfs2_inode *ip = gl->gl_object;
- if (ip && !atomic_read(&ip->i_count))
- gfs2_inode_destroy(ip, 1);
- }
if (queue_empty(gl, &gl->gl_holders) &&
gl->gl_state != LM_ST_UNLOCKED)
handle_callback(gl, LM_ST_UNLOCKED);
printk(KERN_INFO " Inode:\n");
printk(KERN_INFO " num = %llu %llu\n",
- ip->i_num.no_formal_ino, ip->i_num.no_addr);
+ (unsigned long long)ip->i_num.no_formal_ino,
+ (unsigned long long)ip->i_num.no_addr);
printk(KERN_INFO " type = %u\n", IF2DT(ip->i_di.di_mode));
- printk(KERN_INFO " i_count = %d\n", atomic_read(&ip->i_count));
printk(KERN_INFO " i_flags =");
for (x = 0; x < 32; x++)
if (test_bit(x, &ip->i_flags))
printk(" %u", x);
printk(" \n");
- printk(KERN_INFO " vnode = %s\n", (ip->i_vnode) ? "yes" : "no");
error = 0;
spin_lock(&gl->gl_spin);
- printk(KERN_INFO "Glock (%u, %llu)\n",
- gl->gl_name.ln_type,
- gl->gl_name.ln_number);
+ printk(KERN_INFO "Glock (%u, %llu)\n", gl->gl_name.ln_type,
+ (unsigned long long)gl->gl_name.ln_number);
printk(KERN_INFO " gl_flags =");
for (x = 0; x < 32; x++)
if (test_bit(x, &gl->gl_flags))
printk(" \n");
printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
printk(KERN_INFO " gl_state = %u\n", gl->gl_state);
+ printk(KERN_INFO " gl_owner = %s\n", gl->gl_owner->comm);
+ print_symbol(KERN_INFO " gl_ip = %s\n", gl->gl_ip);
printk(KERN_INFO " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
printk(KERN_INFO " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
printk(KERN_INFO " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));