                        kfree(ff);
                        ff = NULL;
-               }
-               atomic_set(&ff->count, 0);
+               } else {
+                       INIT_LIST_HEAD(&ff->write_entry);
+                       atomic_set(&ff->count, 0);
+               }
        }
        return ff;
 }
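Note why initializing write_entry belongs in the success branch of the allocator: the release path in the next hunk does an unconditional list_del() on it, which is only safe because an initialized-but-unlinked entry points to itself, so deleting it rewrites nothing but its own pointers. A minimal illustration (not part of the patch):

        struct list_head entry;

        INIT_LIST_HEAD(&entry); /* entry.next == entry.prev == &entry */
        list_del(&entry);       /* harmless: unlinks a node linked only to itself */

Files that never become writeback candidates therefore need no special casing when they are released.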

 int fuse_release_common(struct inode *inode, struct file *file, int isdir)
 {
        struct fuse_file *ff = file->private_data;
        if (ff) {
+               struct fuse_conn *fc = get_fuse_conn(inode);
+
                fuse_release_fill(ff, get_node_id(inode), file->f_flags,
                                  isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);
 
                /* Hold vfsmount and dentry until release is finished */
                ff->reserved_req->vfsmount = mntget(file->f_path.mnt);
                ff->reserved_req->dentry = dget(file->f_path.dentry);
+
+               spin_lock(&fc->lock);
+               list_del(&ff->write_entry);
+               spin_unlock(&fc->lock);
                /*
                 * Normally this will send the RELEASE request,
                 * however if some asynchronous READ or WRITE requests
                 * are outstanding, the sending will be delayed
                 */
                fuse_file_put(ff);
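Nothing in this hunk adds entries to the list; that happens on the mmap side of the series, because only a shared, writable mapping can dirty pages that writeback must later flush without a struct file at hand. The producer looks roughly like the following sketch, modeled on the series' fuse_file_mmap() hook (illustration, not part of this hunk):

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
                struct inode *inode = file->f_path.dentry->d_inode;
                struct fuse_conn *fc = get_fuse_conn(inode);
                struct fuse_inode *fi = get_fuse_inode(inode);
                struct fuse_file *ff = file->private_data;

                /* The file may be written through mmap, so chain it
                 * onto the inode's write_files list. */
                spin_lock(&fc->lock);
                if (list_empty(&ff->write_entry))
                        list_add(&ff->write_entry, &fi->write_files);
                spin_unlock(&fc->lock);
        }
        return generic_file_mmap(file, vma);
}

The list_empty() test keeps repeated mmaps from linking the same file twice, and pairs with the unconditional list_del() in the release path above.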
 
 
        /** Version of last attribute change */
        u64 attr_version;
+
+       /** Files usable in writepage.  Protected by fc->lock */
+       struct list_head write_files;
 };
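The inode-side list is what makes writepage possible at all: ->writepage() is handed only the page and its mapping, never a struct file, so the file handle for a WRITE request must be recovered from the inode. A later patch in the series consumes write_files with a helper along these lines (sketch):

static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
                                             struct fuse_inode *fi)
{
        struct fuse_file *ff = NULL;

        spin_lock(&fc->lock);
        if (!list_empty(&fi->write_files)) {
                ff = list_entry(fi->write_files.next, struct fuse_file,
                                write_entry);
                fuse_file_get(ff);      /* pin the file until the write completes */
        }
        spin_unlock(&fc->lock);

        return ff;
}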
 
 /** FUSE specific file data */
 
        /** Refcount */
        atomic_t count;
+
+       /** Entry on inode's write_files list */
+       struct list_head write_entry;
 };
 
 /** One input argument of a request */
 
        fi->i_time = 0;
        fi->nodeid = 0;
        fi->nlookup = 0;
+       INIT_LIST_HEAD(&fi->write_files);
        fi->forget_req = fuse_request_alloc();
        if (!fi->forget_req) {
                kmem_cache_free(fuse_inode_cachep, inode);
                return NULL;
        }

 static void fuse_destroy_inode(struct inode *inode)
 {
        struct fuse_inode *fi = get_fuse_inode(inode);
+       BUG_ON(!list_empty(&fi->write_files));
        if (fi->forget_req)
                fuse_request_free(fi->forget_req);
        kmem_cache_free(fuse_inode_cachep, inode);
 }
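The BUG_ON() is what makes the scheme trustworthy: fuse_release_common() unlinks the file before its last reference can go away, so a non-empty list at inode destruction means a fuse_file was leaked. Given that invariant, writeback can count on finding a file whenever dirty pages exist. A hypothetical skeleton of the eventual consumer, assuming the fuse_write_file_get() helper sketched above (a real implementation must also handle page locking and writeback accounting):

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct fuse_file *ff;

        ff = fuse_write_file_get(fc, fi);
        if (!ff)
                return -EIO;    /* no writable file left to send the WRITE with */

        /* ... build a FUSE_WRITE request against ff->fh for this page,
         * queue it, and drop the file reference when it completes ... */

        return 0;
}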