sb = get_super(bdev);
if (sb && !(sb->s_flags & MS_RDONLY)) {
sb->s_frozen = SB_FREEZE_WRITE;
- wmb();
+ smp_wmb();
sync_inodes_sb(sb, 0);
DQUOT_SYNC(sb);
sync_inodes_sb(sb, 1);
sb->s_frozen = SB_FREEZE_TRANS;
- wmb();
+ smp_wmb();
sync_blockdev(sb->s_bdev);
if (sb->s_op->unlockfs)
sb->s_op->unlockfs(sb);
sb->s_frozen = SB_UNFROZEN;
- wmb();
+ smp_wmb();
wake_up(&sb->s_wait_unfrozen);
drop_super(sb);
}
*/
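
Note that wmb() always emits a hardware write barrier, while smp_wmb() compiles
down to a pure compiler barrier on uniprocessor builds. The s_frozen transitions
above only need to be ordered against other CPUs sampling the freeze state, so
the SMP variant is the correct (and cheaper) choice. A minimal sketch of the
pairing this relies on, with a hypothetical reader side (not part of the patch):

	/*
	 * Hypothetical reader on another CPU; the smp_rmb() pairs with
	 * the writer's smp_wmb() after each s_frozen store above.
	 */
	static int sb_is_frozen(struct super_block *sb)
	{
		int frozen = (sb->s_frozen != SB_UNFROZEN);

		smp_rmb();
		return frozen;
	}
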
static void do_sync(unsigned long wait)
{
- wakeup_bdflush(0);
+ wakeup_pdflush(0);
sync_inodes(0); /* All mappings, inodes and their blockdevs */
DQUOT_SYNC(NULL);
sync_supers(); /* Write the superblocks */
}
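
wakeup_bdflush() goes away along with the bdflush thread itself; writeback is
now driven by the pdflush pool, and wakeup_pdflush(nr_pages) asks it to write
back roughly nr_pages pages (0 meaning "as much as possible"). For context, the
syscall entry point driving do_sync() presumably still looks like the usual
2.6-era wrapper:

	asmlinkage long sys_sync(void)
	{
		do_sync(1);	/* wait for the writeout to finish */
		return 0;
	}
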
-asmlinkage long sys_fsync(unsigned int fd)
+static long do_fsync(unsigned int fd, int datasync)
{
struct file * file;
	struct address_space *mapping;
	int ret, err;

	ret = -EBADF;
	file = fget(fd);
	if (!file)
goto out;
- mapping = file->f_mapping;
-
ret = -EINVAL;
if (!file->f_op || !file->f_op->fsync) {
/* Why? We can still call filemap_fdatawrite */
goto out_putf;
}
+ mapping = file->f_mapping;
+
current->flags |= PF_SYNCWRITE;
ret = filemap_fdatawrite(mapping);
	/*
	 * We need to protect against concurrent writers,
	 * which could cause livelocks in fsync_buffers_list
*/
down(&mapping->host->i_sem);
- err = file->f_op->fsync(file, file->f_dentry, 0);
+ err = file->f_op->fsync(file, file->f_dentry, datasync);
if (!ret)
ret = err;
	up(&mapping->host->i_sem);
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
	current->flags &= ~PF_SYNCWRITE;
out_putf:
	fput(file);
out:
	return ret;
}
-asmlinkage long sys_fdatasync(unsigned int fd)
+asmlinkage long sys_fsync(unsigned int fd)
{
- struct file * file;
- struct address_space *mapping;
- int ret, err;
-
- ret = -EBADF;
- file = fget(fd);
- if (!file)
- goto out;
-
- ret = -EINVAL;
- if (!file->f_op || !file->f_op->fsync)
- goto out_putf;
-
- mapping = file->f_mapping;
-
- current->flags |= PF_SYNCWRITE;
- ret = filemap_fdatawrite(mapping);
- down(&mapping->host->i_sem);
- err = file->f_op->fsync(file, file->f_dentry, 1);
- if (!ret)
- ret = err;
- up(&mapping->host->i_sem);
- err = filemap_fdatawait(mapping);
- if (!ret)
- ret = err;
- current->flags &= ~PF_SYNCWRITE;
+ return do_fsync(fd, 0);
+}
-out_putf:
- fput(file);
-out:
- return ret;
+asmlinkage long sys_fdatasync(unsigned int fd)
+{
+ return do_fsync(fd, 1);
}
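
The two entry points now differ only in the datasync flag forwarded to the
filesystem's ->fsync method, which may use it to skip the inode write when only
non-critical metadata (timestamps, for example) is dirty. A hypothetical
implementation showing the convention (myfs_fsync is illustrative, not from
this patch):

	static int myfs_fsync(struct file *file, struct dentry *dentry,
			      int datasync)
	{
		struct inode *inode = dentry->d_inode;

		/*
		 * For fdatasync(), the inode itself only needs writing
		 * when data-critical fields such as i_size changed.
		 */
		if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
			return 0;
		return write_inode_now(inode, 1);
	}
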
/*
struct zone **zones;
pg_data_t *pgdat;
- wakeup_bdflush(1024);
+ wakeup_pdflush(1024);
yield();
for_each_pgdat(pgdat) {
zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
if (*zones)
- try_to_free_pages(zones, GFP_NOFS, 0);
+ try_to_free_pages(zones, GFP_NOFS);
}
}
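
This hunk lands in what appears to be free_more_memory(), the last-resort path
taken when buffer_head allocation fails; assembled here for readability,
assuming the excerpt matches mainline fs/buffer.c of the era:

	static void free_more_memory(void)
	{
		struct zone **zones;
		pg_data_t *pgdat;

		wakeup_pdflush(1024);
		yield();

		for_each_pgdat(pgdat) {
			zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
			if (*zones)
				try_to_free_pages(zones, GFP_NOFS);
		}
	}

The try_to_free_pages() hunk reflects the dropped third argument (the
allocation order hint); callers no longer pass it.
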
*/
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
- static DEFINE_SPINLOCK(page_uptodate_lock);
unsigned long flags;
+ struct buffer_head *first;
struct buffer_head *tmp;
struct page *page;
int page_uptodate = 1;
	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
* decide that the page is now completely done.
*/
- spin_lock_irqsave(&page_uptodate_lock, flags);
+ first = page_buffers(page);
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
return;
still_busy:
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
return;
}
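
The old scheme funneled every async-read completion in the system through one
static spinlock. It is replaced by a bit spinlock (BH_Uptodate_Lock, a new flag
bit in b_state) taken on the page's first buffer_head, so contention is now
per-page; the same conversion is applied to end_buffer_async_write() below.
Since bit_spin_lock() has no irqsave form, interrupts are disabled by hand.
Hypothetical wrappers make the pattern explicit:

	static inline void
	bh_uptodate_lock_irqsave(struct buffer_head *first, unsigned long *flags)
	{
		local_irq_save(*flags);
		bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	}

	static inline void
	bh_uptodate_unlock_irqrestore(struct buffer_head *first, unsigned long flags)
	{
		bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
		local_irq_restore(flags);
	}
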
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
char b[BDEVNAME_SIZE];
- static DEFINE_SPINLOCK(page_uptodate_lock);
unsigned long flags;
+ struct buffer_head *first;
struct buffer_head *tmp;
struct page *page;
SetPageError(page);
}
- spin_lock_irqsave(&page_uptodate_lock, flags);
+ first = page_buffers(page);
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+
clear_buffer_async_write(bh);
unlock_buffer(bh);
tmp = bh->b_this_page;
}
tmp = tmp->b_this_page;
}
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
end_page_writeback(page);
return;
still_busy:
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
return;
}
/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
- * @buffer_mapping - the mapping which backs the buffers' data
- * @mapping - the mapping which wants those buffers written
+ * @mapping: the mapping which wants those buffers written
*
* Starts I/O against the buffers at mapping->private_list, and waits upon
* that I/O.
*
- * Basically, this is a convenience function for fsync(). @buffer_mapping is
- * the blockdev which "owns" the buffers and @mapping is a file or directory
- * which needs those buffers to be written for a successful fsync().
+ * Basically, this is a convenience function for fsync().
+ * @mapping is a file or directory which needs those buffers to be written for
+ * a successful fsync().
*/
int sync_mapping_buffers(struct address_space *mapping)
{
return 1;
}
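
A filesystem queues "associated" buffers on an inode's private_list with
mark_buffer_dirty_inode(); sync_mapping_buffers() is the matching flush,
typically called from ->fsync. A hypothetical pairing (names are illustrative):

	static int myfs_sync_metadata(struct inode *inode, struct buffer_head *bh)
	{
		/* queue bh on inode->i_mapping->private_list ... */
		mark_buffer_dirty_inode(bh, inode);

		/* ... then write out and wait upon everything queued there */
		return sync_mapping_buffers(inode->i_mapping);
	}
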
-struct buffer_head *
+static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be a multiple of the hard sector size */
/**
* mark_buffer_dirty - mark a buffer_head as needing writeout
+ * @bh: the buffer_head to mark dirty
*
* mark_buffer_dirty() will set the dirty bit against the buffer, then set its
* backing page dirty, then tag the page as dirty in its address_space's radix
/**
* __bread() - reads a specified block and returns the bh
+ * @bdev: the block_device to read from
* @block: number of block
* @size: size (in bytes) to read
*
} while (bh != head);
do {
- get_bh(bh);
if (!buffer_mapped(bh))
continue;
	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
BUG_ON(PageWriteback(page));
set_page_writeback(page);
- unlock_page(page);
do {
struct buffer_head *next = bh->b_this_page;
submit_bh(WRITE, bh);
nr_underway++;
}
- put_bh(bh);
bh = next;
} while (bh != head);
+ unlock_page(page);
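
With the get_bh()/put_bh() pair gone, nothing but the page lock keeps the
buffer ring stable while it is being walked, so unlock_page() has to move below
the submission loop; otherwise I/O completing on another CPU could free the
page's buffers mid-iteration. The resulting submission path, assembled from the
hunks above (the buffer_async_write() test is elided context, restored here on
the assumption it matches the tree):

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(WRITE, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);
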
err = 0;
done:
bh = head;
/* Recovery: lock and submit the mapped buffers */
do {
- get_bh(bh);
if (buffer_mapped(bh) && buffer_dirty(bh)) {
lock_buffer(bh);
mark_buffer_async_write(bh);
submit_bh(WRITE, bh);
nr_underway++;
}
- put_bh(bh);
bh = next;
} while (bh != head);
goto done;
if (!buffer_mapped(bh)) {
err = get_block(inode, block, bh, 1);
if (err)
- goto out;
+ break;
if (buffer_new(bh)) {
- clear_buffer_new(bh);
unmap_underlying_metadata(bh->b_bdev,
bh->b_blocknr);
if (PageUptodate(page)) {
while(wait_bh > wait) {
wait_on_buffer(*--wait_bh);
if (!buffer_uptodate(*wait_bh))
- return -EIO;
+ err = -EIO;
}
- return 0;
-out:
+ if (!err) {
+ bh = head;
+ do {
+ if (buffer_new(bh))
+ clear_buffer_new(bh);
+ } while ((bh = bh->b_this_page) != head);
+ return 0;
+ }
+ /* Error case: */
/*
* Zero out any newly allocated blocks to avoid exposing stale
	 * data. If BH_New is set, we know that the block was newly
	 * allocated in the above loop.
	 */
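
Deferring clear_buffer_new() until every read has completed is what makes this
comment work: on the error path, BH_New still being set identifies blocks that
get_block() freshly allocated, so they can be zeroed rather than leak stale
disk contents to a later read. An illustrative sketch of such a cleanup loop
(variable names assumed; the patch's real error path is elided from this
excerpt):

	bh = head;
	do {
		if (buffer_new(bh) && !buffer_uptodate(bh)) {
			void *kaddr = kmap_atomic(page, KM_USER0);

			memset(kaddr + bh_offset(bh), 0, bh->b_size);
			kunmap_atomic(kaddr, KM_USER0);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
		bh = bh->b_this_page;
	} while (bh != head);
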
int nr, i;
int fully_mapped = 1;
- if (!PageLocked(page))
- PAGE_BUG(page);
+ BUG_ON(!PageLocked(page));
blocksize = 1 << inode->i_blkbits;
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
continue;
if (!buffer_mapped(bh)) {
+ int err = 0;
+
fully_mapped = 0;
if (iblock < lblock) {
- if (get_block(inode, iblock, bh, 0))
+ err = get_block(inode, iblock, bh, 0);
+ if (err)
SetPageError(page);
}
			if (!buffer_mapped(bh)) {
				void *kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + i * blocksize, 0, blocksize);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
- set_buffer_uptodate(bh);
+ if (!err)
+ set_buffer_uptodate(bh);
continue;
}
/*
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
- SLAB_PANIC, init_buffer_head, NULL);
+ SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
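
SLAB_RECLAIM_ACCOUNT tags the cache's pages as reclaimable in the VM's slab
accounting, which matters for a cache like buffer_head whose objects are freed
under memory pressure; without it, the dirty-limit and free-memory heuristics
would treat bh slab as pinned. The same pattern applies to any shrinkable
cache; a hypothetical example using the era's kmem_cache_create() signature:

	struct myobj {
		struct list_head list;
		void *data;
	};

	static kmem_cache_t *myobj_cachep;

	static void myobj_cache_init(void)
	{
		/* objects are released under VM pressure: account as reclaimable */
		myobj_cachep = kmem_cache_create("myobj_cache",
					sizeof(struct myobj), 0,
					SLAB_RECLAIM_ACCOUNT, NULL, NULL);
	}
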
/*
* Limit the bh occupancy to 10% of ZONE_NORMAL