X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=fs%2Fbuffer.c;h=7d51e649b19a7d8f84061186fbd84326420f44f8;hb=308c05e35e3517d19bb67a7e97772235c9e15cd7;hp=826baf4f04bc738fdfea27f6652bc3c0bfe9969d;hpb=6e5565f949af1322f8f3d3f43d044645ae448499;p=linux-2.6-omap-h63xx.git

diff --git a/fs/buffer.c b/fs/buffer.c
index 826baf4f04b..7d51e649b19 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -67,14 +67,14 @@ static int sync_buffer(void *word)
 	return 0;
 }
 
-void fastcall __lock_buffer(struct buffer_head *bh)
+void __lock_buffer(struct buffer_head *bh)
 {
 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
 
-void fastcall unlock_buffer(struct buffer_head *bh)
+void unlock_buffer(struct buffer_head *bh)
 {
 	smp_mb__before_clear_bit();
 	clear_buffer_locked(bh);
@@ -360,16 +360,19 @@ void invalidate_bdev(struct block_device *bdev)
  */
 static void free_more_memory(void)
 {
-	struct zone **zones;
-	pg_data_t *pgdat;
+	struct zone *zone;
+	int nid;
 
 	wakeup_pdflush(1024);
 	yield();
 
-	for_each_online_pgdat(pgdat) {
-		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
-		if (*zones)
-			try_to_free_pages(zones, 0, GFP_NOFS);
+	for_each_online_node(nid) {
+		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
+						gfp_zone(GFP_NOFS), NULL,
+						&zone);
+		if (zone)
+			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
+						GFP_NOFS);
 	}
 }
 
@@ -627,8 +630,7 @@ repeat:
 }
 
 /**
- * sync_mapping_buffers - write out and wait upon a mapping's "associated"
- *                        buffers
+ * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
  * @mapping: the mapping which wants those buffers written
  *
  * Starts I/O against the buffers at mapping->private_list, and waits upon
@@ -678,7 +680,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 	} else {
 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
 	}
-	if (list_empty(&bh->b_assoc_buffers)) {
+	if (!bh->b_assoc_map) {
 		spin_lock(&buffer_mapping->private_lock);
 		list_move_tail(&bh->b_assoc_buffers,
 				&mapping->private_list);
@@ -794,6 +796,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
+	struct address_space *mapping;
 	int err = 0, err2;
 
 	INIT_LIST_HEAD(&tmp);
@@ -801,9 +804,14 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 	spin_lock(lock);
 	while (!list_empty(list)) {
 		bh = BH_ENTRY(list->next);
+		mapping = bh->b_assoc_map;
 		__remove_assoc_queue(bh);
+		/* Avoid race with mark_buffer_dirty_inode() which does
+		 * a lockless check and we rely on seeing the dirty bit */
+		smp_mb();
 		if (buffer_dirty(bh) || buffer_locked(bh)) {
 			list_add(&bh->b_assoc_buffers, &tmp);
+			bh->b_assoc_map = mapping;
 			if (buffer_dirty(bh)) {
 				get_bh(bh);
 				spin_unlock(lock);
@@ -822,8 +830,17 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 
 	while (!list_empty(&tmp)) {
 		bh = BH_ENTRY(tmp.prev);
-		list_del_init(&bh->b_assoc_buffers);
 		get_bh(bh);
+		mapping = bh->b_assoc_map;
+		__remove_assoc_queue(bh);
+		/* Avoid race with mark_buffer_dirty_inode() which does
+		 * a lockless check and we rely on seeing the dirty bit */
+		smp_mb();
+		if (buffer_dirty(bh)) {
+			list_add(&bh->b_assoc_buffers,
+				 &mapping->private_list);
+			bh->b_assoc_map = mapping;
+		}
 		spin_unlock(lock);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh))
@@ -1164,10 +1181,23 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
  * mapping->tree_lock and the global inode_lock.
  */
-void fastcall mark_buffer_dirty(struct buffer_head *bh)
+void mark_buffer_dirty(struct buffer_head *bh)
 {
 	WARN_ON_ONCE(!buffer_uptodate(bh));
-	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
+
+	/*
+	 * Very *carefully* optimize the it-is-already-dirty case.
+	 *
+	 * Don't let the final "is it dirty" escape to before we
+	 * perhaps modified the buffer.
+	 */
+	if (buffer_dirty(bh)) {
+		smp_mb();
+		if (buffer_dirty(bh))
+			return;
+	}
+
+	if (!test_set_buffer_dirty(bh))
 		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
 }
 
@@ -1195,7 +1225,7 @@ void __brelse(struct buffer_head * buf)
 void __bforget(struct buffer_head *bh)
 {
 	clear_buffer_dirty(bh);
-	if (!list_empty(&bh->b_assoc_buffers)) {
+	if (bh->b_assoc_map) {
 		struct address_space *buffer_mapping = bh->b_page->mapping;
 
 		spin_lock(&buffer_mapping->private_lock);
@@ -1436,6 +1466,7 @@ void invalidate_bh_lrus(void)
 {
 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
 }
+EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
 void set_bh_page(struct buffer_head *bh,
 		struct page *page, unsigned long offset)
@@ -2549,14 +2580,13 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
 	struct inode *inode = page->mapping->host;
 	struct buffer_head *head = fsdata;
 	struct buffer_head *bh;
+	BUG_ON(fsdata != NULL && page_has_buffers(page));
 
-	if (!PageMappedToDisk(page)) {
-		if (unlikely(copied < len) && !page_has_buffers(page))
-			attach_nobh_buffers(page, head);
-		if (page_has_buffers(page))
-			return generic_write_end(file, mapping, pos, len,
-					copied, page, fsdata);
-	}
+	if (unlikely(copied < len) && !page_has_buffers(page))
+		attach_nobh_buffers(page, head);
+	if (page_has_buffers(page))
+		return generic_write_end(file, mapping, pos, len,
+					copied, page, fsdata);
 
 	SetPageUptodate(page);
 	set_page_dirty(page);
@@ -3021,7 +3051,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 	do {
 		struct buffer_head *next = bh->b_this_page;
 
-		if (!list_empty(&bh->b_assoc_buffers))
+		if (bh->b_assoc_map)
 			__remove_assoc_queue(bh);
 		bh = next;
 	} while (bh != head);
@@ -3153,8 +3183,7 @@ static void recalc_bh_state(void)
 
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-	struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
-				set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
+	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
 	if (ret) {
 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
 		get_cpu_var(bh_accounting).nr++;
@@ -3198,7 +3227,7 @@ static int buffer_cpu_notify(struct notifier_block *self,
 }
 
 /**
- * bh_uptodate_or_lock: Test whether the buffer is uptodate
+ * bh_uptodate_or_lock - Test whether the buffer is uptodate
  * @bh: struct buffer_head
 *
 * Return true if the buffer is up-to-date and false,
@@ -3217,7 +3246,7 @@ int bh_uptodate_or_lock(struct buffer_head *bh)
 EXPORT_SYMBOL(bh_uptodate_or_lock);
 
 /**
- * bh_submit_read: Submit a locked buffer for reading
+ * bh_submit_read - Submit a locked buffer for reading
  * @bh: struct buffer_head
 *
 * Returns zero on success and -EIO on error.
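
The mark_buffer_dirty() hunk above is the subtle one: it skips the atomic
test_set_buffer_dirty() read-modify-write when the buffer is already dirty,
but it must not let the dirty-bit read be reordered before the caller's
earlier stores to the buffer, hence the smp_mb() between the first and
second checks. Below is a minimal userspace sketch of the same
check / full-barrier / re-check pattern using C11 atomics; dirty_flag and
mark_dirty() are illustrative stand-ins for this note, not kernel
interfaces, and the kernel's buffer-head bitops are replaced here with
plain C11 atomic operations.

/*
 * Sketch of the mark_buffer_dirty() fast path in portable C11.
 * Build with: cc -std=c11 -o sketch sketch.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool dirty_flag;		/* stand-in for BH_Dirty */

static void mark_dirty(void)
{
	/*
	 * Fast path: the flag is already set.  The seq_cst fence keeps
	 * the second read from being hoisted above the caller's earlier
	 * stores, which is the role smp_mb() plays in the kernel hunk.
	 */
	if (atomic_load_explicit(&dirty_flag, memory_order_relaxed)) {
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load_explicit(&dirty_flag, memory_order_relaxed))
			return;
	}

	/*
	 * Slow path: atomic test-and-set, analogous to
	 * test_set_buffer_dirty().  Only the clean->dirty transition
	 * pays for the atomic RMW and does the extra bookkeeping.
	 */
	if (!atomic_exchange(&dirty_flag, true))
		printf("clean -> dirty transition: queue writeback here\n");
}

int main(void)
{
	mark_dirty();	/* slow path: sets the flag */
	mark_dirty();	/* fast path: no atomic RMW, returns early */
	return 0;
}

The smp_mb() calls added to fsync_buffers_list() are the other half of a
similar lockless handshake: per the comments in those hunks, after
__remove_assoc_queue() the list state must be ordered before the
buffer_dirty() test, because mark_buffer_dirty_inode() checks
bh->b_assoc_map without taking private_lock and the writeback side relies
on then seeing the dirty bit.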