diff --git a/block/blk-settings.c b/block/blk-settings.c
index 1344a0ea5cc..bb93d4c3277 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -14,7 +14,6 @@ unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
 
 unsigned long blk_max_pfn;
-EXPORT_SYMBOL(blk_max_pfn);
 
 /**
  * blk_queue_prep_rq - set a prepare_request function for queue
@@ -140,7 +139,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 	/* Assume anything <= 4GB can be handled by IOMMU.
 	   Actually some IOMMUs can handle everything, but I don't
 	   know of a way to test this here. */
-	if (b_pfn <= (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+	if (b_pfn < (min_t(u64, 0x100000000UL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
@@ -169,8 +168,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-							    max_sectors);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_sectors);
 	}
 
 	if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -197,8 +196,8 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-							    max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_segments);
 	}
 
 	q->max_phys_segments = max_segments;
@@ -221,8 +220,8 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-							    max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_segments);
 	}
 
 	q->max_hw_segments = max_segments;
@@ -242,8 +241,8 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
 	if (max_size < PAGE_CACHE_SIZE) {
 		max_size = PAGE_CACHE_SIZE;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-							    max_size);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_size);
 	}
 
 	q->max_segment_size = max_size;
@@ -288,7 +287,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
 }
 
 EXPORT_SYMBOL(blk_queue_stack_limits);
@@ -358,8 +357,8 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
 	if (mask < PAGE_CACHE_SIZE - 1) {
 		mask = PAGE_CACHE_SIZE - 1;
-		printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
-							    mask);
+		printk(KERN_INFO "%s: set to minimum %lx\n",
+		       __func__, mask);
 	}
 
 	q->seg_boundary_mask = mask;
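
Note (not part of the patch): the blk_queue_bounce_limit() hunk above swaps
an inclusive comparison against the highest 32-bit address (0xffffffff,
tested with "<=") for an exclusive comparison against the first address
above 4GB (0x100000000UL, tested with "<"). A minimal standalone sketch of
the arithmetic, assuming 4KB pages; PAGE_SHIFT, the constants, and the loop
bounds below are local to this illustration, not taken from the kernel
sources:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12	/* assumed 4KB pages */

	int main(void)
	{
		/* Old form: highest 32-bit address, inclusive test. */
		uint64_t old_limit = 0xffffffffULL >> PAGE_SHIFT;   /* 0xfffff  */
		/* New form: first address above 4GB, exclusive test. */
		uint64_t new_limit = 0x100000000ULL >> PAGE_SHIFT;  /* 0x100000 */

		/* Walk across the 4GB boundary and compare both tests. */
		for (uint64_t b_pfn = 0xffffe; b_pfn <= 0x100001; b_pfn++)
			printf("b_pfn=%#llx  old(<=)=%d  new(<)=%d\n",
			       (unsigned long long)b_pfn,
			       b_pfn <= old_limit, b_pfn < new_limit);
		return 0;
	}

For the plain 4GB constant both forms accept the same PFNs (everything up
to 0xfffff), so the visible effect is when min_t() picks BLK_BOUNCE_HIGH
instead: that limit is built from max_low_pfn shifted up by PAGE_SHIFT, and
if it is read as a page-aligned, exclusive bound, the "<" form is the one
that stays correct, while "<=" would accept one PFN too many.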