/* Assume anything <= 4GB can be handled by IOMMU.
Actually some IOMMUs can handle everything, but I don't
know of a way to test this here. */
- if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+ if (b_pfn < (min_t(u64, 0x100000000UL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
dma = 1;
q->bounce_pfn = max_low_pfn;
#else
EXPORT_SYMBOL(blk_queue_stack_limits);
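The constant change in the first hunk is an off-by-one fix in the 4GB boundary test: shifting the all-ones mask 0xffffffff right by PAGE_SHIFT truncates its low bits to 0xfffff, so a device advertising a full 32-bit DMA mask computes b_pfn == 0xfffff and fails the strict less-than comparison against its own value. 0x100000000UL shifts to the true boundary pfn, 0x100000. A standalone userspace sketch of the arithmetic (not kernel code, assuming 4 KB pages, PAGE_SHIFT == 12):

/* Standalone sketch of the 4GB boundary arithmetic fixed above. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t dma_addr = 0xffffffffULL;       /* full 32-bit DMA mask */
	uint64_t b_pfn = dma_addr >> PAGE_SHIFT; /* == 0xfffff */

	/* old bound: 0xffffffff >> 12 == 0xfffff, so the strict
	 * less-than is false for a full 32-bit mask */
	printf("old: %d\n", b_pfn < (0xffffffffULL >> PAGE_SHIFT));

	/* new bound: 0x100000000 >> 12 == 0x100000, one page higher,
	 * so the same device now falls inside the 4GB boundary */
	printf("new: %d\n", b_pfn < (0x100000000ULL >> PAGE_SHIFT));
	return 0;
}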
/**
- * blk_queue_dma_drain - Set up a drain buffer for excess dma.
+ * blk_queue_dma_pad - set pad mask
+ * @q: the request queue for the device
+ * @mask: pad mask
+ *
+ * Set pad mask. Direct IO requests are padded to the mask specified.
*
+ * Appending pad buffer to a request modifies ->data_len such that it
+ * includes the pad buffer. The original requested data length can be
+ * obtained using blk_rq_raw_data_len().
+ **/
+void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
+{
+ q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_pad);
+
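As the new comment notes, the pad grows ->data_len while blk_rq_raw_data_len() still reports the caller's original length. The mask follows the usual power-of-two convention: a mask of n - 1 rounds lengths up to a multiple of n. A standalone sketch of that rounding (pad_to_mask() is an illustrative helper, not block-layer code):

/* Illustrative only: the rounding a pad mask implies. */
#include <stdio.h>

static unsigned int pad_to_mask(unsigned int len, unsigned int mask)
{
	return (len + mask) & ~mask;
}

int main(void)
{
	/* e.g. a 4-byte pad, as a driver might request with
	 * blk_queue_dma_pad(q, 3) */
	printf("%u\n", pad_to_mask(10, 3));	/* 12 */
	printf("%u\n", pad_to_mask(12, 3));	/* 12, already aligned */
	return 0;
}

A driver for hardware that transfers in 32-bit units, ATAPI for example, would pass mask 3 to get 4-byte padding.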
+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
* @q: the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
* @buf: physically contiguous buffer
* @size: size of the buffer in bytes
*
* Note: This routine adjusts max_hw_segments to make room for
* appending the drain buffer.  If you call
* blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
* calling this routine, you must set the limit to one less than your
* device can support otherwise there won't be room for the drain
* buffer.
*/
-int blk_queue_dma_drain(struct request_queue *q, void *buf,
- unsigned int size)
+int blk_queue_dma_drain(struct request_queue *q,
+ dma_drain_needed_fn *dma_drain_needed,
+ void *buf, unsigned int size)
{
if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
return -EINVAL;
/* make room for appending the drain to the scatterlist */
--q->max_hw_segments;
--q->max_phys_segments;
+ q->dma_drain_needed = dma_drain_needed;
q->dma_drain_buffer = buf;
q->dma_drain_size = size;

return 0;
}
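The new argument closes the loop: when a request is mapped with blk_rq_map_sg(), the block layer appends the drain buffer only if dma_drain_needed() returns non-zero for that request. A hypothetical driver setup follows; every my_* name is illustrative, and only blk_queue_dma_drain() and the dma_drain_needed_fn type come from this patch:

/* Hypothetical driver setup; all my_* names are illustrative. */
#include <linux/blkdev.h>
#include <linux/slab.h>

#define MY_DRAIN_SIZE 4096	/* worst-case excess the HBA may transfer */

/* Called per request; non-zero means append the drain buffer. */
static int my_drain_needed(struct request *rq)
{
	/* e.g. only packet commands over-transfer on this hardware */
	return blk_pc_request(rq);
}

static int my_setup_drain(struct request_queue *q)
{
	void *buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* reserves one hw and one phys segment for the drain entry */
	return blk_queue_dma_drain(q, my_drain_needed, buf, MY_DRAIN_SIZE);
}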
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
-int __init blk_settings_init(void)
+static int __init blk_settings_init(void)
{
blk_max_low_pfn = max_low_pfn - 1;
blk_max_pfn = max_pfn - 1;
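/*
 * Sketch of why the new static qualifier is safe: blk_settings_init()
 * is referenced only by an initcall registration in the same file.
 * my_subsys_init below is an illustrative stand-in, not kernel code.
 */
#include <linux/init.h>

static int __init my_subsys_init(void)
{
	/* one-time setup, run once at subsystem-init time during boot */
	return 0;
}
subsys_initcall(my_subsys_init);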