start = max(from, block_start);
                                        size = min(to, block_end) - start;
 
-                                       zero_user_page(page, start, size, KM_USER0);
+                                       zero_user(page, start, size);
                                        set_buffer_uptodate(bh);
                                }
 
                                        mark_buffer_dirty(bh);
                                        continue;
                                }
-                               if (block_end > to || block_start < from) {
-                                       void *kaddr;
-
-                                       kaddr = kmap_atomic(page, KM_USER0);
-                                       if (block_end > to)
-                                               memset(kaddr+to, 0,
-                                                       block_end-to);
-                                       if (block_start < from)
-                                               memset(kaddr+block_start,
-                                                       0, from-block_start);
-                                       flush_dcache_page(page);
-                                       kunmap_atomic(kaddr, KM_USER0);
-                               }
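+                               /* Zero the parts of this block that lie
+                                * outside the [from, to) write range. */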
+                               if (block_end > to || block_start < from)
+                                       zero_user_segments(page,
+                                               to, block_end,
+                                               block_start, from);
                                continue;
                        }
                }
                                        SetPageError(page);
                        }
                        if (!buffer_mapped(bh)) {
-                               zero_user_page(page, i * blocksize, blocksize,
-                                               KM_USER0);
+                               zero_user(page, i * blocksize, blocksize);
                                if (!err)
                                        set_buffer_uptodate(bh);
                                continue;
                                                &page, &fsdata);
                if (err)
                        goto out;
-               zero_user_page(page, zerofrom, len, KM_USER0);
+               zero_user(page, zerofrom, len);
                err = pagecache_write_end(file, mapping, curpos, len, len,
                                                page, fsdata);
                if (err < 0)
                                                &page, &fsdata);
                if (err)
                        goto out;
-               zero_user_page(page, zerofrom, len, KM_USER0);
+               zero_user(page, zerofrom, len);
                err = pagecache_write_end(file, mapping, curpos, len, len,
                                                page, fsdata);
                if (err < 0)
        unsigned block_in_page;
        unsigned block_start, block_end;
        sector_t block_in_file;
-       char *kaddr;
        int nr_reads = 0;
        int ret = 0;
        int is_mapped_to_disk = 1;
                        continue;
                }
                if (buffer_new(bh) || !buffer_mapped(bh)) {
-                       kaddr = kmap_atomic(page, KM_USER0);
-                       if (block_start < from)
-                               memset(kaddr+block_start, 0, from-block_start);
-                       if (block_end > to)
-                               memset(kaddr + to, 0, block_end - to);
-                       flush_dcache_page(page);
-                       kunmap_atomic(kaddr, KM_USER0);
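+                       /* Zero the parts of this block that lie outside
+                        * the [from, to) write range. */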
+                       zero_user_segments(page, block_start, from,
+                                                       to, block_end);
                        continue;
                }
                if (buffer_uptodate(bh))
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
+       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 out:
        ret = mpage_writepage(page, get_block, wbc);
        if (ret == -EAGAIN)
                if (page_has_buffers(page))
                        goto has_buffers;
        }
-       zero_user_page(page, offset, length, KM_USER0);
+       zero_user(page, offset, length);
        set_page_dirty(page);
        err = 0;
 
                        goto unlock;
        }
 
-       zero_user_page(page, offset, length, KM_USER0);
+       zero_user(page, offset, length);
        mark_buffer_dirty(bh);
        err = 0;
 
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
+       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
        return __block_write_full_page(inode, page, get_block, wbc);
 }
 
 
        if (!page)
                return -ENOMEM;
 
-       zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
+       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
        unlock_page(page);
        page_cache_release(page);
        return rc;
 
                                        page_cache_release(page);
                                        goto out;
                                }
-                               zero_user_page(page, block_in_page << blkbits,
-                                               1 << blkbits, KM_USER0);
+                               zero_user(page, block_in_page << blkbits,
+                                               1 << blkbits);
                                dio->block_in_file++;
                                block_in_page++;
                                goto next_block;
 
        end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
        if (to > end_byte_in_page)
                end_byte_in_page = to;
-       zero_user_page(page, end_byte_in_page,
-               PAGE_CACHE_SIZE - end_byte_in_page, KM_USER0);
+       zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);
 out:
        return 0;
 }
         */
        if ((i_size_read(page->mapping->host) == prev_page_end_size) &&
            (from != 0)) {
-               zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+               zero_user(page, 0, PAGE_CACHE_SIZE);
        }
 out:
        return rc;
 
         */
        if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
             ext3_should_writeback_data(inode) && PageUptodate(page)) {
-               zero_user_page(page, offset, length, KM_USER0);
+               zero_user(page, offset, length);
                set_page_dirty(page);
                goto unlock;
        }
                        goto unlock;
        }
 
-       zero_user_page(page, offset, length, KM_USER0);
+       zero_user(page, offset, length);
        BUFFER_TRACE(bh, "zeroed end of block");
 
        err = 0;
 
         */
        if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
             ext4_should_writeback_data(inode) && PageUptodate(page)) {
-               zero_user_page(page, offset, length, KM_USER0);
+               zero_user(page, offset, length);
                set_page_dirty(page);
                goto unlock;
        }
                        goto unlock;
        }
 
-       zero_user_page(page, offset, length, KM_USER0);
+       zero_user(page, offset, length);
 
        BUFFER_TRACE(bh, "zeroed end of block");
 
 
        if (!gfs2_is_writeback(ip))
                gfs2_trans_add_bh(ip->i_gl, bh, 0);
 
-       zero_user_page(page, offset, length, KM_USER0);
+       zero_user(page, offset, length);
 
 unlock:
        unlock_page(page);
 
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
-               zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+               zero_user(page, 0, PAGE_CACHE_SIZE);
                return 0;
        }
 
 
                        unsigned from, unsigned to)
 {
        if (!PageUptodate(page)) {
-               if (to - from != PAGE_CACHE_SIZE) {
-                       void *kaddr = kmap_atomic(page, KM_USER0);
-                       memset(kaddr, 0, from);
-                       memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
-                       flush_dcache_page(page);
-                       kunmap_atomic(kaddr, KM_USER0);
-               }
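+               /* Zero everything in the page outside [from, to) */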
+               if (to - from != PAGE_CACHE_SIZE)
+                       zero_user_segments(page,
+                               0, from,
+                               to, PAGE_CACHE_SIZE);
        }
        return 0;
 }
 
        }
 
        if (first_hole != blocks_per_page) {
-               zero_user_page(page, first_hole << blkbits,
-                               PAGE_CACHE_SIZE - (first_hole << blkbits),
-                               KM_USER0);
+               zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
                if (first_hole == 0) {
                        SetPageUptodate(page);
                        unlock_page(page);
 
                if (page->index > end_index || !offset)
                        goto confused;
-               zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
-                               KM_USER0);
+               zero_user_segment(page, offset, PAGE_CACHE_SIZE);
        }
 
        /*
 
 static
 int nfs_return_empty_page(struct page *page)
 {
-       zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+       zero_user(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
        pglen = PAGE_CACHE_SIZE - base;
        for (;;) {
                if (remainder <= pglen) {
-                       zero_user_page(*pages, base, remainder, KM_USER0);
+                       zero_user(*pages, base, remainder);
                        break;
                }
-               zero_user_page(*pages, base, pglen, KM_USER0);
+               zero_user(*pages, base, pglen);
                pages++;
                remainder -= pglen;
                pglen = PAGE_CACHE_SIZE;
                return PTR_ERR(new);
        }
        if (len < PAGE_CACHE_SIZE)
-               zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
+               zero_user_segment(page, len, PAGE_CACHE_SIZE);
 
        nfs_list_add_request(new, &one_request);
        if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
                goto out_error;
 
        if (len < PAGE_CACHE_SIZE)
-               zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
+               zero_user_segment(page, len, PAGE_CACHE_SIZE);
        nfs_pageio_add_request(desc->pgio, new);
        return 0;
 out_error:
 
         * then we need to zero any uninitialised data. */
        if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
                        && !PageUptodate(req->wb_page))
-               zero_user_page(req->wb_page, req->wb_bytes,
-                               PAGE_CACHE_SIZE - req->wb_bytes,
-                               KM_USER0);
+               zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
        return req;
 }
 
 
                /* Check for the current buffer head overflowing. */
                if (unlikely(file_ofs + bh->b_size > init_size)) {
                        int ofs;
+                       void *kaddr;
 
                        ofs = 0;
                        if (file_ofs < init_size)
                                ofs = init_size - file_ofs;
                        local_irq_save(flags);
-                       zero_user_page(page, bh_offset(bh) + ofs,
-                                        bh->b_size - ofs, KM_BIO_SRC_IRQ);
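+                       /*
+                        * Cannot use zero_user_segment() here: it maps with
+                        * KM_USER0, but this completion path runs in IRQ
+                        * context and needs the KM_BIO_SRC_IRQ kmap slot.
+                        */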
+                       kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+                       memset(kaddr + bh_offset(bh) + ofs, 0,
+                                       bh->b_size - ofs);
+                       flush_dcache_page(page);
+                       kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
                        local_irq_restore(flags);
                }
        } else {
                bh->b_blocknr = -1UL;
                clear_buffer_mapped(bh);
 handle_zblock:
-               zero_user_page(page, i * blocksize, blocksize, KM_USER0);
+               zero_user(page, i * blocksize, blocksize);
                if (likely(!err))
                        set_buffer_uptodate(bh);
        } while (i++, iblock++, (bh = bh->b_this_page) != head);
        /* Is the page fully outside i_size? (truncate in progress) */
        if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
                        PAGE_CACHE_SHIFT)) {
-               zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+               zero_user(page, 0, PAGE_CACHE_SIZE);
                ntfs_debug("Read outside i_size - truncated?");
                goto done;
        }
         * ok to ignore the compressed flag here.
         */
        if (unlikely(page->index > 0)) {
-               zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+               zero_user(page, 0, PAGE_CACHE_SIZE);
                goto done;
        }
        if (!NInoAttr(ni))
                if (err == -ENOENT || lcn == LCN_ENOENT) {
                        bh->b_blocknr = -1;
                        clear_buffer_dirty(bh);
-                       zero_user_page(page, bh_offset(bh), blocksize,
-                                       KM_USER0);
+                       zero_user(page, bh_offset(bh), blocksize);
                        set_buffer_uptodate(bh);
                        err = 0;
                        continue;
                if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
                        /* The page straddles i_size. */
                        unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
-                       zero_user_page(page, ofs, PAGE_CACHE_SIZE - ofs,
-                                       KM_USER0);
+                       zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
                }
                /* Handle mst protected attributes. */
                if (NInoMstProtected(ni))
 
        if (xpage >= max_page) {
                kfree(bhs);
                kfree(pages);
-               zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+               zero_user(page, 0, PAGE_CACHE_SIZE);
                ntfs_debug("Compressed read outside i_size - truncated?");
                SetPageUptodate(page);
                unlock_page(page);
 
                                        ntfs_submit_bh_for_read(bh);
                                        *wait_bh++ = bh;
                                } else {
-                                       zero_user_page(page, bh_offset(bh),
-                                                       blocksize, KM_USER0);
+                                       zero_user(page, bh_offset(bh),
+                                                       blocksize);
                                        set_buffer_uptodate(bh);
                                }
                        }
                                                ntfs_submit_bh_for_read(bh);
                                                *wait_bh++ = bh;
                                        } else {
-                                               zero_user_page(page,
-                                                       bh_offset(bh),
-                                                       blocksize, KM_USER0);
+                                               zero_user(page, bh_offset(bh),
+                                                               blocksize);
                                                set_buffer_uptodate(bh);
                                        }
                                }
                         */
                        if (bh_end <= pos || bh_pos >= end) {
                                if (!buffer_uptodate(bh)) {
-                                       zero_user_page(page, bh_offset(bh),
-                                                       blocksize, KM_USER0);
+                                       zero_user(page, bh_offset(bh),
+                                                       blocksize);
                                        set_buffer_uptodate(bh);
                                }
                                mark_buffer_dirty(bh);
                                if (!buffer_uptodate(bh))
                                        set_buffer_uptodate(bh);
                        } else if (!buffer_uptodate(bh)) {
-                               zero_user_page(page, bh_offset(bh), blocksize,
-                                               KM_USER0);
+                               zero_user(page, bh_offset(bh), blocksize);
                                set_buffer_uptodate(bh);
                        }
                        continue;
                                        if (!buffer_uptodate(bh))
                                                set_buffer_uptodate(bh);
                                } else if (!buffer_uptodate(bh)) {
-                                       zero_user_page(page, bh_offset(bh),
-                                                       blocksize, KM_USER0);
+                                       zero_user(page, bh_offset(bh),
+                                               blocksize);
                                        set_buffer_uptodate(bh);
                                }
                                continue;
 
                                if (likely(bh_pos < initialized_size))
                                        ofs = initialized_size - bh_pos;
-                               zero_user_page(page, bh_offset(bh) + ofs,
-                                               blocksize - ofs, KM_USER0);
+                               zero_user_segment(page, bh_offset(bh) + ofs,
+                                               bh_offset(bh) + blocksize);
                        }
                } else /* if (unlikely(!buffer_uptodate(bh))) */
                        err = -EIO;
                                if (PageUptodate(page))
                                        set_buffer_uptodate(bh);
                                else {
-                                       zero_user_page(page, bh_offset(bh),
-                                                       blocksize, KM_USER0);
+                                       zero_user(page, bh_offset(bh),
+                                                       blocksize);
                                        set_buffer_uptodate(bh);
                                }
                        }
                len = PAGE_CACHE_SIZE;
                if (len > bytes)
                        len = bytes;
-               zero_user_page(*pages, 0, len, KM_USER0);
+               zero_user(*pages, 0, len);
        }
        goto out;
 }
                len = PAGE_CACHE_SIZE;
                if (len > bytes)
                        len = bytes;
-               zero_user_page(*pages, 0, len, KM_USER0);
+               zero_user(*pages, 0, len);
        }
        goto out;
 }
 
                mlog_errno(ret);
 
        if (zero)
-               zero_user_page(page, from, to - from, KM_USER0);
+               zero_user_segment(page, from, to);
 
        /*
         * Need to set the buffers we zero'd into uptodate
 
         * XXX sys_readahead() seems to get that wrong?
         */
        if (start >= i_size_read(inode)) {
-               zero_user_page(page, 0, PAGE_SIZE, KM_USER0);
+               zero_user(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                ret = 0;
                goto out_alloc;
                if (block_start >= to)
                        break;
 
-               zero_user_page(page, block_start, bh->b_size, KM_USER0);
+               zero_user(page, block_start, bh->b_size);
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);
 
                                        start = max(from, block_start);
                                        end = min(to, block_end);
 
-                                       zero_user_page(page, start, end - start, KM_USER0);
+                                       zero_user_segment(page, start, end);
                                        set_buffer_uptodate(bh);
                                }
 
 
                /* if we are not on a block boundary */
                if (length) {
                        length = blocksize - length;
-                       zero_user_page(page, offset, length, KM_USER0);
+                       zero_user(page, offset, length);
                        if (buffer_mapped(bh) && bh->b_blocknr != 0) {
                                mark_buffer_dirty(bh);
                        }
                        unlock_page(page);
                        return 0;
                }
-               zero_user_page(page, last_offset, PAGE_CACHE_SIZE - last_offset, KM_USER0);
+               zero_user_segment(page, last_offset, PAGE_CACHE_SIZE);
        }
        bh = head;
        block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);
 
                if (status)
                        break;
 
-               zero_user_page(page, offset, bytes, KM_USER0);
+               zero_user(page, offset, bytes);
 
                status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
                                        page, fsdata);
 
        kunmap_atomic(kaddr, KM_USER0);
 }
 
-/*
- * Same but also flushes aliased cache contents to RAM.
- *
- * This must be a macro because KM_USER0 and friends aren't defined if
- * !CONFIG_HIGHMEM
- */
-#define zero_user_page(page, offset, size, km_type)            \
-       do {                                                    \
-               void *kaddr;                                    \
-                                                               \
-               BUG_ON((offset) + (size) > PAGE_SIZE);          \
-                                                               \
-               kaddr = kmap_atomic(page, km_type);             \
-               memset((char *)kaddr + (offset), 0, (size));    \
-               flush_dcache_page(page);                        \
-               kunmap_atomic(kaddr, (km_type));                \
-       } while (0)
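+/*
+ * Zero up to two byte ranges of a page, [start1, end1) and [start2, end2),
+ * under a single atomic kmap, then flush aliased cache contents to RAM.
+ * A range with start >= end is skipped.
+ */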
+static inline void zero_user_segments(struct page *page,
+       unsigned start1, unsigned end1,
+       unsigned start2, unsigned end2)
+{
+       void *kaddr = kmap_atomic(page, KM_USER0);
+
+       BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
+
+       if (end1 > start1)
+               memset(kaddr + start1, 0, end1 - start1);
+
+       if (end2 > start2)
+               memset(kaddr + start2, 0, end2 - start2);
+
+       kunmap_atomic(kaddr, KM_USER0);
+       flush_dcache_page(page);
+}
+
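+/* Zero the single byte range [start, end) of a page. */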
+static inline void zero_user_segment(struct page *page,
+       unsigned start, unsigned end)
+{
+       zero_user_segments(page, start, end, 0, 0);
+}
+
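+/*
+ * Zero @size bytes of a page starting at byte offset @start; replaces the
+ * old zero_user_page(page, start, size, KM_USER0).
+ */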
+static inline void zero_user(struct page *page,
+       unsigned start, unsigned size)
+{
+       zero_user_segments(page, start, start + size, 0, 0);
+}
 
 static inline void __deprecated memclear_highpage_flush(struct page *page,
                        unsigned int offset, unsigned int size)
 {
-       zero_user_page(page, offset, size, KM_USER0);
+       zero_user(page, offset, size);
 }
 
 #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
 
                else
                        return PTR_ERR(page);
        }
-       zero_user_page(page, offset, length, KM_USER0);
+       zero_user(page, offset, length);
        return 0;
 }
 EXPORT_SYMBOL_GPL(xip_truncate_page);
 
 
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
-       zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0);
+       zero_user_segment(page, partial, PAGE_CACHE_SIZE);
        if (PagePrivate(page))
                do_invalidatepage(page, partial);
 }