struct mm_struct;
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/*
+ * All "charge" functions that take a gfp_mask should be called with GFP_KERNEL
+ * or (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
+ * not allocate memory itself; it only reclaims memory from all available
+ * zones, so the "where do I want memory from" bits of gfp_mask have no
+ * meaning. Any value of those bits would work, but having a rule avoids
+ * ambiguity: callers of charge functions should pass either GFP_KERNEL or
+ * (gfp_mask & GFP_RECLAIM_MASK).
+ * (Of course, if memcg ever allocates memory itself in the future, GFP_KERNEL
+ * is the sane choice.)
+ */
 
 extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask);
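
Purely illustrative sketch, not part of the patch: a caller following the rule in the new comment either passes GFP_KERNEL outright or masks its incoming gfp_mask with GFP_RECLAIM_MASK before charging. The wrapper name example_add_to_cache() is invented for this sketch; mem_cgroup_cache_charge() and GFP_RECLAIM_MASK are the existing kernel symbols the patch relies on.

static int example_add_to_cache(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	int error;

	/*
	 * Strip the zone-placement bits (e.g. __GFP_HIGHMEM): memcg only
	 * reclaims, it never allocates, so only the reclaim-behaviour bits
	 * of gfp_mask matter to the charge path.
	 */
	error = mem_cgroup_cache_charge(page, mm, gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		return error;

	/* ... insert the page into the page cache, uncharging on failure ... */
	return 0;
}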
 
        VM_BUG_ON(!PageLocked(page));
 
        error = mem_cgroup_cache_charge(page, current->mm,
-                                       gfp_mask & ~__GFP_HIGHMEM);
+                                       gfp_mask & GFP_RECLAIM_MASK);
        if (error)
                goto out;
 
 
        unlock_page_cgroup(pc);
 
        if (mem) {
-               ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
+               ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
                css_put(&mem->css);
        }
        *ptr = mem;
                        break;
 
                progress = try_to_free_mem_cgroup_pages(memcg,
-                               GFP_HIGHUSER_MOVABLE, false);
+                               GFP_KERNEL, false);
                if (!progress)                  retry_count--;
        }
        return ret;
                        break;
 
                oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
-               try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true);
+               try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
                curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
                if (curusage >= oldusage)
                        retry_count--;
                }
                spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-               ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE);
+               ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
                if (ret == -ENOMEM)
                        break;
 
                        goto out;
                }
                progress = try_to_free_mem_cgroup_pages(mem,
-                                                 GFP_HIGHUSER_MOVABLE, false);
+                                                 GFP_KERNEL, false);
                if (!progress) {
                        nr_retries--;
                        /* maybe some writeback is necessary */
 
        cow_user_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
 
-       if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE))
+       if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
                goto oom_free_new;
 
        /*
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-       if (mem_cgroup_try_charge_swapin(mm, page,
-                               GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) {
+       if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
                ret = VM_FAULT_OOM;
                unlock_page(page);
                goto out;
                goto oom;
        __SetPageUptodate(page);
 
-       if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE))
+       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
                goto oom_free_page;
 
        entry = mk_pte(page, vma->vm_page_prot);
                                ret = VM_FAULT_OOM;
                                goto out;
                        }
-                       if (mem_cgroup_newpage_charge(page,
-                                               mm, GFP_HIGHUSER_MOVABLE)) {
+                       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
                                ret = VM_FAULT_OOM;
                                page_cache_release(page);
                                goto out;
 
-        * Charge page using GFP_HIGHUSER_MOVABLE while we can wait.
+        * Charge page using GFP_KERNEL while we can wait.
         * charged back to the user(not to caller) when swap account is used.
         */
-       error = mem_cgroup_cache_charge_swapin(page,
-                       current->mm, GFP_HIGHUSER_MOVABLE, true);
+       error = mem_cgroup_cache_charge_swapin(page, current->mm, GFP_KERNEL,
+                                       true);
        if (error)
                goto out;
        error = radix_tree_preload(GFP_KERNEL);
                         * charge against this swap cache here.
                         */
                        if (mem_cgroup_cache_charge_swapin(swappage,
-                                               current->mm, gfp, false)) {
+                               current->mm, gfp & GFP_RECLAIM_MASK, false)) {
                                page_cache_release(swappage);
                                error = -ENOMEM;
                                goto failed;
 
                        /* Precharge page while we can wait, compensate after */
                        error = mem_cgroup_cache_charge(filepage, current->mm,
-                                       GFP_HIGHUSER_MOVABLE);
+                                       GFP_KERNEL);
                        if (error) {
                                page_cache_release(filepage);
                                shmem_unacct_blocks(info->flags, 1);
 
        pte_t *pte;
        int ret = 1;
 
-       if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
-                                       GFP_HIGHUSER_MOVABLE, &ptr))
+       if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
                ret = -ENOMEM;
 
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);