X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Fmmzone.h;h=bad9486ee0cce5ceaeb982cd47e0ed15fff91046;hb=e815af95f94914993bbad279c71cf5fef9f4eaac;hp=3ea68cd3b61f29a04b708c8c8f4043cccf6b2450;hpb=bced13738405b62c8203df9c103d4ba63d747872;p=linux-2.6-omap-h63xx.git

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3ea68cd3b61..bad9486ee0c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -7,12 +7,14 @@
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/wait.h>
+#include <linux/bitops.h>
 #include <linux/cache.h>
 #include <linux/threads.h>
 #include <linux/numa.h>
 #include <linux/init.h>
 #include <linux/seqlock.h>
 #include <linux/nodemask.h>
+#include <linux/pageblock-flags.h>
 #include <asm/atomic.h>
 #include <asm/page.h>
 
@@ -32,8 +34,29 @@
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
+#define MIGRATE_UNMOVABLE     0
+#define MIGRATE_RECLAIMABLE   1
+#define MIGRATE_MOVABLE       2
+#define MIGRATE_RESERVE       3
+#define MIGRATE_ISOLATE       4 /* can't allocate from here */
+#define MIGRATE_TYPES         5
+
+#define for_each_migratetype_order(order, type) \
+	for (order = 0; order < MAX_ORDER; order++) \
+		for (type = 0; type < MIGRATE_TYPES; type++)
+
+extern int page_group_by_mobility_disabled;
+
+static inline int get_pageblock_migratetype(struct page *page)
+{
+	if (unlikely(page_group_by_mobility_disabled))
+		return MIGRATE_UNMOVABLE;
+
+	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
+}
+
 struct free_area {
-	struct list_head free_list;
+	struct list_head free_list[MIGRATE_TYPES];
 	unsigned long nr_free;
 };
 
@@ -222,6 +245,14 @@ struct zone {
 #endif
 	struct free_area free_area[MAX_ORDER];
 
+#ifndef CONFIG_SPARSEMEM
+	/*
+	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
+	 * In SPARSEMEM, this map is stored in struct mem_section
+	 */
+	unsigned long *pageblock_flags;
+#endif /* CONFIG_SPARSEMEM */
+
 
 	ZONE_PADDING(_pad1_)
 
@@ -232,10 +263,7 @@ struct zone {
 	unsigned long nr_scan_active;
 	unsigned long nr_scan_inactive;
 	unsigned long pages_scanned;	   /* since last reclaim */
-	int all_unreclaimable;		   /* All pages pinned */
-
-	/* A count of how many reclaimers are scanning this zone */
-	atomic_t reclaim_in_progress;
+	unsigned long flags;		   /* zone flags, see below */
 
 	/* Zone statistics */
 	atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
@@ -313,6 +341,29 @@ struct zone {
 	const char *name;
 } ____cacheline_internodealigned_in_smp;
 
+typedef enum {
+	ZONE_ALL_UNRECLAIMABLE,		/* all pages pinned */
+	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
+} zone_flags_t;
+
+static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
+{
+	set_bit(flag, &zone->flags);
+}
+static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
+{
+	clear_bit(flag, &zone->flags);
+}
+
+static inline int zone_is_all_unreclaimable(const struct zone *zone)
+{
+	return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
+}
+static inline int zone_is_reclaim_locked(const struct zone *zone)
+{
+	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
+}
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -324,6 +375,17 @@ struct zone {
 #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
 
 #ifdef CONFIG_NUMA
+
+/*
+ * The NUMA zonelists are doubled because we need zonelists that restrict the
+ * allocations to a single node for GFP_THISNODE.
+ *
+ * [0 .. MAX_NR_ZONES - 1]		: Zonelists with fallback
+ * [MAX_NR_ZONES .. MAX_ZONELISTS - 1]	: No fallback (GFP_THISNODE)
+ */
+#define MAX_ZONELISTS (2 * MAX_NR_ZONES)
+
+
 /*
  * We cache key information from each zonelist for smaller cache
  * footprint when scanning for free pages in get_page_from_freelist().
@@ -389,6 +451,7 @@ struct zonelist_cache {
 	unsigned long last_full_zap;	/* when last zap'd (jiffies) */
 };
 #else
+#define MAX_ZONELISTS MAX_NR_ZONES
 struct zonelist_cache;
 #endif
 
@@ -410,6 +473,24 @@ struct zonelist {
 #endif
 };
 
+#ifdef CONFIG_NUMA
+/*
+ * Only custom zonelists like MPOL_BIND need to be filtered as part of
+ * policies. As described in the comment for struct zonelist_cache, these
+ * zonelists will not have a zlcache so zlcache_ptr will not be set. Use
+ * that to determine if the zonelist needs to be filtered or not.
+ */
+static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
+{
+	return !zonelist->zlcache_ptr;
+}
+#else
+static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
+{
+	return 0;
+}
+#endif /* CONFIG_NUMA */
+
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 struct node_active_region {
 	unsigned long start_pfn;
@@ -437,7 +518,7 @@ extern struct page *mem_map;
 struct bootmem_data;
 typedef struct pglist_data {
 	struct zone node_zones[MAX_NR_ZONES];
-	struct zonelist node_zonelists[MAX_NR_ZONES];
+	struct zonelist node_zonelists[MAX_ZONELISTS];
 	int nr_zones;
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
 	struct page *node_mem_map;
@@ -690,6 +771,9 @@ extern struct zone *next_zone(struct zone *zone);
 #define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
 #define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))
 
+#define SECTION_BLOCKFLAGS_BITS \
+	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
+
 #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
 #error Allocator MAX_ORDER exceeds SECTION_SIZE
 #endif
@@ -709,6 +793,9 @@ struct mem_section {
 	 * before using it wrong.
	 */
 	unsigned long section_mem_map;
+
+	/* See declaration of similar field in struct zone */
+	unsigned long *pageblock_flags;
 };
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
@@ -753,12 +840,17 @@ static inline struct page *__section_mem_map_addr(struct mem_section *section)
 	return (struct page *)map;
 }
 
-static inline int valid_section(struct mem_section *section)
+static inline int present_section(struct mem_section *section)
 {
 	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
 }
 
-static inline int section_has_mem_map(struct mem_section *section)
+static inline int present_section_nr(unsigned long nr)
+{
+	return present_section(__nr_to_section(nr));
+}
+
+static inline int valid_section(struct mem_section *section)
 {
 	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
 }
@@ -780,6 +872,13 @@ static inline int pfn_valid(unsigned long pfn)
 	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
 }
 
+static inline int pfn_present(unsigned long pfn)
+{
+	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+		return 0;
+	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
+}
+
 /*
  * These are _only_ used during initialisation, therefore they
  * can use __initdata ...	They could have names to indicate
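
Illustrative usage (not part of the patch above): the hunks split each free_area into per-migratetype free lists, replace zone->all_unreclaimable with a flags word plus zone_set_flag()/zone_is_all_unreclaimable() helpers, and add for_each_migratetype_order() and get_pageblock_migratetype(). A minimal sketch of how code built on top of this header might use those additions follows; the function name count_free_blocks() and the counting loop are hypothetical and only meant to show the new data layout.

#include <linux/list.h>
#include <linux/mmzone.h>

/*
 * Hypothetical helper: count free buddy blocks of every order and
 * migratetype in one zone, skipping zones that reclaim has given up on.
 * A real caller would hold zone->lock while walking the free lists.
 */
static unsigned long count_free_blocks(struct zone *zone)
{
	unsigned long total = 0;
	int order, type;

	if (zone_is_all_unreclaimable(zone))
		return 0;

	for_each_migratetype_order(order, type) {
		struct list_head *entry;

		/* free_list is now an array indexed by migratetype */
		list_for_each(entry, &zone->free_area[order].free_list[type])
			total++;
	}
	return total;
}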