+static LIST_HEAD(page_pool);
+static unsigned long pool_size, pool_pages, pool_low;
+static unsigned long pool_used, pool_failed;
+
+static void cpa_fill_pool(struct page **ret)
+{
+	gfp_t gfp = GFP_KERNEL;
+	unsigned long flags;
+	struct page *p;
+
+	/*
+	 * Avoid recursion (on debug-pagealloc) and also signal
+	 * our priority to get to these pagetables:
+	 */
+	if (current->flags & PF_MEMALLOC)
+		return;
+	current->flags |= PF_MEMALLOC;
+
+	/*
+	 * Allocate atomically from atomic contexts:
+	 */
+	if (in_atomic() || irqs_disabled() || debug_pagealloc)
+		gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
+
+	while (pool_pages < pool_size || (ret && !*ret)) {
+		p = alloc_pages(gfp, 0);
+		if (!p) {
+			pool_failed++;
+			break;
+		}
+		/*
+		 * If the call site needs a page right now, provide it:
+		 */
+		if (ret && !*ret) {
+			*ret = p;
+			continue;
+		}
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_add(&p->lru, &page_pool);
+		pool_pages++;
+		spin_unlock_irqrestore(&pgd_lock, flags);
+	}
+
+	current->flags &= ~PF_MEMALLOC;
+}
+
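For context, the intended consumer pattern (illustrative only, not part of this patch) is to take a page from page_pool under pgd_lock and, when the pool is empty, to call cpa_fill_pool() with a non-NULL ret pointer so the "provide it" branch above hands a page back directly. A minimal sketch, assuming the pool fields declared above; the helper name cpa_get_pool_page() is hypothetical:

/* Illustrative sketch, not part of the patch: pop a preallocated page
 * from the pool, or fall back to a direct allocation via cpa_fill_pool().
 */
static struct page *cpa_get_pool_page(void)
{
	struct page *base = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	if (!list_empty(&page_pool)) {
		/* Pool has a preallocated page: take it and track usage. */
		base = list_first_entry(&page_pool, struct page, lru);
		list_del(&base->lru);
		pool_pages--;
		pool_used++;
		if (pool_pages < pool_low)
			pool_low = pool_pages;
	}
	spin_unlock_irqrestore(&pgd_lock, flags);

	/* Pool was empty: ask cpa_fill_pool() to allocate one for us. */
	if (!base)
		cpa_fill_pool(&base);

	return base;
}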
+#define SHIFT_MB		(20 - PAGE_SHIFT)
+#define ROUND_MB_GB		((1 << 10) - 1)
+#define SHIFT_MB_GB		10
+#define POOL_PAGES_PER_GB	16
+
+void __init cpa_init(void)
+{
+	struct sysinfo si;
+	unsigned long gb;
+
+	si_meminfo(&si);
+	/*
+	 * Calculate the number of pool pages:
+	 *
+	 * Convert totalram (nr of pages) to MiB and round to the next
+	 * GiB. Shift MiB to GiB and multiply the result by
+	 * POOL_PAGES_PER_GB:
+	 */
+	if (debug_pagealloc) {
+		gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
+		pool_size = POOL_PAGES_PER_GB * gb;
+	} else {
+		pool_size = 1;
+	}
+	pool_low = pool_size;
+
+	cpa_fill_pool(NULL);
+	printk(KERN_DEBUG
+	       "CPA: page pool initialized %lu of %lu pages preallocated\n",
+	       pool_pages, pool_size);
+}
+
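Worked example for the sizing logic (illustrative, assuming x86 with 4 KiB pages, i.e. PAGE_SHIFT = 12): SHIFT_MB is 20 - 12 = 8, so a 4 GiB machine with si.totalram of roughly 1048576 pages gives 1048576 >> 8 = 4096 MiB; adding ROUND_MB_GB (1023) and shifting right by SHIFT_MB_GB (10) gives gb = 4, so pool_size = POOL_PAGES_PER_GB * 4 = 64 pages when debug_pagealloc is set. Without CONFIG_DEBUG_PAGEALLOC the pool is kept at a single page.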