 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
 #define MAX_RETRIES 20000
 
-static DEFINE_SPINLOCK(skb_list_lock);
-static int nr_skbs;
-static struct sk_buff *skbs;
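+/* Pool of preallocated skbs, consumed when alloc_skb() fails in atomic context. */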
+static struct sk_buff_head skb_pool;
 
 static DEFINE_SPINLOCK(queue_lock);
 static int queue_depth;
        struct sk_buff *skb;
        unsigned long flags;
 
-       spin_lock_irqsave(&skb_list_lock, flags);
-       while (nr_skbs < MAX_SKBS) {
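+       /* Hold the pool lock across the whole refill so the qlen
+        * check and the insertions stay consistent. */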
+       spin_lock_irqsave(&skb_pool.lock, flags);
+       while (skb_pool.qlen < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;
 
-               skb->next = skbs;
-               skbs = skb;
-               nr_skbs++;
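+               /* the pool lock is already held, so use the unlocked variant */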
+               __skb_queue_tail(&skb_pool, skb);
        }
-       spin_unlock_irqrestore(&skb_list_lock, flags);
+       spin_unlock_irqrestore(&skb_pool.lock, flags);
 }
 
 static void zap_completion_queue(void)
        put_cpu_var(softnet_data);
 }
 
-static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
+static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
-       int once = 1, count = 0;
-       unsigned long flags;
-       struct sk_buff *skb = NULL;
+       int count = 0;
+       struct sk_buff *skb;
 
        zap_completion_queue();
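+       /* top up the pool once up front rather than on every retry */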
+       refill_skbs();
 repeat:
-       if (nr_skbs < MAX_SKBS)
-               refill_skbs();
 
        skb = alloc_skb(len, GFP_ATOMIC);
-
-       if (!skb) {
-               spin_lock_irqsave(&skb_list_lock, flags);
-               skb = skbs;
-               if (skb) {
-                       skbs = skb->next;
-                       skb->next = NULL;
-                       nr_skbs--;
-               }
-               spin_unlock_irqrestore(&skb_list_lock, flags);
-       }
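+       /* fall back to the preallocated pool; skb_dequeue() takes the pool lock itself */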
+       if (!skb)
+               skb = skb_dequeue(&skb_pool);
 
        if(!skb) {
-               count++;
-               if (once && (count == 1000000)) {
-                       printk("out of netpoll skbs!\n");
-                       once = 0;
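+               /* Poll the device so the driver can reclaim completed
+                * skbs, then retry; give up after ten attempts. */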
+               if (++count < 10) {
+                       netpoll_poll(np);
+                       goto repeat;
                }
-               netpoll_poll(np);
-               goto repeat;
+               return NULL;
        }
 
        atomic_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
 }
 
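+/* core_initcall() runs before device and module initcalls, so the
+ * pool is initialized before any netpoll client can register. */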
+static int __init netpoll_init(void)
+{
+       skb_queue_head_init(&skb_pool);
+       return 0;
+}
+core_initcall(netpoll_init);
+
 void netpoll_cleanup(struct netpoll *np)
 {
        struct netpoll_info *npinfo;