/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory
 * etc.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/module.h>
#include <linux/genalloc.h>
/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		rwlock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
/**
 * gen_pool_add - add a new chunk of memory to the pool
 * @pool: pool to add new memory chunk to
 * @addr: starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of memory to the specified pool.
 */
int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
		 int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

	/* The chunk header and its bitmap are allocated as one block. */
	chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	memset(chunk, 0, nbytes);
	spin_lock_init(&chunk->lock);
	chunk->start_addr = addr;
	chunk->end_addr = addr + size;

	write_lock(&pool->lock);
	list_add(&chunk->next_chunk, &pool->chunks);
	write_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add);
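
/*
 * Illustrative sketch (not part of this file): a driver setting up a pool
 * over a special memory region and handing it to the allocator.  The names
 * sram_pool, sram_base, sram_size and sram_pool_setup() are made up for
 * this example; a real caller supplies its own mapping and size.
 */
#if 0
#include <linux/errno.h>
#include <linux/genalloc.h>

static struct gen_pool *sram_pool;
static unsigned long sram_base;	/* hypothetical mapped base address */
static size_t sram_size;	/* hypothetical region size in bytes */

static int sram_pool_setup(void)
{
	/* min_alloc_order = 5: each bitmap bit covers 2^5 = 32 bytes. */
	sram_pool = gen_pool_create(5, -1);
	if (sram_pool == NULL)
		return -ENOMEM;

	if (gen_pool_add(sram_pool, sram_base, sram_size, -1) < 0) {
		/* Nothing has been allocated yet, so destroy is safe. */
		gen_pool_destroy(sram_pool);
		return -ENOMEM;
	}
	return 0;
}
#endif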
/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy a memory pool. Verifies that there are no outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	write_lock(&pool->lock);
	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		/* A set bit would mean an outstanding allocation. */
		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct list_head *_chunk;
	struct gen_pool_chunk *chunk;
	unsigned long addr, flags;
	int order = pool->min_alloc_order;
	int nbits, bit, start_bit, end_bit;

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;

	read_lock(&pool->lock);
	list_for_each(_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
		end_bit -= nbits + 1;

		spin_lock_irqsave(&chunk->lock, flags);
		bit = -1;
		while (bit + 1 < end_bit) {
			bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
			if (bit >= end_bit)
				break;

			start_bit = bit;
			if (nbits > 1) {
				bit = find_next_bit(chunk->bits, bit + nbits,
							bit + 1);
				if (bit - start_bit < nbits)
					continue;
			}

			addr = chunk->start_addr +
					((unsigned long)start_bit << order);
			while (nbits--)
				__set_bit(start_bit++, chunk->bits);
			spin_unlock_irqrestore(&chunk->lock, flags);
			read_unlock(&pool->lock);
			return addr;
		}
		spin_unlock_irqrestore(&chunk->lock, flags);
	}
	read_unlock(&pool->lock);
	return 0;
}
EXPORT_SYMBOL(gen_pool_alloc);
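
/*
 * Illustrative sketch (not part of this file): allocating from a pool built
 * as in the setup example above.  gen_pool_alloc() returns 0 on failure, so
 * the managed region must not start at address 0.  Requests are rounded up
 * to whole granules of 1 << min_alloc_order bytes; with min_alloc_order = 5,
 * a 100-byte request marks 4 bits and consumes 128 bytes.
 */
#if 0
static void *sram_alloc_buffer(struct gen_pool *pool, size_t len)
{
	unsigned long vaddr;

	vaddr = gen_pool_alloc(pool, len);
	if (vaddr == 0)
		return NULL;	/* no free run of bits large enough */

	return (void *)vaddr;
}
#endif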
/**
 * gen_pool_free - free allocated memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free the specified memory back to the specified pool.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct list_head *_chunk;
	struct gen_pool_chunk *chunk;
	unsigned long flags;
	int order = pool->min_alloc_order;
	int bit, nbits;

	/* Round up to whole granules, matching what gen_pool_alloc() marked. */
	nbits = (size + (1UL << order) - 1) >> order;

	read_lock(&pool->lock);
	list_for_each(_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			BUG_ON(addr + size > chunk->end_addr);
			spin_lock_irqsave(&chunk->lock, flags);
			bit = (addr - chunk->start_addr) >> order;
			while (nbits--)
				__clear_bit(bit++, chunk->bits);
			spin_unlock_irqrestore(&chunk->lock, flags);
			break;
		}
	}
	BUG_ON(nbits > 0);
	read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
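
/*
 * Illustrative sketch (not part of this file): returning an allocation and
 * tearing the pool down.  The allocator keeps no per-allocation metadata, so
 * the caller must pass the same size it allocated with, and every allocation
 * must be freed before gen_pool_destroy() or its BUG_ON() will trigger.
 */
#if 0
static void sram_free_buffer(struct gen_pool *pool, void *buf, size_t len)
{
	gen_pool_free(pool, (unsigned long)buf, len);
}

static void sram_pool_teardown(struct gen_pool *pool)
{
	/* All outstanding buffers have already been returned. */
	gen_pool_destroy(pool);
}
#endif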