lib: genalloc: Use 64 bit types for holding allocations

genalloc may be used to allocate physical addresses in addition
to virtual addresses. On LPAE-based systems, physical addresses
are 64 bits wide and do not fit in an unsigned long. Change the
type used internally for allocations to 64 bits so that genalloc
can properly allocate physical addresses above 4GB.

Ideally this is just a temporary workaround until either a proper
fix lands elsewhere or an alternative API is used to manage
physical memory.

Change-Id: Ib10b887730e0c6916de5d1e6f77e771c6cde14bb
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Authored by Laura Abbott on 2013-04-08 14:43:14 -07:00; committed by Stephen Boyd
parent d01470a40d
commit ed1d138e55
2 changed files with 19 additions and 19 deletions
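
To make the failure mode concrete, here is a small illustration (not part of the patch; the function name is made up): on a 32-bit ARM kernel with LPAE enabled, phys_addr_t is 64 bits wide while unsigned long is only 32, so a physical address at or above 4GB is silently truncated when stored in an unsigned long.

/*
 * Illustration only -- not part of this change. Shows why an
 * unsigned long cannot hold an LPAE physical address on a
 * 32-bit kernel.
 */
#include <linux/kernel.h>
#include <linux/types.h>

static void __maybe_unused lpae_truncation_example(void)
{
	phys_addr_t phys = 0x100000000ULL;		/* 4GB: needs 33 bits */
	unsigned long truncated = (unsigned long)phys;	/* becomes 0 when long is 32 bits */
	u64 kept = phys;				/* full value preserved */

	WARN_ON(truncated != kept);			/* fires on a 32-bit LPAE kernel */
}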

include/linux/genalloc.h

@@ -41,7 +41,7 @@ struct device_node;
  * @nr: The number of zeroed bits we're looking for
  * @data: optional additional data used by @genpool_algo_t
  */
-typedef unsigned long (*genpool_algo_t)(unsigned long *map,
+typedef u64 (*genpool_algo_t)(unsigned long *map,
 			unsigned long size,
 			unsigned long start,
 			unsigned int nr,
@@ -66,14 +66,14 @@ struct gen_pool_chunk {
 	struct list_head next_chunk;	/* next chunk in pool */
 	atomic_t avail;
 	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
-	unsigned long start_addr;	/* starting address of memory chunk */
-	unsigned long end_addr;		/* ending address of memory chunk */
+	u64 start_addr;			/* starting address of memory chunk */
+	u64 end_addr;			/* ending address of memory chunk */
 	unsigned long bits[0];		/* bitmap for allocating memory chunk */
 };
 extern struct gen_pool *gen_pool_create(int, int);
-extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
-extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
+extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, u64);
+extern int gen_pool_add_virt(struct gen_pool *, u64, phys_addr_t,
 		size_t, int);
 /**
  * gen_pool_add - add a new chunk of special memory to the pool
@@ -87,13 +87,13 @@ extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
  *
  * Returns 0 on success or a -ve errno on failure.
  */
-static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
+static inline int gen_pool_add(struct gen_pool *pool, u64 addr,
 		size_t size, int nid)
 {
 	return gen_pool_add_virt(pool, addr, -1, size, nid);
 }
 extern void gen_pool_destroy(struct gen_pool *);
-extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
+extern void gen_pool_free(struct gen_pool *, u64, size_t);
 extern void gen_pool_for_each_chunk(struct gen_pool *,
 	void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
 extern size_t gen_pool_avail(struct gen_pool *);
@@ -102,17 +102,17 @@ extern size_t gen_pool_size(struct gen_pool *);
 extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
 		void *data);
-extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
+extern u64 gen_pool_first_fit(unsigned long *map, unsigned long size,
 		unsigned long start, unsigned int nr, void *data);
-extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
+extern u64 gen_pool_best_fit(unsigned long *map, unsigned long size,
 		unsigned long start, unsigned int nr, void *data);
 extern struct gen_pool *devm_gen_pool_create(struct device *dev,
 		int min_alloc_order, int nid);
 extern struct gen_pool *dev_get_gen_pool(struct device *dev);
-unsigned long __must_check
+u64 __must_check
 gen_pool_alloc_aligned(struct gen_pool *pool, size_t size,
 		unsigned alignment_order);
@@ -124,7 +124,7 @@ gen_pool_alloc_aligned(struct gen_pool *pool, size_t size,
  * Allocate the requested number of bytes from the specified pool.
  * Uses a first-fit algorithm.
  */
-static inline unsigned long __must_check
+static inline u64 __must_check
 gen_pool_alloc(struct gen_pool *pool, size_t size)
 {
 	return gen_pool_alloc_aligned(pool, size, 0);

lib/genalloc.c

@@ -175,7 +175,7 @@ EXPORT_SYMBOL(gen_pool_create);
  *
  * Returns 0 on success or a -ve errno on failure.
  */
-int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
+int gen_pool_add_virt(struct gen_pool *pool, u64 virt, phys_addr_t phys,
 		 size_t size, int nid)
 {
 	struct gen_pool_chunk *chunk;
@@ -212,7 +212,7 @@ EXPORT_SYMBOL(gen_pool_add_virt);
  *
  * Returns the physical address on success, or -1 on error.
  */
-phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
+phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, u64 addr)
 {
 	struct gen_pool_chunk *chunk;
 	phys_addr_t paddr = -1;
@@ -278,11 +278,11 @@ EXPORT_SYMBOL(gen_pool_destroy);
  * Can not be used in NMI handler on architectures without
  * NMI-safe cmpxchg implementation.
  */
-unsigned long gen_pool_alloc_aligned(struct gen_pool *pool, size_t size,
+u64 gen_pool_alloc_aligned(struct gen_pool *pool, size_t size,
 		unsigned alignment_order)
 {
 	struct gen_pool_chunk *chunk;
-	unsigned long addr = 0, align_mask = 0;
+	u64 addr = 0, align_mask = 0;
 	int order = pool->min_alloc_order;
 	int nbits, start_bit = 0, remain;
@@ -319,7 +319,7 @@ retry:
 			goto retry;
 		}
-		addr = chunk->start_addr + ((unsigned long)start_bit << order);
+		addr = chunk->start_addr + ((u64)start_bit << order);
 		size = nbits << pool->min_alloc_order;
 		atomic_sub(size, &chunk->avail);
 		break;
@@ -339,7 +339,7 @@ EXPORT_SYMBOL(gen_pool_alloc_aligned);
  * pool. Can not be used in NMI handler on architectures without
  * NMI-safe cmpxchg implementation.
  */
-void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
+void gen_pool_free(struct gen_pool *pool, u64 addr, size_t size)
 {
 	struct gen_pool_chunk *chunk;
 	int order = pool->min_alloc_order;
@@ -461,7 +461,7 @@ EXPORT_SYMBOL(gen_pool_set_algo);
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
  */
-unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
+u64 gen_pool_first_fit(unsigned long *map, unsigned long size,
 		unsigned long start, unsigned int nr, void *data)
 {
 	return bitmap_find_next_zero_area(map, size, start, nr, 0);
@@ -480,7 +480,7 @@ EXPORT_SYMBOL(gen_pool_first_fit);
  * Iterate over the bitmap to find the smallest free region
  * which we can allocate the memory.
  */
-unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
+u64 gen_pool_best_fit(unsigned long *map, unsigned long size,
 		unsigned long start, unsigned int nr, void *data)
 {
 	unsigned long start_bit = size;
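
For context, here is a minimal usage sketch of the API after this change (illustration only; the function name, base address, and sizes are arbitrary): the pool is seeded with a chunk whose base lies above 4GB, and gen_pool_alloc()/gen_pool_free() now return and accept the full 64-bit value instead of a truncated unsigned long.

/*
 * Usage sketch only -- not part of this change. Error handling is
 * trimmed and the addresses/sizes are arbitrary.
 */
#include <linux/genalloc.h>

static u64 __maybe_unused genalloc_above_4g_example(void)
{
	struct gen_pool *pool;
	u64 addr;

	pool = gen_pool_create(12, -1);		/* min_alloc_order = 12 -> 4KB granules, any node */
	if (!pool)
		return 0;

	/* Hand the pool a 16MB chunk that starts at physical 4GB. */
	if (gen_pool_add(pool, 0x100000000ULL, 16 * 1024 * 1024, -1)) {
		gen_pool_destroy(pool);
		return 0;
	}

	addr = gen_pool_alloc(pool, 1024 * 1024);	/* full 64-bit value, e.g. 0x100000000 */
	if (addr)
		gen_pool_free(pool, addr, 1024 * 1024);

	gen_pool_destroy(pool);
	return addr;
}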