diff --git a/include/nuttx/mm/mempool.h b/include/nuttx/mm/mempool.h
index 8fdac0358f..761d388c24 100644
--- a/include/nuttx/mm/mempool.h
+++ b/include/nuttx/mm/mempool.h
@@ -38,7 +38,7 @@ struct mempool_s;
 
 typedef CODE void *(*mempool_alloc_t)(FAR struct mempool_s *pool,
-                                      size_t size);
+                                      size_t alignment, size_t size);
 typedef CODE void (*mempool_free_t)(FAR struct mempool_s *pool,
                                     FAR void *addr);
 
@@ -322,6 +322,36 @@ void mempool_multiple_free(FAR struct mempool_multiple_s *mpool,
 
 size_t mempool_multiple_alloc_size(FAR void *blk);
 
+/****************************************************************************
+ * Name: mempool_multiple_memalign
+ *
+ * Description:
+ *   This function requests more than enough space from the pool, then
+ *   finds a region within that chunk that meets the alignment request.
+ *
+ *   The alignment argument must be a power of two.
+ *
+ *   Memalign is special for the multiple mempool because the multiple
+ *   mempool does not support splitting or shrinking a chunk.  When a block
+ *   is allocated and an aligned address is found inside it, the two
+ *   pointer-sized words before that address are used to store the padding
+ *   size (the distance back to the start of the underlying block) and the
+ *   owning pool, so that realloc and free keep working correctly.  Bit 1
+ *   of the stored pool pointer marks the block as allocated by memalign.
+ *
+ * Input Parameters:
+ *   mpool     - The handle of the multiple memory pool to be used.
+ *   alignment - The alignment request of the memory block.
+ *   size      - The size of the requested block.
+ *
+ * Returned Value:
+ *   The address of the aligned memory block on success; NULL on failure.
+ *
+ ****************************************************************************/
+
+FAR void *mempool_multiple_memalign(FAR struct mempool_multiple_s *mpool,
+                                    size_t alignment, size_t size);
+
 /****************************************************************************
  * Name: mempool_multiple_fixed_alloc
  *
diff --git a/mm/mempool/mempool.c b/mm/mempool/mempool.c
index 0870f29f94..d9892ba200 100644
--- a/mm/mempool/mempool.c
+++ b/mm/mempool/mempool.c
@@ -45,15 +45,19 @@ static inline void mempool_add_list(FAR struct list_node *list,
 }
 
 static inline FAR void *mempool_malloc(FAR struct mempool_s *pool,
-                                       size_t size)
+                                       size_t alignment, size_t size)
 {
   if (pool->alloc != NULL)
     {
-      return pool->alloc(pool, size);
+      return pool->alloc(pool, alignment, size);
+    }
+  else if (alignment == 0)
+    {
+      return kmm_malloc(size);
     }
   else
     {
-      return kmm_malloc(size);
+      return kmm_memalign(alignment, size);
     }
 }
 
@@ -92,7 +96,6 @@ static inline void mempool_mfree(FAR struct mempool_s *pool, FAR void *addr)
 
 int mempool_init(FAR struct mempool_s *pool, FAR const char *name)
 {
-  FAR struct list_node *base;
   size_t ninterrupt;
   size_t ninitial;
   size_t count;
@@ -109,20 +112,27 @@ int mempool_init(FAR struct mempool_s *pool, FAR const char *name)
   count = ninitial + ninterrupt;
   if (count != 0)
     {
-      base = mempool_malloc(pool, sizeof(*base) +
-                            pool->blocksize * count);
+      size_t alignment = 0;
+      FAR char *base;
+
+      if ((pool->blocksize & (pool->blocksize - 1)) == 0)
+        {
+          alignment = pool->blocksize;
+        }
+
+      base = mempool_malloc(pool, alignment, pool->blocksize * count +
+                            sizeof(struct list_node));
       if (base == NULL)
         {
          return -ENOMEM;
        }
 
-      list_add_head(&pool->elist, base);
-      mempool_add_list(&pool->ilist, base + 1,
-                       ninterrupt, pool->blocksize);
-      mempool_add_list(&pool->list, (FAR char *)(base + 1) +
-                       ninterrupt * pool->blocksize,
+      mempool_add_list(&pool->ilist, base, ninterrupt, pool->blocksize);
+      mempool_add_list(&pool->list, base + ninterrupt * pool->blocksize,
                        ninitial, pool->blocksize);
-      kasan_poison(base + 1, pool->blocksize * count);
+      list_add_head(&pool->elist, (FAR struct list_node *)
+                    (base + count * pool->blocksize));
+      kasan_poison(base, pool->blocksize * count);
     }
 
   if (pool->wait && pool->expandsize == 0)
@@ -180,18 +190,25 @@ retry:
   if (pool->expandsize != 0)
     {
       size_t nexpand = pool->expandsize / pool->blocksize;
-      blk = mempool_malloc(pool, sizeof(*blk) + pool->blocksize *
-                           nexpand);
+      size_t alignment = 0;
+
+      if ((pool->blocksize & (pool->blocksize - 1)) == 0)
+        {
+          alignment = pool->blocksize;
+        }
+
+      blk = mempool_malloc(pool, alignment,
+                           pool->blocksize * nexpand + sizeof(*blk));
       if (blk == NULL)
         {
           return NULL;
         }
 
-      kasan_poison(blk + 1, pool->blocksize * nexpand);
+      kasan_poison(blk, pool->blocksize * nexpand);
       flags = spin_lock_irqsave(&pool->lock);
-      list_add_head(&pool->elist, blk);
-      mempool_add_list(&pool->list, blk + 1, nexpand,
-                       pool->blocksize);
+      mempool_add_list(&pool->list, blk, nexpand, pool->blocksize);
+      list_add_head(&pool->elist, (FAR struct list_node *)
+                    ((FAR char *)blk + nexpand * pool->blocksize));
       blk = list_remove_head(&pool->list);
     }
   else if (!pool->wait ||
@@ -324,6 +341,9 @@ int mempool_info(FAR struct mempool_s *pool, FAR struct mempoolinfo_s *info)
 int mempool_deinit(FAR struct mempool_s *pool)
 {
   FAR struct list_node *blk;
+  size_t ninterrupt;
+  size_t ninitial;
+  size_t count;
 
   DEBUGASSERT(pool != NULL);
 
@@ -336,10 +356,21 @@ int mempool_deinit(FAR struct mempool_s *pool)
   mempool_procfs_unregister(&pool->procfs);
 #endif
 
+  ninitial = pool->initialsize / pool->blocksize;
+  ninterrupt = pool->interruptsize / pool->blocksize;
+  count = ninitial + ninterrupt;
+  if (count == 0)
+    {
+      count = pool->expandsize / pool->blocksize;
+    }
+
   while ((blk = list_remove_head(&pool->elist)) != NULL)
     {
+      blk = (FAR struct list_node *)((FAR char *)blk -
+            count * pool->blocksize);
       kasan_unpoison(blk, mm_malloc_size(blk));
       mempool_mfree(pool, blk);
+      count = pool->expandsize / pool->blocksize;
     }
 
   if (pool->wait && pool->expandsize == 0)
diff --git a/mm/mempool/mempool_multiple.c b/mm/mempool/mempool_multiple.c
index da19efbbb3..35bdc9724d 100644
--- a/mm/mempool/mempool_multiple.c
+++ b/mm/mempool/mempool_multiple.c
@@ -32,6 +32,7 @@
 #define SIZEOF_HEAD sizeof(FAR struct mempool_s *)
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
 #define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define ALIGN_BIT (1 << 1)
 
 /****************************************************************************
 * Private Functions
@@ -214,7 +215,19 @@ FAR void *mempool_multiple_realloc(FAR struct mempool_multiple_s *mpool,
 
       oldpool = *(FAR struct mempool_s **)
                 ((FAR char *)oldblk - SIZEOF_HEAD);
-      memcpy(blk, oldblk, MIN(oldpool->blocksize - SIZEOF_HEAD, size));
+      if ((uintptr_t)oldpool & ALIGN_BIT)
+        {
+          oldpool = (FAR struct mempool_s *)
+                    ((uintptr_t)oldpool & ~ALIGN_BIT);
+          size = MIN(size, oldpool->blocksize -
+                 *(FAR size_t *)((FAR char *)oldblk - 2 * SIZEOF_HEAD));
+        }
+      else
+        {
+          size = MIN(size, oldpool->blocksize - SIZEOF_HEAD);
+        }
+
+      memcpy(blk, oldblk, size);
       mempool_multiple_free(mpool, oldblk);
     }
 
@@ -237,12 +250,18 @@ void mempool_multiple_free(FAR struct mempool_multiple_s *mpool,
                            FAR void *blk)
 {
   FAR struct mempool_s *pool;
-  FAR void *mem;
+  FAR char *mem;
 
   DEBUGASSERT(mpool != NULL && blk != NULL);
 
   mem = (FAR char *)blk - SIZEOF_HEAD;
   pool = *(FAR struct mempool_s **)mem;
+  if ((uintptr_t)pool & ALIGN_BIT)
+    {
+      pool = (FAR struct mempool_s *)((uintptr_t)pool & ~ALIGN_BIT);
+      mem = (FAR char *)blk - *(FAR size_t *)(mem - SIZEOF_HEAD);
+    }
+
   mempool_free(pool, mem);
 }
 
@@ -263,13 +282,83 @@ void mempool_multiple_free(FAR struct mempool_multiple_s *mpool,
 size_t mempool_multiple_alloc_size(FAR void *blk)
 {
   FAR struct mempool_s *pool;
-  FAR void *mem;
+  FAR char *mem;
 
   DEBUGASSERT(blk != NULL);
 
   mem = (FAR char *)blk - SIZEOF_HEAD;
   pool = *(FAR struct mempool_s **)mem;
-  return pool->blocksize;
+  if ((uintptr_t)pool & ALIGN_BIT)
+    {
+      pool = (FAR struct mempool_s *)((uintptr_t)pool & ~ALIGN_BIT);
+      return pool->blocksize - *(FAR size_t *)(mem - SIZEOF_HEAD);
+    }
+  else
+    {
+      return pool->blocksize - SIZEOF_HEAD;
+    }
+}
+
+/****************************************************************************
+ * Name: mempool_multiple_memalign
+ *
+ * Description:
+ *   This function requests more than enough space from the pool, then
+ *   finds a region within that chunk that meets the alignment request.
+ *
+ *   The alignment argument must be a power of two.
+ *
+ *   Memalign is special for the multiple mempool because the multiple
+ *   mempool does not support splitting or shrinking a chunk.  When a block
+ *   is allocated and an aligned address is found inside it, the two
+ *   pointer-sized words before that address are used to store the padding
+ *   size (the distance back to the start of the underlying block) and the
+ *   owning pool, so that realloc and free keep working correctly.  Bit 1
+ *   of the stored pool pointer marks the block as allocated by memalign.
+ *
+ * Input Parameters:
+ *   mpool     - The handle of the multiple memory pool to be used.
+ *   alignment - The alignment request of the memory block.
+ *   size      - The size of the requested block.
+ *
+ * Returned Value:
+ *   The address of the aligned memory block on success; NULL on failure.
+ *
+ ****************************************************************************/
+
+FAR void *mempool_multiple_memalign(FAR struct mempool_multiple_s *mpool,
+                                    size_t alignment, size_t size)
+{
+  FAR struct mempool_s *end = mpool->pools + mpool->npools;
+  FAR struct mempool_s *pool;
+
+  DEBUGASSERT((alignment & (alignment - 1)) == 0);
+
+  pool = mempool_multiple_find(mpool, size + alignment + 2 * SIZEOF_HEAD);
+  if (pool == NULL)
+    {
+      return NULL;
+    }
+
+  do
+    {
+      FAR char *blk = mempool_alloc(pool);
+      if (blk != NULL)
+        {
+          FAR char *mem;
+
+          mem = blk + 2 * SIZEOF_HEAD;
+          mem = (FAR char *)(((uintptr_t)mem + alignment - 1) &
+                             ~(alignment - 1));
+          *(FAR uintptr_t *)(mem - SIZEOF_HEAD) =
+                            (uintptr_t)pool | ALIGN_BIT;
+          *(FAR size_t *)(mem - 2 * SIZEOF_HEAD) = mem - blk;
+          return mem;
+        }
+    }
+  while (++pool < end);
+
+  return NULL;
 }
 
 /****************************************************************************
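
Reviewer note, not part of the patch: the mempool.c hunks change where the list_node lives inside each allocated chunk, and the new arithmetic in mempool_deinit only makes sense against that layout. A rough sketch, using the blocksize and count values computed in mempool_init:

/* Chunk layout after this change (sketch only, not part of the patch):
 *
 *   base                                        base + count * blocksize
 *   |                                           |
 *   +------------+------------+--- ... ---------+------------------+
 *   | block 0    | block 1    |                 | struct list_node |
 *   | blocksize  | blocksize  |                 | (elist entry)    |
 *   +------------+------------+--- ... ---------+------------------+
 *
 * The list_node used to sit at the head of the allocation; it is now
 * placed after the blocks, so mempool_deinit steps back by
 * count * pool->blocksize from each elist node to recover the address
 * originally returned by mempool_malloc before passing it to
 * mempool_mfree.
 */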
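
Also for review context, a minimal usage sketch of the new interface. The helper name is hypothetical and the code assumes mpool is an already-initialized multiple-pool handle; it only calls functions whose signatures appear in this patch:

#include <nuttx/config.h>
#include <nuttx/mm/mempool.h>

#include <assert.h>
#include <stdint.h>

static void check_memalign_block(FAR struct mempool_multiple_s *mpool)
{
  FAR void *mem = mempool_multiple_memalign(mpool, 64, 100);

  if (mem != NULL)
    {
      /* The returned address honors the requested alignment, and the
       * usable size reported for it excludes the hidden header words.
       */

      assert(((uintptr_t)mem & 63) == 0);
      assert(mempool_multiple_alloc_size(mem) >= 100);

      /* Free goes through the same entry point as ordinary blocks; the
       * ALIGN_BIT tag stored just before mem lets it recover the owning
       * pool and the real start of the underlying block.
       */

      mempool_multiple_free(mpool, mem);
    }
}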
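
Finally, a tiny standalone illustration of the two power-of-two identities the patch leans on (the blocksize check in mempool_init/mempool_alloc and the round-up in mempool_multiple_memalign). IS_POW2 and ALIGN_UP are local helpers for this sketch only, not macros added by the patch:

#include <assert.h>
#include <stdint.h>

#define IS_POW2(x)     (((x) & ((x) - 1)) == 0)
#define ALIGN_UP(p, a) (((p) + (a) - 1) & ~((a) - 1))

static void align_math_demo(void)
{
  /* A power of two has exactly one bit set, so x & (x - 1) clears it. */

  assert(IS_POW2(64) && !IS_POW2(24));

  /* Rounding 0x1009 up to a 16-byte boundary gives 0x1010, and an already
   * aligned value is left unchanged -- the same arithmetic memalign uses
   * to place the returned address inside the oversized block.
   */

  assert(ALIGN_UP((uintptr_t)0x1009, 16) == 0x1010);
  assert(ALIGN_UP((uintptr_t)0x1010, 16) == 0x1010);
}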