Waldemar Kozaczuk
2018-11-20 22:44:39 UTC
This patch builds on the previous commit #a58f446482edc5b4e2991c7ee9b20e42431d10c1
and further refines malloc pool allocations to correctly handle more
scenarios involving various size and alignment combinations.
It also greatly enhances the tst-small-malloc unit test to cover more
allocation scenarios.
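For context, the path selection the mempool.cc hunk below implements can be
summarized by the following standalone sketch. This is only an illustration,
not OSv code: min_object_size, max_object_size, page_size and ilog2_roundup
below are simplified stand-ins for the OSv definitions (actual values may
differ), and the real std_malloc() additionally checks smp_allocator,
translates memory areas and emits tracepoints.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Illustrative stand-ins for the OSv constants (actual values may differ).
static const size_t min_object_size = 8;
static const size_t max_object_size = 1024;
static const size_t page_size = 4096;

// Smallest n such that (1 << n) >= x, mirroring how ilog2_roundup() is used.
static unsigned ilog2_roundup(size_t x) {
    unsigned n = 0;
    while ((size_t(1) << n) < x)
        n++;
    return n;
}

// Sketch of the refined path selection: pick a pool bucket large enough for
// both the requested size and the requested alignment, fall back to a full
// page, and otherwise take the large-allocation path.
static void classify(size_t size, size_t alignment) {
    size_t minimum_size = std::max(size, min_object_size);
    if (minimum_size <= max_object_size && alignment <= minimum_size) {
        // Bucket chosen by size; the condition assumes a pool object of
        // size 2^n is aligned to at least 2^n.
        printf("size=%zu align=%zu -> pool bucket %u (sized by size)\n",
               size, alignment, ilog2_roundup(minimum_size));
    } else if (alignment <= max_object_size && minimum_size <= alignment) {
        // New case: a small object with a larger, but still pool-sized,
        // alignment - pick the bucket by alignment instead.
        printf("size=%zu align=%zu -> pool bucket %u (sized by alignment)\n",
               size, alignment, ilog2_roundup(alignment));
    } else if (minimum_size <= page_size && alignment <= page_size) {
        printf("size=%zu align=%zu -> full page\n", size, alignment);
    } else {
        printf("size=%zu align=%zu -> large allocation\n", size, alignment);
    }
}

int main() {
    classify(7, 1);       // small malloc() -> pool, sized by size
    classify(5, 16);      // aligned_alloc(16, 5) -> pool, sized by alignment
    classify(1025, 1);    // just over max_object_size -> full page
    classify(1027, 2048); // alignment over max_object_size -> full page
    return 0;
}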
Signed-off-by: Waldemar Kozaczuk <***@gmail.com>
---
core/mempool.cc | 13 +++++++---
tests/tst-small-malloc.cc | 52 ++++++++++++++++++++++++++++++---------
2 files changed, 49 insertions(+), 16 deletions(-)
diff --git a/core/mempool.cc b/core/mempool.cc
index deb7a0bb..23dc67da 100644
--- a/core/mempool.cc
+++ b/core/mempool.cc
@@ -1547,14 +1547,19 @@ static inline void* std_malloc(size_t size, size_t alignment)
return libc_error_ptr<void *>(ENOMEM);
void *ret;
size_t minimum_size = std::max(size, memory::pool::min_object_size);
- if (size <= memory::pool::max_object_size && alignment <= minimum_size && smp_allocator) {
- size = minimum_size;
- unsigned n = ilog2_roundup(size);
+ if (minimum_size <= memory::pool::max_object_size && alignment <= minimum_size && smp_allocator) {
+ unsigned n = ilog2_roundup(minimum_size);
ret = memory::malloc_pools[n].alloc();
ret = translate_mem_area(mmu::mem_area::main, mmu::mem_area::mempool,
ret);
trace_memory_malloc_mempool(ret, size, 1 << n, alignment);
- } else if (size <= mmu::page_size && alignment <= mmu::page_size) {
+ } else if (alignment <= memory::pool::max_object_size && minimum_size <= alignment && smp_allocator) {
+ unsigned n = ilog2_roundup(alignment);
+ ret = memory::malloc_pools[n].alloc();
+ ret = translate_mem_area(mmu::mem_area::main, mmu::mem_area::mempool,
+ ret);
+ trace_memory_malloc_mempool(ret, size, 1 << n, alignment);
+ } else if (minimum_size <= mmu::page_size && alignment <= mmu::page_size) {
ret = mmu::translate_mem_area(mmu::mem_area::main, mmu::mem_area::page,
memory::alloc_page());
trace_memory_malloc_page(ret, size, mmu::page_size, alignment);
diff --git a/tests/tst-small-malloc.cc b/tests/tst-small-malloc.cc
index b0c97c03..7c826fa7 100644
--- a/tests/tst-small-malloc.cc
+++ b/tests/tst-small-malloc.cc
@@ -9,6 +9,18 @@
#include <cassert>
#include <osv/trace-count.hh>
+void test_malloc(size_t size) {
+ void *addr = malloc(size);
+ assert(addr);
+ free(addr);
+}
+
+void test_aligned_alloc(size_t alignment, size_t size) {
+ void *addr = aligned_alloc(alignment, size);
+ assert(addr);
+ free(addr);
+}
+
int main() {
tracepoint_counter *memory_malloc_mempool_counter = nullptr,
*memory_malloc_page_counter = nullptr;
@@ -27,19 +39,35 @@ int main() {
auto memory_malloc_mempool_counter_now = memory_malloc_mempool_counter->read();
auto memory_malloc_page_counter_now = memory_malloc_page_counter->read();
- const int allocation_count = 1024;
+ const int allocation_count = 256;
for( int i = 0; i < allocation_count; i++) {
- void *addr = malloc(7);
- assert(addr);
- free(addr);
+ // Expects malloc_pool allocations
+ test_malloc(3);
+ test_malloc(4);
+ test_malloc(6);
+ test_malloc(7);
+ test_malloc(8);
+ test_malloc(9);
+ test_malloc(15);
+ test_malloc(16);
+ test_malloc(17);
+ test_malloc(32);
+ test_malloc(1024);
- addr = malloc(17);
- assert(addr);
- free(addr);
+ // Expects malloc_pool allocations
+ test_aligned_alloc(16, 5);
+ test_aligned_alloc(16, 19);
+ test_aligned_alloc(32, 17);
+ test_aligned_alloc(1024, 255);
+
+ // Expects full page allocations
+ test_malloc(1025);
+ test_aligned_alloc(2048, 1027);
}
- // Verify all allocations above were handled by malloc_pool
- assert(memory_malloc_mempool_counter->read() - memory_malloc_mempool_counter_now >= 2 * allocation_count);
- // Verify that NO allocations were handled by alloc_page
- assert(memory_malloc_page_counter->read() - memory_malloc_page_counter_now == 0);
-}
\ No newline at end of file
+ // Verify correct number of allocations above were handled by malloc_pool
+ assert(memory_malloc_mempool_counter->read() - memory_malloc_mempool_counter_now >= 15 * allocation_count);
+
+ // Verify correct number of allocations were handled by alloc_page
+ assert(memory_malloc_page_counter->read() - memory_malloc_page_counter_now == 2 * allocation_count);
+}
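For reference, each loop iteration above performs 11 malloc() calls and 4
aligned_alloc() calls that are expected to be served from the pools (hence the
>= 15 * allocation_count check), plus 2 allocations that should take the
full-page path (hence the == 2 * allocation_count check). Independently of the
tracepoint counters, the alignment of the returned pointers can also be
sanity-checked directly; a minimal generic sketch, not part of this patch and
not using any OSv-specific API:

#include <cassert>
#include <cstdint>
#include <cstdlib>

// Allocate with aligned_alloc(), verify the returned pointer honors the
// requested alignment, then release it. Strict C11 asks for size to be a
// multiple of alignment, but glibc-compatible allocators (OSv included, as
// the test above relies on) accept the combinations used here.
static void check_aligned(size_t alignment, size_t size) {
    void *p = aligned_alloc(alignment, size);
    assert(p);
    assert(reinterpret_cast<uintptr_t>(p) % alignment == 0);
    free(p);
}

int main() {
    // The same boundary combinations the test exercises.
    check_aligned(16, 5);
    check_aligned(32, 17);
    check_aligned(1024, 255);
    check_aligned(2048, 1027);
    return 0;
}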
--
2.19.1
--
You received this message because you are subscribed to the Google Groups "OSv Development" group.
To unsubscribe from this group and stop receiving emails from it, send an email to osv-dev+***@googlegroups.com.
For more options, visit https://groups.google.com/d/optout.