šŸŒŠ Sailfish pool (sfpool)
Secure and Fast memory pool, in 300 lines of portable, header-only C code.
Loading...
Searching...
No Matches
sfpool.h
1/* SPDX-FileCopyrightText: 2025 Dyne.org foundation
2 * SPDX-License-Identifier: GPL-3.0-or-later
3 *
4 * Copyright (C) 2025 Dyne.org foundation
5 * designed, written and maintained by Denis Roio <jaromil@dyne.org>
6 *
7 * This program is free software: you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation, either version 3 of the
10 * License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public
18 * License along with this program. If not, see
19 * <https://www.gnu.org/licenses/>.
20 *
21 */
22
23#ifndef __SFPOOL_H__
24#define __SFPOOL_H__
25#include <stdlib.h>
26#include <string.h>
27#include <stdio.h>
28#include <stdint.h>
29#include <stdbool.h>
30#include <assert.h>
31#ifdef __EMSCRIPTEN__
32#include <emscripten.h>
33#elif defined(_WIN32)
34#include <windows.h>
35#else // lucky shot on POSIX
36// defined(__unix__) || defined(__linux__) || defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
37#include <unistd.h> // for geteuid to switch protected memory map
38#include <sys/resource.h>
39#include <sys/mman.h>
40#endif
41
42// Configuration
43#define SECURE_ZERO // Enable secure zeroing
44#define FALLBACK // Enable fallback to system alloc
45#define PROFILING // Profile most used sizes allocated
46
47#if defined(__x86_64__) || defined(_M_X64) || defined(__ppc64__) || defined(__LP64__)
48#define ptr_t uint64_t
49#define ptr_align 8
50#define struct_align 16
51#else
52#define ptr_t uint32_t
53#define ptr_align 4
54#define struct_align 8
55#endif
56
57// Memory pool structure
// Memory pool structure.
// One sfpool_t manages a single arena of fixed-size blocks. The free
// list is embedded in the unused blocks themselves: the first bytes of
// every free block hold the pointer to the next free block.
typedef struct __attribute__((aligned(struct_align))) sfpool_t {
 uint8_t *buffer; // raw allocation as returned by sfutil_secalloc()
 uint8_t *data; // buffer rounded up to ptr_align; start of the block area
 uint8_t *free_list; // head of the embedded free-block list (NULL when exhausted)
 uint32_t free_count; // number of blocks currently on the free list
 uint32_t total_blocks; // arena capacity in blocks
 uint32_t total_bytes; // arena capacity in bytes (total_blocks * block_size)
 uint32_t block_size; // fixed block size, a power of two
#ifdef PROFILING
 uint32_t *hits; // NOTE(review): never initialized or read in this file — confirm intent
 uint32_t hits_total; // count of allocations served from the pool
 size_t hits_bytes; // bytes requested by pool-served allocations
 uint32_t miss_total; // count of allocations that fell back to the system heap
 size_t miss_bytes; // bytes requested by fallback allocations
 size_t alloc_total; // total bytes requested through this pool overall
#endif
} sfpool_t;
75
76
77#if !defined(__MUSL__)
78static_assert(sizeof(ptr_t) == sizeof(void*), "Unknown memory pointer size detected");
79#endif
80static inline bool _is_in_pool(sfpool_t *pool, const void *ptr) {
81 volatile ptr_t p = (ptr_t)ptr;
82 return(p >= (ptr_t)pool->data
83 && p < (ptr_t)(pool->data + pool->total_bytes));
84}
85
// Zeroes out a block of memory.
// Uses volatile stores so the compiler cannot elide the wipe as a
// dead store (the point of SECURE_ZERO is that the data is really
// gone). Clears in 32-bit words, then finishes any trailing bytes so
// sizes that are not multiples of 4 are fully wiped.
// @param ptr  start of the region (expected 4-byte aligned for speed)
// @param size number of bytes to clear
void sfutil_zero(void *ptr, uint32_t size) {
 volatile uint32_t *w = (volatile uint32_t*)ptr;
 uint32_t words = size >> 2; // bulk: one word per iteration
 while (words--) *w++ = 0x0;
 volatile uint8_t *b = (volatile uint8_t*)w;
 uint32_t tail = size & 3; // remainder bytes the word loop missed
 while (tail--) *b++ = 0x0;
}
104
// Aligns a pointer to the nearest (upward) native pointer-size boundary.
// Uses the standard uintptr_t round-trip type instead of the hand-rolled
// ptr_t macro; the alignment amount (sizeof(void*)) equals ptr_align on
// both supported pointer widths.
// @param ptr pointer to align
// @return ptr rounded up to the next multiple of sizeof(void*)
//         (unchanged if already aligned)
void *sfutil_memalign(const void* ptr) {
 const uintptr_t mask = (uintptr_t)sizeof(void *) - 1;
 const uintptr_t aligned = ((uintptr_t)ptr + mask) & ~mask;
 return (void*)aligned;
}
118
127void *sfutil_secalloc(size_t size) {
128 // add bytes to every allocation to support alignment
129 void *res = NULL;
130#if defined(__EMSCRIPTEN__)
131 res = (uint8_t *)malloc(size+ptr_align);
132#elif defined(_WIN32)
133 res = VirtualAlloc(NULL, size+ptr_align,
134 MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
135#elif defined(__APPLE__)
136 int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
137 res = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);
138 mlock(res, size);
139#else // assume POSIX
140 int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
141 struct rlimit rl;
142 if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
143 if(size<=rl.rlim_cur) flags |= MAP_LOCKED;
144 res = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);
145#endif
146 return res;
147}
148
// Frees memory allocated with sfutil_secalloc().
// @param ptr  base pointer as returned by sfutil_secalloc() (not the
//             aligned pointer from sfutil_memalign())
// @param size the same size that was passed to sfutil_secalloc()
void sfutil_secfree(void *ptr, size_t size) {
#if defined(__EMSCRIPTEN__)
 free(ptr);
#elif defined(_WIN32)
 // size unused: MEM_RELEASE frees the whole reservation from its base
 VirtualFree(ptr, 0, MEM_RELEASE);
#else // Posix
 // NOTE(review): unmaps size+ptr_align bytes, but the POSIX mmap in
 // sfutil_secalloc maps only 'size'; munmap length is page-rounded so
 // this usually works, yet the two sizes should agree — confirm.
 munmap(ptr, size +ptr_align);
#endif
}
166
// End of sfutil group
168
169
186size_t sfpool_init(sfpool_t *pool, size_t nmemb, size_t blocksize) {
187 if((blocksize & (blocksize - 1)) != 0) return 0;
188 // SFPool blocksize must be a power of two
189 size_t totalsize = nmemb * blocksize;
190 pool->buffer = sfutil_secalloc(totalsize);
191 if (pool->buffer == NULL) return 0;
192 // Failed to allocate pool memory
193 pool->data = sfutil_memalign(pool->buffer);
194 if (pool->data == NULL) return 0;
195 // Failed to allocate pool memory
196 pool->total_bytes = totalsize;
197 pool->total_blocks = nmemb;
198 pool->block_size = blocksize;
199 // Initialize the embedded free list
200 pool->free_list = pool->data;
201 register uint32_t i, bi;
202 for (i = 0; i < pool->total_blocks - 1; ++i) {
203 bi = i*blocksize;
204 *(uint8_t **)(pool->data + bi) =
205 pool->data + bi + blocksize;
206 }
207 pool->free_count = pool->total_blocks;
208 *(uint8_t **)
209 (pool->data + (pool->total_blocks - 1) * blocksize) = NULL;
210#ifdef PROFILING
211 pool->miss_total = pool->miss_bytes = 0;
212 pool->hits_total = pool->hits_bytes = 0;
213 pool->alloc_total = 0;
214#endif
215 return totalsize;
216}
217
218
226void sfpool_teardown(sfpool_t *restrict pool) {
227 // Free pool memory
228 sfutil_secfree(pool->buffer, pool->total_bytes);
229#ifdef PROFILING
230 pool->miss_total = pool->miss_bytes = 0;
231 pool->hits_total = pool->hits_bytes = 0;
232 pool->alloc_total = 0;
233#endif
234}
235
246void *sfpool_malloc(void *restrict opaque, const size_t size) {
247 sfpool_t *pool = (sfpool_t*)opaque;
248 void *ptr;
249 if (size <= pool->block_size
250 && pool->free_list != NULL) {
251#ifdef PROFILING
252 pool->hits_total++;
253 pool->hits_bytes+=size;
254 pool->alloc_total+=size;
255#endif
256 // Remove the first block from the free list
257 uint8_t *block = pool->free_list;
258 pool->free_list = *(uint8_t **)block;
259 pool->free_count-- ;
260 return block;
261 }
262 // Fallback to system malloc for large allocations
263 ptr = malloc(size);
264 if(ptr == NULL) perror("system malloc error");
265#ifdef PROFILING
266 pool->miss_total++;
267 pool->miss_bytes+=size;
268 pool->alloc_total+=size;
269#endif
270 return ptr;
271}
272
273
283void sfpool_free(void *restrict opaque, void *ptr) {
284 sfpool_t *pool = (sfpool_t*)opaque;
285 if (ptr == NULL) return; // Freeing NULL is a no-op
286 if (_is_in_pool(pool,ptr)) {
287 // Add the block back to the free list
288 *(uint8_t **)ptr = pool->free_list;
289 pool->free_list = (uint8_t *)ptr;
290 pool->free_count++ ;
291#ifdef SECURE_ZERO
292 // Zero out the block for security
293 sfutil_zero(ptr, pool->block_size);
294#endif
295 return;
296 } else {
297#ifdef FALLBACK
298 free(ptr);
299#endif
300 }
301}
302
303
315void *sfpool_realloc(void *restrict opaque, void *ptr, const size_t size) {
316 sfpool_t *pool = (sfpool_t*)opaque;
317 if (ptr == NULL) {
318 return sfpool_malloc(pool, size);
319 }
320 if (size == 0) {
321 sfpool_free(pool, ptr);
322 return NULL;
323 }
324 if (_is_in_pool((sfpool_t*)pool,ptr)) {
325 if (size <= pool->block_size) {
326#ifdef PROFILING
327 pool->hits_total++;
328 pool->hits_bytes+=size;
329 pool->alloc_total+=size;
330#endif
331 return ptr; // No need to reallocate
332 } else {
333 void *new_ptr = malloc(size);
334 memcpy(new_ptr, ptr, pool->block_size); // Copy only BLOCK_SIZE bytes
335#ifdef SECURE_ZERO
336 sfutil_zero(ptr, pool->block_size); // Zero out the old block
337#endif
338 // Add the block back to the free list
339 *(uint8_t **)ptr = pool->free_list;
340 pool->free_list = (uint8_t *)ptr;
341 pool->free_count++ ;
342#ifdef SECURE_ZERO
343 // Zero out the block for security
344 sfutil_zero(ptr, pool->block_size);
345#endif
346#ifdef PROFILING
347 pool->miss_total++;
348 pool->miss_bytes+=size;
349 pool->alloc_total+=size;
350#endif
351 return new_ptr;
352 }
353 } else {
354#ifdef FALLBACK
355 // Handle large allocations
356 return realloc(ptr, size);
357#ifdef PROFILING
358 pool->miss_total++;
359 pool->miss_bytes+=size;
360 pool->alloc_total+=size;
361#endif
362#else
363 return NULL;
364#endif
365 }
366}
367
377int sfpool_contains(void *restrict opaque, const void *ptr) {
378 sfpool_t *pool = (sfpool_t*)opaque;
379 int res = 0;
380 if( _is_in_pool(pool,ptr) ) res = 1;
381 return res;
382}
383
384
393void sfpool_status(sfpool_t *restrict p) {
394 fprintf(stderr,"\nšŸŒŠ sfpool: %u blocks %u B each\n",
395 p->total_blocks, p->block_size);
396#ifdef PROFILING
397 fprintf(stderr,"šŸŒŠ Total: %lu K\n",
398 p->alloc_total/1024);
399 fprintf(stderr,"šŸŒŠ Misses: %lu K (%u calls)\n",p->miss_bytes/1024,p->miss_total);
400 fprintf(stderr,"šŸŒŠ Hits: %lu K (%u calls)\n",p->hits_bytes/1024,p->hits_total);
401#endif
402}
403
// End of sfpool group
405
406#endif
size_t sfpool_init(sfpool_t *pool, size_t nmemb, size_t blocksize)
Initializes a memory pool.
Definition sfpool.h:186
int sfpool_contains(void *restrict opaque, const void *ptr)
Checks if a pointer is within the memory pool.
Definition sfpool.h:377
void sfpool_status(sfpool_t *restrict p)
Prints the status of the memory pool.
Definition sfpool.h:393
void sfpool_teardown(sfpool_t *restrict pool)
Tears down a memory pool.
Definition sfpool.h:226
void * sfpool_realloc(void *restrict opaque, void *ptr, const size_t size)
Reallocates memory from the pool.
Definition sfpool.h:315
void sfpool_free(void *restrict opaque, void *ptr)
Frees memory allocated from the pool.
Definition sfpool.h:283
void * sfpool_malloc(void *restrict opaque, const size_t size)
Allocates memory from the pool.
Definition sfpool.h:246
void sfutil_zero(void *ptr, uint32_t size)
Zeroes out a block of memory.
Definition sfpool.h:99
void sfutil_secfree(void *ptr, size_t size)
Frees memory allocated securely.
Definition sfpool.h:157
void * sfutil_secalloc(size_t size)
Allocates memory securely.
Definition sfpool.h:127
void * sfutil_memalign(const void *ptr)
Aligns a pointer to the nearest boundary.
Definition sfpool.h:113