#include "../../global.h"
#include "../include/assert.h"
#include <limits.h>
#include <stddef.h>
#include "../include/stdio.h"
#include "../include/stdlib.h"
#include "../include/string.h"
#include "../../console.h"
#include "../../thread.h"

#include "tlsf.h"
#include "tlsfbits.h"

/*
** Constants.
*/

/* Public constants: may be modified. */
enum tlsf_public
{
    /* log2 of number of linear subdivisions of block sizes. */
    SL_INDEX_COUNT_LOG2 = 5,
};

/* Private constants: do not modify. */
enum tlsf_private
{
    /* All allocation sizes and addresses are aligned to 4 bytes. */
    ALIGN_SIZE_LOG2 = 2,
    ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),

    /*
    ** We support allocations of sizes up to (1 << FL_INDEX_MAX) bytes.
    ** However, because we linearly subdivide the second-level lists, and
    ** our minimum size granularity is 4 bytes, it doesn't make sense to
    ** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
    ** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as we would otherwise be
    ** trying to split size ranges into more slots than we have available.
    ** Instead, we calculate the minimum threshold size, and place all
    ** blocks below that size into the 0th first-level list.
    */
    FL_INDEX_MAX = 30,
    SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
    FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
    FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),

    SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
};
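
/*
** Worked example of the constants above (values derived directly from the
** definitions): SL_INDEX_COUNT = 32 second-level slots per first-level
** list, FL_INDEX_SHIFT = 7, so SMALL_BLOCK_SIZE = 128 bytes and
** FL_INDEX_COUNT = 24. Blocks smaller than 128 bytes all share
** first-level list 0, subdivided in 128/32 = 4-byte steps.
*/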

/*
** Cast and min/max macros.
*/

#define tlsf_cast(t, exp) ((t) (exp))
#define tlsf_min(a, b) ((a) < (b) ? (a) : (b))
#define tlsf_max(a, b) ((a) > (b) ? (a) : (b))

/*
** Set assert macro, if it has not been provided by the user.
*/
#if !defined (tlsf_assert)
#define tlsf_assert assert
#endif

/*
** Static assertion mechanism.
*/

#define _tlsf_glue2(x, y) x ## y
#define _tlsf_glue(x, y) _tlsf_glue2(x, y)
#define tlsf_static_assert(exp) \
typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]
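
/*
** How the static assert works: a failing expression yields a negative
** array size and therefore a compile-time error. For example, written on
** line 75 (an illustrative line number), tlsf_static_assert(sizeof(int) == 4);
** expands to:
**     typedef char static_assert75[(sizeof(int) == 4) ? 1 : -1];
*/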

/* FIXME: This code only currently supports 32-bit architectures. */
tlsf_static_assert(sizeof(size_t) * CHAR_BIT == 32);

/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);

/* fl_bitmap must be able to hold at least FL_INDEX_COUNT bits. */
tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= FL_INDEX_COUNT);

/* Ensure we've properly tuned our sizes. */
tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);

/*
** Data structures and associated constants.
*/

/*
** Block header structure.
**
** There are several implementation subtleties involved:
** - The prev_phys_block field is only valid if the previous block is free.
** - The prev_phys_block field is actually stored in the last word of the
**   previous block. It appears at the beginning of this structure only to
**   simplify the implementation.
** - The next_free / prev_free fields are only valid if the block is free.
*/
typedef struct block_header_t
{
    /* Points to the previous physical block. */
    struct block_header_t* prev_phys_block;

    /* The size of this block, excluding the block header. */
    size_t size;

    /* Next and previous free blocks. */
    struct block_header_t* next_free;
    struct block_header_t* prev_free;
} block_header_t;

/*
** Since block sizes are always a multiple of 4, the two least significant
** bits of the size field are used to store the block status:
** - bit 0: whether block is busy or free
** - bit 1: whether previous block is busy or free
*/
static const size_t block_header_free_bit = 1 << 0;
static const size_t block_header_prev_free_bit = 1 << 1;
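
/*
** Encoding example (illustrative): a free 64-byte block whose previous
** physical block is in use stores 64 | block_header_free_bit = 0x41 in
** its size field; block_size() below masks the two flag bits off again.
*/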

/*
** The size of the block header exposed to used blocks is the size field.
** The prev_phys_block field is stored *inside* the previous free block.
*/
static const size_t block_header_overhead = sizeof(size_t);

/* User data starts directly after the size field in a used block. */
static const size_t block_start_offset =
    offsetof(block_header_t, size) + sizeof(size_t);

/*
** A free block must be large enough to store its header minus the size of
** the prev_phys_block field, and no larger than (1 << FL_INDEX_MAX) bytes.
*/
static const size_t block_size_min =
    sizeof(block_header_t) - sizeof(block_header_t*);
static const size_t block_size_max = 1 << FL_INDEX_MAX;
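
/*
** On this 32-bit target (pointers and size_t are 4 bytes) that makes
** block_size_min = 16 - 4 = 12 bytes, block_start_offset = 8 bytes, and
** block_size_max = 1 << 30 = 1 GiB; values derived from the definitions
** above.
*/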

/* Empty lists point at this block to indicate they are free. */
static block_header_t block_null;

/* The TLSF pool structure. */
typedef struct pool_t
{
    /* Bitmaps for free lists. */
    unsigned int fl_bitmap;
    unsigned int sl_bitmap[FL_INDEX_COUNT];

    /* Head of free lists. */
    block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
} pool_t;

/* A type used for casting when doing pointer arithmetic. */
typedef unsigned int tlsfptr_t;

/*
** block_header_t member functions.
*/

static size_t block_size(const block_header_t* block) ICODE_ATTR;
static size_t block_size(const block_header_t* block)
{
    return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
}

static void block_set_size(block_header_t* block, size_t size) ICODE_ATTR;
static void block_set_size(block_header_t* block, size_t size)
{
    const size_t oldsize = block->size;
    block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
}

static int block_is_last(const block_header_t* block) ICODE_ATTR;
static int block_is_last(const block_header_t* block)
{
    return 0 == block_size(block);
}

static int block_is_free(const block_header_t* block) ICODE_ATTR;
static int block_is_free(const block_header_t* block)
{
    return block->size & block_header_free_bit;
}

static void block_set_free(block_header_t* block) ICODE_ATTR;
static void block_set_free(block_header_t* block)
{
    block->size |= block_header_free_bit;
}

static void block_set_used(block_header_t* block) ICODE_ATTR;
static void block_set_used(block_header_t* block)
{
    block->size &= ~block_header_free_bit;
}

static int block_is_prev_free(const block_header_t* block) ICODE_ATTR;
static int block_is_prev_free(const block_header_t* block)
{
    return block->size & block_header_prev_free_bit;
}

static void block_set_prev_free(block_header_t* block) ICODE_ATTR;
static void block_set_prev_free(block_header_t* block)
{
    block->size |= block_header_prev_free_bit;
}

static void block_set_prev_used(block_header_t* block) ICODE_ATTR;
static void block_set_prev_used(block_header_t* block)
{
    block->size &= ~block_header_prev_free_bit;
}

static block_header_t* block_from_ptr(const void* ptr) ICODE_ATTR;
static block_header_t* block_from_ptr(const void* ptr)
{
    return tlsf_cast(block_header_t*,
        tlsf_cast(unsigned char*, ptr) - block_start_offset);
}

static void* block_to_ptr(const block_header_t* block) ICODE_ATTR;
static void* block_to_ptr(const block_header_t* block)
{
    return tlsf_cast(void*,
        tlsf_cast(unsigned char*, block) + block_start_offset);
}

/* Return location of next block after block of given size. */
static block_header_t* offset_to_block(const void* ptr, size_t size) ICODE_ATTR;
static block_header_t* offset_to_block(const void* ptr, size_t size)
{
    return tlsf_cast(block_header_t*,
        tlsf_cast(unsigned char*, ptr) + size);
}

/* Return location of previous block. */
static block_header_t* block_prev(const block_header_t* block) ICODE_ATTR;
static block_header_t* block_prev(const block_header_t* block)
{
    return block->prev_phys_block;
}

/* Return location of next existing block. */
static block_header_t* block_next(const block_header_t* block) ICODE_ATTR;
static block_header_t* block_next(const block_header_t* block)
{
    block_header_t* next = offset_to_block(block_to_ptr(block),
        block_size(block) - block_header_overhead);
    tlsf_assert(!block_is_last(block));
    return next;
}

/* Link a new block with its physical neighbor, return the neighbor. */
static block_header_t* block_link_next(block_header_t* block) ICODE_ATTR;
static block_header_t* block_link_next(block_header_t* block)
{
    block_header_t* next = block_next(block);
    next->prev_phys_block = block;
    return next;
}

static void block_mark_as_free(block_header_t* block) ICODE_ATTR;
static void block_mark_as_free(block_header_t* block)
{
    /* Link the block to the next block, first. */
    block_header_t* next = block_link_next(block);
    block_set_prev_free(next);
    block_set_free(block);
}

static void block_mark_as_used(block_header_t* block) ICODE_ATTR;
static void block_mark_as_used(block_header_t* block)
{
    block_header_t* next = block_next(block);
    block_set_prev_used(next);
    block_set_used(block);
}

static size_t align_up(size_t x, size_t align) ICODE_ATTR;
static size_t align_up(size_t x, size_t align)
{
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return (x + (align - 1)) & ~(align - 1);
}

static size_t align_down(size_t x, size_t align) ICODE_ATTR;
static size_t align_down(size_t x, size_t align)
{
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return x - (x & (align - 1));
}

static void* align_ptr(void* ptr, size_t align) ICODE_ATTR;
static void* align_ptr(void* ptr, size_t align)
{
    const tlsfptr_t aligned =
        (tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return tlsf_cast(void*, aligned);
}

/*
** Adjust an allocation size to be aligned to word size, and no smaller
** than internal minimum.
*/
static size_t adjust_request_size(size_t size, size_t align) ICODE_ATTR;
static size_t adjust_request_size(size_t size, size_t align)
{
    size_t adjust = 0;
    if (size && size < block_size_max)
    {
        const size_t aligned = align_up(size, align);
        adjust = tlsf_max(aligned, block_size_min);
    }
    return adjust;
}
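
/*
** For example (derived from the code above): adjust_request_size(1, ALIGN_SIZE)
** aligns 1 up to 4 and then clamps to block_size_min, returning 12, while a
** 100-byte request is already aligned and above the minimum, so it returns
** 100 unchanged.
*/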

/*
** TLSF utility functions. In most cases, these are direct translations of
** the documentation found in the white paper.
*/

static void mapping_insert(size_t size, int* fli, int* sli) ICODE_ATTR;
static void mapping_insert(size_t size, int* fli, int* sli)
{
    int fl, sl;
    if (size < SMALL_BLOCK_SIZE)
    {
        /* Store small blocks in first list. */
        fl = 0;
        sl = size / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
    }
    else
    {
        fl = tlsf_fls(size);
        sl = (size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
        fl -= (FL_INDEX_SHIFT - 1);
    }
    *fli = fl;
    *sli = sl;
}
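
/*
** Worked example (derived from the math above): for size = 460,
** tlsf_fls(460) = 8, so sl = (460 >> 3) ^ 32 = 57 ^ 32 = 25 and
** fl = 8 - (FL_INDEX_SHIFT - 1) = 2; a 460-byte block lives in list
** [2][25]. A 100-byte block takes the small path: fl = 0, sl = 100/4 = 25.
*/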

/* This version rounds up to the next block size (for allocations). */
static void mapping_search(size_t size, int* fli, int* sli) ICODE_ATTR;
static void mapping_search(size_t size, int* fli, int* sli)
{
    if (size >= (1 << SL_INDEX_COUNT_LOG2))
    {
        const size_t round = (1 << (tlsf_fls(size) - SL_INDEX_COUNT_LOG2)) - 1;
        size += round;
    }
    mapping_insert(size, fli, sli);
}
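
/*
** Continuing the example above: searching for 460 bytes first rounds up by
** (1 << (8 - 5)) - 1 = 7 to 467, which maps one sub-list higher ([2][26]),
** guaranteeing that any free block found there is at least 460 bytes.
*/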

static block_header_t* search_suitable_block(pool_t* pool, int* fli, int* sli) ICODE_ATTR;
static block_header_t* search_suitable_block(pool_t* pool, int* fli, int* sli)
{
    int fl = *fli;
    int sl = *sli;

    /*
    ** First, search for a block in the list associated with the given
    ** fl/sl index.
    */
    unsigned int sl_map = pool->sl_bitmap[fl] & (0xffffffff << sl);
    if (!sl_map)
    {
        /* No block exists. Search in the next largest first-level list. */
        const unsigned int fl_map = pool->fl_bitmap & (0xffffffff << (fl + 1));
        if (!fl_map)
        {
            /* No free blocks available, memory has been exhausted. */
            return 0;
        }

        fl = tlsf_ffs(fl_map);
        *fli = fl;
        sl_map = pool->sl_bitmap[fl];
    }
    tlsf_assert(sl_map && "internal error - second level bitmap is null");
    sl = tlsf_ffs(sl_map);
    *sli = sl;

    /* Return the first block in the free list. */
    return pool->blocks[fl][sl];
}

/* Remove a free block from the free list. */
static void remove_free_block(pool_t* pool, block_header_t* block, int fl, int sl) ICODE_ATTR;
static void remove_free_block(pool_t* pool, block_header_t* block, int fl, int sl)
{
    block_header_t* prev = block->prev_free;
    block_header_t* next = block->next_free;
    tlsf_assert(prev && "prev_free field can not be null");
    tlsf_assert(next && "next_free field can not be null");
    next->prev_free = prev;
    prev->next_free = next;

    /* If this block is the head of the free list, set new head. */
    if (pool->blocks[fl][sl] == block)
    {
        pool->blocks[fl][sl] = next;

        /* If the new head is null, clear the bitmap. */
        if (next == &block_null)
        {
            pool->sl_bitmap[fl] &= ~(1 << sl);

            /* If the second bitmap is now empty, clear the fl bitmap. */
            if (!pool->sl_bitmap[fl])
            {
                pool->fl_bitmap &= ~(1 << fl);
            }
        }
    }
}

/* Insert a free block into the free block list. */
static void insert_free_block(pool_t* pool, block_header_t* block, int fl, int sl) ICODE_ATTR;
static void insert_free_block(pool_t* pool, block_header_t* block, int fl, int sl)
{
    block_header_t* current = pool->blocks[fl][sl];
    tlsf_assert(current && "free list cannot have a null entry");
    tlsf_assert(block && "cannot insert a null entry into the free list");
    block->next_free = current;
    block->prev_free = &block_null;
    current->prev_free = block;

    /*
    ** Insert the new block at the head of the list, and mark the first-
    ** and second-level bitmaps appropriately.
    */
    pool->blocks[fl][sl] = block;
    pool->fl_bitmap |= (1 << fl);
    pool->sl_bitmap[fl] |= (1 << sl);
}

/* Remove a given block from the free list. */
static void block_remove(pool_t* pool, block_header_t* block) ICODE_ATTR;
static void block_remove(pool_t* pool, block_header_t* block)
{
    int fl, sl;
    mapping_insert(block_size(block), &fl, &sl);
    remove_free_block(pool, block, fl, sl);
}

/* Insert a given block into the free list. */
static void block_insert(pool_t* pool, block_header_t* block) ICODE_ATTR;
static void block_insert(pool_t* pool, block_header_t* block)
{
    int fl, sl;
    mapping_insert(block_size(block), &fl, &sl);
    insert_free_block(pool, block, fl, sl);
}

static int block_can_split(block_header_t* block, size_t size) ICODE_ATTR;
static int block_can_split(block_header_t* block, size_t size)
{
    return block_size(block) >= sizeof(block_header_t) + size;
}

/* Split a block into two, the second of which is free. */
static block_header_t* block_split(block_header_t* block, size_t size) ICODE_ATTR;
static block_header_t* block_split(block_header_t* block, size_t size)
{
    /* Calculate the amount of space left in the remaining block. */
    block_header_t* remaining =
        offset_to_block(block_to_ptr(block), size - block_header_overhead);

    const size_t remain_size = block_size(block) - (size + block_header_overhead);
    tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
    block_set_size(remaining, remain_size);
    tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");

    block_set_size(block, size);
    block_mark_as_free(remaining);

    return remaining;
}
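
/*
** Example (derived from the code above): splitting a 200-byte free block
** to satisfy a 64-byte request leaves a remainder of 200 - (64 + 4) = 132
** bytes, whose header begins 60 bytes past the original user pointer.
*/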

/* Absorb a free block's storage into an adjacent previous free block. */
static block_header_t* block_absorb(block_header_t* prev, block_header_t* block) ICODE_ATTR;
static block_header_t* block_absorb(block_header_t* prev, block_header_t* block)
{
    tlsf_assert(!block_is_last(prev) && "previous block can't be last!");
    /* Note: Leaves flags untouched. */
    prev->size += block_size(block) + block_header_overhead;
    block_link_next(prev);
    return prev;
}

/* Merge a just-freed block with an adjacent previous free block. */
static block_header_t* block_merge_prev(pool_t* pool, block_header_t* block) ICODE_ATTR;
static block_header_t* block_merge_prev(pool_t* pool, block_header_t* block)
{
    if (block_is_prev_free(block))
    {
        block_header_t* prev = block_prev(block);
        tlsf_assert(prev && "prev physical block can't be null");
        tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");
        block_remove(pool, prev);
        block = block_absorb(prev, block);
    }

    return block;
}

/* Merge a just-freed block with an adjacent free block. */
static block_header_t* block_merge_next(pool_t* pool, block_header_t* block) ICODE_ATTR;
static block_header_t* block_merge_next(pool_t* pool, block_header_t* block)
{
    block_header_t* next = block_next(block);
    tlsf_assert(next && "next physical block can't be null");

    if (block_is_free(next))
    {
        tlsf_assert(!block_is_last(block) && "previous block can't be last!");
        block_remove(pool, next);
        block = block_absorb(block, next);
    }

    return block;
}

/* Trim any trailing block space off the end of a block, return to pool. */
static void block_trim_free(pool_t* pool, block_header_t* block, size_t size) ICODE_ATTR;
static void block_trim_free(pool_t* pool, block_header_t* block, size_t size)
{
    tlsf_assert(block_is_free(block) && "block must be free");
    if (block_can_split(block, size))
    {
        block_header_t* remaining_block = block_split(block, size);
        block_link_next(block);
        block_set_prev_free(remaining_block);
        block_insert(pool, remaining_block);
    }
}

/* Trim any trailing block space off the end of a used block, return to pool. */
static void block_trim_used(pool_t* pool, block_header_t* block, size_t size) ICODE_ATTR;
static void block_trim_used(pool_t* pool, block_header_t* block, size_t size)
{
    tlsf_assert(!block_is_free(block) && "block must be used");
    if (block_can_split(block, size))
    {
        /* If the next block is free, we must coalesce. */
        block_header_t* remaining_block = block_split(block, size);
        block_set_prev_used(remaining_block);

        remaining_block = block_merge_next(pool, remaining_block);
        block_insert(pool, remaining_block);
    }
}

static block_header_t* block_trim_free_leading(pool_t* pool, block_header_t* block, size_t size) ICODE_ATTR;
static block_header_t* block_trim_free_leading(pool_t* pool, block_header_t* block, size_t size)
{
    block_header_t* remaining_block = block;
    if (block_can_split(block, size))
    {
        /* We want the 2nd block. */
        remaining_block = block_split(block, size - block_header_overhead);
        block_set_prev_free(remaining_block);

        block_link_next(block);
        block_insert(pool, block);
    }

    return remaining_block;
}

static block_header_t* block_locate_free(pool_t* pool, size_t size) ICODE_ATTR;
static block_header_t* block_locate_free(pool_t* pool, size_t size)
{
    int fl, sl;
    block_header_t* block = 0;

    if (size)
    {
        mapping_search(size, &fl, &sl);
        block = search_suitable_block(pool, &fl, &sl);
    }

    if (block)
    {
        tlsf_assert(block_size(block) >= size);
        remove_free_block(pool, block, fl, sl);
    }

    return block;
}

static void* block_prepare_used(pool_t* pool, block_header_t* block, size_t size) ICODE_ATTR;
static void* block_prepare_used(pool_t* pool, block_header_t* block, size_t size)
{
    void* p = 0;
    if (block)
    {
        block_trim_free(pool, block, size);
        block_mark_as_used(block);
        p = block_to_ptr(block);
    }
    return p;
}

/* Clear structure and point all empty lists at the null block. */
static void pool_construct(pool_t* pool) ICODE_ATTR;
static void pool_construct(pool_t* pool)
{
    int i, j;

    block_null.next_free = &block_null;
    block_null.prev_free = &block_null;

    pool->fl_bitmap = 0;
    for (i = 0; i < FL_INDEX_COUNT; ++i)
    {
        pool->sl_bitmap[i] = 0;
        for (j = 0; j < SL_INDEX_COUNT; ++j)
        {
            pool->blocks[i][j] = &block_null;
        }
    }
}

/*
** Debugging utilities.
*/

typedef struct integrity_t
{
    int prev_status;
    int status;
} integrity_t;

#define tlsf_insist(x) { tlsf_assert(x); if (!(x)) { status--; } }
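
/*
** Note: tlsf_insist expands in place and decrements a local variable named
** status, which both integrity_walker() and tlsf_check_heap() below declare
** before using it; each failed check therefore lowers the reported status
** by one.
*/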

static void integrity_walker(void* ptr, size_t size, int used, void* user)
{
    block_header_t* block = block_from_ptr(ptr);
    integrity_t* integ = tlsf_cast(integrity_t*, user);
    const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
    const int this_status = block_is_free(block) ? 1 : 0;
    const size_t this_block_size = block_size(block);

    int status = 0;
    tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
    tlsf_insist(size == this_block_size && "block size incorrect");

    integ->prev_status = this_status;
    integ->status += status;
}

int tlsf_check_heap(tlsf_pool tlsf)
{
    int i, j;

    pool_t* pool = tlsf_cast(pool_t*, tlsf);
    int status = 0;

    /* Check that the blocks are physically correct. */
    integrity_t integ = { 0, 0 };
    tlsf_walk_heap(tlsf, integrity_walker, &integ);
    status = integ.status;

    /* Check that the free lists and bitmaps are accurate. */
    for (i = 0; i < FL_INDEX_COUNT; ++i)
    {
        for (j = 0; j < SL_INDEX_COUNT; ++j)
        {
            const int fl_map = pool->fl_bitmap & (1 << i);
            const int sl_list = pool->sl_bitmap[i];
            const int sl_map = sl_list & (1 << j);
            const block_header_t* block = pool->blocks[i][j];

            /* Check that first- and second-level lists agree. */
            if (!fl_map)
            {
                tlsf_insist(!sl_map && "second-level map must be null");
            }

            if (!sl_map)
            {
                tlsf_insist((block == &block_null) && "block list must be null");
                continue;
            }

            /* Check that there is at least one free block. */
            tlsf_insist(sl_list && "no free blocks in second-level map");
            tlsf_insist((block != &block_null) && "block should not be null");

            while (block != &block_null)
            {
                int fli, sli;
                tlsf_insist(block_is_free(block) && "block should be free");
                tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");
                tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");
                tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
                tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");

                mapping_insert(block_size(block), &fli, &sli);
                tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
                block = block->next_free;
            }
        }
    }

    return status;
}

#undef tlsf_insist

static void default_walker(void* ptr, size_t size, int used, void* user)
{
    if (used)
    {
        /* This port's malloc wrapper evidently stores the owning thread in
           the last word of each used block; report it alongside the size. */
        struct scheduler_thread* owner = *((struct scheduler_thread**)(ptr + size - 4));
        cprintf((int)user, "%08X: %08X+8 bytes owned by %08X%s\n", ptr,
                size - 4, owner, owner == current_thread ? " (self)" : "");
    }
    else cprintf((int)user, "%08X: %08X bytes free\n", ptr, size + 4);
}

void tlsf_walk_heap(tlsf_pool pool, tlsf_walker walker, void* user)
{
    tlsf_walker heap_walker = walker ? walker : default_walker;
    block_header_t* block =
        offset_to_block(pool, sizeof(pool_t) - block_header_overhead);

    while (block && !block_is_last(block))
    {
        heap_walker(
            block_to_ptr(block),
            block_size(block),
            !block_is_free(block),
            user);
        block = block_next(block);
    }
}
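
/*
** Illustrative call (the console target here is an example, inferred from
** default_walker's cast of 'user'): passing a null walker selects
** default_walker, so
**     tlsf_walk_heap(pool, 0, (void*)CONSOLE_BOOT);
** dumps every block's address, size, and owner to that console.
*/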

size_t tlsf_block_size(void* ptr)
{
    size_t size = 0;
    if (ptr)
    {
        const block_header_t* block = block_from_ptr(ptr);
        size = block_size(block);
    }
    return size;
}

/*
** Overhead of the TLSF structures in a given memory block passed to
** tlsf_create, equal to the size of a pool_t plus the overhead of the
** initial free block and the sentinel block.
*/
size_t tlsf_overhead(void)
{
    const size_t pool_overhead = sizeof(pool_t) + 2 * block_header_overhead;
    return pool_overhead;
}
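
/*
** With the constants above (32-bit pointers), sizeof(pool_t) is
** 4 + 24*4 + 24*32*4 = 3172 bytes, so tlsf_overhead() returns
** 3172 + 8 = 3180 bytes of bookkeeping per pool.
*/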

/*
** TLSF main interface. Right out of the white paper.
*/

tlsf_pool tlsf_create(void* mem, size_t bytes)
{
    block_header_t* block;
    block_header_t* next;

    const size_t pool_overhead = tlsf_overhead();
    const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);
    pool_t* pool = tlsf_cast(pool_t*, mem);

    if (pool_bytes < block_size_min || pool_bytes > block_size_max)
    {
        cprintf(CONSOLE_BOOT, "tlsf_create: Pool size must be between %d and %d bytes.\n",
                pool_overhead + block_size_min, pool_overhead + block_size_max);
        return 0;
    }

    /* Construct a valid pool object. */
    pool_construct(pool);

    /*
    ** Create the main free block. Offset the start of the block slightly
    ** so that the prev_phys_block field falls inside of the pool
    ** structure - it will never be used.
    */
    block = offset_to_block(
        tlsf_cast(void*, pool), sizeof(pool_t) - block_header_overhead);
    block_set_size(block, align_down(pool_bytes, ALIGN_SIZE));
    block_set_free(block);
    block_set_prev_used(block);
    block_insert(pool, block);

    /* Split the block to create a zero-size pool sentinel block. */
    next = block_link_next(block);
    block_set_size(next, 0);
    block_set_used(next);
    block_set_prev_free(next);

    return tlsf_cast(tlsf_pool, pool);
}
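
/*
** Resulting layout inside 'mem' (illustrative):
**
**   [pool_t bookkeeping | main free block (pool_bytes) | sentinel]
**
** The sentinel is a zero-size used block; block_is_last() keys on that
** zero size, which is what terminates tlsf_walk_heap()'s traversal.
*/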

void tlsf_destroy(tlsf_pool pool)
{
    /* Nothing to do. */
    (void)pool;
}

void* tlsf_malloc(tlsf_pool tlsf, size_t size)
{
    pool_t* pool = tlsf_cast(pool_t*, tlsf);
    const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
    block_header_t* block = block_locate_free(pool, adjust);
    return block_prepare_used(pool, block, adjust);
}

void* tlsf_memalign(tlsf_pool tlsf, size_t align, size_t size)
{
    pool_t* pool = tlsf_cast(pool_t*, tlsf);
    const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

    /*
    ** We must allocate an additional minimum block size bytes so that if
    ** our free block will leave an alignment gap which is smaller, we can
    ** trim a leading free block and release it back to the heap. We must
    ** do this because the previous physical block is in use, therefore
    ** the prev_phys_block field is not valid, and we can't simply adjust
    ** the size of that block.
    */
    const ptrdiff_t gap_minimum = tlsf_cast(ptrdiff_t, sizeof(block_header_t));
    const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum, align);

    /* If alignment is less than or equal to the base alignment, we're done. */
    const size_t aligned_size = (align <= ALIGN_SIZE) ? adjust : size_with_gap;

    block_header_t* block = block_locate_free(pool, aligned_size);

    /* This can't be a static assert. */
    tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);

    if (block)
    {
        void* ptr = block_to_ptr(block);
        void* aligned = align_ptr(ptr, align);
        ptrdiff_t gap = tlsf_cast(ptrdiff_t,
            tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));

        /* If gap size is too small, offset to next aligned boundary. */
        if (gap && gap < gap_minimum)
        {
            const ptrdiff_t gap_remain = gap_minimum - gap;
            const ptrdiff_t offset = tlsf_max(gap_remain, tlsf_cast(ptrdiff_t, align));
            void* next_aligned = tlsf_cast(void*,
                tlsf_cast(tlsfptr_t, aligned) + tlsf_cast(tlsfptr_t, offset));
            aligned = align_ptr(next_aligned, align);
            gap = tlsf_cast(ptrdiff_t,
                tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
        }

        if (gap)
        {
            tlsf_assert(gap >= gap_minimum && "gap size too small");
            block = block_trim_free_leading(pool, block, gap);
        }
    }

    return block_prepare_used(pool, block, adjust);
}
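
/*
** Worked example (derived from the code above): tlsf_memalign(pool, 64, 100)
** searches for align_up(100 + 64 + 16, 64) = 192 bytes. If the candidate
** block's user pointer lands 8 bytes before a 64-byte boundary, the 8-byte
** gap is below gap_minimum (16), so the target moves one boundary further
** (gap = 72); block_trim_free_leading() then splits that leading gap off
** and returns it to the pool, leaving an aligned 100-byte allocation.
*/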

void tlsf_free(tlsf_pool tlsf, void* ptr)
{
    /* Don't attempt to free a NULL pointer. */
    if (ptr)
    {
        pool_t* pool = tlsf_cast(pool_t*, tlsf);
        block_header_t* block = block_from_ptr(ptr);
        block_mark_as_free(block);
        block = block_merge_prev(pool, block);
        block = block_merge_next(pool, block);
        block_insert(pool, block);
    }
}

/*
** The TLSF block structures give us enough information to implement a
** reasonably intelligent realloc, growing or shrinking the currently
** allocated block as required.
**
** This routine handles the somewhat esoteric edge cases of realloc:
** - a non-zero size with a null pointer will behave like malloc
** - a zero size with a non-null pointer will behave like free
** - a request that cannot be satisfied will leave the original buffer
**   untouched
** - an extended buffer size will leave the newly-allocated area with
**   contents undefined
*/
void* tlsf_realloc(tlsf_pool tlsf, void* ptr, size_t size)
{
    pool_t* pool = tlsf_cast(pool_t*, tlsf);
    void* p = 0;

    /* Zero-size requests are treated as free. */
    if (ptr && size == 0)
    {
        tlsf_free(tlsf, ptr);
    }
    /* Requests with NULL pointers are treated as malloc. */
    else if (!ptr)
    {
        p = tlsf_malloc(tlsf, size);
    }
    else
    {
        block_header_t* block = block_from_ptr(ptr);
        block_header_t* next = block_next(block);

        const size_t cursize = block_size(block);
        const size_t combined = cursize + block_size(next) + block_header_overhead;
        const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

        /*
        ** If the next block is used, or when combined with the current
        ** block, does not offer enough space, we must reallocate and copy.
        */
        if (adjust > cursize && (!block_is_free(next) || adjust > combined))
        {
            p = tlsf_malloc(tlsf, size);
            if (p)
            {
                const size_t minsize = tlsf_min(cursize, size);
                memcpy(p, ptr, minsize);
                tlsf_free(tlsf, ptr);
            }
        }
        else
        {
            /* Do we need to expand into the next block? */
            if (adjust > cursize)
            {
                block_merge_next(pool, block);
                block_mark_as_used(block);
            }

            /* Trim the resulting block and return the original pointer. */
            block_trim_used(pool, block, adjust);
            p = ptr;
        }
    }

    return p;
}
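
/*
** Illustrative usage sketch (compiled out; buffer size and names below are
** hypothetical): creating a pool in a static buffer and allocating from it.
*/
#if 0
static unsigned char heap_area[16384];

static void tlsf_example(void)
{
    tlsf_pool pool = tlsf_create(heap_area, sizeof(heap_area));
    void* p = tlsf_malloc(pool, 100);   /* request is aligned and clamped internally */
    p = tlsf_realloc(pool, p, 200);     /* grows in place if the next block is free */
    tlsf_free(pool, p);
    tlsf_destroy(pool);
}
#endif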