/*
** TLSF (two-level segregated fit) dynamic memory allocator.
** Imported into the freemyipod project (SVN rev 125, author theseven).
*/
#include "../../global.h"
#include "../include/assert.h"
#include <limits.h>
#include <stddef.h>
#include "../include/stdio.h"
#include "../include/stdlib.h"
#include "../include/string.h"
#include "../../console.h"

#include "tlsf.h"
#include "tlsfbits.h"
12
 
13
/*
** Constants.
*/

/* Public constants: may be modified. */
enum tlsf_public
{
	/* log2 of number of linear subdivisions of block sizes. */
	SL_INDEX_COUNT_LOG2 = 5,
};

/* Private constants: do not modify. */
enum tlsf_private
{
	/* All allocation sizes and addresses are aligned to 4 bytes. */
	ALIGN_SIZE_LOG2 = 2,
	ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),

	/*
	** We support allocations of sizes up to (1 << FL_INDEX_MAX) bits.
	** However, because we linearly subdivide the second-level lists, and
	** our minimum size granularity is 4 bytes, it doesn't make sense to
	** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
	** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
	** trying to split size ranges into more slots than we have available.
	** Instead, we calculate the minimum threshold size, and place all
	** blocks below that size into the 0th first-level list.
	*/
	FL_INDEX_MAX = 30,
	/* Number of second-level lists per first-level list (32). */
	SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
	/* First power of two large enough to be linearly subdivided. */
	FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
	FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),

	/* Blocks below this size all live in the 0th first-level list. */
	SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
};
48
 
49
/*
** Cast and min/max macros.
*/

#define tlsf_cast(t, exp)	((t) (exp))
/* NOTE: min/max evaluate both arguments twice; avoid side effects. */
#define tlsf_min(a, b)		((a) < (b) ? (a) : (b))
#define tlsf_max(a, b)		((a) > (b) ? (a) : (b))

/*
** Set assert macro, if it has not been provided by the user.
*/
#if !defined (tlsf_assert)
#define tlsf_assert assert
#endif
63
 
64
/*
** Static assertion mechanism.
*/

#define _tlsf_glue2(x, y) x ## y
#define _tlsf_glue(x, y) _tlsf_glue2(x, y)
/* Declares a char array typedef whose size is -1 (a compile error) when exp is false. */
#define tlsf_static_assert(exp) \
	typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]

/* FIXME: This code only currently supports 32-bit architectures. */
tlsf_static_assert(sizeof(size_t) * CHAR_BIT == 32);

/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);

/* sizeof fl_bitmap must be >= FL_INDEX_COUNT. */
tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= FL_INDEX_COUNT);

/* Ensure we've properly tuned our sizes. */
tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
84
 
85
/*
** Data structures and associated constants.
*/

/*
** Block header structure.
**
** There are several implementation subtleties involved:
** - The prev_phys_block field is only valid if the previous block is free.
** - The prev_phys_block field is actually stored in the last word of the
**   previous block. It appears at the beginning of this structure only to
**   simplify the implementation.
** - The next_free / prev_free fields are only valid if the block is free.
*/
typedef struct block_header_t
{
	/* Points to the previous physical block. */
	struct block_header_t* prev_phys_block;

	/* The size of this block, excluding the block header. */
	size_t size;

	/* Next and previous free blocks. */
	struct block_header_t* next_free;
	struct block_header_t* prev_free;
} block_header_t;

/*
** Since block sizes are always a multiple of 4, the two least significant
** bits of the size field are used to store the block status:
** - bit 0: whether block is busy or free
** - bit 1: whether previous block is busy or free
*/
static const size_t block_header_free_bit = 1 << 0;
static const size_t block_header_prev_free_bit = 1 << 1;

/*
** The size of the block header exposed to used blocks is the size field.
** The prev_phys_block field is stored *inside* the previous free block.
*/
static const size_t block_header_overhead = sizeof(size_t);

/* User data starts directly after the size field in a used block. */
static const size_t block_start_offset =
	offsetof(block_header_t, size) + sizeof(size_t);

/*
** A free block must be large enough to store its header minus the size of
** the prev_phys_block field, and no larger than the number of addressable
** bits for FL_INDEX.
*/
static const size_t block_size_min = 
	sizeof(block_header_t) - sizeof(block_header_t*);
static const size_t block_size_max = 1 << FL_INDEX_MAX;

/* Empty lists point at this block to indicate they are free. */
static block_header_t block_null;

/* The TLSF pool structure. */
typedef struct pool_t
{
	/* Bitmaps for free lists. */
	unsigned int fl_bitmap;
	unsigned int sl_bitmap[FL_INDEX_COUNT];

	/* Head of free lists. */
	block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
} pool_t;

/*
** A type used for casting when doing pointer arithmetic.
** NOTE(review): assumes pointers fit in an unsigned int; consistent with
** the 32-bit-only static assert above, but verify before porting.
*/
typedef unsigned int tlsfptr_t;
156
 
157
/*
158
** block_header_t member functions.
159
*/
160
 
161
static size_t block_size(const block_header_t* block) ICODE_ATTR;
162
static size_t block_size(const block_header_t* block)
163
{
164
	return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
165
}
166
 
167
static void block_set_size(block_header_t* block, size_t size) ICODE_ATTR;
/* Store a new payload size while preserving the two status bits. */
static void block_set_size(block_header_t* block, size_t size)
{
	const size_t status_bits =
		block->size & (block_header_free_bit | block_header_prev_free_bit);
	block->size = size | status_bits;
}
173
 
174
static int block_is_last(const block_header_t* block) ICODE_ATTR;
175
static int block_is_last(const block_header_t* block)
176
{
177
	return 0 == block_size(block);
178
}
179
 
180
static int block_is_free(const block_header_t* block) ICODE_ATTR;
181
static int block_is_free(const block_header_t* block)
182
{
183
	return block->size & block_header_free_bit;
184
}
185
 
186
static void block_set_free(block_header_t* block) ICODE_ATTR;
/* Mark this block as free in its own size word. */
static void block_set_free(block_header_t* block)
{
	block->size |= block_header_free_bit;
}

static void block_set_used(block_header_t* block) ICODE_ATTR;
/* Mark this block as used in its own size word. */
static void block_set_used(block_header_t* block)
{
	block->size &= ~block_header_free_bit;
}

static int block_is_prev_free(const block_header_t* block) ICODE_ATTR;
/* Nonzero when the previous physical block is recorded as free. */
static int block_is_prev_free(const block_header_t* block)
{
	return block->size & block_header_prev_free_bit;
}

static void block_set_prev_free(block_header_t* block) ICODE_ATTR;
/* Record that the previous physical block is free. */
static void block_set_prev_free(block_header_t* block)
{
	block->size |= block_header_prev_free_bit;
}

static void block_set_prev_used(block_header_t* block) ICODE_ATTR;
/* Record that the previous physical block is in use. */
static void block_set_prev_used(block_header_t* block)
{
	block->size &= ~block_header_prev_free_bit;
}
215
 
216
static block_header_t* block_from_ptr(const void* ptr) ICODE_ATTR;
/* Map a user pointer back to its block header (header starts block_start_offset bytes earlier). */
static block_header_t* block_from_ptr(const void* ptr)
{
	return tlsf_cast(block_header_t*,
		tlsf_cast(unsigned char*, ptr) - block_start_offset);
}
222
 
223
static void* block_to_ptr(const block_header_t* block) ICODE_ATTR;
/* Map a block header to the user pointer (payload starts after the size field). */
static void* block_to_ptr(const block_header_t* block)
{
	return tlsf_cast(void*,
		tlsf_cast(unsigned char*, block) + block_start_offset);
}
229
 
230
/* Return location of next block after block of given size. */
static block_header_t* offset_to_block(const void* ptr, size_t size) ICODE_ATTR;
static block_header_t* offset_to_block(const void* ptr, size_t size)
{
	return tlsf_cast(block_header_t*,
		tlsf_cast(unsigned char*, ptr) + size);
}
237
 
238
/* Return location of previous block (only valid if the previous block is free). */
static block_header_t* block_prev(const block_header_t* block) ICODE_ATTR;
static block_header_t* block_prev(const block_header_t* block)
{
	return block->prev_phys_block;
}
244
 
245
/* Return location of next existing block. */
static block_header_t* block_next(const block_header_t* block) ICODE_ATTR;
static block_header_t* block_next(const block_header_t* block)
{
	/* The next header begins block_size bytes past the payload start,
	** minus the header-overhead word shared between the two blocks. */
	block_header_t* next = offset_to_block(block_to_ptr(block),
		block_size(block) - block_header_overhead);
	tlsf_assert(!block_is_last(block));
	return next;
}
254
 
255
/* Link a new block with its physical neighbor, return the neighbor. */
256
static block_header_t* block_link_next(block_header_t* block) ICODE_ATTR;
257
static block_header_t* block_link_next(block_header_t* block)
258
{
259
	block_header_t* next = block_next(block);
260
	next->prev_phys_block = block;
261
	return next;
262
}
263
 
264
static void block_mark_as_free(block_header_t* block) ICODE_ATTR;
/* Flag a block free and tell its physical successor its predecessor is free. */
static void block_mark_as_free(block_header_t* block)
{
	/* Link the block to the next block, first. */
	block_set_prev_free(block_link_next(block));
	block_set_free(block);
}
272
 
273
static void block_mark_as_used(block_header_t* block) ICODE_ATTR;
/* Flag a block used and tell its physical successor its predecessor is used. */
static void block_mark_as_used(block_header_t* block)
{
	block_set_prev_used(block_next(block));
	block_set_used(block);
}
280
 
281
static size_t align_up(size_t x, size_t align) ICODE_ATTR;
/* Round x up to a multiple of align; align must be a power of two. */
static size_t align_up(size_t x, size_t align)
{
	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
	return (x + (align - 1)) & ~(align - 1);
}
287
 
288
static size_t align_down(size_t x, size_t align) ICODE_ATTR;
289
static size_t align_down(size_t x, size_t align)
290
{
291
	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
292
	return x - (x & (align - 1));
293
}
294
 
295
static void* align_ptr(void* ptr, size_t align) ICODE_ATTR;
/* Round a pointer up to a multiple of align; align must be a power of two. */
static void* align_ptr(void* ptr, size_t align)
{
	const tlsfptr_t aligned =
		(tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);
	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
	return tlsf_cast(void*, aligned);
}
303
 
304
/*
305
** Adjust an allocation size to be aligned to word size, and no smaller
306
** than internal minimum.
307
*/
308
static size_t adjust_request_size(size_t size, size_t align) ICODE_ATTR;
309
static size_t adjust_request_size(size_t size, size_t align)
310
{
311
	size_t adjust = 0;
312
	if (size && size < block_size_max)
313
	{
314
		const size_t aligned = align_up(size, align);
315
		adjust = tlsf_max(aligned, block_size_min);
316
	}
317
	return adjust;
318
}
319
 
320
/*
** TLSF utility functions. In most cases, these are direct translations of
** the documentation found in the white paper.
*/

static void mapping_insert(size_t size, int* fli, int* sli) ICODE_ATTR;
/*
** Map a block size to its first-level (fl) and second-level (sl) free
** list indices.  This variant maps the size exactly (used for inserts
** and removals).
*/
static void mapping_insert(size_t size, int* fli, int* sli)
{
	int fl, sl;
	if (size < SMALL_BLOCK_SIZE)
	{
		/* Store small blocks in first list. */
		fl = 0;
		sl = size / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
	}
	else
	{
		/* fl = position of the top set bit; sl = the next
		** SL_INDEX_COUNT_LOG2 bits below it (XOR clears the top bit). */
		fl = tlsf_fls(size);
		sl = (size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
		/* Rebase fl so the first large size class follows the small-block list. */
		fl -= (FL_INDEX_SHIFT - 1);
	}
	*fli = fl;
	*sli = sl;
}
344
 
345
/* This version rounds up to the next block size (for allocations) */
static void mapping_search(size_t size, int* fli, int* sli) ICODE_ATTR;
static void mapping_search(size_t size, int* fli, int* sli)
{
	if (size >= (1 << SL_INDEX_COUNT_LOG2))
	{
		/* Round up so that every block in the chosen list is >= size. */
		const size_t round = (1 << (tlsf_fls(size) - SL_INDEX_COUNT_LOG2)) - 1;
		size += round;
	}
	mapping_insert(size, fli, sli);
}
356
 
357
static block_header_t* search_suitable_block(pool_t* pool, int* fli, int* sli) ICODE_ATTR;
/*
** Find the first non-empty free list at or above the given fl/sl index,
** updating *fli/*sli to the list actually found.  Returns the head block
** of that list, or 0 if the pool has no suitable free block.
*/
static block_header_t* search_suitable_block(pool_t* pool, int* fli, int* sli)
{
	int fl = *fli;
	int sl = *sli;

	/*
	** First, search for a block in the list associated with the given
	** fl/sl index.
	*/
	unsigned int sl_map = pool->sl_bitmap[fl] & (0xffffffff << sl);
	if (!sl_map)
	{
		/* No block exists. Search in the next largest first-level list. */
		const unsigned int fl_map = pool->fl_bitmap & (0xffffffff << (fl + 1));
		if (!fl_map)
		{
			/* No free blocks available, memory has been exhausted. */
			return 0;
		}

		fl = tlsf_ffs(fl_map);
		*fli = fl;
		sl_map = pool->sl_bitmap[fl];
	}
	tlsf_assert(sl_map && "internal error - second level bitmap is null");
	sl = tlsf_ffs(sl_map);
	*sli = sl;

	/* Return the first block in the free list. */
	return pool->blocks[fl][sl];
}
389
 
390
/* Remove a free block from the free list.*/
static void remove_free_block(pool_t* pool, block_header_t* block, int fl, int sl) ICODE_ATTR;
static void remove_free_block(pool_t* pool, block_header_t* block, int fl, int sl)
{
	block_header_t* prev = block->prev_free;
	block_header_t* next = block->next_free;
	tlsf_assert(prev && "prev_free field can not be null");
	tlsf_assert(next && "next_free field can not be null");
	/* Unlink from the doubly-linked free list. */
	next->prev_free = prev;
	prev->next_free = next;

	/* If this block is the head of the free list, set new head. */
	if (pool->blocks[fl][sl] == block)
	{
		pool->blocks[fl][sl] = next;

		/* If the new head is null, clear the bitmap. */
		if (next == &block_null)
		{
			pool->sl_bitmap[fl] &= ~(1 << sl);

			/* If the second bitmap is now empty, clear the fl bitmap. */
			if (!pool->sl_bitmap[fl])
			{
				pool->fl_bitmap &= ~(1 << fl);
			}
		}
	}
}
419
 
420
/* Insert a free block into the free block list. */
421
static void insert_free_block(pool_t* pool, block_header_t* block, int fl, int sl) ICODE_ATTR;
422
static void insert_free_block(pool_t* pool, block_header_t* block, int fl, int sl)
423
{
424
	block_header_t* current = pool->blocks[fl][sl];
425
	tlsf_assert(current && "free list cannot have a null entry");
426
	tlsf_assert(block && "cannot insert a null entry into the free list");
427
	block->next_free = current;
428
	block->prev_free = &block_null;
429
	current->prev_free = block;
430
 
431
	/*
432
	** Insert the new block at the head of the list, and mark the first-
433
	** and second-level bitmaps appropriately.
434
	*/
435
	pool->blocks[fl][sl] = block;
436
	pool->fl_bitmap |= (1 << fl);
437
	pool->sl_bitmap[fl] |= (1 << sl);
438
}
439
 
440
/* Remove a given block from the free list. */
441
static void block_remove(pool_t* pool, block_header_t* block) ICODE_ATTR;
442
static void block_remove(pool_t* pool, block_header_t* block)
443
{
444
	int fl, sl;
445
	mapping_insert(block_size(block), &fl, &sl);
446
	remove_free_block(pool, block, fl, sl);
447
}
448
 
449
/* Insert a given block into the free list. */
450
static void block_insert(pool_t* pool, block_header_t* block) ICODE_ATTR;
451
static void block_insert(pool_t* pool, block_header_t* block)
452
{
453
	int fl, sl;
454
	mapping_insert(block_size(block), &fl, &sl);
455
	insert_free_block(pool, block, fl, sl);
456
}
457
 
458
static int block_can_split(block_header_t* block, size_t size) ICODE_ATTR;
/* A block may be split when the remainder can hold a full block header. */
static int block_can_split(block_header_t* block, size_t size)
{
	return size + sizeof(block_header_t) <= block_size(block);
}
463
 
464
/* Split a block into two, the second of which is free. */
static block_header_t* block_split(block_header_t* block, size_t size) ICODE_ATTR;
static block_header_t* block_split(block_header_t* block, size_t size)
{
	/* Calculate the amount of space left in the remaining block. */
	block_header_t* remaining =
		offset_to_block(block_to_ptr(block), size - block_header_overhead);

	/* One header-overhead word is consumed by the new block's header. */
	const size_t remain_size = block_size(block) - (size + block_header_overhead);
	tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
	block_set_size(remaining, remain_size);
	tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");

	block_set_size(block, size);
	block_mark_as_free(remaining);

	return remaining;
}
482
 
483
/* Absorb a free block's storage into an adjacent previous free block. */
484
static block_header_t* block_absorb(block_header_t* prev, block_header_t* block) ICODE_ATTR;
485
static block_header_t* block_absorb(block_header_t* prev, block_header_t* block)
486
{
487
	tlsf_assert(!block_is_last(prev) && "previous block can't be last!");
488
	/* Note: Leaves flags untouched. */
489
	prev->size += block_size(block) + block_header_overhead;
490
	block_link_next(prev);
491
	return prev;
492
}
493
 
494
/* Merge a just-freed block with an adjacent previous free block. */
495
static block_header_t* block_merge_prev(pool_t* pool, block_header_t* block) ICODE_ATTR;
496
static block_header_t* block_merge_prev(pool_t* pool, block_header_t* block)
497
{
498
	if (block_is_prev_free(block))
499
	{
500
		block_header_t* prev = block_prev(block);
501
		tlsf_assert(prev && "prev physical block can't be null");
502
		tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");
503
		block_remove(pool, prev);
504
		block = block_absorb(prev, block);
505
	}
506
 
507
	return block;
508
}
509
 
510
/* Merge a just-freed block with an adjacent free block. */
511
static block_header_t* block_merge_next(pool_t* pool, block_header_t* block) ICODE_ATTR;
512
static block_header_t* block_merge_next(pool_t* pool, block_header_t* block)
513
{
514
	block_header_t* next = block_next(block);
515
	tlsf_assert(next && "next physical block can't be null");
516
 
517
	if (block_is_free(next))
518
	{
519
		tlsf_assert(!block_is_last(block) && "previous block can't be last!");
520
		block_remove(pool, next);
521
		block = block_absorb(block, next);
522
	}
523
 
524
	return block;
525
}
526
 
527
/* Trim any trailing block space off the end of a block, return to pool. */
static void block_trim_free(pool_t* pool, block_header_t* block, size_t size) ICODE_ATTR;
static void block_trim_free(pool_t* pool, block_header_t* block, size_t size)
{
	tlsf_assert(block_is_free(block) && "block must be free");
	if (block_can_split(block, size))
	{
		/* Split, relink the physical chain, and give the tail back. */
		block_header_t* remaining_block = block_split(block, size);
		block_link_next(block);
		block_set_prev_free(remaining_block);
		block_insert(pool, remaining_block);
	}
}
540
 
541
/* Trim any trailing block space off the end of a used block, return to pool. */
static void block_trim_used(pool_t* pool, block_header_t* block, size_t size) ICODE_ATTR;
static void block_trim_used(pool_t* pool, block_header_t* block, size_t size)
{
	tlsf_assert(!block_is_free(block) && "block must be used");
	if (block_can_split(block, size))
	{
		/* If the next block is free, we must coalesce. */
		block_header_t* remaining_block = block_split(block, size);
		block_set_prev_used(remaining_block);

		remaining_block = block_merge_next(pool, remaining_block);
		block_insert(pool, remaining_block);
	}
}
556
 
557
static block_header_t* block_trim_free_leading(pool_t* pool, block_header_t* block, size_t size) ICODE_ATTR;
/*
** Release the first 'size' bytes of a free block back to the pool and
** return the trailing remainder (used by tlsf_memalign to discard the
** alignment gap).  If the block cannot be split, it is returned whole.
*/
static block_header_t* block_trim_free_leading(pool_t* pool, block_header_t* block, size_t size)
{
	block_header_t* remaining_block = block;
	if (block_can_split(block, size))
	{
		/* We want the 2nd block. */
		remaining_block = block_split(block, size - block_header_overhead);
		block_set_prev_free(remaining_block);

		block_link_next(block);
		block_insert(pool, block);
	}

	return remaining_block;
}
573
 
574
static block_header_t* block_locate_free(pool_t* pool, size_t size) ICODE_ATTR;
/* Find, detach, and return a free block large enough for 'size' (0 if none). */
static block_header_t* block_locate_free(pool_t* pool, size_t size)
{
	int fl = 0, sl = 0;
	block_header_t* block = 0;

	if (size)
	{
		mapping_search(size, &fl, &sl);
		block = search_suitable_block(pool, &fl, &sl);
		if (block)
		{
			tlsf_assert(block_size(block) >= size);
			remove_free_block(pool, block, fl, sl);
		}
	}

	return block;
}
594
 
595
static void* block_prepare_used(pool_t* pool, block_header_t* block, size_t size) ICODE_ATTR;
/* Trim a located block to 'size', mark it used, and return its user pointer. */
static void* block_prepare_used(pool_t* pool, block_header_t* block, size_t size)
{
	if (!block)
	{
		return 0;
	}
	block_trim_free(pool, block, size);
	block_mark_as_used(block);
	return block_to_ptr(block);
}
607
 
608
/* Clear structure and point all empty lists at the null block. */
609
static void pool_construct(pool_t* pool) ICODE_ATTR;
610
static void pool_construct(pool_t* pool)
611
{
612
	int i, j;
613
 
614
	block_null.next_free = &block_null;
615
	block_null.prev_free = &block_null;
616
 
617
	pool->fl_bitmap = 0;
618
	for (i = 0; i < FL_INDEX_COUNT; ++i)
619
	{
620
		pool->sl_bitmap[i] = 0;
621
		for (j = 0; j < SL_INDEX_COUNT; ++j)
622
		{
623
			pool->blocks[i][j] = &block_null;
624
		}
625
	}
626
}
627
 
628
/*
629
** Debugging utilities.
630
*/
631
 
632
typedef struct integrity_t
{
	/* Expected free/used state of the previous physical block. */
	int prev_status;
	/* Accumulated check result; decremented once per failed check. */
	int status;
} integrity_t;
637
 
638
/* Assert in debug builds; in all builds, count a failure into local 'status'. */
#define tlsf_insist(x) { tlsf_assert(x); if (!(x)) { status--; } }
639
 
640
static void integrity_walker(void* ptr, size_t size, int used, void* user)
641
{
642
	block_header_t* block = block_from_ptr(ptr);
643
	integrity_t* integ = tlsf_cast(integrity_t*, user);
644
	const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
645
	const int this_status = block_is_free(block) ? 1 : 0;
646
	const size_t this_block_size = block_size(block);
647
 
648
	int status = 0;
649
	tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
650
	tlsf_insist(size == this_block_size && "block size incorrect");
651
 
652
	integ->prev_status = this_status;
653
	integ->status += status;
654
}
655
 
656
/*
** Walk the entire heap and verify both the physical block chain and the
** free lists/bitmaps.  Returns 0 when consistent; otherwise a negative
** count of failed checks.
*/
int tlsf_check_heap(tlsf_pool tlsf)
{
	int i, j;

	pool_t* pool = tlsf_cast(pool_t*, tlsf);
	int status = 0;

	/* Check that the blocks are physically correct. */
	integrity_t integ = { 0, 0 };
	tlsf_walk_heap(tlsf, integrity_walker, &integ);
	status = integ.status;

	/* Check that the free lists and bitmaps are accurate. */
	for (i = 0; i < FL_INDEX_COUNT; ++i)
	{
		for (j = 0; j < SL_INDEX_COUNT; ++j)
		{
			const int fl_map = pool->fl_bitmap & (1 << i);
			const int sl_list = pool->sl_bitmap[i];
			const int sl_map = sl_list & (1 << j);
			const block_header_t* block = pool->blocks[i][j];

			/* Check that first- and second-level lists agree. */
			if (!fl_map)
			{
				tlsf_insist(!sl_map && "second-level map must be null");
			}

			if (!sl_map)
			{
				tlsf_insist((block == &block_null) && "block list must be null");
				continue;
			}

			/* Check that there is at least one free block. */
			tlsf_insist(sl_list && "no free blocks in second-level map");
			tlsf_insist((block != &block_null) && "block should not be null");

			while (block != &block_null)
			{
				int fli, sli;
				tlsf_insist(block_is_free(block) && "block should be free");
				tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");
				tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");
				tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
				tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");

				mapping_insert(block_size(block), &fli, &sli);
				tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
				block = block->next_free;
			}
		}
	}

	return status;
}
712
 
713
#undef tlsf_insist
714
 
715
static void default_walker(void* ptr, size_t size, int used, void* user)
716
{
717
	(void)user;
718
	cprintf(CONSOLE_BOOT, "\t%p %s size: %x\n", ptr, used ? "used" : "free", size);
719
}
720
 
721
/* Invoke 'walker' (or a default printing walker) for every block in the pool. */
void tlsf_walk_heap(tlsf_pool pool, tlsf_walker walker, void* user)
{
	tlsf_walker heap_walker = walker ? walker : default_walker;

	/* The first block starts just before the end of the pool_t header. */
	block_header_t* block =
		offset_to_block(pool, sizeof(pool_t) - block_header_overhead);

	while (block && !block_is_last(block))
	{
		heap_walker(block_to_ptr(block), block_size(block),
			!block_is_free(block), user);
		block = block_next(block);
	}
}
737
 
738
/* Usable size of an allocation returned by tlsf_malloc (0 for NULL). */
size_t tlsf_block_size(void* ptr)
{
	if (!ptr)
	{
		return 0;
	}
	return block_size(block_from_ptr(ptr));
}
748
 
749
/*
750
** Overhead of the TLSF structures in a given memory block passed to
751
** tlsf_create, equal to the size of a pool_t plus overhead of the initial
752
** free block and the sentinel block.
753
*/
754
size_t tlsf_overhead()
755
{
756
	const size_t pool_overhead = sizeof(pool_t) + 2 * block_header_overhead;
757
	return pool_overhead;
758
}
759
 
760
/*
761
** TLSF main interface. Right out of the white paper.
762
*/
763
 
764
tlsf_pool tlsf_create(void* mem, size_t bytes)
765
{
766
	block_header_t* block;
767
	block_header_t* next;
768
 
769
	const size_t pool_overhead = tlsf_overhead();
770
	const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);
771
	pool_t* pool = tlsf_cast(pool_t*, mem);
772
 
773
	if (pool_bytes < block_size_min || pool_bytes > block_size_max)
774
	{
775
		cprintf(CONSOLE_BOOT, "tlsf_create: Pool size must be between %d and %d bytes.\n", 
776
			pool_overhead + block_size_min, pool_overhead + block_size_max);
777
		return 0;
778
	}
779
 
780
	/* Construct a valid pool object. */
781
	pool_construct(pool);
782
 
783
	/*
784
	** Create the main free block. Offset the start of the block slightly
785
	** so that the prev_phys_block field falls inside of the pool
786
	** structure - it will never be used.
787
	*/
788
	block = offset_to_block(
789
		tlsf_cast(void*, pool), sizeof(pool_t) - block_header_overhead);
790
	block_set_size(block, align_down(pool_bytes, ALIGN_SIZE));
791
	block_set_free(block);
792
	block_set_prev_used(block);
793
	block_insert(pool, block);
794
 
795
	/* Split the block to create a zero-size pool sentinel block. */
796
	next = block_link_next(block);
797
	block_set_size(next, 0);
798
	block_set_used(next);
799
	block_set_prev_free(next);
800
 
801
	return tlsf_cast(tlsf_pool, pool);
802
}
803
 
804
/* Tear down a pool.  TLSF keeps no external state, so nothing to do. */
void tlsf_destroy(tlsf_pool pool)
{
	/* Nothing to do; silence the unused-parameter warning. */
	(void)pool;
}
809
 
810
/* Allocate 'size' bytes from the pool; returns 0 on failure. */
void* tlsf_malloc(tlsf_pool tlsf, size_t size)
{
	pool_t* pool = tlsf_cast(pool_t*, tlsf);
	const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
	return block_prepare_used(pool, block_locate_free(pool, adjust), adjust);
}
817
 
818
/*
** Allocate 'size' bytes aligned to 'align' (a power of two).  Over-
** allocates, then trims a leading free block to reach the alignment
** boundary.  Returns 0 on failure.
*/
void* tlsf_memalign(tlsf_pool tlsf, size_t align, size_t size)
{
	pool_t* pool = tlsf_cast(pool_t*, tlsf);
	const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

	/*
	** We must allocate an additional minimum block size bytes so that if
	** our free block will leave an alignment gap which is smaller, we can
	** trim a leading free block and release it back to the heap. We must
	** do this because the previous physical block is in use, therefore
	** the prev_phys_block field is not valid, and we can't simply adjust
	** the size of that block.
	*/
	const ptrdiff_t gap_minimum = tlsf_cast(ptrdiff_t, sizeof(block_header_t));
	const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum, align);

	/* If alignment is less than or equals base alignment, we're done. */
	const size_t aligned_size = (align <= ALIGN_SIZE) ? adjust : size_with_gap;

	block_header_t* block = block_locate_free(pool, aligned_size);

	/* This can't be a static assert. */
	tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);

	if (block)
	{
		void* ptr = block_to_ptr(block);
		void* aligned = align_ptr(ptr, align);
		ptrdiff_t gap = tlsf_cast(ptrdiff_t,
			tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));

		/* If gap size is too small, offset to next aligned boundary. */
		if (gap && gap < gap_minimum)
		{
			const ptrdiff_t gap_remain = gap_minimum - gap;
			const ptrdiff_t offset = tlsf_max(gap_remain, tlsf_cast(ptrdiff_t, align));
			void* next_aligned = tlsf_cast(void*,
				tlsf_cast(tlsfptr_t, aligned) + tlsf_cast(tlsfptr_t, offset));
			aligned = align_ptr(next_aligned, align);
			gap = tlsf_cast(ptrdiff_t,
				tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
		}

		if (gap)
		{
			tlsf_assert(gap >= gap_minimum && "gap size too small");
			/* Give the leading gap back to the pool; keep the aligned tail. */
			block = block_trim_free_leading(pool, block, gap);
		}
	}

	return block_prepare_used(pool, block, adjust);
}
870
 
871
/* Return an allocation to the pool, coalescing with free neighbours. */
void tlsf_free(tlsf_pool tlsf, void* ptr)
{
	/* Don't attempt to free a NULL pointer. */
	if (!ptr)
	{
		return;
	}

	pool_t* pool = tlsf_cast(pool_t*, tlsf);
	block_header_t* block = block_from_ptr(ptr);
	block_mark_as_free(block);
	block = block_merge_prev(pool, block);
	block = block_merge_next(pool, block);
	block_insert(pool, block);
}
884
 
885
/*
886
** The TLSF block information provides us with enough information to
887
** provide a reasonably intelligent implementation of realloc, growing or
888
** shrinking the currently allocated block as required.
889
**
890
** This routine handles the somewhat esoteric edge cases of realloc:
891
** - a non-zero size with a null pointer will behave like malloc
892
** - a zero size with a non-null pointer will behave like free
893
** - a request that cannot be satisfied will leave the original buffer
894
**   untouched
895
** - an extended buffer size will leave the newly-allocated area with
896
**   contents undefined
897
*/
898
void* tlsf_realloc(tlsf_pool tlsf, void* ptr, size_t size)
{
	pool_t* pool = tlsf_cast(pool_t*, tlsf);
	void* p = 0;

	/* Zero-size requests are treated as free. */
	if (ptr && size == 0)
	{
		tlsf_free(tlsf, ptr);
	}
	/* Requests with NULL pointers are treated as malloc. */
	else if (!ptr)
	{
		p = tlsf_malloc(tlsf, size);
	}
	else
	{
		block_header_t* block = block_from_ptr(ptr);
		block_header_t* next = block_next(block);

		const size_t cursize = block_size(block);
		const size_t combined = cursize + block_size(next) + block_header_overhead;
		const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

		/*
		** If the next block is used, or when combined with the current
		** block, does not offer enough space, we must reallocate and copy.
		*/
		if (adjust > cursize && (!block_is_free(next) || adjust > combined))
		{
			p = tlsf_malloc(tlsf, size);
			if (p)
			{
				/* Copy the smaller of old and new sizes; leave the
				** original untouched if allocation failed. */
				const size_t minsize = tlsf_min(cursize, size);
				memcpy(p, ptr, minsize);
				tlsf_free(tlsf, ptr);
			}
		}
		else
		{
			/* Do we need to expand to the next block? */
			if (adjust > cursize)
			{
				block_merge_next(pool, block);
				block_mark_as_used(block);
			}

			/* Trim the resulting block and return the original pointer. */
			block_trim_used(pool, block, adjust);
			p = ptr;
		}
	}

	return p;
}