/*
 * malloc.c
 *
 * Very simple linked-list based malloc()/free().
 */

#include <stdlib.h>
#include "init.h"
#include "malloc.h"

struct free_arena_header __malloc_head =
{
  {
    ARENA_TYPE_HEAD,
    0,
    &__malloc_head,
    &__malloc_head,
  },
  &__malloc_head,
  &__malloc_head
};

/* This is extern so it can be overridden by the user application */
extern size_t __stack_size;
extern void *__mem_end;   /* Produced after argv parsing */

/* Read the current stack pointer (i386-specific inline assembly) */
static inline size_t sp(void)
{
  size_t sp;
  asm volatile("movl %%esp,%0" : "=rm" (sp));
  return sp;
}
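
/*
 * Memory layout assumed by the constructor below (a sketch derived from
 * the code, not a guaranteed map): everything between the end of the
 * loaded image and arguments (__mem_end) and the current stack pointer
 * is carved into a heap arena at the bottom and __stack_size bytes of
 * stack at the top.
 *
 *   __mem_end                                      sp()
 *      |------------ heap arena ------------|-- stack --|
 */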
static void __constructor init_memory_arena(void)
{
  struct free_arena_header *fp;
  size_t start, total_space;

  start = (size_t)ARENA_ALIGN_UP(__mem_end);
  total_space = sp() - start;

  if ( __stack_size == 0 || __stack_size > total_space >> 1 )
    __stack_size = total_space >> 1; /* Half for the stack, half for the heap... */

  if ( total_space < __stack_size + 4*sizeof(struct arena_header) )
    __stack_size = total_space - 4*sizeof(struct arena_header);

  fp = (struct free_arena_header *)start;
  fp->a.type = ARENA_TYPE_FREE;
  fp->a.size = total_space - __stack_size;

  /* Insert into chains */
  fp->a.next = fp->a.prev = &__malloc_head;
  fp->next_free = fp->prev_free = &__malloc_head;
  __malloc_head.a.next = __malloc_head.a.prev = fp;
  __malloc_head.next_free = __malloc_head.prev_free = fp;
}

static void *__malloc_from_block(struct free_arena_header *fp, size_t size)
{
  size_t fsize;
  struct free_arena_header *nfp, *na;

  fsize = fp->a.size;

  /* We need the 2* to account for the larger requirements of a free block */
  if ( fsize >= size+2*sizeof(struct arena_header) ) {
    /* Bigger block than required -- split block */
    nfp = (struct free_arena_header *)((char *)fp + size);
    na = fp->a.next;

    nfp->a.type = ARENA_TYPE_FREE;
    nfp->a.size = fsize-size;
    fp->a.type  = ARENA_TYPE_USED;
    fp->a.size  = size;

    /* Insert into all-block chain */
    nfp->a.prev = fp;
    nfp->a.next = na;
    na->a.prev  = nfp;
    fp->a.next  = nfp;

    /* Replace current block on free chain */
    nfp->next_free = fp->next_free;
    nfp->prev_free = fp->prev_free;
    fp->next_free->prev_free = nfp;
    fp->prev_free->next_free = nfp;
  } else {
    /* Allocate the whole block */
    fp->a.type = ARENA_TYPE_USED;

    /* Remove from free chain */
    fp->next_free->prev_free = fp->prev_free;
    fp->prev_free->next_free = fp->next_free;
  }

  return (void *)(&fp->a + 1);
}
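
/*
 * Split sketch (this just restates what __malloc_from_block does): a
 * free arena that exceeds the request by at least
 * 2*sizeof(struct arena_header) is cut in two, the front part becoming
 * the used allocation and the tail a new free arena:
 *
 *   before: [ F: fsize                 ]
 *   after:  [ U: size ][ F: fsize-size ]
 */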

void *malloc(size_t size)
{
  struct free_arena_header *fp;

  if ( size == 0 )
    return NULL;

  /* Add the obligatory arena header, and round up */
  size = (size+2*sizeof(struct arena_header)-1) & ARENA_SIZE_MASK;

  for ( fp = __malloc_head.next_free ; fp->a.type != ARENA_TYPE_HEAD ;
        fp = fp->next_free ) {
    if ( fp->a.size >= size ) {
      /* Found fit -- allocate out of this block */
      return __malloc_from_block(fp, size);
    }
  }

  /* Nothing found... need to request a block from the kernel */
  return NULL;   /* No kernel to get stuff from */
}
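
/*
 * Usage note: malloc() does a first-fit scan of the free chain, and
 * nothing in this file requests more memory from the kernel, so it
 * returns NULL once the initial arena is exhausted.  A hypothetical
 * caller must therefore always check the result:
 *
 *   char *p = malloc(128);
 *   if ( p == NULL )
 *     exit(1);
 */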