/* Heap management routines for GNU Emacs on the Microsoft Windows
API. Copyright (C) 1994, 2001-2014 Free Software Foundation, Inc.
This file is part of GNU Emacs.
GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>. */
/*
Geoff Voelker (voelker@cs.washington.edu) 7-29-94
*/
/*
Heavily modified by Fabrice Popineau (fabrice.popineau@gmail.com) 28-02-2014
*/
/*
Memory allocation scheme for w32/w64:
- Buffers are mmap'ed using a very simple emulation of mmap/munmap
- During the temacs phase:
* we use a private heap declared to be stored into the `dumped_data'
* unfortunately, this heap cannot be made growable, so the size of
blocks it can allocate is limited to (0x80000 - pagesize)
* the blocks that are larger than this are allocated from the end
of the `dumped_data' array; there are not so many of them.
We use a very simple first-fit scheme to reuse those blocks.
* we check that the private heap does not cross the area used
by the bigger chunks.
- During the emacs phase:
* we create a private heap for new memory blocks
* we make sure that we never free a block that has been dumped.
Freeing a dumped block could work in principle, but may prove
unreliable if we distribute binaries of emacs.exe: MS does not
guarantee that the heap data structures are the same across all
versions of their OS, even though the API is available since XP. */
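/* An illustrative sketch (not compiled) of how the scheme above is
   meant to be used.  The function name is made up, and the sketch only
   exercises functions and pointers defined later in this file; how
   production code actually reaches them (for instance through
   malloc/realloc/free redirections in a header) is an assumption and
   is not shown.  Buffers use the mmap emulation sketched at the end of
   this file instead.  */
#if 0
static void
allocation_scheme_sketch (void)
{
  /* init_heap selects either the *_before_dump functions (temacs
     phase: private heap inside dumped_data[], big chunks at its end)
     or the *_after_dump functions (emacs phase: a fresh private
     heap).  */
  init_heap ();

  /* Ordinary allocations then go through the selected pointers.  */
  void *p = the_malloc_fn (256);
  p = the_realloc_fn (p, 1024);
  the_free_fn (p);
}
#endif	/* 0 */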
#include <config.h>
#include <stdio.h>
#include <errno.h>
#include <sys/mman.h>
#include "w32common.h"
#include "w32heap.h"
#include "lisp.h" /* for VALMASK */
/* We chose to leave these declarations here.  They are used only in
   this file.  RtlCreateHeap has been available in ntdll.dll since XP,
   and its declarations ship with the DDK.  People often complained
   that HeapCreate doesn't offer the ability to create a heap at a
   given address, which we need here and which RtlCreateHeap provides.
   We reproduce the DDK definitions below.  */
typedef PVOID (WINAPI * RtlCreateHeap_Proc) (
/* _In_ */ ULONG Flags,
/* _In_opt_ */ PVOID HeapBase,
/* _In_opt_ */ SIZE_T ReserveSize,
/* _In_opt_ */ SIZE_T CommitSize,
/* _In_opt_ */ PVOID Lock,
/* _In_opt_ */ PVOID Parameters
);
typedef LONG NTSTATUS;
typedef NTSTATUS
(NTAPI * PRTL_HEAP_COMMIT_ROUTINE)(
IN PVOID Base,
IN OUT PVOID *CommitAddress,
IN OUT PSIZE_T CommitSize
);
typedef struct _RTL_HEAP_PARAMETERS {
ULONG Length;
SIZE_T SegmentReserve;
SIZE_T SegmentCommit;
SIZE_T DeCommitFreeBlockThreshold;
SIZE_T DeCommitTotalFreeThreshold;
SIZE_T MaximumAllocationSize;
SIZE_T VirtualMemoryThreshold;
SIZE_T InitialCommit;
SIZE_T InitialReserve;
PRTL_HEAP_COMMIT_ROUTINE CommitRoutine;
SIZE_T Reserved[ 2 ];
} RTL_HEAP_PARAMETERS, *PRTL_HEAP_PARAMETERS;
/* We reserve space for dumping emacs lisp byte-code inside a static
   array.  By storing it in an array, the generic mechanism in
   unexecw32.c will be able to dump it without the need to add a
   special segment to the executable.  In order to be able to do this
   without losing too much space, we need to create a Windows heap at
   the specific address of the static array.  RtlCreateHeap, exported
   by ntdll.dll since XP, can do this: it allows creating a
   non-growable heap at a specific address.  So before dumping, we
   create a non-growable heap at the address of the dumped_data[]
   array.  After dumping, we reuse memory allocated there without
   being able to free it (but most of it is not meant to be freed
   anyway), and we use a new private heap for all new allocations.  */
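/* The concrete setup happens in init_heap below: before dumping, we
   call RtlCreateHeap with dumped_data's page-aligned address as the
   heap base, and with a CommitRoutine (dumped_data_commit) that hands
   out slices of dumped_data[] so that the heap's memory comes from the
   array rather than from newly committed virtual memory.  */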
/* FIXME: Most of the space reserved for dumped_data[] is only used by
the 1st bootstrap-emacs.exe built while bootstrapping. Once the
preloaded Lisp files are byte-compiled, the next loadup uses less
than half of the size stated below. It would be nice to find a way
to build only the first bootstrap-emacs.exe with the large size,
and reset that to a lower value afterwards. */
#ifdef _WIN64
# define DUMPED_HEAP_SIZE (18*1024*1024)
#else
# define DUMPED_HEAP_SIZE (11*1024*1024)
#endif
static unsigned char dumped_data[DUMPED_HEAP_SIZE];
/* Info for keeping track of our dynamic heap used after dumping. */
unsigned char *data_region_base = NULL;
unsigned char *data_region_end = NULL;
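/* Number of bytes of dumped_data[] handed out to the temacs private
   heap so far; it grows in dumped_data_commit below.  */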
static DWORD_PTR committed = 0;
/* The maximum block size that can be handled by a non-growable w32
   heap is limited by the MaxBlockSize value below.
   This point deserves an explanation.
   The W32 heap allocator can be used for a growable heap or a
   non-growable one.
   A growable heap is not compatible with a fixed base address for the
   heap; only a non-growable one is.  One drawback of non-growable
   heaps is that they can hold only objects smaller than a certain
   size (the one defined below).  Most of the largest blocks are GC'ed
   before dumping.  In any case, and to be safe, we implement a simple
   first-fit allocation algorithm starting at the end of the
   dumped_data[] array, as depicted below:
   -----------------------------------------------
   |               |              |              |
   | Private heap  |->          <-|  Big chunks  |
   |               |              |              |
   -----------------------------------------------
   ^               ^              ^
   dumped_data     dumped_data    bc_limit
                   + committed
*/
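/* The invariant maintained during the temacs phase is that the two
   areas never meet: the committed part of the private heap (growing
   upwards from dumped_data) must stay below bc_limit (the big-chunks
   area growing downwards from the end of dumped_data[]).  This is
   checked both in dumped_data_commit and in malloc_before_dump.  */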
/* Info for managing our preload heap, which is essentially a fixed size
data area in the executable. */
#define PAGE_SIZE 0x1000
#define MaxBlockSize (0x80000 - PAGE_SIZE)
#define MAX_BLOCKS 0x40
static struct
{
unsigned char *address;
size_t size;
DWORD occupied;
} blocks[MAX_BLOCKS];
static DWORD blocks_number = 0;
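/* Lower bound of the big-chunks area.  It starts at the end of
   dumped_data[] (see init_heap) and moves downwards as big chunks are
   carved off by malloc_before_dump.  */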
static unsigned char *bc_limit;
/* Handle for the private heap:
- inside the dumped_data[] array before dump,
- outside of it after dump.
*/
HANDLE heap = NULL;
/* We redirect the standard allocation functions. */
malloc_fn the_malloc_fn;
realloc_fn the_realloc_fn;
free_fn the_free_fn;
/* It doesn't seem useful to allocate from a file mapping; it would be
   only if the memory were shared between processes.
   http://stackoverflow.com/questions/307060/what-is-the-purpose-of-allocating-pages-in-the-pagefile-with-createfilemapping */
/* This is the routine the heap allocator calls to commit memory when
   it needs more.  Before dumping, we hand out space from the
   fixed-size dumped_data[] array.  */
NTSTATUS NTAPI
dumped_data_commit (PVOID Base, PVOID *CommitAddress, PSIZE_T CommitSize)
{
/* This is used before dumping.
The private heap is stored at dumped_data[] address.
We commit contiguous areas of the dumped_data array
as requests arrive. */
*CommitAddress = data_region_base + committed;
committed += *CommitSize;
/* Check that the private heap area does not overlap the big chunks area. */
if (((unsigned char *)(*CommitAddress)) + *CommitSize >= bc_limit)
{
fprintf (stderr,
"dumped_data_commit: memory exhausted.\nEnlarge dumped_data[]!\n");
exit (-1);
}
return 0;
}
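/* Note that the Base argument (the heap base passed to RtlCreateHeap)
   is unused here: we simply hand out the next contiguous slice of
   dumped_data[], and `committed' records how much has been given away.
   Nothing is ever given back, which is fine since this heap is used
   only until the dump.  */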
/* Heap creation. */
/* We want to turn on Low Fragmentation Heap for XP and older systems.
MinGW32 lacks those definitions. */
#ifndef MINGW_W64
typedef enum _HEAP_INFORMATION_CLASS {
HeapCompatibilityInformation
} HEAP_INFORMATION_CLASS;
typedef WINBASEAPI BOOL (WINAPI * HeapSetInformation_Proc)(HANDLE,HEAP_INFORMATION_CLASS,PVOID,SIZE_T);
#endif
void
init_heap (void)
{
if (using_dynamic_heap)
{
unsigned long enable_lfh = 2;
/* After dumping, use a new private heap. We explicitly enable
the low fragmentation heap (LFH) here, for the sake of pre
Vista versions. Note: this will harmlessly fail on Vista and
later, where the low-fragmentation heap is enabled by
default. It will also fail on pre-Vista versions when Emacs
is run under a debugger; set _NO_DEBUG_HEAP=1 in the
environment before starting GDB to get low fragmentation heap
on XP and older systems, for the price of losing "certain
heap debug options"; for the details see
http://msdn.microsoft.com/en-us/library/windows/desktop/aa366705%28v=vs.85%29.aspx. */
data_region_end = data_region_base;
/* Create the private heap. */
heap = HeapCreate (0, 0, 0);
#ifndef MINGW_W64
/* Set the low-fragmentation heap for OS before Vista. */
HMODULE hm_kernel32dll = LoadLibrary ("kernel32.dll");
HeapSetInformation_Proc s_pfn_Heap_Set_Information = (HeapSetInformation_Proc) GetProcAddress (hm_kernel32dll, "HeapSetInformation");
if (s_pfn_Heap_Set_Information != NULL)
{
if (s_pfn_Heap_Set_Information ((PVOID) heap,
HeapCompatibilityInformation,
&enable_lfh, sizeof(enable_lfh)) == 0)
DebPrint (("Enabling Low Fragmentation Heap failed: error %ld\n",
GetLastError ()));
}
#endif
the_malloc_fn = malloc_after_dump;
the_realloc_fn = realloc_after_dump;
the_free_fn = free_after_dump;
}
else
{
/* Find the RtlCreateHeap function. Headers for this function
are provided with the w32 ddk, but the function is available
in ntdll.dll since XP. */
HMODULE hm_ntdll = LoadLibrary ("ntdll.dll");
RtlCreateHeap_Proc s_pfn_Rtl_Create_Heap
= (RtlCreateHeap_Proc) GetProcAddress (hm_ntdll, "RtlCreateHeap");
/* Specific parameters for the private heap. */
RTL_HEAP_PARAMETERS params;
      ZeroMemory (&params, sizeof(params));
params.Length = sizeof(RTL_HEAP_PARAMETERS);
data_region_base = (unsigned char *)ROUND_UP (dumped_data, 0x1000);
data_region_end = bc_limit = dumped_data + DUMPED_HEAP_SIZE;
params.InitialCommit = committed = 0x1000;
params.InitialReserve = sizeof(dumped_data);
/* Use our own routine to commit memory from the dumped_data
array. */
params.CommitRoutine = &dumped_data_commit;
/* Create the private heap. */
if (s_pfn_Rtl_Create_Heap == NULL)
{
fprintf (stderr, "Cannot build Emacs without RtlCreateHeap being available; exiting.\n");
exit (-1);
}
      heap = s_pfn_Rtl_Create_Heap (0, data_region_base, 0, 0, NULL, &params);
the_malloc_fn = malloc_before_dump;
the_realloc_fn = realloc_before_dump;
the_free_fn = free_before_dump;
}
/* Update system version information to match current system. */
cache_system_info ();
}
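/* malloc, realloc and free are presumably redefined as macros by one
   of the headers included above (e.g. to redirect them through the
   function pointers set in init_heap); undefine them so the plain
   names can be used safely below.  */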
#undef malloc
#undef realloc
#undef free
/* FREEABLE_P checks whether ADDR lies outside the dumped_data[] array,
   i.e. whether it was allocated from the post-dump private heap and
   can therefore be passed to HeapFree safely.  */
#define FREEABLE_P(addr) \
((unsigned char *)(addr) < dumped_data \
|| (unsigned char *)(addr) >= dumped_data + DUMPED_HEAP_SIZE)
void *
malloc_after_dump (size_t size)
{
/* Use the new private heap. */
void *p = HeapAlloc (heap, 0, size);
/* After dump, keep track of the "brk value" for sbrk(0). */
if (p)
{
unsigned char *new_brk = (unsigned char *)p + size;
if (new_brk > data_region_end)
data_region_end = new_brk;
}
else
errno = ENOMEM;
return p;
}
void *
malloc_before_dump (size_t size)
{
void *p;
/* Before dumping. The private heap can handle only requests for
less than MaxBlockSize. */
if (size < MaxBlockSize)
{
/* Use the private heap if possible. */
p = HeapAlloc (heap, 0, size);
if (!p)
errno = ENOMEM;
}
else
{
/* Find the first big chunk that can hold the requested size. */
int i = 0;
for (i = 0; i < blocks_number; i++)
{
if (blocks[i].occupied == 0 && blocks[i].size >= size)
break;
}
if (i < blocks_number)
{
/* If found, use it. */
p = blocks[i].address;
blocks[i].occupied = TRUE;
}
else
{
/* Allocate a new big chunk from the end of the dumped_data
array. */
if (blocks_number >= MAX_BLOCKS)
{
fprintf (stderr,
"malloc_before_dump: no more big chunks available.\nEnlarge MAX_BLOCKS!\n");
exit (-1);
}
bc_limit -= size;
bc_limit = (unsigned char *)ROUND_DOWN (bc_limit, 0x10);
p = bc_limit;
blocks[blocks_number].address = p;
blocks[blocks_number].size = size;
blocks[blocks_number].occupied = TRUE;
blocks_number++;
/* Check that areas do not overlap. */
if (bc_limit < dumped_data + committed)
{
fprintf (stderr,
"malloc_before_dump: memory exhausted.\nEnlarge dumped_data[]!\n");
exit (-1);
}
}
}
return p;
}
/* Re-allocate the previously allocated block in ptr, making the new
block SIZE bytes long. */
void *
realloc_after_dump (void *ptr, size_t size)
{
void *p;
/* After dumping. */
if (FREEABLE_P (ptr))
{
/* Reallocate the block since it lies in the new heap. */
p = HeapReAlloc (heap, 0, ptr, size);
if (!p)
errno = ENOMEM;
}
else
{
/* If the block lies in the dumped data, do not free it. Only
allocate a new one. */
p = HeapAlloc (heap, 0, size);
if (p)
CopyMemory (p, ptr, size);
else
errno = ENOMEM;
}
/* After dump, keep track of the "brk value" for sbrk(0). */
if (p)
{
unsigned char *new_brk = (unsigned char *)p + size;
if (new_brk > data_region_end)
data_region_end = new_brk;
}
return p;
}
void *
realloc_before_dump (void *ptr, size_t size)
{
void *p;
/* Before dumping. */
if (dumped_data < (unsigned char *)ptr
&& (unsigned char *)ptr < bc_limit && size <= MaxBlockSize)
{
p = HeapReAlloc (heap, 0, ptr, size);
if (!p)
errno = ENOMEM;
}
else
{
/* In this case, either the new block is too large for the heap,
or the old block was already too large. In both cases,
malloc_before_dump() and free_before_dump() will take care of
reallocation. */
p = malloc_before_dump (size);
/* If SIZE is below MaxBlockSize, malloc_before_dump will try to
allocate it in the fixed heap. If that fails, we could have
kept the block in its original place, above bc_limit, instead
of failing the call as below. But this doesn't seem to be
worth the added complexity, as loadup allocates only a very
small number of large blocks, and never reallocates them. */
if (p)
{
CopyMemory (p, ptr, size);
free_before_dump (ptr);
}
}
return p;
}
/* Free a block allocated by `malloc', `realloc' or `calloc'. */
void
free_after_dump (void *ptr)
{
/* After dumping. */
if (FREEABLE_P (ptr))
{
/* Free the block if it is in the new private heap. */
HeapFree (heap, 0, ptr);
}
}
void
free_before_dump (void *ptr)
{
/* Before dumping. */
if (dumped_data < (unsigned char *)ptr
&& (unsigned char *)ptr < bc_limit)
{
/* Free the block if it is allocated in the private heap. */
HeapFree (heap, 0, ptr);
}
else
{
/* Look for the big chunk. */
int i;
      for (i = 0; i < blocks_number; i++)
        {
          if (blocks[i].address == ptr)
            {
              /* Reset block occupation if found.  */
              blocks[i].occupied = 0;
              break;
            }
        }
      /* Trigger an assertion violation if the block was not found.  */
      eassert (i < blocks_number);
}
}
#ifdef ENABLE_CHECKING
void
report_temacs_memory_usage (void)
{
DWORD blocks_used = 0;
size_t large_mem_used = 0;
int i;
for (i = 0; i < blocks_number; i++)
if (blocks[i].occupied)
{
blocks_used++;
large_mem_used += blocks[i].size;
}
/* Emulate 'message', which writes to stderr in non-interactive
sessions. */
fprintf (stderr,
"Dump memory usage: Heap: %" PRIu64 " Large blocks(%lu/%lu): %" PRIu64 "/%" PRIu64 "\n",
(unsigned long long)committed, blocks_used, blocks_number,
(unsigned long long)large_mem_used,
(unsigned long long)(dumped_data + DUMPED_HEAP_SIZE - bc_limit));
}
#endif
/* Emulate getpagesize. */
int
getpagesize (void)
{
return sysinfo_cache.dwPageSize;
}
void *
sbrk (ptrdiff_t increment)
{
/* data_region_end is the address beyond the last allocated byte.
The sbrk() function is not emulated at all, except for a 0 value
of its parameter. This is needed by the Emacs Lisp function
`memory-limit'. */
eassert (increment == 0);
return data_region_end;
}
#define MAX_BUFFER_SIZE (512 * 1024 * 1024)
/* MMAP allocation for buffers. */
void *
mmap_alloc (void **var, size_t nbytes)
{
void *p = NULL;
  /* We implement amortized allocation.  We start by reserving twice
     the size requested and commit only the size requested.  Later,
     mmap_realloc can commit pages from the reserved area without
     moving the block, reallocating only when the reservation is
     exhausted.  Shrinking releases memory only when needed to stay
     within the 2x range.  This is a big win when visiting compressed
     files, where the final size of the buffer is not known in
     advance, and the buffer is enlarged several times as the data is
     decompressed on the fly.  (See the illustrative sketch after
     mmap_realloc at the end of this file.)  */
if (nbytes < MAX_BUFFER_SIZE)
p = VirtualAlloc (NULL, (nbytes * 2), MEM_RESERVE, PAGE_READWRITE);
  /* If that fails, or if the request is 512 MB or more, reserve just
     the requested size.  */
if (p == NULL)
p = VirtualAlloc (NULL, nbytes, MEM_RESERVE, PAGE_READWRITE);
  if (p != NULL)
    {
      /* Now, commit pages for NBYTES.  */
      *var = VirtualAlloc (p, nbytes, MEM_COMMIT, PAGE_READWRITE);
      /* If committing failed, release the reservation so the address
         space is not leaked, and report the failure below.  */
      if (*var == NULL)
        {
          VirtualFree (p, 0, MEM_RELEASE);
          p = NULL;
        }
    }
if (!p)
{
if (GetLastError () == ERROR_NOT_ENOUGH_MEMORY)
errno = ENOMEM;
else
{
DebPrint (("mmap_alloc: error %ld\n", GetLastError ()));
errno = EINVAL;
}
}
return *var = p;
}
void
mmap_free (void **var)
{
if (*var)
{
if (VirtualFree (*var, 0, MEM_RELEASE) == 0)
DebPrint (("mmap_free: error %ld\n", GetLastError ()));
*var = NULL;
}
}
void *
mmap_realloc (void **var, size_t nbytes)
{
MEMORY_BASIC_INFORMATION memInfo, m2;
if (*var == NULL)
return mmap_alloc (var, nbytes);
/* This case happens in init_buffer(). */
if (nbytes == 0)
{
mmap_free (var);
return mmap_alloc (var, nbytes);
}
if (VirtualQuery (*var, &memInfo, sizeof (memInfo)) == 0)
DebPrint (("mmap_realloc: VirtualQuery error = %ld\n", GetLastError ()));
/* We need to enlarge the block. */
if (memInfo.RegionSize < nbytes)
{
if (VirtualQuery (*var + memInfo.RegionSize, &m2, sizeof(m2)) == 0)
DebPrint (("mmap_realloc: VirtualQuery error = %ld\n",
GetLastError ()));
/* If there is enough room in the current reserved area, then
commit more pages as needed. */
if (m2.State == MEM_RESERVE
&& nbytes <= memInfo.RegionSize + m2.RegionSize)
{
void *p;
p = VirtualAlloc (*var + memInfo.RegionSize,
nbytes - memInfo.RegionSize,
MEM_COMMIT, PAGE_READWRITE);
if (!p /* && GetLastError() != ERROR_NOT_ENOUGH_MEMORY */)
{
DebPrint (("realloc enlarge: VirtualAlloc error %ld\n",
GetLastError ()));
errno = ENOMEM;
}
return *var;
}
else
{
/* Else we must actually enlarge the block by allocating a
new one and copying previous contents from the old to the
new one. */
void *old_ptr = *var;
if (mmap_alloc (var, nbytes))
{
CopyMemory (*var, old_ptr, memInfo.RegionSize);
mmap_free (&old_ptr);
return *var;
}
else
{
/* We failed to enlarge the buffer. */
*var = old_ptr;
return NULL;
}
}
}
/* If we are shrinking by more than one page... */
if (memInfo.RegionSize > nbytes + getpagesize())
{
/* If we are shrinking a lot... */
if ((memInfo.RegionSize / 2) > nbytes)
{
/* Let's give some memory back to the system and release
some pages. */
void *old_ptr = *var;
if (mmap_alloc (var, nbytes))
{
CopyMemory (*var, old_ptr, nbytes);
mmap_free (&old_ptr);
return *var;
}
else
{
/* In case we fail to shrink, try to go on with the old block.
But that means there is a lot of memory pressure.
We could also decommit pages. */
*var = old_ptr;
return *var;
}
}
  /* We can still decommit pages.  */
  if (VirtualFree (*var + nbytes + getpagesize (),
                   memInfo.RegionSize - nbytes - getpagesize (),
                   MEM_DECOMMIT) == 0)
DebPrint (("mmap_realloc: VirtualFree error %ld\n", GetLastError ()));
return *var;
}
/* Not enlarging, not shrinking by more than one page. */
return *var;
}
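/* An illustrative sketch (not compiled) of the buffer lifecycle
   handled by mmap_alloc, mmap_realloc and mmap_free above.  The
   function name and the sizes are made up for illustration.  */
#if 0
static void
buffer_mmap_sketch (void)
{
  void *text = NULL;

  /* Reserves about 2 MB of address space and commits 1 MB.  */
  if (mmap_alloc (&text, 1024 * 1024) == NULL)
    return;

  /* Grows within the reserved area: more pages are committed and the
     block does not move.  */
  if (mmap_realloc (&text, 1536 * 1024) == NULL)
    return;

  /* Shrinking to less than half the committed size reallocates the
     block to give memory back; smaller shrinks only decommit the tail
     pages.  */
  mmap_realloc (&text, 256 * 1024);

  /* Releases both committed and reserved pages.  */
  mmap_free (&text);
}
#endif	/* 0 */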