/*
 * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "PerProcess.h"
#include "VMHeap.h"
#include <thread>
namespace bmalloc {

XLargeRange VMHeap::tryAllocateLargeChunk(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size)
{
    // We allocate VM in aligned multiples to increase the chances that
    // the OS will provide contiguous ranges that we can merge.
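    // roundUpToMultipleOf() can wrap past zero for inputs within chunkSize of
    // SIZE_MAX; the "rounded < original" comparisons below detect that wrap
    // and fail the allocation instead of mapping a bogus, smaller size.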
    size_t roundedAlignment = roundUpToMultipleOf<chunkSize>(alignment);
    if (roundedAlignment < alignment) // Check for overflow
        return XLargeRange();
    alignment = roundedAlignment;

    size_t roundedSize = roundUpToMultipleOf<chunkSize>(size);
    if (roundedSize < size) // Check for overflow
        return XLargeRange();
    size = roundedSize;
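
    // tryVMAllocate() is the failable mapping path: on address space
    // exhaustion it returns null rather than crashing, and we propagate
    // the failure to the caller as an empty XLargeRange.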
    void* memory = tryVMAllocate(alignment, size);
    if (!memory)
        return XLargeRange();
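
    // Placement-new constructs the Chunk metadata in the newly mapped memory
    // itself, so no separate metadata allocation is required.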
    Chunk* chunk = new (memory) Chunk(lock);

#if BOS(DARWIN)
    m_zone.addChunk(chunk);
#endif
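
    // The third XLargeRange argument is the range's physical (committed)
    // size; a fresh, untouched mapping starts at 0, and pages are committed
    // on demand.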
    return XLargeRange(chunk->bytes(), size, 0);
}

void VMHeap::allocateSmallChunk(std::lock_guard<StaticMutex>& lock, size_t pageClass)
{
    Chunk* chunk = new (vmAllocate(chunkSize, chunkSize)) Chunk(lock);

#if BOS(DARWIN)
    m_zone.addChunk(chunk);
#endif
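
    // A logical page for this class may span several smallPageSize-granularity
    // metadata slots; smallPageCount is how many slots each page covers.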
    size_t pageSize = bmalloc::pageSize(pageClass);
    size_t smallPageCount = pageSize / smallPageSize;

    // We align to our page size in order to guarantee that we can service
    // aligned allocation requests at equal and smaller powers of two.
    size_t metadataSize = divideRoundingUp(sizeof(Chunk), pageSize) * pageSize;

    Object begin(chunk, metadataSize);
    Object end(chunk, chunkSize);

    for (Object it = begin; it + pageSize <= end; it = it + pageSize) {
        SmallPage* page = it.page();

        new (page) SmallPage;
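
        // Each metadata slot records its distance (slide) back to the head
        // SmallPage, so a lookup at any interior offset can recover the
        // page's boundary.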
        for (size_t i = 0; i < smallPageCount; ++i)
            page[i].setSlide(i);

        m_smallPages[pageClass].push(page);
    }
}

} // namespace bmalloc