Diffstat (limited to 'Source/JavaScriptCore/jit/ExecutableAllocator.cpp')
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocator.cpp  463
1 file changed, 309 insertions(+), 154 deletions(-)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
index 5ac6cc412..44f8fbae4 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2009, 2015, 2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -20,245 +20,400 @@
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
-
#include "ExecutableAllocator.h"
-#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+#if ENABLE(ASSEMBLER)
+
#include "CodeProfiling.h"
-#include <wtf/HashSet.h>
+#include "ExecutableAllocationFuzz.h"
+#include "JSCInlines.h"
#include <wtf/MetaAllocator.h>
#include <wtf/PageReservation.h>
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-#include <wtf/PassOwnPtr.h>
+
+#if OS(DARWIN)
+#include <sys/mman.h>
+#endif
+
+#include "LinkBuffer.h"
+#include "MacroAssembler.h"
+
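+// On these platforms, mach_vm_remap() can map the JIT region at a second,
+// writable address while the primary mapping stays read + execute only.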
+#if PLATFORM(MAC) || (PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000)
+#define HAVE_REMAP_JIT 1
+#endif
+
+#if HAVE(REMAP_JIT)
+#if CPU(ARM64) && PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000
+#define USE_EXECUTE_ONLY_JIT_WRITE_FUNCTION 1
#endif
-#include <wtf/ThreadingPrimitives.h>
-#include <wtf/VMTags.h>
#endif
-// Uncomment to create an artificial executable memory usage limit. This limit
-// is imperfect and is primarily useful for testing the VM's ability to handle
-// out-of-executable-memory situations.
-// #define EXECUTABLE_MEMORY_LIMIT 1000000
+#if OS(DARWIN)
+#include <mach/mach.h>
+extern "C" {
+ /* Routine mach_vm_remap */
+#ifdef mig_external
+ mig_external
+#else
+ extern
+#endif /* mig_external */
+ kern_return_t mach_vm_remap
+ (
+ vm_map_t target_task,
+ mach_vm_address_t *target_address,
+ mach_vm_size_t size,
+ mach_vm_offset_t mask,
+ int flags,
+ vm_map_t src_task,
+ mach_vm_address_t src_address,
+ boolean_t copy,
+ vm_prot_t *cur_protection,
+ vm_prot_t *max_protection,
+ vm_inherit_t inheritance
+ );
+}
-#if ENABLE(ASSEMBLER)
+#endif
using namespace WTF;
namespace JSC {
-#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+JS_EXPORTDATA uintptr_t startOfFixedExecutableMemoryPool;
+JS_EXPORTDATA uintptr_t endOfFixedExecutableMemoryPool;
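+// These bracket the fixed reservation; they are set once by the
+// FixedVMPoolExecutableAllocator constructor below.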
+
+JS_EXPORTDATA JITWriteFunction jitWriteFunction;
+
+#if !USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION) && HAVE(REMAP_JIT)
+static uintptr_t startOfFixedWritableMemoryPool;
+#endif
-class DemandExecutableAllocator : public MetaAllocator {
+class FixedVMPoolExecutableAllocator : public MetaAllocator {
+ WTF_MAKE_FAST_ALLOCATED;
public:
- DemandExecutableAllocator()
- : MetaAllocator(jitAllocationGranule)
- {
- MutexLocker lock(allocatorsMutex());
- allocators().add(this);
- // Don't preallocate any memory here.
- }
-
- virtual ~DemandExecutableAllocator()
+ FixedVMPoolExecutableAllocator()
+ : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
{
- {
- MutexLocker lock(allocatorsMutex());
- allocators().remove(this);
+ size_t reservationSize;
+ if (Options::jitMemoryReservationSize())
+ reservationSize = Options::jitMemoryReservationSize();
+ else
+ reservationSize = fixedExecutableMemoryPoolSize;
+ reservationSize = roundUpToMultipleOf(pageSize(), reservationSize);
+ m_reservation = PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+ if (m_reservation) {
+ ASSERT(m_reservation.size() == reservationSize);
+ void* reservationBase = m_reservation.base();
+
+ if (Options::useSeparatedWXHeap()) {
+            // The first page of our JIT allocation is set aside for the JIT
+            // write thunk (see initializeSeparatedWXHeaps()).
+ ASSERT(reservationSize >= pageSize() * 2);
+ reservationBase = (void*)((uintptr_t)reservationBase + pageSize());
+ reservationSize -= pageSize();
+ initializeSeparatedWXHeaps(m_reservation.base(), pageSize(), reservationBase, reservationSize);
+ }
+
+ addFreshFreeSpace(reservationBase, reservationSize);
+
+ startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(reservationBase);
+ endOfFixedExecutableMemoryPool = startOfFixedExecutableMemoryPool + reservationSize;
}
- for (unsigned i = 0; i < reservations.size(); ++i)
- reservations.at(i).deallocate();
}
- static size_t bytesAllocatedByAllAllocators()
+ virtual ~FixedVMPoolExecutableAllocator();
+
+protected:
+ void* allocateNewSpace(size_t&) override
{
- size_t total = 0;
- MutexLocker lock(allocatorsMutex());
- for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
- total += (*allocator)->bytesAllocated();
- return total;
+ // We're operating in a fixed pool, so new allocation is always prohibited.
+ return 0;
}
- static size_t bytesCommittedByAllocactors()
+ void notifyNeedPage(void* page) override
{
- size_t total = 0;
- MutexLocker lock(allocatorsMutex());
- for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
- total += (*allocator)->bytesCommitted();
- return total;
+#if USE(MADV_FREE_FOR_JIT_MEMORY)
+ UNUSED_PARAM(page);
+#else
+ m_reservation.commit(page, pageSize());
+#endif
}
-#if ENABLE(META_ALLOCATOR_PROFILE)
- static void dumpProfileFromAllAllocators()
+ void notifyPageIsFree(void* page) override
{
- MutexLocker lock(allocatorsMutex());
- for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
- (*allocator)->dumpProfile();
- }
+#if USE(MADV_FREE_FOR_JIT_MEMORY)
+ for (;;) {
+ int result = madvise(page, pageSize(), MADV_FREE);
+ if (!result)
+ return;
+ ASSERT(result == -1);
+ if (errno != EAGAIN) {
+ RELEASE_ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure.
+ break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway.
+ }
+ }
+#else
+ m_reservation.decommit(page, pageSize());
#endif
+ }
-protected:
- virtual void* allocateNewSpace(size_t& numPages)
+private:
+#if OS(DARWIN) && HAVE(REMAP_JIT)
+ void initializeSeparatedWXHeaps(void* stubBase, size_t stubSize, void* jitBase, size_t jitSize)
{
- size_t newNumPages = (((numPages * pageSize() + JIT_ALLOCATOR_LARGE_ALLOC_SIZE - 1) / JIT_ALLOCATOR_LARGE_ALLOC_SIZE * JIT_ALLOCATOR_LARGE_ALLOC_SIZE) + pageSize() - 1) / pageSize();
-
- ASSERT(newNumPages >= numPages);
-
- numPages = newNumPages;
-
-#ifdef EXECUTABLE_MEMORY_LIMIT
- if (bytesAllocatedByAllAllocators() >= EXECUTABLE_MEMORY_LIMIT)
- return 0;
+ mach_vm_address_t writableAddr = 0;
+
+ // Create a second mapping of the JIT region at a random address.
+ vm_prot_t cur, max;
+ int remapFlags = VM_FLAGS_ANYWHERE;
+#if defined(VM_FLAGS_RANDOM_ADDR)
+ remapFlags |= VM_FLAGS_RANDOM_ADDR;
+#endif
+ kern_return_t ret = mach_vm_remap(mach_task_self(), &writableAddr, jitSize, 0,
+ remapFlags,
+ mach_task_self(), (mach_vm_address_t)jitBase, FALSE,
+ &cur, &max, VM_INHERIT_DEFAULT);
+
+ bool remapSucceeded = (ret == KERN_SUCCESS);
+ if (!remapSucceeded)
+ return;
+
+ // Assemble a thunk that will serve as the means for writing into the JIT region.
+ MacroAssemblerCodeRef writeThunk = jitWriteThunkGenerator(reinterpret_cast<void*>(writableAddr), stubBase, stubSize);
+
+ int result = 0;
+
+#if USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
+ // Prevent reading the write thunk code.
+ result = mprotect(stubBase, stubSize, VM_PROT_EXECUTE_ONLY);
+ RELEASE_ASSERT(!result);
#endif
-
- PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
- RELEASE_ASSERT(reservation);
-
- reservations.append(reservation);
-
- return reservation.base();
+
+ // Prevent writing into the executable JIT mapping.
+ result = mprotect(jitBase, jitSize, VM_PROT_READ | VM_PROT_EXECUTE);
+ RELEASE_ASSERT(!result);
+
+ // Prevent execution in the writable JIT mapping.
+ result = mprotect((void*)writableAddr, jitSize, VM_PROT_READ | VM_PROT_WRITE);
+ RELEASE_ASSERT(!result);
+
+ // Zero out writableAddr to avoid leaking the address of the writable mapping.
+ memset_s(&writableAddr, sizeof(writableAddr), 0, sizeof(writableAddr));
+
+ jitWriteFunction = reinterpret_cast<JITWriteFunction>(writeThunk.code().executableAddress());
}
-
- virtual void notifyNeedPage(void* page)
+
+#if CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
+ MacroAssemblerCodeRef jitWriteThunkGenerator(void* writableAddr, void* stubBase, size_t stubSize)
{
- OSAllocator::commit(page, pageSize(), EXECUTABLE_POOL_WRITABLE, true);
+ using namespace ARM64Registers;
+ using TrustedImm32 = MacroAssembler::TrustedImm32;
+
+ MacroAssembler jit;
+
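+    // AAPCS64 argument registers, matching the JITWriteFunction signature:
+    // x0 = offset into the JIT region, x1 = source buffer, x2 = byte count.
+    // Rebase x0 into the writable mapping, then perform memcpy(x0, x1, x2).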
+ jit.move(MacroAssembler::TrustedImmPtr(writableAddr), x7);
+ jit.addPtr(x7, x0);
+
+ jit.move(x0, x3);
+ MacroAssembler::Jump smallCopy = jit.branch64(MacroAssembler::Below, x2, MacroAssembler::TrustedImm64(64));
+
+ jit.add64(TrustedImm32(32), x3);
+ jit.and64(TrustedImm32(-32), x3);
+ jit.loadPair64(x1, x12, x13);
+ jit.loadPair64(x1, TrustedImm32(16), x14, x15);
+ jit.sub64(x3, x0, x5);
+ jit.addPtr(x5, x1);
+
+ jit.loadPair64(x1, x8, x9);
+ jit.loadPair64(x1, TrustedImm32(16), x10, x11);
+ jit.add64(TrustedImm32(32), x1);
+ jit.sub64(x5, x2);
+ jit.storePair64(x12, x13, x0);
+ jit.storePair64(x14, x15, x0, TrustedImm32(16));
+ MacroAssembler::Jump cleanup = jit.branchSub64(MacroAssembler::BelowOrEqual, TrustedImm32(64), x2);
+
+ MacroAssembler::Label copyLoop = jit.label();
+ jit.storePair64WithNonTemporalAccess(x8, x9, x3);
+ jit.storePair64WithNonTemporalAccess(x10, x11, x3, TrustedImm32(16));
+ jit.add64(TrustedImm32(32), x3);
+ jit.loadPair64WithNonTemporalAccess(x1, x8, x9);
+ jit.loadPair64WithNonTemporalAccess(x1, TrustedImm32(16), x10, x11);
+ jit.add64(TrustedImm32(32), x1);
+ jit.branchSub64(MacroAssembler::Above, TrustedImm32(32), x2).linkTo(copyLoop, &jit);
+
+ cleanup.link(&jit);
+ jit.add64(x2, x1);
+ jit.loadPair64(x1, x12, x13);
+ jit.loadPair64(x1, TrustedImm32(16), x14, x15);
+ jit.storePair64(x8, x9, x3);
+ jit.storePair64(x10, x11, x3, TrustedImm32(16));
+ jit.addPtr(x2, x3);
+ jit.storePair64(x12, x13, x3, TrustedImm32(32));
+ jit.storePair64(x14, x15, x3, TrustedImm32(48));
+ jit.ret();
+
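+    // Tail path for copies smaller than 64 bytes: move 8 bytes at a time,
+    // then finish any remainder one byte at a time.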
+ MacroAssembler::Label local0 = jit.label();
+ jit.load64(x1, PostIndex(8), x6);
+ jit.store64(x6, x3, PostIndex(8));
+ smallCopy.link(&jit);
+ jit.branchSub64(MacroAssembler::AboveOrEqual, TrustedImm32(8), x2).linkTo(local0, &jit);
+ MacroAssembler::Jump local2 = jit.branchAdd64(MacroAssembler::Equal, TrustedImm32(8), x2);
+ MacroAssembler::Label local1 = jit.label();
+ jit.load8(x1, PostIndex(1), x6);
+ jit.store8(x6, x3, PostIndex(1));
+ jit.branchSub64(MacroAssembler::NotEqual, TrustedImm32(1), x2).linkTo(local1, &jit);
+ local2.link(&jit);
+ jit.ret();
+
+ LinkBuffer linkBuffer(jit, stubBase, stubSize);
+ // We don't use FINALIZE_CODE() for two reasons.
+    // The first is that we don't want the writable address to appear, as
+    // disassembled instructions, in the console or anywhere in memory via
+    // the PrintStream buffer.
+ // The second is we can't guarantee that the code is readable when using the
+ // asyncDisassembly option as our caller will set our pages execute only.
+ return linkBuffer.finalizeCodeWithoutDisassembly();
}
-
- virtual void notifyPageIsFree(void* page)
+#else // CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
+ static void genericWriteToJITRegion(off_t offset, const void* data, size_t dataSize)
{
- OSAllocator::decommit(page, pageSize());
+ memcpy((void*)(startOfFixedWritableMemoryPool + offset), data, dataSize);
}
-private:
- Vector<PageReservation, 16> reservations;
- static HashSet<DemandExecutableAllocator*>& allocators()
+ MacroAssemblerCodeRef jitWriteThunkGenerator(void* address, void*, size_t)
{
- DEFINE_STATIC_LOCAL(HashSet<DemandExecutableAllocator*>, sAllocators, ());
- return sAllocators;
+ startOfFixedWritableMemoryPool = reinterpret_cast<uintptr_t>(address);
+ uintptr_t function = (uintptr_t)((void*)&genericWriteToJITRegion);
+#if CPU(ARM_THUMB2)
+ // Handle thumb offset
+ function -= 1;
+#endif
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr((void*)function));
}
- static Mutex& allocatorsMutex()
+#endif
+
+#else // OS(DARWIN) && HAVE(REMAP_JIT)
+ void initializeSeparatedWXHeaps(void*, size_t, void*, size_t)
{
- DEFINE_STATIC_LOCAL(Mutex, mutex, ());
- return mutex;
}
-};
+#endif
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-void ExecutableAllocator::initializeAllocator()
-{
-}
-#else
-static DemandExecutableAllocator* gAllocator;
+private:
+ PageReservation m_reservation;
+};
-namespace {
-static inline DemandExecutableAllocator* allocator()
-{
- return gAllocator;
-}
-}
+static FixedVMPoolExecutableAllocator* allocator;
void ExecutableAllocator::initializeAllocator()
{
- ASSERT(!gAllocator);
- gAllocator = new DemandExecutableAllocator();
- CodeProfiling::notifyAllocator(gAllocator);
+ ASSERT(!allocator);
+ allocator = new FixedVMPoolExecutableAllocator();
+ CodeProfiling::notifyAllocator(allocator);
}
-#endif
ExecutableAllocator::ExecutableAllocator(VM&)
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
- : m_allocator(adoptPtr(new DemandExecutableAllocator()))
-#endif
{
- ASSERT(allocator());
+ ASSERT(allocator);
}
ExecutableAllocator::~ExecutableAllocator()
{
}
+FixedVMPoolExecutableAllocator::~FixedVMPoolExecutableAllocator()
+{
+ m_reservation.deallocate();
+}
+
bool ExecutableAllocator::isValid() const
{
- return true;
+ return !!allocator->bytesReserved();
}
bool ExecutableAllocator::underMemoryPressure()
{
-#ifdef EXECUTABLE_MEMORY_LIMIT
- return DemandExecutableAllocator::bytesAllocatedByAllAllocators() > EXECUTABLE_MEMORY_LIMIT / 2;
-#else
- return false;
-#endif
+ MetaAllocator::Statistics statistics = allocator->currentStatistics();
+ return statistics.bytesAllocated > statistics.bytesReserved / 2;
}
double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
{
- double result;
-#ifdef EXECUTABLE_MEMORY_LIMIT
- size_t bytesAllocated = DemandExecutableAllocator::bytesAllocatedByAllAllocators() + addedMemoryUsage;
- if (bytesAllocated >= EXECUTABLE_MEMORY_LIMIT)
- bytesAllocated = EXECUTABLE_MEMORY_LIMIT;
- result = static_cast<double>(EXECUTABLE_MEMORY_LIMIT) /
- (EXECUTABLE_MEMORY_LIMIT - bytesAllocated);
-#else
- UNUSED_PARAM(addedMemoryUsage);
- result = 1.0;
-#endif
+ MetaAllocator::Statistics statistics = allocator->currentStatistics();
+ ASSERT(statistics.bytesAllocated <= statistics.bytesReserved);
+ size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage;
+ size_t bytesAvailable = static_cast<size_t>(
+ statistics.bytesReserved * (1 - executablePoolReservationFraction));
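+    // For example (assuming a 32MB reservation and a reservation fraction of
+    // 0.25): bytesAvailable = 24MB, so at 12MB allocated the multiplier is
+    // 24 / (24 - 12) = 2.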
+ if (bytesAllocated >= bytesAvailable)
+ bytesAllocated = bytesAvailable;
+ double result = 1.0;
+ size_t divisor = bytesAvailable - bytesAllocated;
+ if (divisor)
+ result = static_cast<double>(bytesAvailable) / divisor;
if (result < 1.0)
result = 1.0;
return result;
-
}
-PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
+RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
- RefPtr<ExecutableMemoryHandle> result = allocator()->allocate(sizeInBytes, ownerUID);
- RELEASE_ASSERT(result || effort != JITCompilationMustSucceed);
- return result.release();
+ if (Options::logExecutableAllocation()) {
+ MetaAllocator::Statistics stats = allocator->currentStatistics();
+ dataLog("Allocating ", sizeInBytes, " bytes of executable memory with ", stats.bytesAllocated, " bytes allocated, ", stats.bytesReserved, " bytes reserved, and ", stats.bytesCommitted, " committed.\n");
+ }
+
+ if (effort != JITCompilationCanFail && Options::reportMustSucceedExecutableAllocations()) {
+ dataLog("Allocating ", sizeInBytes, " bytes of executable memory with JITCompilationMustSucceed.\n");
+ WTFReportBacktrace();
+ }
+
+ if (effort == JITCompilationCanFail
+ && doExecutableAllocationFuzzingIfEnabled() == PretendToFailExecutableAllocation)
+ return nullptr;
+
+ if (effort == JITCompilationCanFail) {
+ // Don't allow allocations if we are down to reserve.
+ MetaAllocator::Statistics statistics = allocator->currentStatistics();
+ size_t bytesAllocated = statistics.bytesAllocated + sizeInBytes;
+ size_t bytesAvailable = static_cast<size_t>(
+ statistics.bytesReserved * (1 - executablePoolReservationFraction));
+ if (bytesAllocated > bytesAvailable)
+ return nullptr;
+ }
+
+ RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
+ if (!result) {
+ if (effort != JITCompilationCanFail) {
+ dataLog("Ran out of executable memory while allocating ", sizeInBytes, " bytes.\n");
+ CRASH();
+ }
+ return nullptr;
+ }
+ return result;
}
-size_t ExecutableAllocator::committedByteCount()
+bool ExecutableAllocator::isValidExecutableMemory(const LockHolder& locker, void* address)
{
- return DemandExecutableAllocator::bytesCommittedByAllocactors();
+ return allocator->isInAllocatedMemory(locker, address);
}
-#if ENABLE(META_ALLOCATOR_PROFILE)
-void ExecutableAllocator::dumpProfile()
+Lock& ExecutableAllocator::getLock() const
{
- DemandExecutableAllocator::dumpProfileFromAllAllocators();
+ return allocator->getLock();
}
-#endif
-
-#endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
-
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-#if OS(WINDOWS)
-#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
-#endif
-
-void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting)
+size_t ExecutableAllocator::committedByteCount()
{
- size_t pageSize = WTF::pageSize();
-
- // Calculate the start of the page containing this region,
- // and account for this extra memory within size.
- intptr_t startPtr = reinterpret_cast<intptr_t>(start);
- intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
- void* pageStart = reinterpret_cast<void*>(pageStartPtr);
- size += (startPtr - pageStartPtr);
-
- // Round size up
- size += (pageSize - 1);
- size &= ~(pageSize - 1);
-
- mprotect(pageStart, size, (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX);
+ return allocator->bytesCommitted();
}
+#if ENABLE(META_ALLOCATOR_PROFILE)
+void ExecutableAllocator::dumpProfile()
+{
+ allocator->dumpProfile();
+}
#endif
-
+
}
-#endif // HAVE(ASSEMBLER)
+#endif // ENABLE(ASSEMBLER)
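
For context on how the separated heaps are consumed: the writable mapping set
up above is only ever reached through jitWriteFunction. Below is a minimal
sketch of a write helper in the spirit of WebKit's performJITMemcpy() (which
lives in ExecutableAllocator.h, not in this diff); the exact bounds check and
fallback here are illustrative assumptions, not code from this patch.

    static inline void* performJITMemcpy(void* dst, const void* src, size_t n)
    {
        uintptr_t dstAddr = reinterpret_cast<uintptr_t>(dst);
        // Route writes that target the fixed executable pool through the thunk.
        if (jitWriteFunction
            && dstAddr >= startOfFixedExecutableMemoryPool
            && dstAddr + n <= endOfFixedExecutableMemoryPool) {
            // The thunk takes an offset into the pool rather than a raw
            // pointer, so the writable mapping's address never leaks.
            off_t offset = static_cast<off_t>(dstAddr - startOfFixedExecutableMemoryPool);
            jitWriteFunction(offset, src, n);
            return dst;
        }
        // No separated heaps (jitWriteFunction is null), or the destination is
        // outside the pool: the memory is directly writable, so memcpy works.
        return memcpy(dst, src, n);
    }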