Diffstat (limited to 'Source/JavaScriptCore/assembler/LinkBuffer.cpp')
-rw-r--r--  Source/JavaScriptCore/assembler/LinkBuffer.cpp  220
1 file changed, 136 insertions(+), 84 deletions(-)
diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.cpp b/Source/JavaScriptCore/assembler/LinkBuffer.cpp
index a7f469da8..0309d585d 100644
--- a/Source/JavaScriptCore/assembler/LinkBuffer.cpp
+++ b/Source/JavaScriptCore/assembler/LinkBuffer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,111 +28,164 @@
#if ENABLE(ASSEMBLER)
+#include "CodeBlock.h"
+#include "JITCode.h"
+#include "JSCInlines.h"
#include "Options.h"
#include "VM.h"
#include <wtf/CompilationThread.h>
namespace JSC {
+bool shouldDumpDisassemblyFor(CodeBlock* codeBlock)
+{
+ if (codeBlock && JITCode::isOptimizingJIT(codeBlock->jitType()) && Options::dumpDFGDisassembly())
+ return true;
+ return Options::dumpDisassembly();
+}
+
LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly()
{
performFinalization();
ASSERT(m_didAllocate);
if (m_executableMemory)
- return CodeRef(m_executableMemory);
+ return CodeRef(*m_executableMemory);
return CodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(m_code));
}
LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...)
{
- ASSERT(Options::showDisassembly() || Options::showDFGDisassembly());
-
CodeRef result = finalizeCodeWithoutDisassembly();
-#if ENABLE(DISASSEMBLER)
- dataLogF("Generated JIT code for ");
+ if (m_alreadyDisassembled)
+ return result;
+
+ StringPrintStream out;
+ out.printf("Generated JIT code for ");
va_list argList;
va_start(argList, format);
- WTF::dataLogFV(format, argList);
+ out.vprintf(format, argList);
va_end(argList);
- dataLogF(":\n");
+ out.printf(":\n");
+
+ out.printf(" Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size());
+
+ CString header = out.toCString();
+
+ if (Options::asyncDisassembly()) {
+ disassembleAsynchronously(header, result, m_size, " ");
+ return result;
+ }
- dataLogF(" Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size());
+ dataLog(header);
disassemble(result.code(), m_size, " ", WTF::dataFile());
-#else
- UNUSED_PARAM(format);
-#endif // ENABLE(DISASSEMBLER)
return result;
}
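The asynchronous branch above formats the header eagerly (via StringPrintStream) and hands it off together with the code ref, so the expensive disassembly never blocks compilation. A self-contained mini-model of that hand-off, with std::thread standing in for JSC's disassembly machinery and every name below illustrative rather than the real API:

    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <thread>

    // Mini-model: the header is built up front (cheap); the disassembly of
    // [code, code + size) happens later on a detached worker thread.
    static void disassembleAsynchronouslySketch(std::string header, const void* code, std::size_t size)
    {
        std::thread([header = std::move(header), code, size] {
            std::fputs(header.c_str(), stderr);
            std::fprintf(stderr, "    <%zu bytes at %p would be disassembled here>\n", size, code);
        }).detach();
    }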
#if ENABLE(BRANCH_COMPACTION)
+static ALWAYS_INLINE void recordLinkOffsets(AssemblerData& assemblerData, int32_t regionStart, int32_t regionEnd, int32_t offset)
+{
+ int32_t ptr = regionStart / sizeof(int32_t);
+ const int32_t end = regionEnd / sizeof(int32_t);
+ int32_t* offsets = reinterpret_cast_ptr<int32_t*>(assemblerData.buffer());
+ while (ptr < end)
+ offsets[ptr++] = offset;
+}
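The new recordLinkOffsets helper reuses the already-copied part of the unlinked-code buffer as a table of int32_t deltas: every 4-byte slot in [regionStart, regionEnd) records how many bytes have been collapsed before that point, so executableOffsetFor() can later translate an unlinked offset into its compacted position in O(1). A standalone model of that table, using a vector where the real code overwrites the assembler's own buffer (names hypothetical):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct OffsetTable {
        std::vector<int32_t> deltas; // one entry per 4-byte slot of unlinked code

        void record(int32_t regionStart, int32_t regionEnd, int32_t delta)
        {
            for (int32_t slot = regionStart / 4; slot < regionEnd / 4; ++slot)
                deltas[slot] = delta;
        }

        // Bytes collapsed before this unlinked offset; an O(1) lookup.
        int32_t deltaFor(int32_t unlinkedOffset) const { return deltas[unlinkedOffset / 4]; }
    };

    int main()
    {
        OffsetTable table { std::vector<int32_t>(8, 0) };
        table.record(0, 16, 0);  // first 16 bytes did not move
        table.record(16, 32, 4); // a 4-byte branch before offset 16 was compacted away
        assert(table.deltaFor(20) == 4); // unlinked offset 20 now lives at 16
    }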
+
template <typename InstructionType>
-void LinkBuffer::copyCompactAndLinkCode(void* ownerUID, JITCompilationEffort effort)
+void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
{
- m_initialSize = m_assembler->m_assembler.codeSize();
- allocate(m_initialSize, ownerUID, effort);
- uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
- uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
+ allocate(macroAssembler, ownerUID, effort);
+ const size_t initialSize = macroAssembler.m_assembler.codeSize();
+ if (didFailToAllocate())
+ return;
+
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = macroAssembler.jumpsToLink();
+ m_assemblerStorage = macroAssembler.m_assembler.buffer().releaseAssemblerData();
+ uint8_t* inData = reinterpret_cast<uint8_t*>(m_assemblerStorage.buffer());
+
+ AssemblerData outBuffer(m_size);
+
+ uint8_t* outData = reinterpret_cast<uint8_t*>(outBuffer.buffer());
+ uint8_t* codeOutData = reinterpret_cast<uint8_t*>(m_code);
+
int readPtr = 0;
int writePtr = 0;
- Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = m_assembler->jumpsToLink();
unsigned jumpCount = jumpsToLink.size();
- for (unsigned i = 0; i < jumpCount; ++i) {
- int offset = readPtr - writePtr;
- ASSERT(!(offset & 1));
-
- // Copy the instructions from the last jump to the current one.
- size_t regionSize = jumpsToLink[i].from() - readPtr;
- InstructionType* copySource = reinterpret_cast_ptr<InstructionType*>(inData + readPtr);
- InstructionType* copyEnd = reinterpret_cast_ptr<InstructionType*>(inData + readPtr + regionSize);
- InstructionType* copyDst = reinterpret_cast_ptr<InstructionType*>(outData + writePtr);
- ASSERT(!(regionSize % 2));
- ASSERT(!(readPtr % 2));
- ASSERT(!(writePtr % 2));
- while (copySource != copyEnd)
- *copyDst++ = *copySource++;
- m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
- readPtr += regionSize;
- writePtr += regionSize;
-
- // Calculate absolute address of the jump target, in the case of backwards
- // branches we need to be precise, forward branches we are pessimistic
- const uint8_t* target;
- if (jumpsToLink[i].to() >= jumpsToLink[i].from())
- target = outData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
- else
- target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
-
- JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], outData + writePtr, target);
- // Compact branch if we can...
- if (m_assembler->canCompact(jumpsToLink[i].type())) {
- // Step back in the write stream
- int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
- if (delta) {
- writePtr -= delta;
- m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+ if (m_shouldPerformBranchCompaction) {
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ int offset = readPtr - writePtr;
+ ASSERT(!(offset & 1));
+
+ // Copy the instructions from the last jump to the current one.
+ size_t regionSize = jumpsToLink[i].from() - readPtr;
+ InstructionType* copySource = reinterpret_cast_ptr<InstructionType*>(inData + readPtr);
+ InstructionType* copyEnd = reinterpret_cast_ptr<InstructionType*>(inData + readPtr + regionSize);
+ InstructionType* copyDst = reinterpret_cast_ptr<InstructionType*>(outData + writePtr);
+ ASSERT(!(regionSize % 2));
+ ASSERT(!(readPtr % 2));
+ ASSERT(!(writePtr % 2));
+ while (copySource != copyEnd)
+ *copyDst++ = *copySource++;
+ recordLinkOffsets(m_assemblerStorage, readPtr, jumpsToLink[i].from(), offset);
+ readPtr += regionSize;
+ writePtr += regionSize;
+
+ // Calculate the absolute address of the jump target. For backward
+ // branches we need to be precise; for forward branches we are pessimistic.
+ const uint8_t* target;
+ if (jumpsToLink[i].to() >= jumpsToLink[i].from())
+ target = codeOutData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
+ else
+ target = codeOutData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to());
+
+ JumpLinkType jumpLinkType = MacroAssembler::computeJumpType(jumpsToLink[i], codeOutData + writePtr, target);
+ // Compact branch if we can...
+ if (MacroAssembler::canCompact(jumpsToLink[i].type())) {
+ // Step back in the write stream
+ int32_t delta = MacroAssembler::jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
+ if (delta) {
+ writePtr -= delta;
+ recordLinkOffsets(m_assemblerStorage, jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+ }
}
+ jumpsToLink[i].setFrom(writePtr);
+ }
+ } else {
+ if (!ASSERT_DISABLED) {
+ for (unsigned i = 0; i < jumpCount; ++i)
+ ASSERT(!MacroAssembler::canCompact(jumpsToLink[i].type()));
}
- jumpsToLink[i].setFrom(writePtr);
}
// Copy everything after the last jump
- memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr);
- m_assembler->recordLinkOffsets(readPtr, m_initialSize, readPtr - writePtr);
+ memcpy(outData + writePtr, inData + readPtr, initialSize - readPtr);
+ recordLinkOffsets(m_assemblerStorage, readPtr, initialSize, readPtr - writePtr);
for (unsigned i = 0; i < jumpCount; ++i) {
- uint8_t* location = outData + jumpsToLink[i].from();
- uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
- m_assembler->link(jumpsToLink[i], location, target);
+ uint8_t* location = codeOutData + jumpsToLink[i].from();
+ uint8_t* target = codeOutData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to());
+ MacroAssembler::link(jumpsToLink[i], outData + jumpsToLink[i].from(), location, target);
}
jumpsToLink.clear();
- shrink(writePtr + m_initialSize - readPtr);
+
+ size_t compactSize = writePtr + initialSize - readPtr;
+ if (m_executableMemory) {
+ m_size = compactSize;
+ m_executableMemory->shrink(m_size);
+ } else {
+ size_t nopSizeInBytes = initialSize - compactSize;
+ bool isCopyingToExecutableMemory = false;
+ MacroAssembler::AssemblerType_T::fillNops(outData + compactSize, nopSizeInBytes, isCopyingToExecutableMemory);
+ }
+
+ performJITMemcpy(m_code, outData, m_size);
#if DUMP_LINK_STATISTICS
- dumpLinkStatistics(m_code, m_initialSize, m_size);
+ dumpLinkStatistics(m_code, initialSize, m_size);
#endif
#if DUMP_CODE
dumpCode(m_code, m_size);
@@ -141,59 +194,63 @@ void LinkBuffer::copyCompactAndLinkCode(void* ownerUID, JITCompilationEffort eff
#endif
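To see what the readPtr/writePtr bookkeeping in the loop above achieves, here is a toy run with made-up jump sites and size deltas (the real decisions come from computeJumpType() and jumpSizeDelta()):

    #include <cassert>

    int main()
    {
        // Three jump sites, each encodable 4 bytes shorter than the
        // pessimistic form the assembler originally emitted.
        struct Jump { int from; int shrinkBy; };
        const Jump jumps[] = { { 16, 4 }, { 40, 4 }, { 64, 4 } };

        int readPtr = 0, writePtr = 0;
        for (const Jump& jump : jumps) {
            int regionSize = jump.from - readPtr; // bytes copied verbatim
            readPtr += regionSize;
            writePtr += regionSize;
            writePtr -= jump.shrinkBy; // step back: the short form frees bytes
        }
        // The write stream ends 12 bytes behind the read stream, matching the
        // compactSize = writePtr + initialSize - readPtr computation above.
        assert(readPtr - writePtr == 12);
    }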
-void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort)
+void LinkBuffer::linkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
{
+ // Ensure that the end of the last invalidation point does not extend beyond the end of the buffer.
+ macroAssembler.label();
+
#if !ENABLE(BRANCH_COMPACTION)
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
- m_assembler->m_assembler.buffer().flushConstantPool(false);
+ macroAssembler.m_assembler.buffer().flushConstantPool(false);
#endif
- AssemblerBuffer& buffer = m_assembler->m_assembler.buffer();
- allocate(buffer.codeSize(), ownerUID, effort);
+ allocate(macroAssembler, ownerUID, effort);
if (!m_didAllocate)
return;
ASSERT(m_code);
+ AssemblerBuffer& buffer = macroAssembler.m_assembler.buffer();
#if CPU(ARM_TRADITIONAL)
- m_assembler->m_assembler.prepareExecutableCopy(m_code);
+ macroAssembler.m_assembler.prepareExecutableCopy(m_code);
#endif
- memcpy(m_code, buffer.data(), buffer.codeSize());
+ performJITMemcpy(m_code, buffer.data(), buffer.codeSize());
#if CPU(MIPS)
- m_assembler->m_assembler.relocateJumps(buffer.data(), m_code);
+ macroAssembler.m_assembler.relocateJumps(buffer.data(), m_code);
#endif
#elif CPU(ARM_THUMB2)
- copyCompactAndLinkCode<uint16_t>(ownerUID, effort);
+ copyCompactAndLinkCode<uint16_t>(macroAssembler, ownerUID, effort);
#elif CPU(ARM64)
- copyCompactAndLinkCode<uint32_t>(ownerUID, effort);
-#endif
+ copyCompactAndLinkCode<uint32_t>(macroAssembler, ownerUID, effort);
+#endif // !ENABLE(BRANCH_COMPACTION)
+
+ m_linkTasks = WTFMove(macroAssembler.m_linkTasks);
}
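The WTFMove() at the end of linkCode() is the hand-off point for deferred work: tasks queued on the MacroAssembler while emitting code (before any final address exists) now belong to the LinkBuffer and run in performFinalization() below. A self-contained mini-model of that ownership transfer, with std::function standing in for the WTF task type (these are not the JSC class definitions):

    #include <functional>
    #include <utility>
    #include <vector>

    struct MiniLinkBuffer;
    using LinkTask = std::function<void(MiniLinkBuffer&)>;

    struct MiniAssembler {
        std::vector<LinkTask> linkTasks; // queued during code generation
        void addLinkTask(LinkTask task) { linkTasks.push_back(std::move(task)); }
    };

    struct MiniLinkBuffer {
        std::vector<LinkTask> linkTasks;

        // Mirrors m_linkTasks = WTFMove(macroAssembler.m_linkTasks).
        void adoptTasks(MiniAssembler& assembler) { linkTasks = std::move(assembler.linkTasks); }

        // Mirrors the loop added to performFinalization().
        void performFinalization()
        {
            for (LinkTask& task : linkTasks)
                task(*this);
        }
    };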
-void LinkBuffer::allocate(size_t initialSize, void* ownerUID, JITCompilationEffort effort)
+void LinkBuffer::allocate(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
{
+ size_t initialSize = macroAssembler.m_assembler.codeSize();
if (m_code) {
if (initialSize > m_size)
return;
+ size_t nopsToFillInBytes = m_size - initialSize;
+ macroAssembler.emitNops(nopsToFillInBytes);
m_didAllocate = true;
- m_size = initialSize;
return;
}
+ ASSERT(m_vm != nullptr);
m_executableMemory = m_vm->executableAllocator.allocate(*m_vm, initialSize, ownerUID, effort);
if (!m_executableMemory)
return;
- ExecutableAllocator::makeWritable(m_executableMemory->start(), m_executableMemory->sizeInBytes());
m_code = m_executableMemory->start();
m_size = initialSize;
m_didAllocate = true;
}
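Note that the reuse branch no longer shrinks m_size down to the new code size; it pads the unused tail of the existing region with nops instead, so the whole allocation remains well-formed code. A toy check of that arithmetic (values invented):

    #include <cassert>
    #include <cstddef>

    int main()
    {
        const std::size_t existingAllocation = 128; // m_size of the reused region
        const std::size_t newCodeSize = 100;        // macroAssembler codeSize()
        assert(newCodeSize <= existingAllocation);  // otherwise allocate() bails out early
        const std::size_t nopsToFill = existingAllocation - newCodeSize;
        assert(nopsToFill == 28); // emitNops(28) keeps the tail executable
    }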
-void LinkBuffer::shrink(size_t newSize)
-{
- m_size = newSize;
- m_executableMemory->shrink(m_size);
-}
-
void LinkBuffer::performFinalization()
{
+ for (auto& task : m_linkTasks)
+ task->run(*this);
+
#ifndef NDEBUG
ASSERT(!isCompilationThread());
ASSERT(!m_completed);
@@ -201,11 +258,6 @@ void LinkBuffer::performFinalization()
m_completed = true;
#endif
-#if ENABLE(BRANCH_COMPACTION)
- ExecutableAllocator::makeExecutable(code(), m_initialSize);
-#else
- ExecutableAllocator::makeExecutable(code(), m_size);
-#endif
MacroAssembler::cacheFlush(code(), m_size);
}
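For orientation, a hedged sketch of how a caller drives this lifecycle after the patch; the constructor shape follows the new linkCode(MacroAssembler&, ...) signature and JSC's FINALIZE_CODE convention, but the call site itself is illustrative, not a verbatim caller:

    // Would live inside JSC, e.g. in a thunk generator (illustrative only).
    static MacroAssemblerCodeRef generateStub(VM& vm, void* ownerUID)
    {
        MacroAssembler jit;
        jit.ret(); // stand-in for real code generation

        // The constructor runs linkCode(jit, ...): allocate, copy (and on
        // ARM, branch-compact), then adopt jit's link tasks.
        LinkBuffer linkBuffer(vm, jit, ownerUID, JITCompilationCanFail);
        if (linkBuffer.didFailToAllocate())
            return MacroAssemblerCodeRef(); // executable memory unavailable

        // FINALIZE_CODE calls performFinalization() (link tasks, cache
        // flush) and, when dumping is enabled, the disassembly path above.
        return FINALIZE_CODE(linkBuffer, ("illustrative stub"));
    }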