path: root/Source/JavaScriptCore/jit
author    Allan Sandfeld Jensen <allan.jensen@digia.com>  2013-09-13 12:51:20 +0200
committer The Qt Project <gerrit-noreply@qt-project.org>  2013-09-19 20:50:05 +0200
commit    d441d6f39bb846989d95bcf5caf387b42414718d (patch)
tree      e367e64a75991c554930278175d403c072de6bb8 /Source/JavaScriptCore/jit
parent    0060b2994c07842f4c59de64b5e3e430525c4b90 (diff)
download  qtwebkit-d441d6f39bb846989d95bcf5caf387b42414718d.tar.gz
Import Qt5x2 branch of QtWebkit for Qt 5.2

Importing a new snapshot of WebKit.

Change-Id: I2d01ad12cdc8af8cb015387641120a9d7ea5f10c
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@digia.com>
Diffstat (limited to 'Source/JavaScriptCore/jit')
-rw-r--r-- Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp | 11
-rw-r--r-- Source/JavaScriptCore/jit/ClosureCallStubRoutine.h | 2
-rw-r--r-- Source/JavaScriptCore/jit/CompactJITCodeMap.h | 1
-rw-r--r-- Source/JavaScriptCore/jit/ExecutableAllocator.cpp | 10
-rw-r--r-- Source/JavaScriptCore/jit/ExecutableAllocator.h | 12
-rw-r--r-- Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp | 34
-rw-r--r-- Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp | 23
-rw-r--r-- Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h | 10
-rw-r--r-- Source/JavaScriptCore/jit/HostCallReturnValue.cpp | 4
-rw-r--r-- Source/JavaScriptCore/jit/HostCallReturnValue.h | 2
-rw-r--r-- Source/JavaScriptCore/jit/JIT.cpp | 175
-rw-r--r-- Source/JavaScriptCore/jit/JIT.h | 231
-rw-r--r-- Source/JavaScriptCore/jit/JITArithmetic.cpp | 69
-rw-r--r-- Source/JavaScriptCore/jit/JITArithmetic32_64.cpp | 97
-rw-r--r-- Source/JavaScriptCore/jit/JITCall.cpp | 62
-rw-r--r-- Source/JavaScriptCore/jit/JITCall32_64.cpp | 59
-rw-r--r-- Source/JavaScriptCore/jit/JITCode.h | 13
-rw-r--r-- Source/JavaScriptCore/jit/JITDisassembler.cpp | 89
-rw-r--r-- Source/JavaScriptCore/jit/JITDisassembler.h | 13
-rw-r--r-- Source/JavaScriptCore/jit/JITDriver.h | 12
-rw-r--r-- Source/JavaScriptCore/jit/JITExceptions.cpp | 27
-rw-r--r-- Source/JavaScriptCore/jit/JITExceptions.h | 8
-rw-r--r-- Source/JavaScriptCore/jit/JITInlines.h | 190
-rw-r--r-- Source/JavaScriptCore/jit/JITOpcodes.cpp | 495
-rw-r--r-- Source/JavaScriptCore/jit/JITOpcodes32_64.cpp | 442
-rw-r--r-- Source/JavaScriptCore/jit/JITPropertyAccess.cpp | 201
-rw-r--r-- Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 124
-rw-r--r-- Source/JavaScriptCore/jit/JITStubRoutine.cpp | 2
-rw-r--r-- Source/JavaScriptCore/jit/JITStubs.cpp | 558
-rw-r--r-- Source/JavaScriptCore/jit/JITStubs.h | 755
-rw-r--r-- Source/JavaScriptCore/jit/JITThunks.cpp | 108
-rw-r--r-- Source/JavaScriptCore/jit/JITThunks.h | 76
-rw-r--r-- Source/JavaScriptCore/jit/JITWriteBarrier.h | 18
-rw-r--r-- Source/JavaScriptCore/jit/JSInterfaceJIT.h | 166
-rw-r--r-- Source/JavaScriptCore/jit/JumpReplacementWatchpoint.h | 7
-rw-r--r-- Source/JavaScriptCore/jit/SpecializedThunkJIT.h | 17
-rw-r--r-- Source/JavaScriptCore/jit/ThunkGenerator.h | 44
-rw-r--r-- Source/JavaScriptCore/jit/ThunkGenerators.cpp | 538
-rw-r--r-- Source/JavaScriptCore/jit/ThunkGenerators.h | 44
-rw-r--r-- Source/JavaScriptCore/jit/UnusedPointer.h | 37
40 files changed, 2470 insertions(+), 2316 deletions(-)
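
Two mechanical patterns dominate this import and account for much of the churn below: the JSGlobalData class becomes VM (with globalData parameters renamed to vm throughout), and the two-line pattern `if (!x) CRASH();` is collapsed into `RELEASE_ASSERT(x)`, which, unlike plain ASSERT, stays armed in release builds. A minimal standalone sketch of that assertion idiom follows; these are simplified stand-ins, not WTF's actual macro definitions, which also capture file and line information:

    #include <cstdlib>

    // Simplified stand-ins for WTF's CRASH()/RELEASE_ASSERT().
    #define CRASH() abort()
    #define RELEASE_ASSERT(cond) do { if (!(cond)) CRASH(); } while (0)

    int main()
    {
        void* reservation = malloc(4096);
        // Old style, as removed throughout this diff:
        //     if (!reservation)
        //         CRASH();
        // New style: one line, and still active when NDEBUG is defined.
        RELEASE_ASSERT(reservation);
        free(reservation);
        return 0;
    }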
diff --git a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp
index 73704aa03..1588f7fea 100644
--- a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp
+++ b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp
@@ -30,18 +30,19 @@
#include "Executable.h"
#include "Heap.h"
-#include "JSGlobalData.h"
+#include "VM.h"
+#include "Operations.h"
#include "SlotVisitor.h"
#include "Structure.h"
namespace JSC {
ClosureCallStubRoutine::ClosureCallStubRoutine(
- const MacroAssemblerCodeRef& code, JSGlobalData& globalData, const JSCell* owner,
+ const MacroAssemblerCodeRef& code, VM& vm, const JSCell* owner,
Structure* structure, ExecutableBase* executable, const CodeOrigin& codeOrigin)
- : GCAwareJITStubRoutine(code, globalData, true)
- , m_structure(globalData, owner, structure)
- , m_executable(globalData, owner, executable)
+ : GCAwareJITStubRoutine(code, vm, true)
+ , m_structure(vm, owner, structure)
+ , m_executable(vm, owner, executable)
, m_codeOrigin(codeOrigin)
{
}
diff --git a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h
index 3fd020691..d951075e2 100644
--- a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h
+++ b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h
@@ -38,7 +38,7 @@ namespace JSC {
class ClosureCallStubRoutine : public GCAwareJITStubRoutine {
public:
ClosureCallStubRoutine(
- const MacroAssemblerCodeRef&, JSGlobalData&, const JSCell* owner,
+ const MacroAssemblerCodeRef&, VM&, const JSCell* owner,
Structure*, ExecutableBase*, const CodeOrigin&);
virtual ~ClosureCallStubRoutine();
diff --git a/Source/JavaScriptCore/jit/CompactJITCodeMap.h b/Source/JavaScriptCore/jit/CompactJITCodeMap.h
index 5b92a8961..45ab175ec 100644
--- a/Source/JavaScriptCore/jit/CompactJITCodeMap.h
+++ b/Source/JavaScriptCore/jit/CompactJITCodeMap.h
@@ -34,7 +34,6 @@
#include <wtf/FastMalloc.h>
#include <wtf/OwnPtr.h>
#include <wtf/PassOwnPtr.h>
-#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>
namespace JSC {
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
index e9bb66ce7..5ac6cc412 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -114,8 +114,7 @@ protected:
#endif
PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
- if (!reservation)
- CRASH();
+ RELEASE_ASSERT(reservation);
reservations.append(reservation);
@@ -168,7 +167,7 @@ void ExecutableAllocator::initializeAllocator()
}
#endif
-ExecutableAllocator::ExecutableAllocator(JSGlobalData&)
+ExecutableAllocator::ExecutableAllocator(VM&)
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
: m_allocator(adoptPtr(new DemandExecutableAllocator()))
#endif
@@ -213,11 +212,10 @@ double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
}
-PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
+PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
RefPtr<ExecutableMemoryHandle> result = allocator()->allocate(sizeInBytes, ownerUID);
- if (!result && effort == JITCompilationMustSucceed)
- CRASH();
+ RELEASE_ASSERT(result || effort != JITCompilationMustSucceed);
return result.release();
}
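
The allocate() change above is a pure De Morgan rewrite: the old code crashed when `!result && effort == JITCompilationMustSucceed`, and the new RELEASE_ASSERT fires exactly when its condition `result || effort != JITCompilationMustSucceed` is false, which is the same predicate. A standalone exhaustive check, with plain bools standing in for the RefPtr and the effort enum:

    #include <cassert>

    int main()
    {
        for (bool result : { false, true }) {
            for (bool mustSucceed : { false, true }) {
                bool oldCrashes = !result && mustSucceed;
                bool newAssertFires = !(result || !mustSucceed);
                assert(oldCrashes == newAssertFires); // behavior is preserved
            }
        }
        return 0;
    }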
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h
index 85779e6a8..fe63ddf7a 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h
@@ -34,7 +34,6 @@
#include <wtf/PageAllocation.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
-#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>
#if OS(IOS)
@@ -74,15 +73,14 @@ extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLeng
namespace JSC {
-class JSGlobalData;
-void releaseExecutableMemory(JSGlobalData&);
+class VM;
+void releaseExecutableMemory(VM&);
static const unsigned jitAllocationGranule = 32;
inline size_t roundUpAllocationSize(size_t request, size_t granularity)
{
- if ((std::numeric_limits<size_t>::max() - granularity) <= request)
- CRASH(); // Allocation is too large
+ RELEASE_ASSERT((std::numeric_limits<size_t>::max() - granularity) > request);
// Round up to next page boundary
size_t size = request + (granularity - 1);
@@ -119,7 +117,7 @@ class ExecutableAllocator {
enum ProtectionSetting { Writable, Executable };
public:
- ExecutableAllocator(JSGlobalData&);
+ ExecutableAllocator(VM&);
~ExecutableAllocator();
static void initializeAllocator();
@@ -136,7 +134,7 @@ public:
static void dumpProfile() { }
#endif
- PassRefPtr<ExecutableMemoryHandle> allocate(JSGlobalData&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort);
+ PassRefPtr<ExecutableMemoryHandle> allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort);
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
static void makeWritable(void* start, size_t size)
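
Only the overflow guard of roundUpAllocationSize() is visible in the hunk above; the rest of the function rounds the request up to the next multiple of the granularity with the usual power-of-two mask trick (jitAllocationGranule is 32, and page sizes are likewise powers of two). A self-contained sketch of the whole function under that power-of-two assumption, with plain asserts standing in for RELEASE_ASSERT:

    #include <cassert>
    #include <cstddef>
    #include <limits>

    size_t roundUpAllocationSize(size_t request, size_t granularity)
    {
        assert((granularity & (granularity - 1)) == 0); // power of two
        // The overflow guard from the diff (RELEASE_ASSERT in the real code):
        assert(std::numeric_limits<size_t>::max() - granularity > request);
        // Round up to the next granularity boundary.
        size_t size = request + (granularity - 1);
        size &= ~(granularity - 1);
        return size;
    }

    int main()
    {
        assert(roundUpAllocationSize(1, 32) == 32);
        assert(roundUpAllocationSize(32, 32) == 32);
        assert(roundUpAllocationSize(33, 32) == 64);
        return 0;
    }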
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
index 7ee3e0497..7823cd2d2 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -36,10 +36,19 @@
#include <wtf/PageReservation.h>
#include <wtf/VMTags.h>
+#if OS(DARWIN)
+#include <sys/mman.h>
+#endif
+
#if OS(LINUX)
#include <stdio.h>
#endif
+#if !PLATFORM(IOS) && PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090
+// MADV_FREE_REUSABLE does not work for JIT memory on older OSes so use MADV_FREE in that case.
+#define WTF_USE_MADV_FREE_FOR_JIT_MEMORY 1
+#endif
+
using namespace WTF;
namespace JSC {
@@ -54,8 +63,7 @@ public:
{
m_reservation = PageReservation::reserveWithGuardPages(fixedExecutableMemoryPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
#if !ENABLE(LLINT)
- if (!m_reservation)
- CRASH();
+ RELEASE_ASSERT(m_reservation);
#endif
if (m_reservation) {
ASSERT(m_reservation.size() == fixedExecutableMemoryPoolSize);
@@ -64,6 +72,8 @@ public:
startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(m_reservation.base());
}
}
+
+ virtual ~FixedVMPoolExecutableAllocator();
protected:
virtual void* allocateNewSpace(size_t&)
@@ -74,7 +84,7 @@ protected:
virtual void notifyNeedPage(void* page)
{
-#if OS(DARWIN)
+#if USE(MADV_FREE_FOR_JIT_MEMORY)
UNUSED_PARAM(page);
#else
m_reservation.commit(page, pageSize());
@@ -83,14 +93,14 @@ protected:
virtual void notifyPageIsFree(void* page)
{
-#if OS(DARWIN)
+#if USE(MADV_FREE_FOR_JIT_MEMORY)
for (;;) {
int result = madvise(page, pageSize(), MADV_FREE);
if (!result)
return;
ASSERT(result == -1);
if (errno != EAGAIN) {
- ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure.
+ RELEASE_ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure.
break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway.
}
}
@@ -112,7 +122,7 @@ void ExecutableAllocator::initializeAllocator()
CodeProfiling::notifyAllocator(allocator);
}
-ExecutableAllocator::ExecutableAllocator(JSGlobalData&)
+ExecutableAllocator::ExecutableAllocator(VM&)
{
ASSERT(allocator);
}
@@ -121,6 +131,11 @@ ExecutableAllocator::~ExecutableAllocator()
{
}
+FixedVMPoolExecutableAllocator::~FixedVMPoolExecutableAllocator()
+{
+ m_reservation.deallocate();
+}
+
bool ExecutableAllocator::isValid() const
{
return !!allocator->bytesReserved();
@@ -148,16 +163,15 @@ double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
return result;
}
-PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData& globalData, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
+PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM& vm, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
if (!result) {
if (effort == JITCompilationCanFail)
return result;
- releaseExecutableMemory(globalData);
+ releaseExecutableMemory(vm);
result = allocator->allocate(sizeInBytes, ownerUID);
- if (!result)
- CRASH();
+ RELEASE_ASSERT(result);
}
return result.release();
}
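
The notifyPageIsFree() loop above, now gated on the new USE(MADV_FREE_FOR_JIT_MEMORY) flag rather than OS(DARWIN) directly, retries madvise(MADV_FREE) for as long as the call fails transiently with EAGAIN, and treats any other errno as a bug. The same retry shape as a standalone function; it compiles where MADV_FREE exists (Darwin, the BSDs, Linux 4.5+), with a plain assert in place of the ASSERT/RELEASE_ASSERT_NOT_REACHED pair:

    #include <cassert>
    #include <cerrno>
    #include <sys/mman.h>
    #include <unistd.h>

    // Tell the kernel a page's contents can be discarded, retrying while
    // the request is transiently rejected.
    void notifyPageIsFree(void* page)
    {
        long pageLength = sysconf(_SC_PAGESIZE);
        for (;;) {
            int result = madvise(page, static_cast<size_t>(pageLength), MADV_FREE);
            if (!result)
                return;
            assert(result == -1); // madvise only returns 0 or -1
            if (errno != EAGAIN)
                break; // give up: not returning memory to the OS beats crashing
        }
    }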
diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
index 521e49751..f681dd847 100644
--- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
+++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
@@ -29,20 +29,21 @@
#if ENABLE(JIT)
#include "Heap.h"
-#include "JSGlobalData.h"
+#include "VM.h"
+#include "Operations.h"
#include "SlotVisitor.h"
#include "Structure.h"
namespace JSC {
GCAwareJITStubRoutine::GCAwareJITStubRoutine(
- const MacroAssemblerCodeRef& code, JSGlobalData& globalData, bool isClosureCall)
+ const MacroAssemblerCodeRef& code, VM& vm, bool isClosureCall)
: JITStubRoutine(code)
, m_mayBeExecuting(false)
, m_isJettisoned(false)
, m_isClosureCall(isClosureCall)
{
- globalData.heap.m_jitStubRoutines.add(this);
+ vm.heap.m_jitStubRoutines.add(this);
}
GCAwareJITStubRoutine::~GCAwareJITStubRoutine() { }
@@ -59,7 +60,7 @@ void GCAwareJITStubRoutine::observeZeroRefCount()
return;
}
- ASSERT(!m_refCount);
+ RELEASE_ASSERT(!m_refCount);
m_isJettisoned = true;
}
@@ -78,10 +79,10 @@ void GCAwareJITStubRoutine::markRequiredObjectsInternal(SlotVisitor&)
}
MarkingGCAwareJITStubRoutineWithOneObject::MarkingGCAwareJITStubRoutineWithOneObject(
- const MacroAssemblerCodeRef& code, JSGlobalData& globalData, const JSCell* owner,
+ const MacroAssemblerCodeRef& code, VM& vm, const JSCell* owner,
JSCell* object)
- : GCAwareJITStubRoutine(code, globalData)
- , m_object(globalData, owner, object)
+ : GCAwareJITStubRoutine(code, vm)
+ , m_object(vm, owner, object)
{
}
@@ -96,7 +97,7 @@ void MarkingGCAwareJITStubRoutineWithOneObject::markRequiredObjectsInternal(Slot
PassRefPtr<JITStubRoutine> createJITStubRoutine(
const MacroAssemblerCodeRef& code,
- JSGlobalData& globalData,
+ VM& vm,
const JSCell*,
bool makesCalls)
{
@@ -104,12 +105,12 @@ PassRefPtr<JITStubRoutine> createJITStubRoutine(
return adoptRef(new JITStubRoutine(code));
return static_pointer_cast<JITStubRoutine>(
- adoptRef(new GCAwareJITStubRoutine(code, globalData)));
+ adoptRef(new GCAwareJITStubRoutine(code, vm)));
}
PassRefPtr<JITStubRoutine> createJITStubRoutine(
const MacroAssemblerCodeRef& code,
- JSGlobalData& globalData,
+ VM& vm,
const JSCell* owner,
bool makesCalls,
JSCell* object)
@@ -118,7 +119,7 @@ PassRefPtr<JITStubRoutine> createJITStubRoutine(
return adoptRef(new JITStubRoutine(code));
return static_pointer_cast<JITStubRoutine>(
- adoptRef(new MarkingGCAwareJITStubRoutineWithOneObject(code, globalData, owner, object)));
+ adoptRef(new MarkingGCAwareJITStubRoutineWithOneObject(code, vm, owner, object)));
}
} // namespace JSC
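
Both createJITStubRoutine() overloads share one decision, unchanged by the rename: a stub that makes no calls can never be live on the stack while a GC runs, so it gets a plain refcounted JITStubRoutine and skips the heap registration the GC-aware variant performs. A shape-only sketch, with std::shared_ptr standing in for WTF's PassRefPtr/adoptRef:

    #include <memory>

    struct JITStubRoutine { virtual ~JITStubRoutine() = default; };
    struct GCAwareJITStubRoutine : JITStubRoutine {
        // The real constructor also registers itself with vm.heap's
        // JITStubRoutineSet so the GC can mark through it.
    };

    std::shared_ptr<JITStubRoutine> createJITStubRoutine(bool makesCalls)
    {
        if (!makesCalls)
            return std::make_shared<JITStubRoutine>(); // no GC bookkeeping
        return std::static_pointer_cast<JITStubRoutine>(
            std::make_shared<GCAwareJITStubRoutine>());
    }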
diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
index e5ce281e8..f0b282cf1 100644
--- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
+++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
@@ -54,7 +54,7 @@ class JITStubRoutineSet;
// list which does not get reclaimed all at once).
class GCAwareJITStubRoutine : public JITStubRoutine {
public:
- GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, JSGlobalData&, bool isClosureCall = false);
+ GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, VM&, bool isClosureCall = false);
virtual ~GCAwareJITStubRoutine();
void markRequiredObjects(SlotVisitor& visitor)
@@ -84,7 +84,7 @@ private:
class MarkingGCAwareJITStubRoutineWithOneObject : public GCAwareJITStubRoutine {
public:
MarkingGCAwareJITStubRoutineWithOneObject(
- const MacroAssemblerCodeRef&, JSGlobalData&, const JSCell* owner, JSCell*);
+ const MacroAssemblerCodeRef&, VM&, const JSCell* owner, JSCell*);
virtual ~MarkingGCAwareJITStubRoutineWithOneObject();
protected:
@@ -102,7 +102,7 @@ private:
//
// PassRefPtr<JITStubRoutine> createJITStubRoutine(
// const MacroAssemblerCodeRef& code,
-// JSGlobalData& globalData,
+// VM& vm,
// const JSCell* owner,
// bool makesCalls,
// ...);
@@ -114,9 +114,9 @@ private:
// way.
PassRefPtr<JITStubRoutine> createJITStubRoutine(
- const MacroAssemblerCodeRef&, JSGlobalData&, const JSCell* owner, bool makesCalls);
+ const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls);
PassRefPtr<JITStubRoutine> createJITStubRoutine(
- const MacroAssemblerCodeRef&, JSGlobalData&, const JSCell* owner, bool makesCalls,
+ const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls,
JSCell*);
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
index 967c499b9..528fb2bc4 100644
--- a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
@@ -27,9 +27,9 @@
#include "HostCallReturnValue.h"
#include "CallFrame.h"
-#include <wtf/InlineASM.h>
+#include "JSCJSValueInlines.h"
#include "JSObject.h"
-#include "JSValueInlines.h"
+#include <wtf/InlineASM.h>
namespace JSC {
diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.h b/Source/JavaScriptCore/jit/HostCallReturnValue.h
index 3f61179a3..f4c8bc703 100644
--- a/Source/JavaScriptCore/jit/HostCallReturnValue.h
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.h
@@ -26,7 +26,7 @@
#ifndef HostCallReturnValue_h
#define HostCallReturnValue_h
-#include "JSValue.h"
+#include "JSCJSValue.h"
#include "MacroAssemblerCodeRef.h"
#include <wtf/Platform.h>
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index cccf33bf6..8e003c782 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -43,6 +43,7 @@ JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse
#include "JSArray.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
+#include "Operations.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
@@ -69,9 +70,9 @@ void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAd
repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}
-JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
- : m_interpreter(globalData->interpreter)
- , m_globalData(globalData)
+JIT::JIT(VM* vm, CodeBlock* codeBlock)
+ : m_interpreter(vm->interpreter)
+ , m_vm(vm)
, m_codeBlock(codeBlock)
, m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
, m_bytecodeOffset((unsigned)-1)
@@ -89,11 +90,7 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
, m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
, m_jumpTargetsPosition(0)
#endif
-#if USE(OS_RANDOMNESS)
, m_randomGenerator(cryptographicallyRandomNumber())
-#else
- , m_randomGenerator(static_cast<unsigned>(randomNumber() * 0xFFFFFFF))
-#endif
#if ENABLE(VALUE_PROFILER)
, m_canBeOptimized(false)
, m_shouldEmitProfiling(false)
@@ -102,53 +99,20 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
}
#if ENABLE(DFG_JIT)
-void JIT::emitOptimizationCheck(OptimizationCheckKind kind)
+void JIT::emitEnterOptimizationCheck()
{
if (!canBeOptimized())
return;
-
- Jump skipOptimize = branchAdd32(Signed, TrustedImm32(kind == LoopOptimizationCheck ? Options::executionCounterIncrementForLoop() : Options::executionCounterIncrementForReturn()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
+
+ Jump skipOptimize = branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForReturn()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
JITStubCall stubCall(this, cti_optimize);
stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
- if (kind == EnterOptimizationCheck)
- ASSERT(!m_bytecodeOffset);
+ ASSERT(!m_bytecodeOffset);
stubCall.call();
skipOptimize.link(this);
}
#endif
-#if CPU(X86)
-void JIT::emitTimeoutCheck()
-{
- Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), AbsoluteAddress(&m_globalData->m_timeoutCount));
- JITStubCall stubCall(this, cti_timeout_check);
- stubCall.addArgument(regT1, regT0); // save last result registers.
- stubCall.call(regT0);
- store32(regT0, &m_globalData->m_timeoutCount);
- stubCall.getArgument(0, regT1, regT0); // reload last result registers.
- skipTimeout.link(this);
-}
-#elif USE(JSVALUE32_64)
-void JIT::emitTimeoutCheck()
-{
- Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
- JITStubCall stubCall(this, cti_timeout_check);
- stubCall.addArgument(regT1, regT0); // save last result registers.
- stubCall.call(timeoutCheckRegister);
- stubCall.getArgument(0, regT1, regT0); // reload last result registers.
- skipTimeout.link(this);
-}
-#else
-void JIT::emitTimeoutCheck()
-{
- Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
- JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
- skipTimeout.link(this);
-
- killLastResultRegister();
-}
-#endif
-
#define NEXT_OPCODE(name) \
m_bytecodeOffset += OPCODE_LENGTH(name); \
break;
@@ -232,8 +196,17 @@ void JIT::privateCompileMainPass()
#if ENABLE(JIT_VERBOSE)
dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif
+
+ OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);
- switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
+ if (m_compilation && opcodeID != op_call_put_result) {
+ add64(
+ TrustedImm32(1),
+ AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
+ m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
+ }
+
+ switch (opcodeID) {
DEFINE_BINARY_OP(op_del_by_val)
DEFINE_BINARY_OP(op_in)
DEFINE_BINARY_OP(op_less)
@@ -283,7 +256,6 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_jeq_null)
DEFINE_OP(op_jfalse)
DEFINE_OP(op_jmp)
- DEFINE_OP(op_jmp_scopes)
DEFINE_OP(op_jneq_null)
DEFINE_OP(op_jneq_ptr)
DEFINE_OP(op_jless)
@@ -295,14 +267,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_jngreater)
DEFINE_OP(op_jngreatereq)
DEFINE_OP(op_jtrue)
- DEFINE_OP(op_loop)
DEFINE_OP(op_loop_hint)
- DEFINE_OP(op_loop_if_less)
- DEFINE_OP(op_loop_if_lesseq)
- DEFINE_OP(op_loop_if_greater)
- DEFINE_OP(op_loop_if_greatereq)
- DEFINE_OP(op_loop_if_true)
- DEFINE_OP(op_loop_if_false)
DEFINE_OP(op_lshift)
DEFINE_OP(op_mod)
DEFINE_OP(op_mov)
@@ -321,10 +286,8 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_not)
DEFINE_OP(op_nstricteq)
DEFINE_OP(op_pop_scope)
- DEFINE_OP(op_post_dec)
- DEFINE_OP(op_post_inc)
- DEFINE_OP(op_pre_dec)
- DEFINE_OP(op_pre_inc)
+ DEFINE_OP(op_dec)
+ DEFINE_OP(op_inc)
DEFINE_OP(op_profile_did_call)
DEFINE_OP(op_profile_will_call)
DEFINE_OP(op_push_name_scope)
@@ -359,7 +322,6 @@ void JIT::privateCompileMainPass()
case op_put_to_base_variable:
DEFINE_OP(op_put_to_base)
- DEFINE_OP(op_ensure_property_exists)
DEFINE_OP(op_resolve_with_base)
DEFINE_OP(op_resolve_with_this)
DEFINE_OP(op_ret)
@@ -377,9 +339,12 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_tear_off_arguments)
DEFINE_OP(op_throw)
DEFINE_OP(op_throw_static_error)
- DEFINE_OP(op_to_jsnumber)
+ DEFINE_OP(op_to_number)
DEFINE_OP(op_to_primitive)
+ DEFINE_OP(op_get_scoped_var)
+ DEFINE_OP(op_put_scoped_var)
+
case op_get_by_id_chain:
case op_get_by_id_generic:
case op_get_by_id_proto:
@@ -394,11 +359,11 @@ void JIT::privateCompileMainPass()
case op_put_by_id_generic:
case op_put_by_id_replace:
case op_put_by_id_transition:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
}
- ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
+ RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
@@ -423,7 +388,7 @@ void JIT::privateCompileSlowCases()
m_globalResolveInfoIndex = 0;
m_callLinkInfoIndex = 0;
-#if !ASSERT_DISABLED && ENABLE(VALUE_PROFILER)
+#if ENABLE(VALUE_PROFILER)
// Use this to assert that slow-path code associates new profiling sites with existing
// ValueProfiles rather than creating new ones. This ensures that for a given instruction
// (say, get_by_id) we get combined statistics for both the fast-path executions of that
@@ -439,9 +404,9 @@ void JIT::privateCompileSlowCases()
#endif
m_bytecodeOffset = iter->to;
-#ifndef NDEBUG
+
unsigned firstTo = m_bytecodeOffset;
-#endif
+
Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
#if ENABLE(VALUE_PROFILER)
@@ -489,12 +454,7 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_jngreater)
DEFINE_SLOWCASE_OP(op_jngreatereq)
DEFINE_SLOWCASE_OP(op_jtrue)
- DEFINE_SLOWCASE_OP(op_loop_if_less)
- DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
- DEFINE_SLOWCASE_OP(op_loop_if_greater)
- DEFINE_SLOWCASE_OP(op_loop_if_greatereq)
- DEFINE_SLOWCASE_OP(op_loop_if_true)
- DEFINE_SLOWCASE_OP(op_loop_if_false)
+ DEFINE_SLOWCASE_OP(op_loop_hint)
DEFINE_SLOWCASE_OP(op_lshift)
DEFINE_SLOWCASE_OP(op_mod)
DEFINE_SLOWCASE_OP(op_mul)
@@ -503,10 +463,8 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_new_object)
DEFINE_SLOWCASE_OP(op_not)
DEFINE_SLOWCASE_OP(op_nstricteq)
- DEFINE_SLOWCASE_OP(op_post_dec)
- DEFINE_SLOWCASE_OP(op_post_inc)
- DEFINE_SLOWCASE_OP(op_pre_dec)
- DEFINE_SLOWCASE_OP(op_pre_inc)
+ DEFINE_SLOWCASE_OP(op_dec)
+ DEFINE_SLOWCASE_OP(op_inc)
case op_put_by_id_out_of_line:
case op_put_by_id_transition_direct:
case op_put_by_id_transition_normal:
@@ -519,7 +477,7 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_urshift)
DEFINE_SLOWCASE_OP(op_stricteq)
DEFINE_SLOWCASE_OP(op_sub)
- DEFINE_SLOWCASE_OP(op_to_jsnumber)
+ DEFINE_SLOWCASE_OP(op_to_number)
DEFINE_SLOWCASE_OP(op_to_primitive)
case op_resolve_global_property:
@@ -541,11 +499,11 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_put_to_base)
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
- ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to,"Not enough jumps linked in slow case codegen.");
- ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
+ RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
+ RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
#if ENABLE(VALUE_PROFILER)
if (shouldEmitProfiling())
@@ -555,10 +513,10 @@ void JIT::privateCompileSlowCases()
emitJumpSlowToHot(jump(), 0);
}
- ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
- ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
+ RELEASE_ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
+ RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
#if ENABLE(VALUE_PROFILER)
- ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
+ RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#endif
#ifndef NDEBUG
@@ -617,22 +575,28 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
m_canBeOptimized = false;
m_shouldEmitProfiling = false;
break;
- case DFG::ShouldProfile:
+ case DFG::MayInline:
m_canBeOptimized = false;
+ m_canBeOptimizedOrInlined = true;
m_shouldEmitProfiling = true;
break;
case DFG::CanCompile:
m_canBeOptimized = true;
+ m_canBeOptimizedOrInlined = true;
m_shouldEmitProfiling = true;
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
#endif
- if (Options::showDisassembly())
+ if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
+ if (m_vm->m_perBytecodeProfiler) {
+ m_compilation = m_vm->m_perBytecodeProfiler->newCompilation(m_codeBlock, Profiler::Baseline);
+ m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
+ }
if (m_disassembler)
m_disassembler->setStartOfCode(label());
@@ -682,16 +646,11 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
#endif
addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
- stackCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->stack().addressOfEnd()), regT1);
+ stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT1);
}
Label functionBody = label();
-#if ENABLE(VALUE_PROFILER)
- if (canBeOptimized())
- add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->m_executionEntryCount));
-#endif
-
privateCompileMainPass();
privateCompileLinkPass();
privateCompileSlowCases();
@@ -731,7 +690,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
if (m_disassembler)
m_disassembler->setEndOfCode(label());
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock, effort);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort);
if (patchBuffer.didFailToAllocate())
return JITCode();
@@ -806,7 +765,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
}
#if ENABLE(DFG_JIT) || ENABLE(LLINT)
- if (canBeOptimized()
+ if (canBeOptimizedOrInlined()
#if ENABLE(LLINT)
|| true
#endif
@@ -823,12 +782,14 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck)
*functionEntryArityCheck = patchBuffer.locationOf(arityCheck);
- if (m_disassembler)
+ if (Options::showDisassembly())
m_disassembler->dump(patchBuffer);
+ if (m_compilation)
+ m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();
- m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
+ m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
static_cast<double>(result.size()) /
static_cast<double>(m_codeBlock->instructions().size()));
@@ -841,13 +802,13 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
return JITCode(result, JITCode::BaselineJIT);
}
-void JIT::linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, JSGlobalData* globalData, CodeSpecializationKind kind)
+void JIT::linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, VM* vm, CodeSpecializationKind kind)
{
RepatchBuffer repatchBuffer(callerCodeBlock);
ASSERT(!callLinkInfo->isLinked());
- callLinkInfo->callee.set(*globalData, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
- callLinkInfo->lastSeenCallee.set(*globalData, callerCodeBlock->ownerExecutable(), callee);
+ callLinkInfo->callee.set(*vm, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
+ callLinkInfo->lastSeenCallee.set(*vm, callerCodeBlock->ownerExecutable(), callee);
repatchBuffer.relink(callLinkInfo->hotPathOther, code);
if (calleeCodeBlock)
@@ -855,12 +816,26 @@ void JIT::linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* cal
// Patch the slow patch so we do not continue to try to link.
if (kind == CodeForCall) {
- repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualCall());
+ ASSERT(callLinkInfo->callType == CallLinkInfo::Call
+ || callLinkInfo->callType == CallLinkInfo::CallVarargs);
+ if (callLinkInfo->callType == CallLinkInfo::Call) {
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(linkClosureCallGenerator).code());
+ return;
+ }
+
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualCallGenerator).code());
return;
}
ASSERT(kind == CodeForConstruct);
- repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualConstruct());
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualConstructGenerator).code());
+}
+
+void JIT::linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo* callLinkInfo)
+{
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, callerCodeBlock->vm()->getCTIStub(virtualCallGenerator).code());
}
} // namespace JSC
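
emitEnterOptimizationCheck(), which replaces the kind-switched emitOptimizationCheck(), keeps the baseline JIT's counter-based tier-up scheme: the code block's execute counter starts out negative, each function entry adds a weight, and once branchAdd32(Signed, ...) sees a non-negative result the cti_optimize stub is called. A standalone sketch of that shape; the struct name and the concrete numbers are illustrative, not JSC's tuned values:

    #include <cstdio>

    struct CodeBlockCounter {
        int executeCounter = -100; // reaches zero after enough weighted entries
    };

    void optimize(CodeBlockCounter&) { std::puts("requesting optimized compile"); }

    void onFunctionEntry(CodeBlockCounter& block, int incrementForReturn)
    {
        block.executeCounter += incrementForReturn;
        if (block.executeCounter < 0)
            return; // branchAdd32(Signed, ...) skips the stub call
        optimize(block); // cti_optimize; the real stub also resets the counter
    }

    int main()
    {
        CodeBlockCounter block;
        for (int i = 0; i < 10; ++i)
            onFunctionEntry(block, 15); // tier-up triggers on the 7th entry
        return 0;
    }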
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index bbbc3b1c7..df8a19fd8 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -48,8 +48,10 @@
#include "Interpreter.h"
#include "JITDisassembler.h"
#include "JSInterfaceJIT.h"
+#include "LegacyProfiler.h"
#include "Opcode.h"
-#include "Profiler.h"
+#include "ResultType.h"
+#include "UnusedPointer.h"
#include <bytecode/SamplingTool.h>
namespace JSC {
@@ -61,6 +63,7 @@ namespace JSC {
class Interpreter;
class JSScope;
class JSStack;
+ class MarkedAllocator;
class Register;
class StructureChain;
@@ -292,91 +295,90 @@ namespace JSC {
using MacroAssembler::JumpList;
using MacroAssembler::Label;
- static const int patchGetByIdDefaultStructure = -1;
+ static const uintptr_t patchGetByIdDefaultStructure = unusedPointer;
static const int patchGetByIdDefaultOffset = 0;
// Magic number - initial offset cannot be representable as a signed 8bit value, or the X86Assembler
// will compress the displacement, and we may not be able to fit a patched offset.
static const int patchPutByIdDefaultOffset = 256;
public:
- static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock, JITCompilationEffort effort, CodePtr* functionEntryArityCheck = 0)
+ static JITCode compile(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort, CodePtr* functionEntryArityCheck = 0)
{
- return JIT(globalData, codeBlock).privateCompile(functionEntryArityCheck, effort);
+ return JIT(vm, codeBlock).privateCompile(functionEntryArityCheck, effort);
+ }
+
+ static void compileClosureCall(VM* vm, CallLinkInfo* callLinkInfo, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
+ {
+ JIT jit(vm, callerCodeBlock);
+ jit.m_bytecodeOffset = callLinkInfo->codeOrigin.bytecodeIndex;
+ jit.privateCompileClosureCall(callLinkInfo, calleeCodeBlock, expectedStructure, expectedExecutable, codePtr);
}
- static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
+ static void compileGetByIdProto(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, returnAddress, callFrame);
}
- static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
+ static void compileGetByIdSelfList(VM* vm, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, ident, slot, cachedOffset);
}
- static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
+ static void compileGetByIdProtoList(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, ident, slot, cachedOffset, callFrame);
}
- static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
+ static void compileGetByIdChainList(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, ident, slot, cachedOffset, callFrame);
}
- static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
+ static void compileGetByIdChain(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, ident, slot, cachedOffset, returnAddress, callFrame);
}
- static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+ static void compilePutByIdTransition(VM* vm, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct);
}
- static void compileGetByVal(JSGlobalData* globalData, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+ static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode);
}
- static void compilePutByVal(JSGlobalData* globalData, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+ static void compilePutByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode);
}
- static PassRefPtr<ExecutableMemoryHandle> compileCTIMachineTrampolines(JSGlobalData* globalData, TrampolineStructure *trampolines)
- {
- if (!globalData->canUseJIT())
- return 0;
- JIT jit(globalData, 0);
- return jit.privateCompileCTIMachineTrampolines(globalData, trampolines);
- }
-
- static CodeRef compileCTINativeCall(JSGlobalData* globalData, NativeFunction func)
+ static CodeRef compileCTINativeCall(VM* vm, NativeFunction func)
{
- if (!globalData->canUseJIT()) {
+ if (!vm->canUseJIT()) {
#if ENABLE(LLINT)
return CodeRef::createLLIntCodeRef(llint_native_call_trampoline);
#else
return CodeRef();
#endif
}
- JIT jit(globalData, 0);
- return jit.privateCompileCTINativeCall(globalData, func);
+ JIT jit(vm, 0);
+ return jit.privateCompileCTINativeCall(vm, func);
}
static void resetPatchGetById(RepatchBuffer&, StructureStubInfo*);
@@ -384,26 +386,31 @@ namespace JSC {
static void patchGetByIdSelf(CodeBlock*, StructureStubInfo*, Structure*, PropertyOffset cachedOffset, ReturnAddressPtr);
static void patchPutByIdReplace(CodeBlock*, StructureStubInfo*, Structure*, PropertyOffset cachedOffset, ReturnAddressPtr, bool direct);
- static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
+ static void compilePatchGetArrayLength(VM* vm, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
#if ENABLE(DFG_JIT)
// Force profiling to be enabled during stub generation.
jit.m_canBeOptimized = true;
+ jit.m_canBeOptimizedOrInlined = true;
+ jit.m_shouldEmitProfiling = true;
#endif // ENABLE(DFG_JIT)
return jit.privateCompilePatchGetArrayLength(returnAddress);
}
- static void linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, JSGlobalData*, CodeSpecializationKind);
+ static void linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, VM*, CodeSpecializationKind);
+ static void linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo*);
private:
- JIT(JSGlobalData*, CodeBlock* = 0);
+ JIT(VM*, CodeBlock* = 0);
void privateCompileMainPass();
void privateCompileLinkPass();
void privateCompileSlowCases();
JITCode privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort);
+ void privateCompileClosureCall(CallLinkInfo*, CodeBlock* calleeCodeBlock, Structure*, ExecutableBase*, MacroAssemblerCodePtr);
+
void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, ReturnAddressPtr, CallFrame*);
void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset);
void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, CallFrame*);
@@ -414,9 +421,8 @@ namespace JSC {
void privateCompileGetByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
void privateCompilePutByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
- PassRefPtr<ExecutableMemoryHandle> privateCompileCTIMachineTrampolines(JSGlobalData*, TrampolineStructure*);
- Label privateCompileCTINativeCall(JSGlobalData*, bool isConstruct = false);
- CodeRef privateCompileCTINativeCall(JSGlobalData*, NativeFunction);
+ Label privateCompileCTINativeCall(VM*, bool isConstruct = false);
+ CodeRef privateCompileCTINativeCall(VM*, NativeFunction);
void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
static bool isDirectPutById(StructureStubInfo*);
@@ -440,7 +446,6 @@ namespace JSC {
void emitLoadDouble(int index, FPRegisterID value);
void emitLoadInt32ToDouble(int index, FPRegisterID value);
Jump emitJumpIfNotObject(RegisterID structureReg);
- Jump emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType);
Jump addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, RegisterID scratch);
void addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, JumpList& failureCases, RegisterID scratch);
@@ -452,8 +457,8 @@ namespace JSC {
void emitWriteBarrier(RegisterID owner, RegisterID valueTag, RegisterID scratch, RegisterID scratch2, WriteBarrierMode, WriteBarrierUseKind);
void emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode, WriteBarrierUseKind);
- template<typename ClassType, MarkedBlock::DestructorType, typename StructureType> void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr);
- template<typename T> void emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID storagePtr);
+ template<typename StructureType> // StructureType can be RegisterID or ImmPtr.
+ void emitAllocateJSObject(RegisterID allocator, StructureType, RegisterID result, RegisterID scratch);
#if ENABLE(VALUE_PROFILER)
// This assumes that the value to profile is in regT0 and that regT3 is available for
@@ -468,6 +473,7 @@ namespace JSC {
void emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile*);
void emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex);
void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*);
+ void emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile*);
JITArrayMode chooseArrayMode(ArrayProfile*);
@@ -594,7 +600,6 @@ namespace JSC {
Jump emitJumpIfJSCell(RegisterID);
Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
void emitJumpSlowCaseIfJSCell(RegisterID);
- Jump emitJumpIfNotJSCell(RegisterID);
void emitJumpSlowCaseIfNotJSCell(RegisterID);
void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
Jump emitJumpIfImmediateInteger(RegisterID);
@@ -605,7 +610,6 @@ namespace JSC {
void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
- void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
void emitTagAsBoolImmediate(RegisterID reg);
void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
@@ -674,7 +678,6 @@ namespace JSC {
void emit_op_jeq_null(Instruction*);
void emit_op_jfalse(Instruction*);
void emit_op_jmp(Instruction*);
- void emit_op_jmp_scopes(Instruction*);
void emit_op_jneq_null(Instruction*);
void emit_op_jneq_ptr(Instruction*);
void emit_op_jless(Instruction*);
@@ -686,14 +689,7 @@ namespace JSC {
void emit_op_jngreater(Instruction*);
void emit_op_jngreatereq(Instruction*);
void emit_op_jtrue(Instruction*);
- void emit_op_loop(Instruction*);
void emit_op_loop_hint(Instruction*);
- void emit_op_loop_if_less(Instruction*);
- void emit_op_loop_if_lesseq(Instruction*);
- void emit_op_loop_if_greater(Instruction*);
- void emit_op_loop_if_greatereq(Instruction*);
- void emit_op_loop_if_true(Instruction*);
- void emit_op_loop_if_false(Instruction*);
void emit_op_lshift(Instruction*);
void emit_op_mod(Instruction*);
void emit_op_mov(Instruction*);
@@ -713,10 +709,8 @@ namespace JSC {
void emit_op_not(Instruction*);
void emit_op_nstricteq(Instruction*);
void emit_op_pop_scope(Instruction*);
- void emit_op_post_dec(Instruction*);
- void emit_op_post_inc(Instruction*);
- void emit_op_pre_dec(Instruction*);
- void emit_op_pre_inc(Instruction*);
+ void emit_op_dec(Instruction*);
+ void emit_op_inc(Instruction*);
void emit_op_profile_did_call(Instruction*);
void emit_op_profile_will_call(Instruction*);
void emit_op_push_name_scope(Instruction*);
@@ -731,7 +725,6 @@ namespace JSC {
void emitSlow_link_resolve_operations(ResolveOperations*, Vector<SlowCaseEntry>::iterator&);
void emit_op_resolve(Instruction*);
void emit_op_resolve_base(Instruction*);
- void emit_op_ensure_property_exists(Instruction*);
void emit_op_resolve_with_base(Instruction*);
void emit_op_resolve_with_this(Instruction*);
void emit_op_put_to_base(Instruction*);
@@ -748,10 +741,12 @@ namespace JSC {
void emit_op_tear_off_arguments(Instruction*);
void emit_op_throw(Instruction*);
void emit_op_throw_static_error(Instruction*);
- void emit_op_to_jsnumber(Instruction*);
+ void emit_op_to_number(Instruction*);
void emit_op_to_primitive(Instruction*);
void emit_op_unexpected_load(Instruction*);
void emit_op_urshift(Instruction*);
+ void emit_op_get_scoped_var(Instruction*);
+ void emit_op_put_scoped_var(Instruction*);
void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -782,12 +777,7 @@ namespace JSC {
void emitSlow_op_jngreater(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jngreatereq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_greater(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_greatereq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_false(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -796,17 +786,15 @@ namespace JSC {
void emitSlow_op_new_object(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_nstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_init_global_const_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_number(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -821,10 +809,7 @@ namespace JSC {
void emitInitRegister(unsigned dst);
- void emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
- void emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
void emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
- void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry);
void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
#if USE(JSVALUE64)
@@ -855,27 +840,20 @@ namespace JSC {
Jump checkStructure(RegisterID reg, Structure* structure);
- void restoreArgumentReference();
void restoreArgumentReferenceForTrampoline();
void updateTopCallFrame();
Call emitNakedCall(CodePtr function = CodePtr());
- void preserveReturnAddressAfterCall(RegisterID);
- void restoreReturnAddressBeforeReturn(RegisterID);
- void restoreReturnAddressBeforeReturn(Address);
-
// Loads the character value of a single character string into dst.
void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);
- enum OptimizationCheckKind { LoopOptimizationCheck, EnterOptimizationCheck };
#if ENABLE(DFG_JIT)
- void emitOptimizationCheck(OptimizationCheckKind);
+ void emitEnterOptimizationCheck();
#else
- void emitOptimizationCheck(OptimizationCheckKind) { }
+ void emitEnterOptimizationCheck() { }
#endif
-
- void emitTimeoutCheck();
+
#ifndef NDEBUG
void printBytecodeOperandTypes(unsigned src1, unsigned src2);
#endif
@@ -901,16 +879,18 @@ namespace JSC {
#if ENABLE(DFG_JIT)
bool canBeOptimized() { return m_canBeOptimized; }
+ bool canBeOptimizedOrInlined() { return m_canBeOptimizedOrInlined; }
bool shouldEmitProfiling() { return m_shouldEmitProfiling; }
#else
bool canBeOptimized() { return false; }
+ bool canBeOptimizedOrInlined() { return false; }
// Enables use of value profiler with tiered compilation turned off,
// in which case all code gets profiled.
- bool shouldEmitProfiling() { return true; }
+ bool shouldEmitProfiling() { return false; }
#endif
Interpreter* m_interpreter;
- JSGlobalData* m_globalData;
+ VM* m_vm;
CodeBlock* m_codeBlock;
Vector<CallRecord> m_calls;
@@ -947,92 +927,17 @@ namespace JSC {
#endif
#endif
OwnPtr<JITDisassembler> m_disassembler;
+ RefPtr<Profiler::Compilation> m_compilation;
WeakRandom m_randomGenerator;
- static CodeRef stringGetByValStubGenerator(JSGlobalData*);
+ static CodeRef stringGetByValStubGenerator(VM*);
#if ENABLE(VALUE_PROFILER)
bool m_canBeOptimized;
+ bool m_canBeOptimizedOrInlined;
bool m_shouldEmitProfiling;
#endif
} JIT_CLASS_ALIGNMENT;
- inline void JIT::emit_op_loop(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jmp(currentInstruction);
- }
-
- inline void JIT::emit_op_loop_hint(Instruction*)
- {
- emitOptimizationCheck(LoopOptimizationCheck);
- }
-
- inline void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jtrue(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jtrue(currentInstruction, iter);
- }
-
- inline void JIT::emit_op_loop_if_false(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jfalse(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_false(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jfalse(currentInstruction, iter);
- }
-
- inline void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jless(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jless(currentInstruction, iter);
- }
-
- inline void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jlesseq(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jlesseq(currentInstruction, iter);
- }
-
- inline void JIT::emit_op_loop_if_greater(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jgreater(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_greater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jgreater(currentInstruction, iter);
- }
-
- inline void JIT::emit_op_loop_if_greatereq(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jgreatereq(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_greatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jgreatereq(currentInstruction, iter);
- }
-
} // namespace JSC
#endif // ENABLE(JIT)
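
The new m_canBeOptimizedOrInlined flag in JIT.h pairs with the DFG::MayInline case added to privateCompile() in JIT.cpp above: a code block the DFG cannot compile as a root may presumably still be inlined into an optimized caller, so it keeps emitting profiling and optimizable call-site shapes even though canBeOptimized() stays false. The resulting mapping, written out as a standalone table (the enum and struct names here are illustrative; the real capability enum lives in the DFG layer, and the first case in the hunk above is truncated):

    enum class CapabilityLevel { CannotCompile, MayInline, CanCompile };

    struct BaselineFlags {
        bool canBeOptimized;
        bool canBeOptimizedOrInlined;
        bool shouldEmitProfiling;
    };

    // Mapping from JIT::privateCompile's switch in this diff.
    BaselineFlags flagsFor(CapabilityLevel level)
    {
        switch (level) {
        case CapabilityLevel::CannotCompile:
            return { false, false, false };
        case CapabilityLevel::MayInline:
            return { false, true, true };
        case CapabilityLevel::CanCompile:
            return { true, true, true };
        }
        return { false, false, false }; // unreachable
    }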
diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp
index bcb3dd74a..713d05e3b 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp
@@ -35,6 +35,7 @@
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
+#include "Operations.h"
#include "ResultType.h"
#include "SamplingTool.h"
@@ -628,61 +629,7 @@ void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEnt
}
}
-void JIT::emit_op_post_inc(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(srcDst, regT0);
- move(regT0, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT1));
- emitFastArithIntToImmNoCheck(regT1, regT1);
- emitPutVirtualRegister(srcDst, regT1);
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_post_inc);
- stubCall.addArgument(regT0);
- stubCall.addArgument(Imm32(srcDst));
- stubCall.call(result);
-}
-
-void JIT::emit_op_post_dec(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(srcDst, regT0);
- move(regT0, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT1));
- emitFastArithIntToImmNoCheck(regT1, regT1);
- emitPutVirtualRegister(srcDst, regT1);
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_post_dec);
- stubCall.addArgument(regT0);
- stubCall.addArgument(Imm32(srcDst));
- stubCall.call(result);
-}
-
-void JIT::emit_op_pre_inc(Instruction* currentInstruction)
+void JIT::emit_op_inc(Instruction* currentInstruction)
{
unsigned srcDst = currentInstruction[1].u.operand;
@@ -693,7 +640,7 @@ void JIT::emit_op_pre_inc(Instruction* currentInstruction)
emitPutVirtualRegister(srcDst);
}
-void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned srcDst = currentInstruction[1].u.operand;
@@ -701,12 +648,12 @@ void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEn
linkSlowCase(iter);
emitGetVirtualRegister(srcDst, regT0);
notImm.link(this);
- JITStubCall stubCall(this, cti_op_pre_inc);
+ JITStubCall stubCall(this, cti_op_inc);
stubCall.addArgument(regT0);
stubCall.call(srcDst);
}
-void JIT::emit_op_pre_dec(Instruction* currentInstruction)
+void JIT::emit_op_dec(Instruction* currentInstruction)
{
unsigned srcDst = currentInstruction[1].u.operand;
@@ -717,7 +664,7 @@ void JIT::emit_op_pre_dec(Instruction* currentInstruction)
emitPutVirtualRegister(srcDst);
}
-void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned srcDst = currentInstruction[1].u.operand;
@@ -725,7 +672,7 @@ void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEn
linkSlowCase(iter);
emitGetVirtualRegister(srcDst, regT0);
notImm.link(this);
- JITStubCall stubCall(this, cti_op_pre_dec);
+ JITStubCall stubCall(this, cti_op_dec);
stubCall.addArgument(regT0);
stubCall.call(srcDst);
}
@@ -794,7 +741,7 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
#endif // CPU(X86) || CPU(X86_64)
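With op_post_inc/op_post_dec gone, x++ is presumably expressed as a copy of the old value followed by the surviving op_inc, whose fast path is an overflow-checked 32-bit add that falls through to a stub call on overflow or a non-integer operand, mirroring branchAdd32(Overflow, ...) plus addSlowCase above. A hedged C++ model (assumes the GCC/Clang __builtin_add_overflow builtin; none of these names are JSC API):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the cti_op_inc stub: handles overflow and non-int values.
    static double slowPathInc(double value) { return value + 1; }

    static double incValue(int32_t& intRep, bool isInt32)
    {
        if (isInt32) {
            int32_t result;
            if (!__builtin_add_overflow(intRep, 1, &result)) { // fast path
                intRep = result;
                return result;
            }
        }
        return slowPathInc(intRep); // overflow or non-int: take the slow case
    }

    int main()
    {
        int32_t x = INT32_MAX;
        printf("%f\n", incValue(x, true)); // overflows, routed to the slow path
    }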
diff --git a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
index 960d06091..c1caf61f5 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -36,6 +36,7 @@
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
+#include "Operations.h"
#include "ResultType.h"
#include "SamplingTool.h"
@@ -448,79 +449,7 @@ void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEnt
stubCall.call(dst);
}
-// PostInc (i++)
-
-void JIT::emit_op_post_inc(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- emitLoad(srcDst, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
-
- if (dst == srcDst) // x = x++ is a noop for ints.
- return;
-
- move(regT0, regT2);
- addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT2));
- emitStoreInt32(srcDst, regT2, true);
-
- emitStoreAndMapInt32(dst, regT1, regT0, false, OPCODE_LENGTH(op_post_inc));
-}
-
-void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- linkSlowCase(iter); // int32 check
- if (dst != srcDst)
- linkSlowCase(iter); // overflow check
-
- JITStubCall stubCall(this, cti_op_post_inc);
- stubCall.addArgument(srcDst);
- stubCall.addArgument(TrustedImm32(srcDst));
- stubCall.call(dst);
-}
-
-// PostDec (i--)
-
-void JIT::emit_op_post_dec(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- emitLoad(srcDst, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
-
- if (dst == srcDst) // x = x-- is a noop for ints.
- return;
-
- move(regT0, regT2);
- addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT2));
- emitStoreInt32(srcDst, regT2, true);
-
- emitStoreAndMapInt32(dst, regT1, regT0, false, OPCODE_LENGTH(op_post_dec));
-}
-
-void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- linkSlowCase(iter); // int32 check
- if (dst != srcDst)
- linkSlowCase(iter); // overflow check
-
- JITStubCall stubCall(this, cti_op_post_dec);
- stubCall.addArgument(srcDst);
- stubCall.addArgument(TrustedImm32(srcDst));
- stubCall.call(dst);
-}
-
-// PreInc (++i)
-
-void JIT::emit_op_pre_inc(Instruction* currentInstruction)
+void JIT::emit_op_inc(Instruction* currentInstruction)
{
unsigned srcDst = currentInstruction[1].u.operand;
@@ -528,24 +457,22 @@ void JIT::emit_op_pre_inc(Instruction* currentInstruction)
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
- emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_pre_inc));
+ emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_inc));
}
-void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned srcDst = currentInstruction[1].u.operand;
linkSlowCase(iter); // int32 check
linkSlowCase(iter); // overflow check
- JITStubCall stubCall(this, cti_op_pre_inc);
+ JITStubCall stubCall(this, cti_op_inc);
stubCall.addArgument(srcDst);
stubCall.call(srcDst);
}
-// PreDec (--i)
-
-void JIT::emit_op_pre_dec(Instruction* currentInstruction)
+void JIT::emit_op_dec(Instruction* currentInstruction)
{
unsigned srcDst = currentInstruction[1].u.operand;
@@ -553,17 +480,17 @@ void JIT::emit_op_pre_dec(Instruction* currentInstruction)
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
- emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_pre_dec));
+ emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_dec));
}
-void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned srcDst = currentInstruction[1].u.operand;
linkSlowCase(iter); // int32 check
linkSlowCase(iter); // overflow check
- JITStubCall stubCall(this, cti_op_pre_dec);
+ JITStubCall stubCall(this, cti_op_dec);
stubCall.addArgument(srcDst);
stubCall.call(srcDst);
}
@@ -907,7 +834,7 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), dst);
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
if (!notInt32Op2.empty())
@@ -1011,7 +938,7 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), dst);
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
}
@@ -1252,7 +1179,7 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
UNUSED_PARAM(iter);
// We would have really useful assertions here if it weren't for the compiler's
// insistence on attribute noreturn.
- // ASSERT_NOT_REACHED();
+ // RELEASE_ASSERT_NOT_REACHED();
#endif
}
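The JSVALUE32_64 versions differ from the 64-bit ones mainly in how an integer is recognized: a value is a (tag, payload) pair, and the fast path first compares the tag word against JSValue::Int32Tag before touching the payload. A small model (the tag constants here are illustrative, not JSC's exact values):

    #include <cstdint>
    #include <cstdio>

    enum Tag : uint32_t { Int32Tag = 0xffffffff, CellTag = 0xfffffffb }; // illustrative

    struct BoxedValue { uint32_t tag; uint32_t payload; };

    // Mirrors branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)).
    static bool isInt32(const BoxedValue& v) { return v.tag == Int32Tag; }

    int main()
    {
        BoxedValue v = { Int32Tag, 41 };
        if (isInt32(v))
            printf("int payload: %u\n", v.payload + 1); // prints 42
    }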
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
index 006c5b741..5520a4d34 100644
--- a/Source/JavaScriptCore/jit/JITCall.cpp
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,8 +36,12 @@
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
+#include "Operations.h"
+#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
+#include "ThunkGenerators.h"
+#include <wtf/StringPrintStream.h>
#ifndef NDEBUG
#include <stdio.h>
@@ -52,7 +56,7 @@ void JIT::emit_op_call_put_result(Instruction* instruction)
int dst = instruction[1].u.operand;
emitValueProfilingSite();
emitPutVirtualRegister(dst);
- if (canBeOptimized())
+ if (canBeOptimizedOrInlined())
killLastResultRegister(); // Make lastResultRegister tracking simpler in the DFG.
}
@@ -84,7 +88,7 @@ void JIT::compileLoadVarargs(Instruction* instruction)
addPtr(callFrameRegister, regT1);
// regT1: newCallFrame
- slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->stack().addressOfEnd()), regT1));
+ slowCase.append(branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT1));
// Initialize ArgumentCount.
store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
@@ -135,7 +139,7 @@ void JIT::compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator& iter)
linkSlowCase(iter);
emitGetFromCallFrameHeader64(JSStack::Callee, regT0);
- emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
+ emitNakedCall(m_vm->getCTIStub(virtualCallGenerator).code());
sampleCodeBlock(m_codeBlock);
}
@@ -164,7 +168,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
int argCount = instruction[2].u.operand;
int registerOffset = instruction[3].u.operand;
- if (opcodeID == op_call && canBeOptimized()) {
+ if (opcodeID == op_call && shouldEmitProfiling()) {
emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
Jump done = emitJumpIfNotJSCell(regT0);
loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
@@ -216,11 +220,57 @@ void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction*, Vector<SlowCase
linkSlowCase(iter);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_vm->getCTIStub(linkConstructGenerator).code() : m_vm->getCTIStub(linkCallGenerator).code());
sampleCodeBlock(m_codeBlock);
}
+void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
+{
+ JumpList slowCases;
+
+ slowCases.append(branchTestPtr(NonZero, regT0, tagMaskRegister));
+ slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure)));
+ slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));
+
+ loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
+ emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
+
+ Call call = nearCall();
+ Jump done = jump();
+
+ slowCases.link(this);
+ move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
+ restoreReturnAddressBeforeReturn(regT2);
+ Jump slow = jump();
+
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+
+ patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
+ patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
+ patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallGenerator).code()));
+
+ RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline closure call stub for %s, return point %p, target %p (%s)",
+ toCString(*m_codeBlock).data(),
+ callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
+ codePtr.executableAddress(),
+ toCString(pointerDump(calleeCodeBlock)).data())),
+ *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
+ callLinkInfo->codeOrigin));
+
+ RepatchBuffer repatchBuffer(m_codeBlock);
+
+ repatchBuffer.replaceWithJump(
+ RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
+ CodeLocationLabel(stubRoutine->code().code()));
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallGenerator).code());
+
+ callLinkInfo->stub = stubRoutine.release();
+}
+
} // namespace JSC
#endif // USE(JSVALUE64)
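The new privateCompileClosureCall compiles a guard that the callee is still the closure seen when the call was linked: same Structure and same ExecutableBase, in which case control tail-calls the pre-linked target, otherwise it falls back to the generic virtual call thunk for repatching. A C++ model of just that guard logic (simplified stand-in types):

    #include <cstdio>

    struct Structure {};
    struct Executable {};
    struct Function { Structure* structure; Executable* executable; };

    static void linkedTarget() { puts("fast path: direct call to compiled code"); }
    static void virtualCall()  { puts("slow path: generic virtual call"); }

    static void closureCall(Function* callee, Structure* expectedS, Executable* expectedE)
    {
        // The stub's two branchPtr(NotEqual, ...) checks, as pointer compares.
        if (callee->structure == expectedS && callee->executable == expectedE)
            linkedTarget(); // guards passed: use the patched-in call
        else
            virtualCall();  // guards failed: back to the virtual call path
    }

    int main()
    {
        Structure s; Executable e;
        Function f = { &s, &e };
        closureCall(&f, &s, &e);
    }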
diff --git a/Source/JavaScriptCore/jit/JITCall32_64.cpp b/Source/JavaScriptCore/jit/JITCall32_64.cpp
index ecd5cf126..c8be31206 100644
--- a/Source/JavaScriptCore/jit/JITCall32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITCall32_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,8 +36,11 @@
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
+#include "Operations.h"
+#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
+#include <wtf/StringPrintStream.h>
#ifndef NDEBUG
#include <stdio.h>
@@ -159,7 +162,7 @@ void JIT::compileLoadVarargs(Instruction* instruction)
addPtr(callFrameRegister, regT3);
// regT3: newCallFrame
- slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->stack().addressOfEnd()), regT3));
+ slowCase.append(branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT3));
// Initialize ArgumentCount.
store32(regT2, payloadFor(JSStack::ArgumentCount, regT3));
@@ -212,7 +215,7 @@ void JIT::compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator& iter)
linkSlowCase(iter);
emitLoad(JSStack::Callee, regT1, regT0);
- emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
+ emitNakedCall(m_vm->getCTIStub(virtualCallGenerator).code());
sampleCodeBlock(m_codeBlock);
}
@@ -241,7 +244,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
int argCount = instruction[2].u.operand;
int registerOffset = instruction[3].u.operand;
- if (opcodeID == op_call && canBeOptimized()) {
+ if (opcodeID == op_call && shouldEmitProfiling()) {
emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
Jump done = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
loadPtr(Address(regT1, JSCell::structureOffset()), regT1);
@@ -297,11 +300,57 @@ void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction*, Vector<SlowCase
linkSlowCase(iter);
linkSlowCase(iter);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_vm->getCTIStub(linkConstructGenerator).code() : m_vm->getCTIStub(linkCallGenerator).code());
sampleCodeBlock(m_codeBlock);
}
+void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
+{
+ JumpList slowCases;
+
+ slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure)));
+ slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));
+
+ loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
+
+ Call call = nearCall();
+ Jump done = jump();
+
+ slowCases.link(this);
+ move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
+ restoreReturnAddressBeforeReturn(regT2);
+ Jump slow = jump();
+
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+
+ patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
+ patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
+ patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallGenerator).code()));
+
+ RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline closure call stub for %s, return point %p, target %p (%s)",
+ toCString(*m_codeBlock).data(),
+ callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
+ codePtr.executableAddress(),
+ toCString(pointerDump(calleeCodeBlock)).data())),
+ *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
+ callLinkInfo->codeOrigin));
+
+ RepatchBuffer repatchBuffer(m_codeBlock);
+
+ repatchBuffer.replaceWithJump(
+ RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
+ CodeLocationLabel(stubRoutine->code().code()));
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallGenerator).code());
+
+ callLinkInfo->stub = stubRoutine.release();
+}
+
} // namespace JSC
#endif // USE(JSVALUE32_64)
diff --git a/Source/JavaScriptCore/jit/JITCode.h b/Source/JavaScriptCore/jit/JITCode.h
index 0929397ee..b7521fb21 100644
--- a/Source/JavaScriptCore/jit/JITCode.h
+++ b/Source/JavaScriptCore/jit/JITCode.h
@@ -28,16 +28,17 @@
#if ENABLE(JIT) || ENABLE(LLINT)
#include "CallFrame.h"
-#include "JSValue.h"
#include "Disassembler.h"
+#include "JITStubs.h"
+#include "JSCJSValue.h"
+#include "LegacyProfiler.h"
#include "MacroAssemblerCodeRef.h"
-#include "Profiler.h"
#endif
namespace JSC {
#if ENABLE(JIT)
- class JSGlobalData;
+ class VM;
class JSStack;
#endif
@@ -129,10 +130,10 @@ namespace JSC {
#if ENABLE(JIT)
// Execute the code!
- inline JSValue execute(JSStack* stack, CallFrame* callFrame, JSGlobalData* globalData)
+ inline JSValue execute(JSStack* stack, CallFrame* callFrame, VM* vm)
{
- JSValue result = JSValue::decode(ctiTrampoline(m_ref.code().executableAddress(), stack, callFrame, 0, 0, globalData));
- return globalData->exception ? jsNull() : result;
+ JSValue result = JSValue::decode(ctiTrampoline(m_ref.code().executableAddress(), stack, callFrame, 0, 0, vm));
+ return vm->exception ? jsNull() : result;
}
#endif
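JITCode::execute() runs the compiled entry point through ctiTrampoline and then treats any pending VM exception as invalidating the returned value, handing back jsNull() instead. A sketch of that call-then-check pattern (stand-in types, not JSC API):

    #include <cstdio>
    #include <optional>

    struct VM { bool exception = false; };

    // Stand-in for ctiTrampoline: may leave an exception pending on the VM.
    static double runCompiledCode(VM& vm) { vm.exception = true; return 42; }

    static std::optional<double> execute(VM& vm)
    {
        double result = runCompiledCode(vm);
        if (vm.exception)        // mirrors `return vm->exception ? jsNull() : result;`
            return std::nullopt; // the result is meaningless while a throw is pending
        return result;
    }

    int main()
    {
        VM vm;
        printf("got value: %d\n", execute(vm).has_value() ? 1 : 0);
    }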
diff --git a/Source/JavaScriptCore/jit/JITDisassembler.cpp b/Source/JavaScriptCore/jit/JITDisassembler.cpp
index eaef844bf..39953fa34 100644
--- a/Source/JavaScriptCore/jit/JITDisassembler.cpp
+++ b/Source/JavaScriptCore/jit/JITDisassembler.cpp
@@ -31,6 +31,7 @@
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "JIT.h"
+#include <wtf/StringPrintStream.h>
namespace JSC {
@@ -47,18 +48,10 @@ JITDisassembler::~JITDisassembler()
void JITDisassembler::dump(PrintStream& out, LinkBuffer& linkBuffer)
{
- out.print("Baseline JIT code for ", CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT), ", instruction count = ", m_codeBlock->instructionCount(), "\n");
- out.print(" Code at [", RawPointer(linkBuffer.debugAddress()), ", ", RawPointer(static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize()), "):\n");
+ dumpHeader(out, linkBuffer);
dumpDisassembly(out, linkBuffer, m_startOfCode, m_labelForBytecodeIndexInMainPath[0]);
- MacroAssembler::Label firstSlowLabel;
- for (unsigned i = 0; i < m_labelForBytecodeIndexInSlowPath.size(); ++i) {
- if (m_labelForBytecodeIndexInSlowPath[i].isSet()) {
- firstSlowLabel = m_labelForBytecodeIndexInSlowPath[i];
- break;
- }
- }
- dumpForInstructions(out, linkBuffer, " ", m_labelForBytecodeIndexInMainPath, firstSlowLabel.isSet() ? firstSlowLabel : m_endOfSlowPath);
+ dumpForInstructions(out, linkBuffer, " ", m_labelForBytecodeIndexInMainPath, firstSlowLabel());
out.print(" (End Of Main Path)\n");
dumpForInstructions(out, linkBuffer, " (S) ", m_labelForBytecodeIndexInSlowPath, m_endOfSlowPath);
out.print(" (End Of Slow Path)\n");
@@ -71,27 +64,95 @@ void JITDisassembler::dump(LinkBuffer& linkBuffer)
dump(WTF::dataFile(), linkBuffer);
}
-void JITDisassembler::dumpForInstructions(PrintStream& out, LinkBuffer& linkBuffer, const char* prefix, Vector<MacroAssembler::Label>& labels, MacroAssembler::Label endLabel)
+void JITDisassembler::reportToProfiler(Profiler::Compilation* compilation, LinkBuffer& linkBuffer)
+{
+ StringPrintStream out;
+
+ dumpHeader(out, linkBuffer);
+ compilation->addDescription(Profiler::CompiledBytecode(Profiler::OriginStack(), out.toCString()));
+ out.reset();
+ dumpDisassembly(out, linkBuffer, m_startOfCode, m_labelForBytecodeIndexInMainPath[0]);
+ compilation->addDescription(Profiler::CompiledBytecode(Profiler::OriginStack(), out.toCString()));
+
+ reportInstructions(compilation, linkBuffer, " ", m_labelForBytecodeIndexInMainPath, firstSlowLabel());
+ compilation->addDescription(Profiler::CompiledBytecode(Profiler::OriginStack(), " (End Of Main Path)\n"));
+ reportInstructions(compilation, linkBuffer, " (S) ", m_labelForBytecodeIndexInSlowPath, m_endOfSlowPath);
+ compilation->addDescription(Profiler::CompiledBytecode(Profiler::OriginStack(), " (End Of Slow Path)\n"));
+ out.reset();
+ dumpDisassembly(out, linkBuffer, m_endOfSlowPath, m_endOfCode);
+ compilation->addDescription(Profiler::CompiledBytecode(Profiler::OriginStack(), out.toCString()));
+}
+
+void JITDisassembler::dumpHeader(PrintStream& out, LinkBuffer& linkBuffer)
+{
+ out.print("Generated Baseline JIT code for ", CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT), ", instruction count = ", m_codeBlock->instructionCount(), "\n");
+ out.print(" Source: ", m_codeBlock->sourceCodeOnOneLine(), "\n");
+ out.print(" Code at [", RawPointer(linkBuffer.debugAddress()), ", ", RawPointer(static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize()), "):\n");
+}
+
+MacroAssembler::Label JITDisassembler::firstSlowLabel()
+{
+ MacroAssembler::Label firstSlowLabel;
+ for (unsigned i = 0; i < m_labelForBytecodeIndexInSlowPath.size(); ++i) {
+ if (m_labelForBytecodeIndexInSlowPath[i].isSet()) {
+ firstSlowLabel = m_labelForBytecodeIndexInSlowPath[i];
+ break;
+ }
+ }
+ return firstSlowLabel.isSet() ? firstSlowLabel : m_endOfSlowPath;
+}
+
+Vector<JITDisassembler::DumpedOp> JITDisassembler::dumpVectorForInstructions(LinkBuffer& linkBuffer, const char* prefix, Vector<MacroAssembler::Label>& labels, MacroAssembler::Label endLabel)
{
- for (unsigned i = 0 ; i < labels.size();) {
+ StringPrintStream out;
+ Vector<DumpedOp> result;
+
+ for (unsigned i = 0; i < labels.size();) {
if (!labels[i].isSet()) {
i++;
continue;
}
+ out.reset();
+ result.append(DumpedOp());
+ result.last().index = i;
out.print(prefix);
- m_codeBlock->dumpBytecode(i);
+ m_codeBlock->dumpBytecode(out, i);
for (unsigned nextIndex = i + 1; ; nextIndex++) {
if (nextIndex >= labels.size()) {
dumpDisassembly(out, linkBuffer, labels[i], endLabel);
- return;
+ result.last().disassembly = out.toCString();
+ return result;
}
if (labels[nextIndex].isSet()) {
dumpDisassembly(out, linkBuffer, labels[i], labels[nextIndex]);
+ result.last().disassembly = out.toCString();
i = nextIndex;
break;
}
}
}
+
+ return result;
+}
+
+void JITDisassembler::dumpForInstructions(PrintStream& out, LinkBuffer& linkBuffer, const char* prefix, Vector<MacroAssembler::Label>& labels, MacroAssembler::Label endLabel)
+{
+ Vector<DumpedOp> dumpedOps = dumpVectorForInstructions(linkBuffer, prefix, labels, endLabel);
+
+ for (unsigned i = 0; i < dumpedOps.size(); ++i)
+ out.print(dumpedOps[i].disassembly);
+}
+
+void JITDisassembler::reportInstructions(Profiler::Compilation* compilation, LinkBuffer& linkBuffer, const char* prefix, Vector<MacroAssembler::Label>& labels, MacroAssembler::Label endLabel)
+{
+ Vector<DumpedOp> dumpedOps = dumpVectorForInstructions(linkBuffer, prefix, labels, endLabel);
+
+ for (unsigned i = 0; i < dumpedOps.size(); ++i) {
+ compilation->addDescription(
+ Profiler::CompiledBytecode(
+ Profiler::OriginStack(Profiler::Origin(compilation->bytecodes(), dumpedOps[i].index)),
+ dumpedOps[i].disassembly));
+ }
}
void JITDisassembler::dumpDisassembly(PrintStream& out, LinkBuffer& linkBuffer, MacroAssembler::Label from, MacroAssembler::Label to)
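The refactoring above factors the per-bytecode disassembly walk into dumpVectorForInstructions(), which returns DumpedOp records that both the plain-text dumper and the new profiler reporter consume, so the two outputs cannot drift apart. A sketch of that single-producer, two-consumers shape:

    #include <cstdio>
    #include <string>
    #include <vector>

    struct DumpedOp { unsigned index; std::string disassembly; };

    static void dumpAll(const std::vector<DumpedOp>& ops) // PrintStream path
    {
        for (const DumpedOp& op : ops)
            fputs(op.disassembly.c_str(), stdout);
    }

    static void reportAll(const std::vector<DumpedOp>& ops) // profiler path
    {
        for (const DumpedOp& op : ops)
            printf("bytecode %u -> %zu bytes of disassembly\n", op.index, op.disassembly.size());
    }

    int main()
    {
        std::vector<DumpedOp> ops = { { 0, "  [ 0] enter\n" }, { 1, "  [ 1] ret\n" } };
        dumpAll(ops);
        reportAll(ops);
    }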
diff --git a/Source/JavaScriptCore/jit/JITDisassembler.h b/Source/JavaScriptCore/jit/JITDisassembler.h
index ca914748c..7edbb9cf7 100644
--- a/Source/JavaScriptCore/jit/JITDisassembler.h
+++ b/Source/JavaScriptCore/jit/JITDisassembler.h
@@ -32,6 +32,7 @@
#include "LinkBuffer.h"
#include "MacroAssembler.h"
+#include "ProfilerDatabase.h"
#include <wtf/Vector.h>
namespace JSC {
@@ -58,9 +59,21 @@ public:
void dump(LinkBuffer&);
void dump(PrintStream&, LinkBuffer&);
+ void reportToProfiler(Profiler::Compilation*, LinkBuffer&);
private:
+ void dumpHeader(PrintStream&, LinkBuffer&);
+ MacroAssembler::Label firstSlowLabel();
+
+ struct DumpedOp {
+ unsigned index;
+ CString disassembly;
+ };
+ Vector<DumpedOp> dumpVectorForInstructions(LinkBuffer&, const char* prefix, Vector<MacroAssembler::Label>& labels, MacroAssembler::Label endLabel);
+
void dumpForInstructions(PrintStream&, LinkBuffer&, const char* prefix, Vector<MacroAssembler::Label>& labels, MacroAssembler::Label endLabel);
+ void reportInstructions(Profiler::Compilation*, LinkBuffer&, const char* prefix, Vector<MacroAssembler::Label>& labels, MacroAssembler::Label endLabel);
+
void dumpDisassembly(PrintStream&, LinkBuffer&, MacroAssembler::Label from, MacroAssembler::Label to);
CodeBlock* m_codeBlock;
diff --git a/Source/JavaScriptCore/jit/JITDriver.h b/Source/JavaScriptCore/jit/JITDriver.h
index 645c65b28..a2221fa0f 100644
--- a/Source/JavaScriptCore/jit/JITDriver.h
+++ b/Source/JavaScriptCore/jit/JITDriver.h
@@ -40,12 +40,12 @@ namespace JSC {
template<typename CodeBlockType>
inline bool jitCompileIfAppropriate(ExecState* exec, OwnPtr<CodeBlockType>& codeBlock, JITCode& jitCode, JITCode::JITType jitType, unsigned bytecodeIndex, JITCompilationEffort effort)
{
- JSGlobalData& globalData = exec->globalData();
+ VM& vm = exec->vm();
if (jitType == codeBlock->getJITType())
return true;
- if (!globalData.canUseJIT())
+ if (!vm.canUseJIT())
return true;
codeBlock->unlinkIncomingCalls();
@@ -64,7 +64,7 @@ inline bool jitCompileIfAppropriate(ExecState* exec, OwnPtr<CodeBlockType>& code
jitCode = oldJITCode;
return false;
}
- jitCode = JIT::compile(&globalData, codeBlock.get(), effort);
+ jitCode = JIT::compile(&vm, codeBlock.get(), effort);
if (!jitCode) {
jitCode = oldJITCode;
return false;
@@ -77,12 +77,12 @@ inline bool jitCompileIfAppropriate(ExecState* exec, OwnPtr<CodeBlockType>& code
inline bool jitCompileFunctionIfAppropriate(ExecState* exec, OwnPtr<FunctionCodeBlock>& codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck, JITCode::JITType jitType, unsigned bytecodeIndex, JITCompilationEffort effort)
{
- JSGlobalData& globalData = exec->globalData();
+ VM& vm = exec->vm();
if (jitType == codeBlock->getJITType())
return true;
- if (!globalData.canUseJIT())
+ if (!vm.canUseJIT())
return true;
codeBlock->unlinkIncomingCalls();
@@ -103,7 +103,7 @@ inline bool jitCompileFunctionIfAppropriate(ExecState* exec, OwnPtr<FunctionCode
jitCodeWithArityCheck = oldJITCodeWithArityCheck;
return false;
}
- jitCode = JIT::compile(&globalData, codeBlock.get(), effort, &jitCodeWithArityCheck);
+ jitCode = JIT::compile(&vm, codeBlock.get(), effort, &jitCodeWithArityCheck);
if (!jitCode) {
jitCode = oldJITCode;
jitCodeWithArityCheck = oldJITCodeWithArityCheck;
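Both driver functions follow the same rollback discipline: stash the old JITCode, attempt the compile, and restore the old code on failure so the caller can keep executing the previous tier. A condensed C++ sketch of that pattern (simplified types):

    #include <cstdio>
    #include <memory>
    #include <string>

    using JITCode = std::shared_ptr<std::string>;

    static JITCode tryCompile(bool succeed)
    {
        return succeed ? std::make_shared<std::string>("new code") : nullptr;
    }

    static bool recompile(JITCode& jitCode, bool succeed)
    {
        JITCode oldJITCode = jitCode;  // keep the previous tier's code alive
        jitCode = tryCompile(succeed);
        if (!jitCode) {                // compile failed or was aborted early
            jitCode = oldJITCode;      // roll back, as in the diff's failure path
            return false;
        }
        return true;
    }

    int main()
    {
        JITCode code = std::make_shared<std::string>("old code");
        printf("%d %s\n", recompile(code, false) ? 1 : 0, code->c_str()); // 0 old code
    }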
diff --git a/Source/JavaScriptCore/jit/JITExceptions.cpp b/Source/JavaScriptCore/jit/JITExceptions.cpp
index aeb869474..46f59a3a9 100644
--- a/Source/JavaScriptCore/jit/JITExceptions.cpp
+++ b/Source/JavaScriptCore/jit/JITExceptions.cpp
@@ -29,20 +29,21 @@
#include "CallFrame.h"
#include "CodeBlock.h"
#include "Interpreter.h"
-#include "JSGlobalData.h"
-#include "JSValue.h"
+#include "JSCJSValue.h"
+#include "VM.h"
+#include "Operations.h"
#if ENABLE(JIT) || ENABLE(LLINT)
namespace JSC {
-ExceptionHandler genericThrow(JSGlobalData* globalData, ExecState* callFrame, JSValue exceptionValue, unsigned vPCIndex)
+ExceptionHandler genericThrow(VM* vm, ExecState* callFrame, JSValue exceptionValue, unsigned vPCIndex)
{
- ASSERT(exceptionValue);
+ RELEASE_ASSERT(exceptionValue);
- globalData->exception = JSValue();
- HandlerInfo* handler = globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex); // This may update callFrame & exceptionValue!
- globalData->exception = exceptionValue;
+ vm->exception = JSValue();
+ HandlerInfo* handler = vm->interpreter->throwException(callFrame, exceptionValue, vPCIndex); // This may update callFrame & exceptionValue!
+ vm->exception = exceptionValue;
void* catchRoutine;
Instruction* catchPCForInterpreter = 0;
@@ -52,18 +53,18 @@ ExceptionHandler genericThrow(JSGlobalData* globalData, ExecState* callFrame, JS
} else
catchRoutine = FunctionPtr(LLInt::getCodePtr(ctiOpThrowNotCaught)).value();
- globalData->callFrameForThrow = callFrame;
- globalData->targetMachinePCForThrow = catchRoutine;
- globalData->targetInterpreterPCForThrow = catchPCForInterpreter;
+ vm->callFrameForThrow = callFrame;
+ vm->targetMachinePCForThrow = catchRoutine;
+ vm->targetInterpreterPCForThrow = catchPCForInterpreter;
- ASSERT(catchRoutine);
+ RELEASE_ASSERT(catchRoutine);
ExceptionHandler exceptionHandler = { catchRoutine, callFrame };
return exceptionHandler;
}
-ExceptionHandler jitThrow(JSGlobalData* globalData, ExecState* callFrame, JSValue exceptionValue, ReturnAddressPtr faultLocation)
+ExceptionHandler jitThrow(VM* vm, ExecState* callFrame, JSValue exceptionValue, ReturnAddressPtr faultLocation)
{
- return genericThrow(globalData, callFrame, exceptionValue, callFrame->codeBlock()->bytecodeOffset(callFrame, faultLocation));
+ return genericThrow(vm, callFrame, exceptionValue, callFrame->codeBlock()->bytecodeOffset(callFrame, faultLocation));
}
}
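genericThrow() performs a small handshake: clear the pending exception, let the interpreter unwind to a handler (which may move the call frame), re-arm vm->exception, and return the machine PC the JIT should jump to. A simplified model of that control flow (all types are stand-ins):

    #include <cstdio>

    struct Frame { const char* name; };
    struct Handler { void* catchRoutine; };
    struct ExceptionHandler { void* catchRoutine; Frame* callFrame; };

    // Stand-in for Interpreter::throwException: may update the frame pointer.
    static Handler* unwind(Frame*& frame)
    {
        static Handler topLevel = { reinterpret_cast<void*>(0x1) };
        frame = nullptr; // unwound past every JS frame in this model
        return &topLevel;
    }

    static ExceptionHandler genericThrow(Frame* frame)
    {
        Handler* handler = unwind(frame);  // may update `frame`
        ExceptionHandler result = { handler->catchRoutine, frame };
        return result;                     // JIT code jumps to catchRoutine
    }

    int main()
    {
        Frame f = { "js frame" };
        ExceptionHandler h = genericThrow(&f);
        printf("resume at %p, frame %p\n", h.catchRoutine, static_cast<void*>(h.callFrame));
    }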
diff --git a/Source/JavaScriptCore/jit/JITExceptions.h b/Source/JavaScriptCore/jit/JITExceptions.h
index 1555f95dc..b611caf95 100644
--- a/Source/JavaScriptCore/jit/JITExceptions.h
+++ b/Source/JavaScriptCore/jit/JITExceptions.h
@@ -26,7 +26,7 @@
#ifndef JITExceptions_h
#define JITExceptions_h
-#include "JSValue.h"
+#include "JSCJSValue.h"
#include "MacroAssemblerCodeRef.h"
#if ENABLE(JIT) || ENABLE(LLINT)
@@ -34,7 +34,7 @@
namespace JSC {
class ExecState;
-class JSGlobalData;
+class VM;
// This header gives other parts of the system access to the JIT's protocol
// for throwing and handling exceptions.
@@ -44,9 +44,9 @@ struct ExceptionHandler {
ExecState* callFrame;
};
-ExceptionHandler genericThrow(JSGlobalData*, ExecState*, JSValue exceptionValue, unsigned vPCIndex);
+ExceptionHandler genericThrow(VM*, ExecState*, JSValue exceptionValue, unsigned vPCIndex);
-ExceptionHandler jitThrow(JSGlobalData*, ExecState*, JSValue exceptionValue, ReturnAddressPtr faultLocation);
+ExceptionHandler jitThrow(VM*, ExecState*, JSValue exceptionValue, ReturnAddressPtr faultLocation);
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITInlines.h b/Source/JavaScriptCore/jit/JITInlines.h
index e6f95b94c..5e5d834aa 100644
--- a/Source/JavaScriptCore/jit/JITInlines.h
+++ b/Source/JavaScriptCore/jit/JITInlines.h
@@ -42,16 +42,6 @@ ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
return m_codeBlock->getConstant(src);
}
-ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
-{
-#if USE(JSVALUE32_64)
- store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
- store32(from, payloadFor(entry, callFrameRegister));
-#else
- store64(from, addressFor(entry, callFrameRegister));
-#endif
-}
-
ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
{
#if USE(JSVALUE32_64)
@@ -62,20 +52,6 @@ ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::Ca
#endif
}
-ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
-{
-#if USE(JSVALUE32_64)
- storePtr(from, payloadFor(entry, callFrameRegister));
-#else
- store64(from, addressFor(entry, callFrameRegister));
-#endif
-}
-
-ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
-{
- storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
-}
-
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
loadPtr(Address(from, entry * sizeof(Register)), to);
@@ -102,16 +78,16 @@ ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEnt
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
- failures.append(branchPtr(NotEqual, Address(src, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get())));
+ failures.append(branchPtr(NotEqual, Address(src, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
failures.append(branchTest32(Zero, dst));
- loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplFlagsOffset()), regT1);
- loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
+ loadPtr(MacroAssembler::Address(dst, StringImpl::flagsOffset()), regT1);
+ loadPtr(MacroAssembler::Address(dst, StringImpl::dataOffset()), dst);
JumpList is16Bit;
JumpList cont8Bit;
- is16Bit.append(branchTest32(Zero, regT1, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
+ is16Bit.append(branchTest32(Zero, regT1, TrustedImm32(StringImpl::flagIs8Bit())));
load8(MacroAssembler::Address(dst, 0), dst);
cont8Bit.append(jump());
is16Bit.link(this);
@@ -160,18 +136,15 @@ ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace
m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
#endif
-#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#ifndef NDEBUG
m_uninterruptedInstructionSequenceBegin = label();
m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
-#endif
}
ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
{
- UNUSED_PARAM(dst);
-#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+#ifndef NDEBUG
/* There are several cases when the uninterrupted sequence is larger than
* the maximum required offset for patching the same sequence. E.g., if in an
* uninterrupted sequence the last macroassembler's instruction is a stub
@@ -179,6 +152,7 @@ ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace,
* calculation of the length of the uninterrupted sequence. So the insnSpace and
* constSpace should be an upper limit instead of a hard limit.
*/
+
#if CPU(SH4)
if ((dst > 15) || (dst < -16)) {
insnSpace += 8;
@@ -187,100 +161,30 @@ ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace,
if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
insnSpace += 8;
+#else
+ UNUSED_PARAM(dst);
#endif
+
ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
+#else
+ UNUSED_PARAM(insnSpace);
+ UNUSED_PARAM(constSpace);
+ UNUSED_PARAM(dst);
#endif
}
-#endif
-
-#if CPU(ARM)
-
-ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
-{
- move(linkRegister, reg);
-}
-
-ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
-{
- move(reg, linkRegister);
-}
-
-ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
-{
- loadPtr(address, linkRegister);
-}
-#elif CPU(SH4)
-
-ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
-{
- m_assembler.stspr(reg);
-}
-
-ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
-{
- m_assembler.ldspr(reg);
-}
-
-ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
-{
- loadPtrLinkReg(address);
-}
-
-#elif CPU(MIPS)
-
-ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
-{
- move(returnAddressRegister, reg);
-}
-
-ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
-{
- move(reg, returnAddressRegister);
-}
-
-ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
-{
- loadPtr(address, returnAddressRegister);
-}
-
-#else // CPU(X86) || CPU(X86_64)
-
-ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
-{
- pop(reg);
-}
-
-ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
-{
- push(reg);
-}
-
-ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
-{
- push(address);
-}
-
-#endif
-
-ALWAYS_INLINE void JIT::restoreArgumentReference()
-{
- move(stackPointerRegister, firstArgumentRegister);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-}
+#endif // ASSEMBLER_HAS_CONSTANT_POOL
ALWAYS_INLINE void JIT::updateTopCallFrame()
{
ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
- if (m_bytecodeOffset) {
#if USE(JSVALUE32_64)
- storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount));
+ storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount));
#else
- store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount));
+ store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount));
#endif
- }
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
+ storePtr(callFrameRegister, &m_vm->topCallFrame);
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
@@ -351,12 +255,6 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg)
return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType type)
-{
- loadPtr(Address(baseReg, JSCell::structureOffset()), scratchReg);
- return branch8(NotEqual, Address(scratchReg, Structure::typeInfoTypeOffset()), TrustedImm32(type));
-}
-
#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
@@ -415,22 +313,15 @@ ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}
-template <typename ClassType, MarkedBlock::DestructorType destructorType, typename StructureType> inline void JIT::emitAllocateBasicJSObject(StructureType structure, RegisterID result, RegisterID storagePtr)
+template<typename StructureType>
+inline void JIT::emitAllocateJSObject(RegisterID allocator, StructureType structure, RegisterID result, RegisterID scratch)
{
- size_t size = ClassType::allocationSize(INLINE_STORAGE_CAPACITY);
- MarkedAllocator* allocator = 0;
- if (destructorType == MarkedBlock::Normal)
- allocator = &m_globalData->heap.allocatorForObjectWithNormalDestructor(size);
- else if (destructorType == MarkedBlock::ImmortalStructure)
- allocator = &m_globalData->heap.allocatorForObjectWithImmortalStructureDestructor(size);
- else
- allocator = &m_globalData->heap.allocatorForObjectWithoutDestructor(size);
- loadPtr(&allocator->m_freeList.head, result);
+ loadPtr(Address(allocator, MarkedAllocator::offsetOfFreeListHead()), result);
addSlowCase(branchTestPtr(Zero, result));
// remove the object from the free list
- loadPtr(Address(result), storagePtr);
- storePtr(storagePtr, &allocator->m_freeList.head);
+ loadPtr(Address(result), scratch);
+ storePtr(scratch, Address(allocator, MarkedAllocator::offsetOfFreeListHead()));
// initialize the object's structure
storePtr(structure, Address(result, JSCell::structureOffset()));
@@ -439,11 +330,6 @@ template <typename ClassType, MarkedBlock::DestructorType destructorType, typena
storePtr(TrustedImmPtr(0), Address(result, JSObject::butterflyOffset()));
}
-template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID scratch)
-{
- emitAllocateBasicJSObject<JSFinalObject, MarkedBlock::None, T>(structure, result, scratch);
-}
-
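The replacement emitAllocateJSObject() takes the MarkedAllocator as a runtime register instead of baking in one of three destructor-specific allocators, and compiles a free-list pop: load the head, bail to the slow case if it is null, otherwise unlink the cell. A C++ model of that fast path:

    #include <cstdio>

    struct FreeCell { FreeCell* next; };
    struct Allocator { FreeCell* freeListHead; };

    static void* allocateFast(Allocator& allocator)
    {
        FreeCell* result = allocator.freeListHead;
        if (!result)
            return nullptr;                    // addSlowCase(branchTestPtr(Zero, result))
        allocator.freeListHead = result->next; // pop the cell off the free list
        return result;                         // caller installs structure and butterfly
    }

    int main()
    {
        FreeCell cells[2] = { { &cells[1] }, { nullptr } };
        Allocator a = { &cells[0] };
        printf("%p %p %p\n", allocateFast(a), allocateFast(a), allocateFast(a));
    }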
#if ENABLE(VALUE_PROFILER)
inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
{
@@ -503,7 +389,7 @@ inline void JIT::emitArrayProfilingSite(RegisterID structureAndIndexingType, Reg
RegisterID structure = structureAndIndexingType;
RegisterID indexingType = structureAndIndexingType;
- if (canBeOptimized())
+ if (shouldEmitProfiling())
storePtr(structure, arrayProfile->addressOfLastSeenStructure());
load8(Address(structure, Structure::indexingTypeOffset()), indexingType);
@@ -528,6 +414,15 @@ inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfi
#endif
}
+inline void JIT::emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile* arrayProfile)
+{
+#if ENABLE(VALUE_PROFILER)
+ store8(TrustedImm32(1), arrayProfile->addressOfOutOfBounds());
+#else
+ UNUSED_PARAM(arrayProfile);
+#endif
+}
+
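The new emitArrayProfileOutOfBoundsSpecialCase() just sets one byte in the ArrayProfile; the point is that the baseline tier records out-of-bounds accesses cheaply so the optimizing tier can later pick a bounds-tolerant access strategy. A hedged illustration of what the flag buys:

    #include <cstdio>

    struct ArrayProfile { unsigned char outOfBounds = 0; };

    static int getByVal(const int* data, int length, int index, ArrayProfile& profile)
    {
        if (index < 0 || index >= length) {
            profile.outOfBounds = 1; // store8(TrustedImm32(1), addressOfOutOfBounds())
            return 0;                // stand-in for the undefined-returning slow path
        }
        return data[index];
    }

    int main()
    {
        ArrayProfile profile;
        int data[2] = { 10, 20 };
        getByVal(data, 2, 5, profile);
        printf("sawOutOfBounds=%d\n", profile.outOfBounds); // 1: later tiers can adapt
    }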
static inline bool arrayProfileSaw(ArrayModes arrayModes, IndexingType capability)
{
#if ENABLE(VALUE_PROFILER)
@@ -605,10 +500,10 @@ inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
- ASSERT(tag != payload);
+ RELEASE_ASSERT(tag != payload);
if (base == callFrameRegister) {
- ASSERT(payload != base);
+ RELEASE_ASSERT(payload != base);
emitLoadPayload(index, payload);
emitLoadTag(index, tag);
return;
@@ -732,8 +627,8 @@ inline void JIT::map(unsigned bytecodeOffset, int virtualRegisterIndex, Register
m_mappedTag = tag;
m_mappedPayload = payload;
- ASSERT(!canBeOptimized() || m_mappedPayload == regT0);
- ASSERT(!canBeOptimized() || m_mappedTag == regT1);
+ ASSERT(!canBeOptimizedOrInlined() || m_mappedPayload == regT0);
+ ASSERT(!canBeOptimizedOrInlined() || m_mappedTag == regT1);
}
inline void JIT::unmap(RegisterID registerID)
@@ -919,11 +814,6 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
addSlowCase(emitJumpIfJSCell(reg));
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
-{
- return branchTest64(NonZero, reg, tagMaskRegister);
-}
-
ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
addSlowCase(emitJumpIfNotJSCell(reg));
@@ -990,14 +880,6 @@ ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID d
emitFastArithIntToImmNoCheck(src, dest);
}
-// operand is int32_t, must have been zero-extended if register is 64-bit.
-ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
-{
- if (src != dest)
- move(src, dest);
- or64(tagTypeNumberRegister, dest);
-}
-
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
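Several of the helpers removed from this header, such as emitFastArithIntToImmNoCheck and emitJumpIfNotJSCell, are one-liners over the JSVALUE64 encoding: a zero-extended int32 becomes a JSValue by OR-ing in the TagTypeNumber high bits, and cell-ness is a test against the tag mask. A model of that boxing (the constant matches JSC's documented 64-bit scheme, but treat it as illustrative):

    #include <cstdint>
    #include <cstdio>

    static const uint64_t TagTypeNumber = 0xffff000000000000ull;

    // Mirrors or64(tagTypeNumberRegister, dest) on a zero-extended int32.
    static uint64_t boxInt32(int32_t value)
    {
        return TagTypeNumber | static_cast<uint32_t>(value);
    }

    static bool isInt32(uint64_t boxed)
    {
        return (boxed & TagTypeNumber) == TagTypeNumber;
    }

    int main()
    {
        uint64_t v = boxInt32(-1);
        printf("boxed=%llx isInt32=%d\n", static_cast<unsigned long long>(v), isInt32(v) ? 1 : 0);
    }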
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index 36e7ece1b..2a88f5052 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -43,324 +43,9 @@ namespace JSC {
#if USE(JSVALUE64)
-PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGlobalData* globalData, TrampolineStructure *trampolines)
+JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction)
{
- // (2) The second function provides fast property access for string length
- Label stringLengthBegin = align();
-
- // Check eax is a string
- Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(globalData->stringStructure.get()));
-
- // Checks out okay! - get the length from the Ustring.
- load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT0);
-
- Jump string_failureCases3 = branch32(LessThan, regT0, TrustedImm32(0));
-
- // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
- emitFastArithIntToImmNoCheck(regT0, regT0);
-
- ret();
-
- // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
- COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
-
- JumpList callSlowCase;
- JumpList constructSlowCase;
-
- // VirtualCallLink Trampoline
- // regT0 holds callee; callFrame is moved and partially initialized.
- Label virtualCallLinkBegin = align();
- callSlowCase.append(emitJumpIfNotJSCell(regT0));
- callSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
-
- // Finish canonical initialization before JS function call.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- // Also initialize ReturnPC for use by lazy linking and exceptions.
- preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
-
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
- restoreArgumentReference();
- Call callLazyLinkCall = call();
- restoreReturnAddressBeforeReturn(regT3);
- jump(regT0);
-
- // VirtualConstructLink Trampoline
- // regT0 holds callee; callFrame is moved and partially initialized.
- Label virtualConstructLinkBegin = align();
- constructSlowCase.append(emitJumpIfNotJSCell(regT0));
- constructSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
-
- // Finish canonical initialization before JS function call.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- // Also initialize ReturnPC for use by lazy linking and exceptions.
- preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
-
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
- restoreArgumentReference();
- Call callLazyLinkConstruct = call();
- restoreReturnAddressBeforeReturn(regT3);
- jump(regT0);
-
- // VirtualCall Trampoline
- // regT0 holds callee; regT2 will hold the FunctionExecutable.
- Label virtualCallBegin = align();
- callSlowCase.append(emitJumpIfNotJSCell(regT0));
- callSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
-
- // Finish canonical initialization before JS function call.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- Jump hasCodeBlock1 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), TrustedImm32(0));
- preserveReturnAddressAfterCall(regT3);
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
- restoreArgumentReference();
- Call callCompileCall = call();
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- hasCodeBlock1.link(this);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCallWithArityCheck)), regT0);
- jump(regT0);
-
- // VirtualConstruct Trampoline
- // regT0 holds callee; regT2 will hold the FunctionExecutable.
- Label virtualConstructBegin = align();
- constructSlowCase.append(emitJumpIfNotJSCell(regT0));
- constructSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
-
- // Finish canonical initialization before JS function call.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- Jump hasCodeBlock2 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), TrustedImm32(0));
- preserveReturnAddressAfterCall(regT3);
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
- restoreArgumentReference();
- Call callCompileConstruct = call();
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- hasCodeBlock2.link(this);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
- jump(regT0);
-
- callSlowCase.link(this);
- // Finish canonical initialization before JS function call.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2, regT2);
- emitPutCellToCallFrameHeader(regT2, JSStack::ScopeChain);
-
- // Also initialize ReturnPC and CodeBlock, like a JS function would.
- preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
- emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
-
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
- restoreArgumentReference();
- Call callCallNotJSFunction = call();
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
- restoreReturnAddressBeforeReturn(regT3);
- ret();
-
- constructSlowCase.link(this);
- // Finish canonical initialization before JS function call.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2, regT2);
- emitPutCellToCallFrameHeader(regT2, JSStack::ScopeChain);
-
- // Also initialize ReturnPC and CodeBlock, like a JS function would.
- preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
- emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
-
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
- restoreArgumentReference();
- Call callConstructNotJSFunction = call();
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
- restoreReturnAddressBeforeReturn(regT3);
- ret();
-
- // NativeCall Trampoline
- Label nativeCallThunk = privateCompileCTINativeCall(globalData);
- Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);
-
- Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
- Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
- Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
-
- // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(*m_globalData, this, GLOBAL_THUNK_ID);
-
- patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
- patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
- patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
- patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));
- patchBuffer.link(callCallNotJSFunction, FunctionPtr(cti_op_call_NotJSFunction));
- patchBuffer.link(callConstructNotJSFunction, FunctionPtr(cti_op_construct_NotJSConstruct));
-
- CodeRef finalCode = FINALIZE_CODE(patchBuffer, ("JIT CTI machine trampolines"));
- RefPtr<ExecutableMemoryHandle> executableMemory = finalCode.executableMemory();
-
- trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
- trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
- trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
- trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
- trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
- trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
- trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
-
- return executableMemory.release();
-}
-
-JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
-{
- int executableOffsetToFunction = isConstruct ? OBJECT_OFFSETOF(NativeExecutable, m_constructor) : OBJECT_OFFSETOF(NativeExecutable, m_function);
-
- Label nativeCallThunk = align();
-
- emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
-
-#if CPU(X86_64)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- peek(regT1);
- emitPutToCallFrameHeader(regT1, JSStack::ReturnPC);
-
-#if !OS(WINDOWS)
- // Calling convention: f(edi, esi, edx, ecx, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, X86Registers::edi);
-
- subPtr(TrustedImm32(16 - sizeof(int64_t)), stackPointerRegister); // Align stack after call.
-
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
- loadPtr(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::r9);
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- call(Address(X86Registers::r9, executableOffsetToFunction));
-
- addPtr(TrustedImm32(16 - sizeof(int64_t)), stackPointerRegister);
-#else
- // Calling convention: f(ecx, edx, r8, r9, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, X86Registers::ecx);
-
- // Leave space for the callee parameter home addresses and align the stack.
- subPtr(TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), stackPointerRegister);
-
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
- loadPtr(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::r9);
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- call(Address(X86Registers::r9, executableOffsetToFunction));
-
- addPtr(TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), stackPointerRegister);
-#endif
-
-#elif CPU(ARM)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
-
- // Calling convention: f(r0 == regT0, r1 == regT1, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, ARMRegisters::r0);
-
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
- move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- call(Address(regT2, executableOffsetToFunction));
-
- restoreReturnAddressBeforeReturn(regT3);
-
-#elif CPU(MIPS)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
-
- // Calling convention: f(a0, a1, a2, a3);
- // Host function signature: f(ExecState*);
-
- // Allocate stack space for 16 bytes (8-byte aligned)
- // 16 bytes (unused) for 4 arguments
- subPtr(TrustedImm32(16), stackPointerRegister);
-
- // Setup arg0
- move(callFrameRegister, MIPSRegisters::a0);
-
- // Call
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
- loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- call(Address(regT2, executableOffsetToFunction));
-
- // Restore stack space
- addPtr(TrustedImm32(16), stackPointerRegister);
-
- restoreReturnAddressBeforeReturn(regT3);
-
-#else
-#error "JIT not supported on this platform."
- UNUSED_PARAM(executableOffsetToFunction);
- breakpoint();
-#endif
-
- // Check for an exception
- load64(&(globalData->exception), regT2);
- Jump exceptionHandler = branchTest64(NonZero, regT2);
-
- // Return.
- ret();
-
- // Handle an exception
- exceptionHandler.link(this);
-
- // Grab the return address.
- preserveReturnAddressAfterCall(regT1);
-
- move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
- // Set the return address.
- move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
- restoreReturnAddressBeforeReturn(regT1);
-
- ret();
-
- return nativeCallThunk;
-}
-
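For orientation, the removed thunk above ends with JSC's standard post-host-call pattern: load the VM's pending exception, return normally when it is empty, otherwise record the return address in exceptionLocation and divert to ctiVMThrowTrampoline. A minimal sketch of that control flow, with invented types standing in for JSC's internals:

#include <cstdint>

struct VM {
    void* exception = nullptr;         // non-null while a JS exception is pending
    void* exceptionLocation = nullptr; // return address recorded for the unwinder
};

using HostFunction = void* (*)(VM&);

void* callHostChecked(VM& vm, HostFunction f, void* returnAddress) {
    void* result = f(vm);
    if (!vm.exception)
        return result;                    // fast path: plain ret
    vm.exceptionLocation = returnAddress; // remember where the call site was
    // The real thunk now jumps to ctiVMThrowTrampoline instead of returning.
    return nullptr;
}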
-JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFunction)
-{
- return CodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ return vm->getCTIStub(nativeCallGenerator);
}
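The one-line replacement reflects the new thunk model: rather than compiling one monolithic trampoline blob up front, the VM hands out lazily generated, cached stubs keyed by their generator function. A rough sketch of such a cache; the names here are hypothetical (JSC's real JITThunks keeps a HashMap from ThunkGenerator to MacroAssemblerCodeRef):

#include <map>

struct VM;
struct CodeRef { void* entry = nullptr; };
using ThunkGenerator = CodeRef (*)(VM*);

struct ThunkCache {
    // One stub per generator: compiled on first request, reused afterwards.
    CodeRef get(VM* vm, ThunkGenerator generator) {
        auto it = m_stubs.find(generator);
        if (it != m_stubs.end())
            return it->second;
        CodeRef stub = generator(vm); // compile exactly once
        m_stubs.emplace(generator, stub);
        return stub;
    }
    std::map<ThunkGenerator, CodeRef> m_stubs;
};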
void JIT::emit_op_mov(Instruction* currentInstruction)
@@ -368,7 +53,7 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
int dst = currentInstruction[1].u.operand;
int src = currentInstruction[2].u.operand;
- if (canBeOptimized()) {
+ if (canBeOptimizedOrInlined()) {
// Use a simpler approach, since the DFG thinks that the last result register
// is always set to the destination on every operation.
emitGetVirtualRegister(src, regT0);
@@ -396,7 +81,7 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
void JIT::emit_op_end(Instruction* currentInstruction)
{
- ASSERT(returnValueRegister != callFrameRegister);
+ RELEASE_ASSERT(returnValueRegister != callFrameRegister);
emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
restoreReturnAddressBeforeReturn(Address(callFrameRegister, JSStack::ReturnPC * static_cast<int>(sizeof(Register))));
ret();
@@ -410,15 +95,25 @@ void JIT::emit_op_jmp(Instruction* currentInstruction)
void JIT::emit_op_new_object(Instruction* currentInstruction)
{
- emitAllocateJSFinalObject(TrustedImmPtr(m_codeBlock->globalObject()->emptyObjectStructure()), regT0, regT1);
-
+ Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
+ size_t allocationSize = JSObject::allocationSize(structure->inlineCapacity());
+ MarkedAllocator* allocator = &m_vm->heap.allocatorForObjectWithoutDestructor(allocationSize);
+
+ RegisterID resultReg = regT0;
+ RegisterID allocatorReg = regT1;
+ RegisterID scratchReg = regT2;
+
+ move(TrustedImmPtr(allocator), allocatorReg);
+ emitAllocateJSObject(allocatorReg, TrustedImmPtr(structure), resultReg, scratchReg);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
+ JITStubCall stubCall(this, cti_op_new_object);
+ stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.objectAllocationProfile->structure()));
+ stubCall.call(currentInstruction[1].u.operand);
}
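The rewritten fast path bakes profiling data into the code: the allocation size is computed from the structure's inline capacity at compile time, the matching MarkedAllocator bucket is chosen then, and only the allocator and structure pointers are emitted as immediates. A simplified, self-contained model of that size-to-allocator mapping (the constants are invented, not JSC's real block layout):

#include <cassert>
#include <cstddef>

constexpr size_t kHeaderSize = 16; // hypothetical JSObject header
constexpr size_t kSlotSize = 8;    // one inline property slot
constexpr size_t kGranule = 16;    // spacing between size classes

size_t allocationSize(unsigned inlineCapacity) {
    return kHeaderSize + inlineCapacity * kSlotSize;
}

size_t sizeClassFor(size_t bytes) {
    // Round up to the allocator granule; each class owns its own free list.
    return (bytes + kGranule - 1) / kGranule * kGranule;
}

int main() {
    assert(sizeClassFor(allocationSize(4)) == 48); // 16 + 4*8, already aligned
    return 0;
}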
void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
@@ -655,7 +350,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
emitGetVirtualRegister(src, regT0);
Jump isImm = emitJumpIfNotJSCell(regT0);
- addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get())));
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
isImm.link(this);
if (dst != src)
@@ -671,14 +366,6 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_ensure_property_exists);
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_not(Instruction* currentInstruction)
{
emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
@@ -989,18 +676,12 @@ void JIT::emit_op_nstricteq(Instruction* currentInstruction)
compileOpStrictEq(currentInstruction, OpNStrictEq);
}
-void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+void JIT::emit_op_to_number(Instruction* currentInstruction)
{
int srcVReg = currentInstruction[2].u.operand;
emitGetVirtualRegister(srcVReg, regT0);
- Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
-
- emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- addSlowCase(branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(NumberType)));
-
- wasImmediate.link(this);
+ addSlowCase(emitJumpIfNotImmediateNumber(regT0));
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
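On JSVALUE64 the whole conversion check collapses to emitJumpIfNotImmediateNumber: under NaN boxing, every encoded int32 or double has at least one bit of the number tag set, so a single mask test suffices. A hedged model of that test (the 0xFFFF000000000000 constant is the commonly documented scheme, asserted here rather than read out of this diff):

#include <cstdint>

constexpr uint64_t TagTypeNumber = 0xFFFF000000000000ull;

bool isImmediateNumber(uint64_t encoded) {
    // Int32s are TagTypeNumber | value; doubles are stored with an offset that
    // guarantees a tag bit. Cells and special values have no tag bits set.
    return (encoded & TagTypeNumber) != 0;
}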
@@ -1018,20 +699,12 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
{
killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
move(regT0, callFrameRegister);
- peek(regT3, OBJECT_OFFSETOF(struct JITStackFrame, globalData) / sizeof(void*));
- load64(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)), regT0);
- store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)));
+ peek(regT3, OBJECT_OFFSETOF(struct JITStackFrame, vm) / sizeof(void*));
+ load64(Address(regT3, OBJECT_OFFSETOF(VM, exception)), regT0);
+ store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(VM, exception)));
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
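op_catch now pulls the pending exception out of the VM (through the renamed JITStackFrame::vm slot) and immediately stores the empty JSValue back, so the exception is consumed exactly once. In plain C++ terms, a minimal sketch:

#include <cstdint>

struct VM { uint64_t exception = 0; }; // 0 stands in for the empty JSValue

uint64_t takeException(VM& vm) {
    uint64_t e = vm.exception; // load64 of VM::exception
    vm.exception = 0;          // store64 of the empty value back
    return e;
}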
-void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_jmp_scopes);
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.call();
- addJump(jump(), currentInstruction[2].u.operand);
-}
-
void JIT::emit_op_switch_imm(Instruction* currentInstruction)
{
unsigned tableIndex = currentInstruction[1].u.operand;
@@ -1176,7 +849,7 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
void JIT::emit_op_enter(Instruction*)
{
- emitOptimizationCheck(EnterOptimizationCheck);
+ emitEnterOptimizationCheck();
// Even though CTI doesn't use them, we initialize our constant
// registers to zap stale pointers, to avoid unnecessarily prolonging
@@ -1223,7 +896,7 @@ void JIT::emit_op_convert_this(Instruction* currentInstruction)
loadPtr(Address(regT1, JSCell::structureOffset()), regT0);
emitValueProfilingSite();
}
- addSlowCase(branchPtr(Equal, Address(regT1, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get())));
+ addSlowCase(branchPtr(Equal, Address(regT1, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
}
void JIT::emit_op_get_callee(Instruction* currentInstruction)
@@ -1237,22 +910,28 @@ void JIT::emit_op_get_callee(Instruction* currentInstruction)
void JIT::emit_op_create_this(Instruction* currentInstruction)
{
int callee = currentInstruction[2].u.operand;
- emitGetVirtualRegister(callee, regT0);
- loadPtr(Address(regT0, JSFunction::offsetOfCachedInheritorID()), regT2);
- addSlowCase(branchTestPtr(Zero, regT2));
-
- // now regT2 contains the inheritorID, which is the structure that the newly
- // allocated object will have.
-
- emitAllocateJSFinalObject(regT2, regT0, regT1);
+ RegisterID calleeReg = regT0;
+ RegisterID resultReg = regT0;
+ RegisterID allocatorReg = regT1;
+ RegisterID structureReg = regT2;
+ RegisterID scratchReg = regT3;
+
+ emitGetVirtualRegister(callee, calleeReg);
+ loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+ loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+ addSlowCase(branchTestPtr(Zero, allocatorReg));
+
+ emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- linkSlowCase(iter); // doesn't have an inheritor ID
+ linkSlowCase(iter); // doesn't have an allocation profile
linkSlowCase(iter); // allocation failed
+
JITStubCall stubCall(this, cti_op_create_this);
+ stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
}
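op_create_this now trusts the callee's ObjectAllocationProfile: the JIT loads the cached allocator and structure straight off the JSFunction and bails to the stub only while the profile is still unprimed (null allocator). A hedged sketch of that decision, with invented signatures:

struct Allocator;
struct Structure;

// Mirrors the two fields the JIT loads from the callee's profile.
struct ObjectAllocationProfileModel {
    Allocator* allocator = nullptr; // null until the profile is primed
    Structure* structure = nullptr;
};

void* createThis(const ObjectAllocationProfileModel& profile,
                 void* (*fastAlloc)(Allocator*, Structure*),
                 void* (*slowPath)()) {
    if (!profile.allocator)
        return slowPath(); // matches branchTestPtr(Zero, allocatorReg)
    return fastAlloc(profile.allocator, profile.structure);
}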
@@ -1288,7 +967,7 @@ void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowC
linkSlowCase(iter);
if (shouldEmitProfiling())
- move(TrustedImm64(JSValue::encode(m_globalData->stringStructure.get())), regT0);
+ move(TrustedImm64(JSValue::encode(m_vm->stringStructure.get())), regT0);
isNotUndefined.link(this);
emitValueProfilingSite();
JITStubCall stubCall(this, cti_op_convert_this);
@@ -1446,12 +1125,11 @@ void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCase
compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
}
-void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_to_jsnumber);
+ JITStubCall stubCall(this, cti_op_to_number);
stubCall.addArgument(regT0);
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1529,7 +1207,7 @@ void JIT::emit_op_put_to_base(Instruction* currentInstruction)
int id = currentInstruction[2].u.operand;
int value = currentInstruction[3].u.operand;
- PutToBaseOperation* operation = m_codeBlock->putToBaseOperation(currentInstruction[4].u.operand);
+ PutToBaseOperation* operation = currentInstruction[4].u.putToBaseOperation;
switch (operation->m_kind) {
case PutToBaseOperation::GlobalVariablePutChecked:
addSlowCase(branchTest8(NonZero, AbsoluteAddress(operation->m_predicatePointer)));
@@ -1584,6 +1262,45 @@ void JIT::emit_op_put_to_base(Instruction* currentInstruction)
#endif // USE(JSVALUE64)
+void JIT::emit_op_loop_hint(Instruction*)
+{
+ // Emit the JIT optimization check:
+ if (canBeOptimized())
+ addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
+ AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
+
+ // Emit the watchdog timer check:
+ if (m_vm->watchdog.isEnabled())
+ addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog.timerDidFireAddress())));
+}
+
+void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
+{
+#if ENABLE(DFG_JIT)
+ // Emit the slow path for the JIT optimization check:
+ if (canBeOptimized()) {
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_optimize);
+ stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
+ stubCall.call();
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
+ }
+#endif
+
+ // Emit the slow path of the watchdog timer check:
+ if (m_vm->watchdog.isEnabled()) {
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_handle_watchdog_timer);
+ stubCall.call();
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
+ }
+}
+
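op_loop_hint performs two cheap per-back-edge checks: bump the code block's execution counter and trip into cti_optimize once it crosses zero, and test the watchdog's timer-fired flag. A scalar model of the counter logic (threshold invented; the counter counts up from a negative value, so one sign test covers it, matching branchAdd32(PositiveOrZero, ...)):

#include <cstdint>

struct CodeBlockModel {
    int32_t executeCounter = -1000; // hypothetical threshold
};

bool loopHintTakesSlowPath(CodeBlockModel& cb, int32_t increment, bool watchdogFired) {
    cb.executeCounter += increment;
    if (cb.executeCounter >= 0)
        return true;  // hot enough: ask the optimizing JIT to take over
    if (watchdogFired)
        return true;  // service the watchdog timer
    return false;     // stay in baseline code
}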
void JIT::emit_resolve_operations(ResolveOperations* resolveOperations, const int* baseVR, const int* valueVR)
{
@@ -1621,7 +1338,7 @@ void JIT::emit_resolve_operations(ResolveOperations* resolveOperations, const in
emitStoreCell(*baseVR, value);
return;
case ResolveOperation::SetBaseToGlobal:
- ASSERT(baseVR);
+ RELEASE_ASSERT(baseVR);
setBase = true;
move(TrustedImmPtr(globalObject), scratch);
emitStoreCell(*baseVR, scratch);
@@ -1629,7 +1346,7 @@ void JIT::emit_resolve_operations(ResolveOperations* resolveOperations, const in
++pc;
break;
case ResolveOperation::SetBaseToUndefined: {
- ASSERT(baseVR);
+ RELEASE_ASSERT(baseVR);
setBase = true;
#if USE(JSVALUE64)
move(TrustedImm64(JSValue::encode(jsUndefined())), scratch);
@@ -1642,7 +1359,7 @@ void JIT::emit_resolve_operations(ResolveOperations* resolveOperations, const in
break;
}
case ResolveOperation::SetBaseToScope:
- ASSERT(baseVR);
+ RELEASE_ASSERT(baseVR);
setBase = true;
emitStoreCell(*baseVR, scope);
resolvingBase = false;
@@ -1650,7 +1367,7 @@ void JIT::emit_resolve_operations(ResolveOperations* resolveOperations, const in
break;
case ResolveOperation::ReturnScopeAsBase:
emitStoreCell(*baseVR, scope);
- ASSERT(value == regT0);
+ RELEASE_ASSERT(value == regT0);
move(scope, value);
#if USE(JSVALUE32_64)
move(TrustedImm32(JSValue::CellTag), valueTag);
@@ -1700,7 +1417,7 @@ void JIT::emit_resolve_operations(ResolveOperations* resolveOperations, const in
if (baseVR && !setBase)
emitStoreCell(*baseVR, scope);
- ASSERT(valueVR);
+ RELEASE_ASSERT(valueVR);
ResolveOperation* resolveValueOperation = pc;
switch (resolveValueOperation->m_operation) {
case ResolveOperation::GetAndReturnGlobalProperty: {
@@ -1821,42 +1538,42 @@ void JIT::emitSlow_link_resolve_operations(ResolveOperations* resolveOperations,
void JIT::emit_op_resolve(Instruction* currentInstruction)
{
- ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[3].u.operand);
+ ResolveOperations* operations = currentInstruction[3].u.resolveOperations;
int dst = currentInstruction[1].u.operand;
emit_resolve_operations(operations, 0, &dst);
}
void JIT::emitSlow_op_resolve(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[3].u.operand);
+ ResolveOperations* operations = currentInstruction[3].u.resolveOperations;
emitSlow_link_resolve_operations(operations, iter);
JITStubCall stubCall(this, cti_op_resolve);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->resolveOperations(currentInstruction[3].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.resolveOperations));
stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
}
void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
- ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
int dst = currentInstruction[1].u.operand;
emit_resolve_operations(operations, &dst, 0);
}
void JIT::emitSlow_op_resolve_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
emitSlow_link_resolve_operations(operations, iter);
JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->resolveOperations(currentInstruction[4].u.operand)));
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->putToBaseOperation(currentInstruction[5].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.resolveOperations));
+ stubCall.addArgument(TrustedImmPtr(currentInstruction[5].u.putToBaseOperation));
stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
}
void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
- ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
int base = currentInstruction[1].u.operand;
int value = currentInstruction[2].u.operand;
emit_resolve_operations(operations, &base, &value);
@@ -1864,19 +1581,19 @@ void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
void JIT::emitSlow_op_resolve_with_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
emitSlow_link_resolve_operations(operations, iter);
JITStubCall stubCall(this, cti_op_resolve_with_base);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->resolveOperations(currentInstruction[4].u.operand)));
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->putToBaseOperation(currentInstruction[5].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.resolveOperations));
+ stubCall.addArgument(TrustedImmPtr(currentInstruction[5].u.putToBaseOperation));
stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
}
void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
{
- ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
int base = currentInstruction[1].u.operand;
int value = currentInstruction[2].u.operand;
emit_resolve_operations(operations, &base, &value);
@@ -1884,12 +1601,12 @@ void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
void JIT::emitSlow_op_resolve_with_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
emitSlow_link_resolve_operations(operations, iter);
JITStubCall stubCall(this, cti_op_resolve_with_this);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->resolveOperations(currentInstruction[4].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.resolveOperations));
stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
}
@@ -1898,13 +1615,14 @@ void JIT::emitSlow_op_put_to_base(Instruction* currentInstruction, Vector<SlowCa
int base = currentInstruction[1].u.operand;
int id = currentInstruction[2].u.operand;
int value = currentInstruction[3].u.operand;
- int operation = currentInstruction[4].u.operand;
- PutToBaseOperation* putToBaseOperation = m_codeBlock->putToBaseOperation(currentInstruction[4].u.operand);
+ PutToBaseOperation* putToBaseOperation = currentInstruction[4].u.putToBaseOperation;
switch (putToBaseOperation->m_kind) {
case PutToBaseOperation::VariablePut:
return;
+ case PutToBaseOperation::GlobalVariablePutChecked:
+ linkSlowCase(iter);
case PutToBaseOperation::GlobalVariablePut:
if (!putToBaseOperation->m_isDynamic)
return;
@@ -1916,7 +1634,6 @@ void JIT::emitSlow_op_put_to_base(Instruction* currentInstruction, Vector<SlowCa
case PutToBaseOperation::Generic:
return;
- case PutToBaseOperation::GlobalVariablePutChecked:
case PutToBaseOperation::GlobalPropertyPut:
linkSlowCase(iter);
break;
@@ -1928,7 +1645,7 @@ void JIT::emitSlow_op_put_to_base(Instruction* currentInstruction, Vector<SlowCa
stubCall.addArgument(TrustedImm32(base));
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
stubCall.addArgument(TrustedImm32(value));
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->putToBaseOperation(operation)));
+ stubCall.addArgument(TrustedImmPtr(putToBaseOperation));
stubCall.call();
}
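One subtlety in the slow path above: GlobalVariablePutChecked now links its own slow case and then deliberately falls through into GlobalVariablePut, so both share the is-dynamic filter. Modeled as a switch with hypothetical inputs (the fallthrough is the point):

enum class PutKind { VariablePut, GlobalVariablePutChecked, GlobalVariablePut,
                     Generic, GlobalPropertyPut };

bool needsStubCall(PutKind kind, bool isDynamic, bool checkFailed) {
    switch (kind) {
    case PutKind::VariablePut:
        return false;
    case PutKind::GlobalVariablePutChecked:
        if (checkFailed)
            return true;      // the slow case linked above
        [[fallthrough]];      // shares the filter below
    case PutKind::GlobalVariablePut:
        return isDynamic;     // only dynamic puts reach the stub
    case PutKind::Generic:
        return false;
    case PutKind::GlobalPropertyPut:
        return true;
    }
    return true; // unreachable; keeps compilers quiet
}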
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index 13daf962a..4836a66b5 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012 Apple Inc. All rights reserved.
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -41,330 +41,12 @@
namespace JSC {
-PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGlobalData* globalData, TrampolineStructure *trampolines)
-{
- // (1) This function provides fast property access for string length
- Label stringLengthBegin = align();
-
- // regT0 holds payload, regT1 holds tag
-
- Jump string_failureCases1 = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(globalData->stringStructure.get()));
-
- // Checks out okay! Get the length from the UString.
- load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT2);
-
- Jump string_failureCases3 = branch32(Above, regT2, TrustedImm32(INT_MAX));
- move(regT2, regT0);
- move(TrustedImm32(JSValue::Int32Tag), regT1);
-
- ret();
-
- JumpList callSlowCase;
- JumpList constructSlowCase;
-
- // VirtualCallLink Trampoline
- // regT1, regT0 holds callee; callFrame is moved and partially initialized.
- Label virtualCallLinkBegin = align();
- callSlowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
- callSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
-
- // Finish canonical initialization before JS function call.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- // Also initialize ReturnPC for use by lazy linking and exceptions.
- preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
-
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
- restoreArgumentReference();
- Call callLazyLinkCall = call();
- restoreReturnAddressBeforeReturn(regT3);
- jump(regT0);
-
- // VirtualConstructLink Trampoline
- // regT1, regT0 holds callee; callFrame is moved and partially initialized.
- Label virtualConstructLinkBegin = align();
- constructSlowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
- constructSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
-
- // Finish canonical initialization before JS function call.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- // Also initialize ReturnPC for use by lazy linking and exceptions.
- preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
-
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
- restoreArgumentReference();
- Call callLazyLinkConstruct = call();
- restoreReturnAddressBeforeReturn(regT3);
- jump(regT0);
-
- // VirtualCall Trampoline
- // regT1, regT0 holds callee; regT2 will hold the FunctionExecutable.
- Label virtualCallBegin = align();
- callSlowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
- callSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
-
- // Finish canonical initialization before JS function call.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- Jump hasCodeBlock1 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), TrustedImm32(0));
- preserveReturnAddressAfterCall(regT3);
-
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
- restoreArgumentReference();
- Call callCompileCall = call();
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- hasCodeBlock1.link(this);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCallWithArityCheck)), regT0);
- jump(regT0);
-
- // VirtualConstruct Trampoline
- // regT1, regT0 holds callee; regT2 will hold the FunctionExecutable.
- Label virtualConstructBegin = align();
- constructSlowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
- constructSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
-
- // Finish canonical initialization before JS function call.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- Jump hasCodeBlock2 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), TrustedImm32(0));
- preserveReturnAddressAfterCall(regT3);
-
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
- restoreArgumentReference();
- Call callCompileConstruct = call();
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- hasCodeBlock2.link(this);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
- jump(regT0);
-
- callSlowCase.link(this);
- // Finish canonical initialization before JS function call.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2, regT2);
- emitPutCellToCallFrameHeader(regT2, JSStack::ScopeChain);
-
- // Also initialize ReturnPC and CodeBlock, like a JS function would.
- preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
- emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
-
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
- restoreArgumentReference();
- Call callCallNotJSFunction = call();
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
- restoreReturnAddressBeforeReturn(regT3);
- ret();
-
- constructSlowCase.link(this);
- // Finish canonical initialization before JS function call.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2, regT2);
- emitPutCellToCallFrameHeader(regT2, JSStack::ScopeChain);
-
- // Also initialize ReturnPC and CodeBlock, like a JS function would.
- preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
- emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
-
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
- restoreArgumentReference();
- Call callConstructNotJSFunction = call();
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
- restoreReturnAddressBeforeReturn(regT3);
- ret();
-
- // NativeCall Trampoline
- Label nativeCallThunk = privateCompileCTINativeCall(globalData);
- Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);
-
- Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
- Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
- Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
-
- // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(*m_globalData, this, GLOBAL_THUNK_ID);
-
- patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
- patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
- patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
- patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));
- patchBuffer.link(callCallNotJSFunction, FunctionPtr(cti_op_call_NotJSFunction));
- patchBuffer.link(callConstructNotJSFunction, FunctionPtr(cti_op_construct_NotJSConstruct));
-
- CodeRef finalCode = FINALIZE_CODE(patchBuffer, ("JIT CTI machine trampolines"));
- RefPtr<ExecutableMemoryHandle> executableMemory = finalCode.executableMemory();
-
- trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
- trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
- trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
- trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
- trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
- trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
- trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
-
- return executableMemory.release();
-}
-
-JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
-{
- int executableOffsetToFunction = isConstruct ? OBJECT_OFFSETOF(NativeExecutable, m_constructor) : OBJECT_OFFSETOF(NativeExecutable, m_function);
-
- Label nativeCallThunk = align();
-
- emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
-
-#if CPU(X86)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- peek(regT1);
- emitPutToCallFrameHeader(regT1, JSStack::ReturnPC);
-
- // Calling convention: f(ecx, edx, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, X86Registers::ecx);
-
- subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
-
- // call the function
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT1);
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT1);
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- call(Address(regT1, executableOffsetToFunction));
-
- addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
-
-#elif CPU(ARM)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
-
- // Calling convention: f(r0 == regT0, r1 == regT1, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, ARMRegisters::r0);
-
- // call the function
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
- move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- call(Address(regT2, executableOffsetToFunction));
-
- restoreReturnAddressBeforeReturn(regT3);
-#elif CPU(SH4)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
-
- // Calling convention: f(r0 == regT4, r1 == regT5, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, regT4);
-
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT5);
- move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- loadPtr(Address(regT5, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- call(Address(regT2, executableOffsetToFunction), regT0);
- restoreReturnAddressBeforeReturn(regT3);
-#elif CPU(MIPS)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
-
- // Calling convention: f(a0, a1, a2, a3);
- // Host function signature: f(ExecState*);
-
- // Allocate stack space for 16 bytes (8-byte aligned)
- // 16 bytes (unused) for 4 arguments
- subPtr(TrustedImm32(16), stackPointerRegister);
-
- // Setup arg0
- move(callFrameRegister, MIPSRegisters::a0);
-
- // Call
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
- loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- call(Address(regT2, executableOffsetToFunction));
-
- // Restore stack space
- addPtr(TrustedImm32(16), stackPointerRegister);
-
- restoreReturnAddressBeforeReturn(regT3);
-
-#else
-#error "JIT not supported on this platform."
- UNUSED_PARAM(executableOffsetToFunction);
- breakpoint();
-#endif // CPU(X86)
-
- // Check for an exception
- Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
-
- // Return.
- ret();
-
- // Handle an exception
- sawException.link(this);
-
- // Grab the return address.
- preserveReturnAddressAfterCall(regT1);
-
- move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
- // Set the return address.
- move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
- restoreReturnAddressBeforeReturn(regT1);
-
- ret();
-
- return nativeCallThunk;
-}
-
-JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFunction func)
+JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
{
Call nativeCall;
emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
+ storePtr(callFrameRegister, &m_vm->topCallFrame);
#if CPU(X86)
// Load caller frame's scope chain into this callframe so that whatever we call can
@@ -472,7 +154,7 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFu
#endif // CPU(X86)
// Check for an exception
- Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+ Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&vm->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
// Return.
ret();
@@ -483,11 +165,11 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFu
// Grab the return address.
preserveReturnAddressAfterCall(regT1);
- move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
+ move(TrustedImmPtr(&vm->exceptionLocation), regT2);
storePtr(regT1, regT2);
poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
- storePtr(callFrameRegister, &m_globalData->topCallFrame);
+ storePtr(callFrameRegister, &m_vm->topCallFrame);
// Set the return address.
move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
restoreReturnAddressBeforeReturn(regT1);
@@ -495,7 +177,7 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFu
ret();
// All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(*m_globalData, this, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*m_vm, this, GLOBAL_THUNK_ID);
patchBuffer.link(nativeCall, FunctionPtr(func));
return FINALIZE_CODE(patchBuffer, ("JIT CTI native call"));
@@ -531,15 +213,25 @@ void JIT::emit_op_jmp(Instruction* currentInstruction)
void JIT::emit_op_new_object(Instruction* currentInstruction)
{
- emitAllocateJSFinalObject(TrustedImmPtr(m_codeBlock->globalObject()->emptyObjectStructure()), regT0, regT1);
-
- emitStoreCell(currentInstruction[1].u.operand, regT0);
+ Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
+ size_t allocationSize = JSObject::allocationSize(structure->inlineCapacity());
+ MarkedAllocator* allocator = &m_vm->heap.allocatorForObjectWithoutDestructor(allocationSize);
+
+ RegisterID resultReg = regT0;
+ RegisterID allocatorReg = regT1;
+ RegisterID scratchReg = regT2;
+
+ move(TrustedImmPtr(allocator), allocatorReg);
+ emitAllocateJSObject(allocatorReg, TrustedImmPtr(structure), resultReg, scratchReg);
+ emitStoreCell(currentInstruction[1].u.operand, resultReg);
}
void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
+ JITStubCall stubCall(this, cti_op_new_object);
+ stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.objectAllocationProfile->structure()));
+ stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
@@ -727,7 +419,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
Jump isImm = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get())));
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
isImm.link(this);
if (dst != src)
@@ -754,14 +446,6 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_ensure_property_exists);
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_not(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
@@ -951,8 +635,8 @@ void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>:
genericCase.append(getSlowCase(iter)); // tags not equal
linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get())));
- genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
// String case.
JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
@@ -999,8 +683,8 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>
genericCase.append(getSlowCase(iter)); // tags not equal
linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get())));
- genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
// String case.
JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
@@ -1036,8 +720,8 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
// Jump to a slow case if both are strings.
Jump notCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- Jump firstNotString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get()));
- addSlowCase(branchPtr(Equal, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get())));
+ Jump firstNotString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
+ addSlowCase(branchPtr(Equal, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
notCell.link(this);
firstNotString.link(this);
@@ -1100,8 +784,8 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT1);
Jump wasNotMasqueradesAsUndefined = jump();
@@ -1131,8 +815,8 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(1), regT1);
Jump wasNotMasqueradesAsUndefined = jump();
@@ -1287,7 +971,7 @@ void JIT::emit_op_pop_scope(Instruction*)
JITStubCall(this, cti_op_pop_scope).call();
}
-void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+void JIT::emit_op_to_number(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int src = currentInstruction[2].u.operand;
@@ -1295,21 +979,21 @@ void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
Jump isInt32 = branch32(Equal, regT1, TrustedImm32(JSValue::Int32Tag));
- addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+ addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
isInt32.link(this);
if (src != dst)
emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_to_number), dst, regT1, regT0);
}
-void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_to_jsnumber);
+ JITStubCall stubCall(this, cti_op_to_number);
stubCall.addArgument(regT1, regT0);
stubCall.call(dst);
}
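The one-character fix in the fast path matters: under JSVALUE32_64 a value is numeric when its tag equals Int32Tag or sits below LowestTag (all double tags do), so the slow-case comparison must use LowestTag, not EmptyValueTag. A compact model with invented tag values that only preserve the ordering:

#include <cstdint>

// Invented values mirroring the JSVALUE32_64 ordering: double tags < LowestTag.
constexpr uint32_t LowestTag = 0xFFFFFFF9;
constexpr uint32_t Int32Tag  = 0xFFFFFFFF;

bool isNumberTag(uint32_t tag) {
    // Fast path of op_to_number: int32, or anything below LowestTag (a double).
    // The slow case is the complement: tag >= LowestTag and not Int32Tag.
    return tag == Int32Tag || tag < LowestTag;
}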
@@ -1329,25 +1013,17 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
move(regT0, callFrameRegister);
// Now store the exception returned by cti_op_throw.
- loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(struct JITStackFrame, globalData)), regT3);
- load32(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
- load32(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
- store32(TrustedImm32(JSValue().payload()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- store32(TrustedImm32(JSValue().tag()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(struct JITStackFrame, vm)), regT3);
+ load32(Address(regT3, OBJECT_OFFSETOF(VM, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(regT3, OBJECT_OFFSETOF(VM, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+ store32(TrustedImm32(JSValue().payload()), Address(regT3, OBJECT_OFFSETOF(VM, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(TrustedImm32(JSValue().tag()), Address(regT3, OBJECT_OFFSETOF(VM, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
unsigned exception = currentInstruction[1].u.operand;
emitStore(exception, regT1, regT0);
map(m_bytecodeOffset + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
}
-void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_jmp_scopes);
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.call();
- addJump(jump(), currentInstruction[2].u.operand);
-}
-
void JIT::emit_op_switch_imm(Instruction* currentInstruction)
{
unsigned tableIndex = currentInstruction[1].u.operand;
@@ -1429,7 +1105,7 @@ void JIT::emit_op_debug(Instruction* currentInstruction)
void JIT::emit_op_enter(Instruction*)
{
- emitOptimizationCheck(EnterOptimizationCheck);
+ emitEnterOptimizationCheck();
// Even though JIT code doesn't use them, we initialize our constant
// registers to zap stale pointers, to avoid unnecessarily prolonging
@@ -1479,22 +1155,28 @@ void JIT::emit_op_get_callee(Instruction* currentInstruction)
void JIT::emit_op_create_this(Instruction* currentInstruction)
{
int callee = currentInstruction[2].u.operand;
- emitLoadPayload(callee, regT0);
- loadPtr(Address(regT0, JSFunction::offsetOfCachedInheritorID()), regT2);
- addSlowCase(branchTestPtr(Zero, regT2));
-
- // now regT2 contains the inheritorID, which is the structure that the newly
- // allocated object will have.
-
- emitAllocateJSFinalObject(regT2, regT0, regT1);
- emitStoreCell(currentInstruction[1].u.operand, regT0);
+ RegisterID calleeReg = regT0;
+ RegisterID resultReg = regT0;
+ RegisterID allocatorReg = regT1;
+ RegisterID structureReg = regT2;
+ RegisterID scratchReg = regT3;
+
+ emitLoadPayload(callee, calleeReg);
+ loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+ loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+ addSlowCase(branchTestPtr(Zero, allocatorReg));
+
+ emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
+ emitStoreCell(currentInstruction[1].u.operand, resultReg);
}
void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- linkSlowCase(iter); // doesn't have an inheritor ID
+ linkSlowCase(iter); // doesn't have an allocation profile
linkSlowCase(iter); // allocation failed
+
JITStubCall stubCall(this, cti_op_create_this);
+ stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1510,7 +1192,7 @@ void JIT::emit_op_convert_this(Instruction* currentInstruction)
move(regT3, regT1);
emitValueProfilingSite();
}
- addSlowCase(branchPtr(Equal, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get())));
+ addSlowCase(branchPtr(Equal, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
}
void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1533,7 +1215,7 @@ void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowC
linkSlowCase(iter);
if (shouldEmitProfiling()) {
move(TrustedImm32(JSValue::CellTag), regT1);
- move(TrustedImmPtr(m_globalData->stringStructure.get()), regT0);
+ move(TrustedImmPtr(m_vm->stringStructure.get()), regT0);
}
isNotUndefined.link(this);
emitValueProfilingSite();
@@ -1627,7 +1309,7 @@ void JIT::emit_op_put_to_base(Instruction* currentInstruction)
int id = currentInstruction[2].u.operand;
int value = currentInstruction[3].u.operand;
- PutToBaseOperation* operation = m_codeBlock->putToBaseOperation(currentInstruction[4].u.operand);
+ PutToBaseOperation* operation = currentInstruction[4].u.putToBaseOperation;
switch (operation->m_kind) {
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index e377c8adb..2d1b2929d 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -53,11 +53,11 @@ using namespace std;
namespace JSC {
#if USE(JSVALUE64)
-JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
+JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
JSInterfaceJIT jit;
JumpList failures;
- failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(globalData->stringStructure.get())));
+ failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(vm->stringStructure.get())));
// Load string length to regT2, and start the process of loading the data pointer into regT0
jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
@@ -71,9 +71,9 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
JumpList is16Bit;
JumpList cont8Bit;
// Load the string flags
- jit.loadPtr(Address(regT0, ThunkHelpers::stringImplFlagsOffset()), regT2);
- jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
- is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
+ jit.loadPtr(Address(regT0, StringImpl::flagsOffset()), regT2);
+ jit.loadPtr(Address(regT0, StringImpl::dataOffset()), regT0);
+ is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(StringImpl::flagIs8Bit())));
jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
cont8Bit.append(jit.jump());
is16Bit.link(&jit);
@@ -81,7 +81,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
cont8Bit.link(&jit);
failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
- jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
+ jit.move(TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), regT1);
jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
jit.ret();
@@ -89,7 +89,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
jit.move(TrustedImm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}
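The stub's shape is easy to model: test the is-8-bit flag, load a character of the right width, and for characters below 0x100 return the VM's cached single-character string instead of allocating; anything else falls back to the generic path. A sketch with invented types (the real stub reads StringImpl::flagsOffset() and dataOffset() directly):

#include <cstdint>

struct JSString; // opaque here

struct SmallStringsModel {
    JSString* singleCharacterStrings[0x100] = {}; // cached 1-char strings
};

JSString* stringCharAt(bool is8Bit, const void* data, uint32_t index,
                       SmallStringsModel& smallStrings) {
    uint32_t ch = is8Bit
        ? static_cast<const uint8_t*>(data)[index]    // load8
        : static_cast<const uint16_t*>(data)[index];  // load16
    if (ch >= 0x100)
        return nullptr; // fail into the generic get_by_val slow path
    return smallStrings.singleCharacterStrings[ch];
}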
@@ -204,13 +204,14 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
unsigned dst = currentInstruction[1].u.operand;
unsigned base = currentInstruction[2].u.operand;
unsigned property = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
Jump nonCell = jump();
linkSlowCase(iter); // base array check
- Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get()));
- emitNakedCall(CodeLocationLabel(m_globalData->getCTIStub(stringGetByValStubGenerator).code()));
+ Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
+ emitNakedCall(CodeLocationLabel(m_vm->getCTIStub(stringGetByValStubGenerator).code()));
Jump failed = branchTest64(Zero, regT0);
emitPutVirtualRegister(dst, regT0);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
@@ -218,9 +219,15 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
notString.link(this);
nonCell.link(this);
+ Jump skipProfiling = jump();
+
linkSlowCase(iter); // vector length check
linkSlowCase(iter); // empty value
+ emitArrayProfileOutOfBoundsSpecialCase(profile);
+
+ skipProfiling.link(this);
+
Label slowPath = label();
JITStubCall stubCall(this, cti_op_get_by_val);
@@ -451,7 +458,6 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
linkSlowCase(iter); // base not array check
- linkSlowCase(iter); // out of bounds
JITArrayMode mode = chooseArrayMode(profile);
switch (mode) {
@@ -463,6 +469,11 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
break;
}
+ Jump skipProfiling = jump();
+ linkSlowCase(iter); // out of bounds
+ emitArrayProfileOutOfBoundsSpecialCase(profile);
+ skipProfiling.link(this);
+
Label slowPath = label();
JITStubCall stubPutByValCall(this, cti_op_put_by_val);
@@ -524,7 +535,7 @@ void JIT::compileGetByIdHotPath(int baseVReg, Identifier* ident)
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
- if (*ident == m_globalData->propertyNames->length && canBeOptimized()) {
+ if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
emitArrayProfilingSiteForBytecodeIndex(regT1, regT2, m_bytecodeOffset);
}
@@ -694,7 +705,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
// If we succeed in all of our checks, and the code was optimizable, then make sure we
// decrement the rare case counter.
#if ENABLE(VALUE_PROFILER)
- if (m_codeBlock->canCompileWithDFG() >= DFG::ShouldProfile) {
+ if (m_codeBlock->canCompileWithDFG() >= DFG::MayInline) {
sub32(
TrustedImm32(1),
AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
@@ -735,7 +746,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
restoreArgumentReferenceForTrampoline();
Call failureCall = tailRecursiveCall();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
@@ -749,7 +760,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
patchBuffer,
("Baseline put_by_id transition for %s, return point %p",
toCString(*m_codeBlock).data(), returnAddress.value())),
- *m_globalData,
+ *m_vm,
m_codeBlock->ownerExecutable(),
willNeedStorageRealloc,
newStructure);
@@ -802,7 +813,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
emitFastArithIntToImmNoCheck(regT2, regT0);
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
@@ -864,7 +875,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
} else
compileGetDirectOffset(protoObject, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
@@ -888,7 +899,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
("Baseline JIT get_by_id proto stub for %s, return point %p",
toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_globalData,
+ *m_vm,
m_codeBlock->ownerExecutable(),
needsStubLink);
@@ -928,7 +939,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
}
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -953,11 +964,11 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
("Baseline JIT get_by_id list stub for %s, return point %p",
toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_globalData,
+ *m_vm,
m_codeBlock->ownerExecutable(),
needsStubLink);
- polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, isDirect);
+ polymorphicStructures->list[currentIndex].set(*m_vm, m_codeBlock->ownerExecutable(), stubCode, structure, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
@@ -1003,7 +1014,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -1027,10 +1038,10 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
("Baseline JIT get_by_id proto list stub for %s, return point %p",
toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_globalData,
+ *m_vm,
m_codeBlock->ownerExecutable(),
needsStubLink);
- prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure, isDirect);
+ prototypeStructures->list[currentIndex].set(*m_vm, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
@@ -1081,7 +1092,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
}
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -1104,12 +1115,12 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
("Baseline JIT get_by_id chain list stub for %s, return point %p",
toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_globalData,
+ *m_vm,
m_codeBlock->ownerExecutable(),
needsStubLink);
// Track the stub we have created so that it will be deleted later.
- prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
+ prototypeStructures->list[currentIndex].set(callFrame->vm(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
@@ -1157,7 +1168,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
compileGetDirectOffset(protoObject, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -1179,7 +1190,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
("Baseline JIT get_by_id chain stub for %s, return point %p",
toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_globalData,
+ *m_vm,
m_codeBlock->ownerExecutable(),
needsStubLink);
stubInfo->stubRoutine = stubRoutine;
@@ -1193,6 +1204,54 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[3].u.operand;
+
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT0);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+ loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+
+ loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
+ loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
+ emitValueProfilingSite();
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+ loadPtr(Address(regT1, JSScope::offsetOfNext()), regT1);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT1, JSScope::offsetOfNext()), regT1);
+
+ emitWriteBarrier(regT1, regT0, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+
+ loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
+ storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
+}
+
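
The two new opcodes walk the scope chain at JIT speed. In plain C++ terms the emitted code computes roughly the following (accessor names are assumed to match the offsets used above; the top-level activation check is elided):

    JSScope* scope = callFrame->scope();            // the JSStack::ScopeChain header slot
    while (skip--)
        scope = scope->next();                      // JSScope::offsetOfNext()
    Register* registers =
        jsCast<JSVariableObject*>(scope)->registers(); // JSVariableObject::offsetOfRegisters()
    JSValue result = registers[index].jsValue();    // put_scoped_var stores here instead,
                                                    // after emitting the write barrier
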
void JIT::emit_op_init_global_const(Instruction* currentInstruction)
{
JSGlobalObject* globalObject = m_codeBlock->globalObject();
@@ -1230,7 +1289,7 @@ void JIT::emitSlow_op_init_global_const_check(Instruction* currentInstruction, V
void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
{
repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(-1));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(unusedPointer));
repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), 0);
repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck), stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
}
@@ -1241,7 +1300,7 @@ void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stu
repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
else
repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(-1));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(unusedPointer));
repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), 0);
}
@@ -1261,26 +1320,6 @@ void JIT::emitWriteBarrier(RegisterID owner, RegisterID value, RegisterID scratc
#if ENABLE(WRITE_BARRIER_PROFILING)
emitCount(WriteBarrierCounters::jitCounterFor(useKind));
#endif
-
-#if ENABLE(GGC)
- Jump filterCells;
- if (mode == ShouldFilterImmediates)
- filterCells = emitJumpIfNotJSCell(value);
- move(owner, scratch);
- andPtr(TrustedImm32(static_cast<int32_t>(MarkedBlock::blockMask)), scratch);
- move(owner, scratch2);
- // consume additional 8 bits as we're using an approximate filter
- rshift32(TrustedImm32(MarkedBlock::atomShift + 8), scratch2);
- andPtr(TrustedImm32(MarkedBlock::atomMask >> 8), scratch2);
- Jump filter = branchTest8(Zero, BaseIndex(scratch, scratch2, TimesOne, MarkedBlock::offsetOfMarks()));
- move(owner, scratch2);
- rshift32(TrustedImm32(MarkedBlock::cardShift), scratch2);
- andPtr(TrustedImm32(MarkedBlock::cardMask), scratch2);
- store8(TrustedImm32(1), BaseIndex(scratch, scratch2, TimesOne, MarkedBlock::offsetOfCards()));
- filter.link(this);
- if (mode == ShouldFilterImmediates)
- filterCells.link(this);
-#endif
}
void JIT::emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode mode, WriteBarrierUseKind useKind)
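
The deleted blocks were the generational-GC card-marking fast path; with GGC disabled they compiled to nothing, so removing them drops only dead #if code. For reference, the constant-owner variant reduced to a two-instruction dirty-card store, roughly:

    // sketch of the removed GGC barrier, using the helpers from the deleted lines
    uint8_t* card = Heap::addressOfCardFor(owner); // card covering the owner cell
    *card = 1;                                     // dirty: the next minor GC must rescan it
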
@@ -1294,17 +1333,6 @@ void JIT::emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch,
#if ENABLE(WRITE_BARRIER_PROFILING)
emitCount(WriteBarrierCounters::jitCounterFor(useKind));
#endif
-
-#if ENABLE(GGC)
- Jump filterCells;
- if (mode == ShouldFilterImmediates)
- filterCells = emitJumpIfNotJSCell(value);
- uint8_t* cardAddress = Heap::addressOfCardFor(owner);
- move(TrustedImmPtr(cardAddress), scratch);
- store8(TrustedImm32(1), Address(scratch));
- if (mode == ShouldFilterImmediates)
- filterCells.link(this);
-#endif
}
JIT::Jump JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, RegisterID scratch)
@@ -1363,7 +1391,7 @@ bool JIT::isDirectPutById(StructureStubInfo* stubInfo)
return false;
}
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
return false;
}
}
@@ -1389,31 +1417,31 @@ void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
break;
case JITInt8Array:
- slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->int8ArrayDescriptor(), 1, SignedTypedArray);
+ slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int8ArrayDescriptor(), 1, SignedTypedArray);
break;
case JITInt16Array:
- slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->int16ArrayDescriptor(), 2, SignedTypedArray);
+ slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int16ArrayDescriptor(), 2, SignedTypedArray);
break;
case JITInt32Array:
- slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->int32ArrayDescriptor(), 4, SignedTypedArray);
+ slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int32ArrayDescriptor(), 4, SignedTypedArray);
break;
case JITUint8Array:
- slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->uint8ArrayDescriptor(), 1, UnsignedTypedArray);
+ slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint8ArrayDescriptor(), 1, UnsignedTypedArray);
break;
case JITUint8ClampedArray:
- slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->uint8ClampedArrayDescriptor(), 1, UnsignedTypedArray);
+ slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint8ClampedArrayDescriptor(), 1, UnsignedTypedArray);
break;
case JITUint16Array:
- slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->uint16ArrayDescriptor(), 2, UnsignedTypedArray);
+ slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint16ArrayDescriptor(), 2, UnsignedTypedArray);
break;
case JITUint32Array:
- slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->uint32ArrayDescriptor(), 4, UnsignedTypedArray);
+ slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint32ArrayDescriptor(), 4, UnsignedTypedArray);
break;
case JITFloat32Array:
- slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, m_globalData->float32ArrayDescriptor(), 4);
+ slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, m_vm->float32ArrayDescriptor(), 4);
break;
case JITFloat64Array:
- slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, m_globalData->float64ArrayDescriptor(), 8);
+ slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, m_vm->float64ArrayDescriptor(), 8);
break;
default:
CRASH();
@@ -1421,7 +1449,7 @@ void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
Jump done = jump();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
@@ -1458,31 +1486,31 @@ void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
break;
case JITInt8Array:
- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_globalData->int8ArrayDescriptor(), 1, SignedTypedArray, TruncateRounding);
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int8ArrayDescriptor(), 1, SignedTypedArray, TruncateRounding);
break;
case JITInt16Array:
- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_globalData->int16ArrayDescriptor(), 2, SignedTypedArray, TruncateRounding);
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int16ArrayDescriptor(), 2, SignedTypedArray, TruncateRounding);
break;
case JITInt32Array:
- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_globalData->int32ArrayDescriptor(), 4, SignedTypedArray, TruncateRounding);
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int32ArrayDescriptor(), 4, SignedTypedArray, TruncateRounding);
break;
case JITUint8Array:
- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_globalData->uint8ArrayDescriptor(), 1, UnsignedTypedArray, TruncateRounding);
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint8ArrayDescriptor(), 1, UnsignedTypedArray, TruncateRounding);
break;
case JITUint8ClampedArray:
- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_globalData->uint8ClampedArrayDescriptor(), 1, UnsignedTypedArray, ClampRounding);
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint8ClampedArrayDescriptor(), 1, UnsignedTypedArray, ClampRounding);
break;
case JITUint16Array:
- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_globalData->uint16ArrayDescriptor(), 2, UnsignedTypedArray, TruncateRounding);
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint16ArrayDescriptor(), 2, UnsignedTypedArray, TruncateRounding);
break;
case JITUint32Array:
- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_globalData->uint32ArrayDescriptor(), 4, UnsignedTypedArray, TruncateRounding);
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint32ArrayDescriptor(), 4, UnsignedTypedArray, TruncateRounding);
break;
case JITFloat32Array:
- slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, m_globalData->float32ArrayDescriptor(), 4);
+ slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, m_vm->float32ArrayDescriptor(), 4);
break;
case JITFloat64Array:
- slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, m_globalData->float64ArrayDescriptor(), 8);
+ slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, m_vm->float64ArrayDescriptor(), 8);
break;
default:
CRASH();
@@ -1491,7 +1519,7 @@ void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
Jump done = jump();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
@@ -1608,16 +1636,17 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT
break;
case 8: {
loadDouble(BaseIndex(base, property, TimesEight), fpRegT0);
- Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0);
- static const double NaN = QNaN;
- loadDouble(&NaN, fpRegT0);
- notNaN.link(this);
break;
}
default:
CRASH();
}
+ Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0);
+ static const double NaN = QNaN;
+ loadDouble(&NaN, fpRegT0);
+ notNaN.link(this);
+
#if USE(JSVALUE64)
moveDoubleTo64(fpRegT0, resultPayload);
sub64(tagTypeNumberRegister, resultPayload);
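
Hoisting the NaN check out of the switch means both the 4-byte and 8-byte loads are purified, not just the 8-byte path. The self-comparison works because NaN is the only value unequal to itself, and collapsing to one canonical quiet NaN matters because JSVALUE64 hides pointers inside NaN payloads. In C++ terms:

    double value = loadFromTypedArray(); // hypothetical helper; either element width
    if (value != value)                  // only NaN fails this comparison
        value = QNaN;                    // canonical quiet NaN, safe to NaN-box
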
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index 391dd1d8c..1cc98ef66 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -92,11 +92,11 @@ void JIT::emit_op_del_by_id(Instruction* currentInstruction)
stubCall.call(dst);
}
-JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
+JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
JSInterfaceJIT jit;
JumpList failures;
- failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(globalData->stringStructure.get())));
+ failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(vm->stringStructure.get())));
// Load string length to regT1, and start the process of loading the data pointer into regT0
jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
@@ -110,9 +110,9 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
JumpList is16Bit;
JumpList cont8Bit;
// Load the string flags
- jit.loadPtr(Address(regT0, ThunkHelpers::stringImplFlagsOffset()), regT1);
- jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
- is16Bit.append(jit.branchTest32(Zero, regT1, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
+ jit.loadPtr(Address(regT0, StringImpl::flagsOffset()), regT1);
+ jit.loadPtr(Address(regT0, StringImpl::dataOffset()), regT0);
+ is16Bit.append(jit.branchTest32(Zero, regT1, TrustedImm32(StringImpl::flagIs8Bit())));
jit.load8(BaseIndex(regT0, regT2, TimesOne, 0), regT0);
cont8Bit.append(jit.jump());
is16Bit.link(&jit);
@@ -121,7 +121,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
cont8Bit.link(&jit);
failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
- jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
+ jit.move(TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), regT1);
jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
jit.move(TrustedImm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
jit.ret();
@@ -130,7 +130,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
jit.move(TrustedImm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}
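
The stub's fast path only handles character codes below 0x100 because those JSString cells are preallocated per-VM; anything else falls back to the C++ stub. A sketch of the logic, using the names from the stub above:

    if (c < 0x100)
        return vm->smallStrings.singleCharacterStrings()[c]; // interned, no GC allocation
    return nullptr; // null result tells the caller to take the cti_op_get_by_val slow path
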
@@ -242,24 +242,31 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
unsigned dst = currentInstruction[1].u.operand;
unsigned base = currentInstruction[2].u.operand;
unsigned property = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
Jump nonCell = jump();
linkSlowCase(iter); // base array check
- Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get()));
- emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator).code());
+ Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
+ emitNakedCall(m_vm->getCTIStub(stringGetByValStubGenerator).code());
Jump failed = branchTestPtr(Zero, regT0);
emitStore(dst, regT1, regT0);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
failed.link(this);
notString.link(this);
nonCell.link(this);
+
+ Jump skipProfiling = jump();
linkSlowCase(iter); // vector length check
linkSlowCase(iter); // empty value
+ emitArrayProfileOutOfBoundsSpecialCase(profile);
+
+ skipProfiling.link(this);
+
Label slowPath = label();
JITStubCall stubCall(this, cti_op_get_by_val);
@@ -420,7 +427,6 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
linkSlowCase(iter); // base not array check
- linkSlowCase(iter); // out of bounds
JITArrayMode mode = chooseArrayMode(profile);
switch (mode) {
@@ -432,6 +438,11 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
break;
}
+ Jump skipProfiling = jump();
+ linkSlowCase(iter); // out of bounds
+ emitArrayProfileOutOfBoundsSpecialCase(profile);
+ skipProfiling.link(this);
+
Label slowPath = label();
JITStubCall stubPutByValCall(this, cti_op_put_by_val);
@@ -466,7 +477,7 @@ void JIT::compileGetByIdHotPath(Identifier* ident)
// to array-length / prototype access trampolines, and finally we also set the property-map access offset as a label
// to jump back to if one of these trampolines finds a match.
- if (*ident == m_globalData->propertyNames->length && canBeOptimized()) {
+ if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
emitArrayProfilingSiteForBytecodeIndex(regT2, regT3, m_bytecodeOffset);
}
@@ -637,7 +648,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
// If we succeed in all of our checks, and the code was optimizable, then make sure we
// decrement the rare case counter.
#if ENABLE(VALUE_PROFILER)
- if (m_codeBlock->canCompileWithDFG() >= DFG::ShouldProfile) {
+ if (m_codeBlock->canCompileWithDFG() >= DFG::MayInline) {
sub32(
TrustedImm32(1),
AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
@@ -693,7 +704,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
restoreArgumentReferenceForTrampoline();
Call failureCall = tailRecursiveCall();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
@@ -707,7 +718,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
patchBuffer,
("Baseline put_by_id transition stub for %s, return point %p",
toCString(*m_codeBlock).data(), returnAddress.value())),
- *m_globalData,
+ *m_vm,
m_codeBlock->ownerExecutable(),
willNeedStorageRealloc,
newStructure);
@@ -765,7 +776,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
move(TrustedImm32(JSValue::Int32Tag), regT1);
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
@@ -828,7 +839,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
@@ -853,7 +864,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
("Baseline get_by_id proto stub for %s, return point %p",
toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_globalData,
+ *m_vm,
m_codeBlock->ownerExecutable(),
needsStubLink);
@@ -896,7 +907,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
@@ -919,11 +930,11 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
("Baseline get_by_id self list stub for %s, return point %p",
toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_globalData,
+ *m_vm,
m_codeBlock->ownerExecutable(),
needsStubLink);
- polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubRoutine, structure, isDirect);
+ polymorphicStructures->list[currentIndex].set(*m_vm, m_codeBlock->ownerExecutable(), stubRoutine, structure, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
@@ -970,7 +981,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
@@ -992,11 +1003,11 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
("Baseline get_by_id proto list stub for %s, return point %p",
toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_globalData,
+ *m_vm,
m_codeBlock->ownerExecutable(),
needsStubLink);
- prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, prototypeStructure, isDirect);
+ prototypeStructures->list[currentIndex].set(callFrame->vm(), m_codeBlock->ownerExecutable(), stubRoutine, structure, prototypeStructure, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
@@ -1049,7 +1060,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
@@ -1070,12 +1081,12 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
("Baseline get_by_id chain list stub for %s, return point %p",
toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_globalData,
+ *m_vm,
m_codeBlock->ownerExecutable(),
needsStubLink);
// Track the stub we have created so that it will be deleted later.
- prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
+ prototypeStructures->list[currentIndex].set(callFrame->vm(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
@@ -1124,7 +1135,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
@@ -1144,7 +1155,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
("Baseline get_by_id chain stub for %s, return point %p",
toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_globalData,
+ *m_vm,
m_codeBlock->ownerExecutable(),
needsStubLink);
stubInfo->stubRoutine = stubRoutine;
@@ -1233,6 +1244,59 @@ void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowC
stubCall.call(dst);
}
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int index = currentInstruction[2].u.operand;
+ int skip = currentInstruction[3].u.operand;
+
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
+ loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
+
+ loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
+
+ emitLoad(index, regT1, regT0, regT2);
+ emitValueProfilingSite();
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+ int index = currentInstruction[1].u.operand;
+ int skip = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad(value, regT1, regT0);
+
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
+ loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
+
+ loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT3);
+ emitStore(index, regT1, regT0, regT3);
+ emitWriteBarrier(regT2, regT1, regT0, regT1, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+}
+
void JIT::emit_op_init_global_const(Instruction* currentInstruction)
{
WriteBarrier<Unknown>* registerPointer = currentInstruction[1].u.registerPointer;
@@ -1287,7 +1351,7 @@ void JIT::emitSlow_op_init_global_const_check(Instruction* currentInstruction, V
void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
{
repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(-1));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(unusedPointer));
repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel1), 0);
repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel2), 0);
repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck), stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
@@ -1299,7 +1363,7 @@ void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stu
repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
else
repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(-1));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(unusedPointer));
repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel1), 0);
repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel2), 0);
}
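
Both reset paths now patch the structure check against a named sentinel instead of a bare -1. The constant lives in the new UnusedPointer.h; something along these lines, where the exact value is an assumption here:

    // UnusedPointer.h, in outline
    static const uintptr_t unusedPointer = 0xd1e7beef; // assumed value; any address that
                                                       // no live Structure can occupy works
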
diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.cpp b/Source/JavaScriptCore/jit/JITStubRoutine.cpp
index 121836ce9..28543a8b8 100644
--- a/Source/JavaScriptCore/jit/JITStubRoutine.cpp
+++ b/Source/JavaScriptCore/jit/JITStubRoutine.cpp
@@ -38,7 +38,7 @@ JITStubRoutine::~JITStubRoutine() { }
void JITStubRoutine::observeZeroRefCount()
{
- ASSERT(!m_refCount);
+ RELEASE_ASSERT(!m_refCount);
delete this;
}
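
Upgrading ASSERT to RELEASE_ASSERT keeps the check in release builds, which is the right trade immediately before a `delete this`: a deterministic crash beats a double-delete. A simplified sketch of WTF's macro:

    #define RELEASE_ASSERT(assertion) \
        do { if (UNLIKELY(!(assertion))) CRASH(); } while (0)
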
diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp
index 168769c12..e4a30aee2 100644
--- a/Source/JavaScriptCore/jit/JITStubs.cpp
+++ b/Source/JavaScriptCore/jit/JITStubs.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
* Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved.
*
@@ -56,17 +56,19 @@
#include "JSPropertyNameIterator.h"
#include "JSString.h"
#include "JSWithScope.h"
+#include "LegacyProfiler.h"
#include "NameInstance.h"
+#include "ObjectConstructor.h"
#include "ObjectPrototype.h"
#include "Operations.h"
#include "Parser.h"
-#include "Profiler.h"
#include "RegExpObject.h"
#include "RegExpPrototype.h"
#include "Register.h"
#include "RepatchBuffer.h"
#include "SamplingTool.h"
#include "Strong.h"
+#include "StructureRareDataInlines.h"
#include <wtf/StdLibExtras.h>
#include <stdarg.h>
#include <stdio.h>
@@ -97,6 +99,9 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"pushl %edi" "\n"
"pushl %ebx" "\n"
"subl $0x3c, %esp" "\n"
+ "movw $0x02FF, %bx" "\n"
+ "movw %bx, 0(%esp)" "\n"
+ "fldcw 0(%esp)" "\n"
"movl 0x58(%esp), %edi" "\n"
"call *0x50(%esp)" "\n"
"addl $0x3c, %esp" "\n"
@@ -104,6 +109,7 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"popl %edi" "\n"
"popl %esi" "\n"
"popl %ebp" "\n"
+ "ffree %st(1)" "\n"
"ret" "\n"
".globl " SYMBOL_STRING(ctiTrampolineEnd) "\n"
HIDE_SYMBOL(ctiTrampolineEnd) "\n"
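
The three new instructions load an x87 control word of 0x02FF before entering JIT code: the low byte masks all floating-point exceptions, bits 8-9 select 53-bit (double) precision, and bits 10-11 select round-to-nearest, so x87 temporaries round exactly like the IEEE doubles JavaScript semantics require. The equivalent in C with GCC inline assembly:

    unsigned short cw = 0x02FF; // mask exceptions; double precision; round to nearest
    __asm__ volatile ("fldcw %0" : : "m" (cw));
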
@@ -226,7 +232,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_
extern "C" {
- __declspec(naked) EncodedJSValue ctiTrampoline(void* code, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, JSGlobalData*)
+ __declspec(naked) EncodedJSValue ctiTrampoline(void* code, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, VM*)
{
__asm {
push ebp;
@@ -287,9 +293,13 @@ extern "C" {
#define REGISTER_FILE_OFFSET 92
#define GLOBAL_DATA_OFFSET 108
#define STACK_LENGTH 112
+
#elif CPU(SH4)
#define SYMBOL_STRING(name) #name
-/* code (r4), JSStack* (r5), CallFrame* (r6), void* unused1 (r7), void* unused2(sp), JSGlobalData (sp)*/
+/* code (r4), JSStack* (r5), CallFrame* (r6), void* unused1 (r7), void* unused2(sp), VM (sp)*/
+
+#define THUNK_RETURN_ADDRESS_OFFSET 56
+#define SAVED_R8_OFFSET 60
asm volatile (
".text\n"
@@ -299,27 +309,31 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"mov.l r7, @-r15" "\n"
"mov.l r6, @-r15" "\n"
"mov.l r5, @-r15" "\n"
- "mov.l r8, @-r15" "\n"
- "mov #127, r8" "\n"
"mov.l r14, @-r15" "\n"
"sts.l pr, @-r15" "\n"
"mov.l r13, @-r15" "\n"
"mov.l r11, @-r15" "\n"
"mov.l r10, @-r15" "\n"
- "add #-60, r15" "\n"
+ "mov.l r9, @-r15" "\n"
+ "mov.l r8, @-r15" "\n"
+ "add #-" STRINGIZE_VALUE_OF(SAVED_R8_OFFSET) ", r15" "\n"
"mov r6, r14" "\n"
"jsr @r4" "\n"
"nop" "\n"
- "add #60, r15" "\n"
+ "add #" STRINGIZE_VALUE_OF(SAVED_R8_OFFSET) ", r15" "\n"
+ "mov.l @r15+,r8" "\n"
+ "mov.l @r15+,r9" "\n"
"mov.l @r15+,r10" "\n"
"mov.l @r15+,r11" "\n"
"mov.l @r15+,r13" "\n"
"lds.l @r15+,pr" "\n"
"mov.l @r15+,r14" "\n"
- "mov.l @r15+,r8" "\n"
"add #12, r15" "\n"
"rts" "\n"
"nop" "\n"
+".globl " SYMBOL_STRING(ctiTrampolineEnd) "\n"
+HIDE_SYMBOL(ctiTrampolineEnd) "\n"
+SYMBOL_STRING(ctiTrampolineEnd) ":" "\n"
);
asm volatile (
@@ -331,13 +345,14 @@ SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"mov.l @(r0,r12),r11" "\n"
"jsr @r11" "\n"
"nop" "\n"
- "add #60, r15" "\n"
+ "add #" STRINGIZE_VALUE_OF(SAVED_R8_OFFSET) ", r15" "\n"
+ "mov.l @r15+,r8" "\n"
+ "mov.l @r15+,r9" "\n"
"mov.l @r15+,r10" "\n"
"mov.l @r15+,r11" "\n"
"mov.l @r15+,r13" "\n"
"lds.l @r15+,pr" "\n"
"mov.l @r15+,r14" "\n"
- "mov.l @r15+,r8" "\n"
"add #12, r15" "\n"
"rts" "\n"
"nop" "\n"
@@ -349,13 +364,14 @@ asm volatile (
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "add #60, r15" "\n"
+ "add #" STRINGIZE_VALUE_OF(SAVED_R8_OFFSET) ", r15" "\n"
+ "mov.l @r15+,r8" "\n"
+ "mov.l @r15+,r9" "\n"
"mov.l @r15+,r10" "\n"
"mov.l @r15+,r11" "\n"
"mov.l @r15+,r13" "\n"
"lds.l @r15+,pr" "\n"
"mov.l @r15+,r14" "\n"
- "mov.l @r15+,r8" "\n"
"add #12, r15" "\n"
"rts" "\n"
"nop" "\n"
@@ -538,12 +554,11 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"sw $28," STRINGIZE_VALUE_OF(PRESERVED_GP_OFFSET) "($29)" "\n"
#endif
"move $16,$6 # set callFrameRegister" "\n"
- "li $17,512 # set timeoutCheckRegister" "\n"
"move $25,$4 # move executableAddress to t9" "\n"
"sw $5," STRINGIZE_VALUE_OF(REGISTER_FILE_OFFSET) "($29) # store JSStack to current stack" "\n"
- "lw $9," STRINGIZE_VALUE_OF(STACK_LENGTH + 20) "($29) # load globalData from previous stack" "\n"
+ "lw $9," STRINGIZE_VALUE_OF(STACK_LENGTH + 20) "($29) # load vm from previous stack" "\n"
"jalr $25" "\n"
- "sw $9," STRINGIZE_VALUE_OF(GLOBAL_DATA_OFFSET) "($29) # store globalData to current stack" "\n"
+ "sw $9," STRINGIZE_VALUE_OF(GLOBAL_DATA_OFFSET) "($29) # store vm to current stack" "\n"
"lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
"lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
"lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
@@ -750,7 +765,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif COMPILER(RVCT) && CPU(ARM_THUMB2)
-__asm EncodedJSValue ctiTrampoline(void*, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, JSGlobalData*)
+__asm EncodedJSValue ctiTrampoline(void*, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, VM*)
{
PRESERVE8
sub sp, sp, # FIRST_STACK_ARGUMENT
@@ -818,7 +833,7 @@ __asm void ctiOpThrowNotCaught()
#elif COMPILER(RVCT) && CPU(ARM_TRADITIONAL)
-__asm EncodedJSValue ctiTrampoline(void*, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, JSGlobalData*)
+__asm EncodedJSValue ctiTrampoline(void*, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, VM*)
{
ARM
stmdb sp!, {r1-r3}
@@ -860,19 +875,16 @@ __asm void ctiOpThrowNotCaught()
#endif
#if ENABLE(OPCODE_SAMPLING)
- #define CTI_SAMPLER stackFrame.globalData->interpreter->sampler()
+ #define CTI_SAMPLER stackFrame.vm->interpreter->sampler()
#else
#define CTI_SAMPLER 0
#endif
-JITThunks::JITThunks(JSGlobalData* globalData)
- : m_hostFunctionStubMap(adoptPtr(new HostFunctionStubMap))
+void performPlatformSpecificJITAssertions(VM* vm)
{
- if (!globalData->canUseJIT())
+ if (!vm->canUseJIT())
return;
- m_executableMemory = JIT::compileCTIMachineTrampolines(globalData, &m_trampolineStructure);
- ASSERT(!!m_executableMemory);
#if CPU(ARM_THUMB2)
// Unfortunately the ARM compiler does not like the use of offsetof on JITStackFrame (since it contains non-POD types),
// and the OBJECT_OFFSETOF macro does not appear constant enough for it to be happy with its use in COMPILE_ASSERT
@@ -907,16 +919,15 @@ JITThunks::JITThunks(JSGlobalData* globalData)
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == PRESERVED_RETURN_ADDRESS_OFFSET);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, stack) == REGISTER_FILE_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, globalData) == GLOBAL_DATA_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, vm) == GLOBAL_DATA_OFFSET);
+#elif CPU(SH4)
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, savedR8) == SAVED_R8_OFFSET);
#endif
}
-JITThunks::~JITThunks()
-{
-}
-
-NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot, StructureStubInfo* stubInfo, bool direct)
+NEVER_INLINE static void tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot, StructureStubInfo* stubInfo, bool direct)
{
// The interpreter checks for recursion here; I do not believe this can occur in CTI.
@@ -960,17 +971,17 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co
StructureChain* prototypeChain = structure->prototypeChain(callFrame);
ASSERT(structure->previousID()->transitionWatchpointSetHasBeenInvalidated());
- stubInfo->initPutByIdTransition(callFrame->globalData(), codeBlock->ownerExecutable(), structure->previousID(), structure, prototypeChain, direct);
- JIT::compilePutByIdTransition(callFrame->scope()->globalData(), codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress, direct);
+ stubInfo->initPutByIdTransition(callFrame->vm(), codeBlock->ownerExecutable(), structure->previousID(), structure, prototypeChain, direct);
+ JIT::compilePutByIdTransition(callFrame->scope()->vm(), codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress, direct);
return;
}
- stubInfo->initPutByIdReplace(callFrame->globalData(), codeBlock->ownerExecutable(), structure);
+ stubInfo->initPutByIdReplace(callFrame->vm(), codeBlock->ownerExecutable(), structure);
JIT::patchPutByIdReplace(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress, direct);
}
-NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo* stubInfo)
+NEVER_INLINE static void tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo* stubInfo)
{
// FIXME: Write a test that proves we need to check for recursion here just
// like the interpreter does, then add a check for recursion.
@@ -981,17 +992,17 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
return;
}
- JSGlobalData* globalData = &callFrame->globalData();
+ VM* vm = &callFrame->vm();
if (isJSArray(baseValue) && propertyName == callFrame->propertyNames().length) {
- JIT::compilePatchGetArrayLength(callFrame->scope()->globalData(), codeBlock, returnAddress);
+ JIT::compilePatchGetArrayLength(callFrame->scope()->vm(), codeBlock, returnAddress);
return;
}
if (isJSString(baseValue) && propertyName == callFrame->propertyNames().length) {
// The tradeoff of compiling a patched inline string length access routine does not seem
// to pay off, so we currently only do this for arrays.
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, globalData->jitStubs->ctiStringLengthTrampoline());
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, vm->getCTIStub(stringLengthTrampolineGenerator).code());
return;
}
@@ -1014,13 +1025,14 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
// Cache hit: Specialize instruction and ref Structures.
if (slot.slotBase() == baseValue) {
- // set this up, so derefStructures can do it's job.
- stubInfo->initGetByIdSelf(callFrame->globalData(), codeBlock->ownerExecutable(), structure);
+ RELEASE_ASSERT(stubInfo->accessType == access_unset);
if ((slot.cachedPropertyType() != PropertySlot::Value)
- || !MacroAssembler::isCompactPtrAlignedAddressOffset(offsetRelativeToPatchedStorage(slot.cachedOffset())))
+ || !MacroAssembler::isCompactPtrAlignedAddressOffset(maxOffsetRelativeToPatchedStorage(slot.cachedOffset())))
ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
- else
+ else {
JIT::patchGetByIdSelf(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
+ stubInfo->initGetByIdSelf(callFrame->vm(), codeBlock->ownerExecutable(), structure);
+ }
return;
}
@@ -1045,15 +1057,15 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
// Since we're accessing a prototype in a loop, it's a good bet that it
// should not be treated as a dictionary.
if (slotBaseObject->structure()->isDictionary()) {
- slotBaseObject->flattenDictionaryObject(callFrame->globalData());
- offset = slotBaseObject->structure()->get(callFrame->globalData(), propertyName);
+ slotBaseObject->flattenDictionaryObject(callFrame->vm());
+ offset = slotBaseObject->structure()->get(callFrame->vm(), propertyName);
}
- stubInfo->initGetByIdProto(callFrame->globalData(), codeBlock->ownerExecutable(), structure, slotBaseObject->structure(), slot.cachedPropertyType() == PropertySlot::Value);
+ stubInfo->initGetByIdProto(callFrame->vm(), codeBlock->ownerExecutable(), structure, slotBaseObject->structure(), slot.cachedPropertyType() == PropertySlot::Value);
ASSERT(!structure->isDictionary());
ASSERT(!slotBaseObject->structure()->isDictionary());
- JIT::compileGetByIdProto(callFrame->scope()->globalData(), callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), propertyName, slot, offset, returnAddress);
+ JIT::compileGetByIdProto(callFrame->scope()->vm(), callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), propertyName, slot, offset, returnAddress);
return;
}
@@ -1066,8 +1078,8 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
}
StructureChain* prototypeChain = structure->prototypeChain(callFrame);
- stubInfo->initGetByIdChain(callFrame->globalData(), codeBlock->ownerExecutable(), structure, prototypeChain, count, slot.cachedPropertyType() == PropertySlot::Value);
- JIT::compileGetByIdChain(callFrame->scope()->globalData(), callFrame, codeBlock, stubInfo, structure, prototypeChain, count, propertyName, slot, offset, returnAddress);
+ stubInfo->initGetByIdChain(callFrame->vm(), codeBlock->ownerExecutable(), structure, prototypeChain, count, slot.cachedPropertyType() == PropertySlot::Value);
+ JIT::compileGetByIdChain(callFrame->scope()->vm(), callFrame, codeBlock, stubInfo, structure, prototypeChain, count, propertyName, slot, offset, returnAddress);
}
#if !defined(NDEBUG)
@@ -1117,10 +1129,10 @@ struct StackHack {
// to get the address of the ctiVMThrowTrampoline function. It's also
// good to keep the code size down by leaving as much of the exception
// handling code out of line as possible.
-static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot)
+static NEVER_INLINE void returnToThrowTrampoline(VM* vm, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot)
{
- ASSERT(globalData->exception);
- globalData->exceptionLocation = exceptionLocation;
+ RELEASE_ASSERT(vm->exception);
+ vm->exceptionLocation = exceptionLocation;
returnAddressSlot = ReturnAddressPtr(FunctionPtr(ctiVMThrowTrampoline));
}
@@ -1131,44 +1143,102 @@ static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, Retur
} while (0)
#define VM_THROW_EXCEPTION_AT_END() \
do {\
- returnToThrowTrampoline(stackFrame.globalData, STUB_RETURN_ADDRESS, STUB_RETURN_ADDRESS);\
+ returnToThrowTrampoline(stackFrame.vm, STUB_RETURN_ADDRESS, STUB_RETURN_ADDRESS);\
} while (0)
#define CHECK_FOR_EXCEPTION() \
do { \
- if (UNLIKELY(stackFrame.globalData->exception)) \
+ if (UNLIKELY(stackFrame.vm->exception)) \
VM_THROW_EXCEPTION(); \
} while (0)
#define CHECK_FOR_EXCEPTION_AT_END() \
do { \
- if (UNLIKELY(stackFrame.globalData->exception)) \
+ if (UNLIKELY(stackFrame.vm->exception)) \
VM_THROW_EXCEPTION_AT_END(); \
} while (0)
#define CHECK_FOR_EXCEPTION_VOID() \
do { \
- if (UNLIKELY(stackFrame.globalData->exception)) { \
+ if (UNLIKELY(stackFrame.vm->exception)) { \
VM_THROW_EXCEPTION_AT_END(); \
return; \
} \
} while (0)
+class ErrorFunctor {
+public:
+ virtual ~ErrorFunctor() { }
+ virtual JSValue operator()(ExecState*) = 0;
+};
+
+class ErrorWithExecFunctor : public ErrorFunctor {
+public:
+ typedef JSObject* (*Factory)(ExecState* exec);
+
+ ErrorWithExecFunctor(Factory factory)
+ : m_factory(factory)
+ {
+ }
+ JSValue operator()(ExecState* exec)
+ {
+ return m_factory(exec);
+ }
+
+private:
+ Factory m_factory;
+};
+
+class ErrorWithExecAndCalleeFunctor : public ErrorFunctor {
+public:
+ typedef JSObject* (*Factory)(ExecState* exec, JSValue callee);
+
+ ErrorWithExecAndCalleeFunctor(Factory factory, JSValue callee)
+ : m_factory(factory), m_callee(callee)
+ {
+ }
+ JSValue operator()(ExecState* exec)
+ {
+ return m_factory(exec, m_callee);
+ }
+private:
+ Factory m_factory;
+ JSValue m_callee;
+};
+
+class ErrorWithExceptionFunctor : public ErrorFunctor {
+public:
+ ErrorWithExceptionFunctor(JSValue exception)
+ : m_exception(exception)
+ {
+ }
+ JSValue operator()(ExecState*)
+ {
+ return m_exception;
+ }
+
+private:
+ JSValue m_exception;
+};
+
// Helper function for JIT stubs that may throw an exception in the middle of
// processing a function call. This function rolls back the stack to
// our caller, so exception processing can proceed from a valid state.
-template<typename T> static T throwExceptionFromOpCall(JITStackFrame& jitStackFrame, CallFrame* newCallFrame, ReturnAddressPtr& returnAddressSlot)
+template<typename T> static T throwExceptionFromOpCall(JITStackFrame& jitStackFrame, CallFrame* newCallFrame, ReturnAddressPtr& returnAddressSlot, ErrorFunctor& createError)
{
CallFrame* callFrame = newCallFrame->callerFrame();
- ASSERT(callFrame->globalData().exception);
jitStackFrame.callFrame = callFrame;
- callFrame->globalData().topCallFrame = callFrame;
- returnToThrowTrampoline(&callFrame->globalData(), ReturnAddressPtr(newCallFrame->returnPC()), returnAddressSlot);
+ callFrame->vm().topCallFrame = callFrame;
+ callFrame->vm().exception = createError(callFrame);
+ ASSERT(callFrame->vm().exception);
+ returnToThrowTrampoline(&callFrame->vm(), ReturnAddressPtr(newCallFrame->returnPC()), returnAddressSlot);
return T();
}
-template<typename T> static T throwExceptionFromOpCall(JITStackFrame& jitStackFrame, CallFrame* newCallFrame, ReturnAddressPtr& returnAddressSlot, JSValue exception)
+template<typename T> static T throwExceptionFromOpCall(JITStackFrame& jitStackFrame, CallFrame* newCallFrame, ReturnAddressPtr& returnAddressSlot)
{
- newCallFrame->callerFrame()->globalData().exception = exception;
- return throwExceptionFromOpCall<T>(jitStackFrame, newCallFrame, returnAddressSlot);
+ CallFrame* callFrame = newCallFrame->callerFrame();
+ ASSERT(callFrame->vm().exception);
+ ErrorWithExceptionFunctor functor = ErrorWithExceptionFunctor(callFrame->vm().exception);
+ return throwExceptionFromOpCall<T>(jitStackFrame, newCallFrame, returnAddressSlot, functor);
}
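
The functor family above exists so the error object is created only after the frame has been rolled back to the caller; constructing, say, a stack-overflow error against the dying frame would run against an invalid state. A call site then reads like the stack_check stub further down:

    // usage sketch; this exact pattern appears in DEFINE_STUB_FUNCTION(void*, stack_check)
    ErrorWithExecFunctor functor(createStackOverflowError);
    return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, functor);
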
#if CPU(ARM_THUMB2) && COMPILER(GCC)
@@ -1362,12 +1432,12 @@ MSVC_END( END)
".globl " SYMBOL_STRING(cti_##op) "\n" \
SYMBOL_STRING(cti_##op) ":" "\n" \
"sts pr, r11" "\n" \
- "mov.l r11, @(0x38, r15)" "\n" \
+ "mov.l r11, @(" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) ", r15)" "\n" \
"mov.l .L2"SYMBOL_STRING(JITStubThunked_##op)",r0" "\n" \
"mov.l @(r0,r12),r11" "\n" \
"jsr @r11" "\n" \
"nop" "\n" \
- "mov.l @(0x38, r15), r11 " "\n" \
+ "mov.l @(" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) ", r15), r11 " "\n" \
"lds r11, pr " "\n" \
"rts" "\n" \
"nop" "\n" \
@@ -1383,6 +1453,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_this)
{
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
+ size_t inlineCapacity = stackFrame.args[0].int32();
JSFunction* constructor = jsCast<JSFunction*>(callFrame->callee());
#if !ASSERT_DISABLED
@@ -1390,7 +1461,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_this)
ASSERT(constructor->methodTable()->getConstructData(constructor, constructData) == ConstructTypeJS);
#endif
- Structure* structure = constructor->cachedInheritorID(callFrame);
+ Structure* structure = constructor->allocationProfile(callFrame, inlineCapacity)->structure();
JSValue result = constructEmptyObject(callFrame, structure);
return JSValue::encode(result);
@@ -1433,7 +1504,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
return JSValue::encode(result);
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_inc)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_inc)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -1445,22 +1516,16 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_inc)
return JSValue::encode(result);
}
-DEFINE_STUB_FUNCTION(int, timeout_check)
+DEFINE_STUB_FUNCTION(void, handle_watchdog_timer)
{
STUB_INIT_STACK_FRAME(stackFrame);
-
- JSGlobalData* globalData = stackFrame.globalData;
- TimeoutChecker& timeoutChecker = globalData->timeoutChecker;
-
- if (globalData->terminator.shouldTerminate()) {
- globalData->exception = createTerminatedExecutionException(globalData);
- VM_THROW_EXCEPTION_AT_END();
- } else if (timeoutChecker.didTimeOut(stackFrame.callFrame)) {
- globalData->exception = createInterruptedExecutionException(globalData);
+ CallFrame* callFrame = stackFrame.callFrame;
+ VM* vm = stackFrame.vm;
+ if (UNLIKELY(vm->watchdog.didFire(callFrame))) {
+ vm->exception = createTerminatedExecutionException(vm);
VM_THROW_EXCEPTION_AT_END();
+ return;
}
-
- return timeoutChecker.ticksUntilNextCheck();
}
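
handle_watchdog_timer replaces the tick-counting timeout_check: the stub returns void because the JIT no longer maintains a countdown register (note the dropped `li $17,512 # set timeoutCheckRegister` in the MIPS trampoline above), and the separate terminate and interrupt cases collapse into one terminated-execution exception. The host-side check, using the names from the stub:

    if (UNLIKELY(vm->watchdog.didFire(callFrame))) {
        vm->exception = createTerminatedExecutionException(vm);
        VM_THROW_EXCEPTION_AT_END(); // unwinds via ctiVMThrowTrampoline
    }
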
DEFINE_STUB_FUNCTION(void*, stack_check)
@@ -1468,8 +1533,10 @@ DEFINE_STUB_FUNCTION(void*, stack_check)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- if (UNLIKELY(!stackFrame.stack->grow(&callFrame->registers()[callFrame->codeBlock()->m_numCalleeRegisters])))
- return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createStackOverflowError(callFrame->callerFrame()));
+ if (UNLIKELY(!stackFrame.stack->grow(&callFrame->registers()[callFrame->codeBlock()->m_numCalleeRegisters]))) {
+ ErrorWithExecFunctor functor = ErrorWithExecFunctor(createStackOverflowError);
+ return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, functor);
+ }
return callFrame;
}
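stack_check follows a grow-or-throw pattern: reserve space for all of the callee's registers up front and raise one stack-overflow error if the reservation fails. A minimal sketch under simplified assumptions (one word per register; Stack is a stand-in for JSStack):

    #include <cstddef>

    class Stack {
    public:
        explicit Stack(std::size_t capacity) : m_capacity(capacity) { }

        // Returns false when the requested end exceeds capacity; the caller
        // is then expected to throw a stack-overflow error.
        bool grow(std::size_t newEnd)
        {
            if (newEnd > m_capacity)
                return false;
            if (newEnd > m_end)
                m_end = newEnd;
            return true;
        }

    private:
        std::size_t m_capacity;
        std::size_t m_end { 0 };
    };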
@@ -1478,7 +1545,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_object)
{
STUB_INIT_STACK_FRAME(stackFrame);
- return constructEmptyObject(stackFrame.callFrame);
+ return constructEmptyObject(stackFrame.callFrame, stackFrame.args[0].structure());
}
DEFINE_STUB_FUNCTION(void, op_put_by_id_generic)
@@ -1497,7 +1564,7 @@ DEFINE_STUB_FUNCTION(void, op_put_by_id_direct_generic)
PutPropertySlot slot(stackFrame.callFrame->codeBlock()->isStrictMode());
JSValue baseValue = stackFrame.args[0].jsValue();
ASSERT(baseValue.isObject());
- asObject(baseValue)->putDirect(stackFrame.callFrame->globalData(), stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
+ asObject(baseValue)->putDirect(stackFrame.callFrame->vm(), stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
CHECK_FOR_EXCEPTION_AT_END();
}
@@ -1530,10 +1597,8 @@ DEFINE_STUB_FUNCTION(void, op_put_by_id)
stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
if (accessType == static_cast<AccessType>(stubInfo->accessType)) {
- if (!stubInfo->seenOnce())
- stubInfo->setSeen();
- else
- JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, false);
+ stubInfo->setSeen();
+ tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, false);
}
CHECK_FOR_EXCEPTION_AT_END();
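The caching policy changes here: the old code waited until a put had been seen twice (seenOnce()/setSeen()) before compiling a fast path; the new code marks the stub as seen and tries to cache on the first slow-path hit. A tiny standalone sketch of the before/after policy (StubInfo here is a stand-in):

    struct StubInfo {
        bool seen { false };
        bool cached { false };
        void setSeen() { seen = true; }
        bool seenOnce() const { return seen; }
    };

    static void tryCachePut(StubInfo& stub) { stub.cached = true; }

    // Old policy: cache only on the second sighting.
    static void slowPathPutOld(StubInfo& stub)
    {
        if (!stub.seenOnce())
            stub.setSeen();
        else
            tryCachePut(stub);
    }

    // New policy: mark seen and cache immediately.
    static void slowPathPutNew(StubInfo& stub)
    {
        stub.setSeen();
        tryCachePut(stub);
    }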
@@ -1553,13 +1618,11 @@ DEFINE_STUB_FUNCTION(void, op_put_by_id_direct)
JSValue baseValue = stackFrame.args[0].jsValue();
ASSERT(baseValue.isObject());
- asObject(baseValue)->putDirect(callFrame->globalData(), ident, stackFrame.args[2].jsValue(), slot);
+ asObject(baseValue)->putDirect(callFrame->vm(), ident, stackFrame.args[2].jsValue(), slot);
if (accessType == static_cast<AccessType>(stubInfo->accessType)) {
- if (!stubInfo->seenOnce())
- stubInfo->setSeen();
- else
- JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, true);
+ stubInfo->setSeen();
+ tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, true);
}
CHECK_FOR_EXCEPTION_AT_END();
@@ -1588,7 +1651,7 @@ DEFINE_STUB_FUNCTION(void, op_put_by_id_direct_fail)
PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
JSValue baseValue = stackFrame.args[0].jsValue();
ASSERT(baseValue.isObject());
- asObject(baseValue)->putDirect(callFrame->globalData(), ident, stackFrame.args[2].jsValue(), slot);
+ asObject(baseValue)->putDirect(callFrame->vm(), ident, stackFrame.args[2].jsValue(), slot);
CHECK_FOR_EXCEPTION_AT_END();
}
@@ -1607,9 +1670,9 @@ DEFINE_STUB_FUNCTION(JSObject*, op_put_by_id_transition_realloc)
ASSERT(baseValue.isObject());
JSObject* base = asObject(baseValue);
- JSGlobalData& globalData = *stackFrame.globalData;
- Butterfly* butterfly = base->growOutOfLineStorage(globalData, oldSize, newSize);
- base->setButterfly(globalData, butterfly, newStructure);
+ VM& vm = *stackFrame.vm;
+ Butterfly* butterfly = base->growOutOfLineStorage(vm, oldSize, newSize);
+ base->setButterfly(vm, butterfly, newStructure);
return base;
}
@@ -1634,7 +1697,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id)
if (!stubInfo->seenOnce())
stubInfo->setSeen();
else
- JITThunks::tryCacheGetByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, baseValue, ident, slot, stubInfo);
+ tryCacheGetByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, baseValue, ident, slot, stubInfo);
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
@@ -1670,9 +1733,12 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
PolymorphicAccessStructureList* polymorphicStructureList;
int listIndex = 1;
+ if (stubInfo->accessType == access_unset)
+ stubInfo->initGetByIdSelf(callFrame->vm(), codeBlock->ownerExecutable(), baseValue.asCell()->structure());
+
if (stubInfo->accessType == access_get_by_id_self) {
ASSERT(!stubInfo->stubRoutine);
- polymorphicStructureList = new PolymorphicAccessStructureList(callFrame->globalData(), codeBlock->ownerExecutable(), 0, stubInfo->u.getByIdSelf.baseObjectStructure.get(), true);
+ polymorphicStructureList = new PolymorphicAccessStructureList(callFrame->vm(), codeBlock->ownerExecutable(), 0, stubInfo->u.getByIdSelf.baseObjectStructure.get(), true);
stubInfo->initGetByIdSelfList(polymorphicStructureList, 1);
} else {
polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList;
@@ -1680,7 +1746,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
}
if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
stubInfo->u.getByIdSelfList.listSize++;
- JIT::compileGetByIdSelfList(callFrame->scope()->globalData(), codeBlock, stubInfo, polymorphicStructureList, listIndex, baseValue.asCell()->structure(), ident, slot, slot.cachedOffset());
+ JIT::compileGetByIdSelfList(callFrame->scope()->vm(), codeBlock, stubInfo, polymorphicStructureList, listIndex, baseValue.asCell()->structure(), ident, slot, slot.cachedOffset());
if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
@@ -1690,19 +1756,19 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
return JSValue::encode(result);
}
-static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(JSGlobalData& globalData, ScriptExecutable* owner, StructureStubInfo* stubInfo, int& listIndex)
+static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(VM& vm, ScriptExecutable* owner, StructureStubInfo* stubInfo, int& listIndex)
{
PolymorphicAccessStructureList* prototypeStructureList = 0;
listIndex = 1;
switch (stubInfo->accessType) {
case access_get_by_id_proto:
- prototypeStructureList = new PolymorphicAccessStructureList(globalData, owner, stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure.get(), stubInfo->u.getByIdProto.prototypeStructure.get(), true);
+ prototypeStructureList = new PolymorphicAccessStructureList(vm, owner, stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure.get(), stubInfo->u.getByIdProto.prototypeStructure.get(), true);
stubInfo->stubRoutine.clear();
stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
break;
case access_get_by_id_chain:
- prototypeStructureList = new PolymorphicAccessStructureList(globalData, owner, stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure.get(), stubInfo->u.getByIdChain.chain.get(), true);
+ prototypeStructureList = new PolymorphicAccessStructureList(vm, owner, stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure.get(), stubInfo->u.getByIdChain.chain.get(), true);
stubInfo->stubRoutine.clear();
stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
break;
@@ -1713,7 +1779,7 @@ static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(JSG
stubInfo->u.getByIdProtoList.listSize++;
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
ASSERT(listIndex <= POLYMORPHIC_LIST_CACHE_SIZE);
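getPolymorphicAccessStructureListSlot promotes a monomorphic cache entry (proto or chain) into a bounded polymorphic list; once the list fills up, callers patch the site over to the generic stub. A simplified sketch of that bounded-list shape (names and the size constant are illustrative):

    #include <array>
    #include <cstddef>

    constexpr std::size_t kPolymorphicListCacheSize = 8;

    struct AccessEntry { const void* structure { nullptr }; };

    struct PolymorphicAccessList {
        std::array<AccessEntry, kPolymorphicListCacheSize> entries { };
        std::size_t size { 0 };

        // False once full; the caller then repatches the call site to the
        // generic stub, mirroring the POLYMORPHIC_LIST_CACHE_SIZE checks above.
        bool tryAppend(const void* structure)
        {
            if (size == kPolymorphicListCacheSize)
                return false;
            entries[size++] = AccessEntry { structure };
            return true;
        }
    };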
@@ -1732,7 +1798,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_getter_stub)
CallType callType = getter->methodTable()->getCallData(getter, callData);
JSValue result = call(callFrame, getter, callType, callData, stackFrame.args[1].jsObject(), ArgList());
if (callFrame->hadException())
- returnToThrowTrampoline(&callFrame->globalData(), stackFrame.args[2].returnAddress(), STUB_RETURN_ADDRESS);
+ returnToThrowTrampoline(&callFrame->vm(), stackFrame.args[2].returnAddress(), STUB_RETURN_ADDRESS);
return JSValue::encode(result);
}
@@ -1746,7 +1812,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_custom_stub)
const Identifier& ident = stackFrame.args[2].identifier();
JSValue result = getter(callFrame, slotBase, ident);
if (callFrame->hadException())
- returnToThrowTrampoline(&callFrame->globalData(), stackFrame.args[3].returnAddress(), STUB_RETURN_ADDRESS);
+ returnToThrowTrampoline(&callFrame->vm(), stackFrame.args[3].returnAddress(), STUB_RETURN_ADDRESS);
return JSValue::encode(result);
}
@@ -1797,14 +1863,14 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
// Since we're accessing a prototype in a loop, it's a good bet that it
// should not be treated as a dictionary.
if (slotBaseObject->structure()->isDictionary()) {
- slotBaseObject->flattenDictionaryObject(callFrame->globalData());
- offset = slotBaseObject->structure()->get(callFrame->globalData(), propertyName);
+ slotBaseObject->flattenDictionaryObject(callFrame->vm());
+ offset = slotBaseObject->structure()->get(callFrame->vm(), propertyName);
}
int listIndex;
- PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(callFrame->globalData(), codeBlock->ownerExecutable(), stubInfo, listIndex);
+ PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(callFrame->vm(), codeBlock->ownerExecutable(), stubInfo, listIndex);
if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
- JIT::compileGetByIdProtoList(callFrame->scope()->globalData(), callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), propertyName, slot, offset);
+ JIT::compileGetByIdProtoList(callFrame->scope()->vm(), callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), propertyName, slot, offset);
if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
@@ -1818,11 +1884,11 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
ASSERT(!baseValue.asCell()->structure()->isDictionary());
int listIndex;
- PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(callFrame->globalData(), codeBlock->ownerExecutable(), stubInfo, listIndex);
+ PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(callFrame->vm(), codeBlock->ownerExecutable(), stubInfo, listIndex);
if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
StructureChain* protoChain = structure->prototypeChain(callFrame);
- JIT::compileGetByIdChainList(callFrame->scope()->globalData(), callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, propertyName, slot, offset);
+ JIT::compileGetByIdChainList(callFrame->scope()->vm(), callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, propertyName, slot, offset);
if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
@@ -1898,7 +1964,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_check_has_instance)
}
}
- stackFrame.globalData->exception = createInvalidParamError(callFrame, "instanceof", baseVal);
+ stackFrame.vm->exception = createInvalidParameterError(callFrame, "instanceof", baseVal);
VM_THROW_EXCEPTION_AT_END();
return JSValue::encode(JSValue());
}
@@ -1927,12 +1993,15 @@ DEFINE_STUB_FUNCTION(void, optimize)
if (!codeBlock->checkIfOptimizationThresholdReached()) {
codeBlock->updateAllPredictions();
+#if ENABLE(JIT_VERBOSE_OSR)
+ dataLog("Choosing not to optimize ", *codeBlock, " yet.\n");
+#endif
return;
}
if (codeBlock->hasOptimizedReplacement()) {
#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF("Considering OSR ", *codeBlock, " -> ", *codeBlock->replacement(), ".\n");
+ dataLog("Considering OSR ", *codeBlock, " -> ", *codeBlock->replacement(), ".\n");
#endif
// If we have an optimized replacement, then it must be the case that we entered
    // cti_optimize from a loop. That's because if there's an optimized replacement,
@@ -1949,7 +2018,7 @@ DEFINE_STUB_FUNCTION(void, optimize)
// additional checking anyway, to reduce the amount of recompilation thrashing.
if (codeBlock->replacement()->shouldReoptimizeFromLoopNow()) {
#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF("Triggering reoptimization of ", *codeBlock, "(", *codeBlock->replacement(), ") (in loop).\n");
+ dataLog("Triggering reoptimization of ", *codeBlock, "(", *codeBlock->replacement(), ") (in loop).\n");
#endif
codeBlock->reoptimize();
return;
@@ -1957,23 +2026,27 @@ DEFINE_STUB_FUNCTION(void, optimize)
} else {
if (!codeBlock->shouldOptimizeNow()) {
#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF("Delaying optimization for ", *codeBlock, " (in loop) because of insufficient profiling.\n");
+ dataLog("Delaying optimization for ", *codeBlock, " (in loop) because of insufficient profiling.\n");
#endif
return;
}
+#if ENABLE(JIT_VERBOSE_OSR)
+ dataLog("Triggering optimized compilation of ", *codeBlock, "\n");
+#endif
+
JSScope* scope = callFrame->scope();
JSObject* error = codeBlock->compileOptimized(callFrame, scope, bytecodeIndex);
#if ENABLE(JIT_VERBOSE_OSR)
if (error)
- dataLogF("WARNING: optimized compilation failed.\n");
+ dataLog("WARNING: optimized compilation failed.\n");
#else
UNUSED_PARAM(error);
#endif
if (codeBlock->replacement() == codeBlock) {
#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF("Optimizing ", *codeBlock, " failed.\n");
+ dataLog("Optimizing ", *codeBlock, " failed.\n");
#endif
ASSERT(codeBlock->getJITType() == JITCode::BaselineJIT);
@@ -1992,7 +2065,7 @@ DEFINE_STUB_FUNCTION(void, optimize)
RawPointer((STUB_RETURN_ADDRESS).value()), " -> ", RawPointer(address), ".\n");
}
#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF("Optimizing ", *codeBlock, " succeeded, performing OSR after a delay of ", codeBlock->optimizationDelayCounter(), ".\n");
+ dataLog("Optimizing ", *codeBlock, " succeeded, performing OSR after a delay of ", codeBlock->optimizationDelayCounter(), ".\n");
#endif
codeBlock->optimizeSoon();
@@ -2001,7 +2074,7 @@ DEFINE_STUB_FUNCTION(void, optimize)
}
#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF("Optimizing ", *codeBlock, " succeeded, OSR failed, after a delay of ", codeBlock->optimizationDelayCounter(), ".\n");
+ dataLog("Optimizing ", *codeBlock, " succeeded, OSR failed, after a delay of ", codeBlock->optimizationDelayCounter(), ".\n");
#endif
// Count the OSR failure as a speculation failure. If this happens a lot, then
@@ -2009,7 +2082,7 @@ DEFINE_STUB_FUNCTION(void, optimize)
optimizedCodeBlock->countOSRExit();
#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF("Encountered OSR failure ", *codeBlock, " -> ", *codeBlock->replacement(), ".\n");
+ dataLog("Encountered OSR failure ", *codeBlock, " -> ", *codeBlock->replacement(), ".\n");
#endif
// We are a lot more conservative about triggering reoptimization after OSR failure than
@@ -2022,7 +2095,7 @@ DEFINE_STUB_FUNCTION(void, optimize)
// reoptimization trigger.
if (optimizedCodeBlock->shouldReoptimizeNow()) {
#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF("Triggering reoptimization of ", *codeBlock, " -> ", *codeBlock->replacement(), " (after OSR fail).\n");
+ dataLog("Triggering reoptimization of ", *codeBlock, " -> ", *codeBlock->replacement(), " (after OSR fail).\n");
#endif
codeBlock->reoptimize();
return;
@@ -2060,7 +2133,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_id)
bool couldDelete = baseObj->methodTable()->deleteProperty(baseObj, callFrame, stackFrame.args[1].identifier());
JSValue result = jsBoolean(couldDelete);
if (!couldDelete && callFrame->codeBlock()->isStrictMode())
- stackFrame.globalData->exception = createTypeError(stackFrame.callFrame, "Unable to delete property.");
+ stackFrame.vm->exception = createTypeError(stackFrame.callFrame, "Unable to delete property.");
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
@@ -2116,7 +2189,7 @@ inline void* jitCompileFor(CallFrame* callFrame, CodeSpecializationKind kind)
JSObject* error = executable->compileFor(callFrame, callDataScopeChain, kind);
if (!error)
return function;
- callFrame->globalData().exception = error;
+ callFrame->vm().exception = error;
return 0;
}
@@ -2161,9 +2234,10 @@ DEFINE_STUB_FUNCTION(void*, op_call_arityCheck)
CallFrame* callFrame = stackFrame.callFrame;
CallFrame* newCallFrame = CommonSlowPaths::arityCheckFor(callFrame, stackFrame.stack, CodeForCall);
- if (!newCallFrame)
- return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createStackOverflowError(callFrame->callerFrame()));
-
+ if (!newCallFrame) {
+ ErrorWithExecFunctor functor = ErrorWithExecFunctor(createStackOverflowError);
+ return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, functor);
+ }
return newCallFrame;
}
@@ -2174,9 +2248,10 @@ DEFINE_STUB_FUNCTION(void*, op_construct_arityCheck)
CallFrame* callFrame = stackFrame.callFrame;
CallFrame* newCallFrame = CommonSlowPaths::arityCheckFor(callFrame, stackFrame.stack, CodeForConstruct);
- if (!newCallFrame)
- return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createStackOverflowError(callFrame->callerFrame()));
-
+ if (!newCallFrame) {
+ ErrorWithExecFunctor functor = ErrorWithExecFunctor(createStackOverflowError);
+ return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, functor);
+ }
return newCallFrame;
}
@@ -2211,7 +2286,7 @@ inline void* lazyLinkFor(CallFrame* callFrame, CodeSpecializationKind kind)
else {
FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
if (JSObject* error = functionExecutable->compileFor(callFrame, callee->scope(), kind)) {
- callFrame->globalData().exception = error;
+ callFrame->vm().exception = error;
return 0;
}
codeBlock = &functionExecutable->generatedBytecodeFor(kind);
@@ -2225,7 +2300,7 @@ inline void* lazyLinkFor(CallFrame* callFrame, CodeSpecializationKind kind)
if (!callLinkInfo->seenOnce())
callLinkInfo->setSeen();
else
- JIT::linkFor(callee, callFrame->callerFrame()->codeBlock(), codeBlock, codePtr, callLinkInfo, &callFrame->globalData(), kind);
+ JIT::linkFor(callee, callFrame->callerFrame()->codeBlock(), codeBlock, codePtr, callLinkInfo, &callFrame->vm(), kind);
return codePtr.executableAddress();
}
@@ -2242,6 +2317,69 @@ DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
return result;
}
+DEFINE_STUB_FUNCTION(void*, vm_lazyLinkClosureCall)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ CodeBlock* callerCodeBlock = callFrame->callerFrame()->codeBlock();
+ VM* vm = callerCodeBlock->vm();
+ CallLinkInfo* callLinkInfo = &callerCodeBlock->getCallLinkInfo(callFrame->returnPC());
+ JSFunction* callee = jsCast<JSFunction*>(callFrame->callee());
+ ExecutableBase* executable = callee->executable();
+ Structure* structure = callee->structure();
+
+ ASSERT(callLinkInfo->callType == CallLinkInfo::Call);
+ ASSERT(callLinkInfo->isLinked());
+ ASSERT(callLinkInfo->callee);
+ ASSERT(callee != callLinkInfo->callee.get());
+
+ bool shouldLink = false;
+ CodeBlock* calleeCodeBlock = 0;
+ MacroAssemblerCodePtr codePtr;
+
+ if (executable == callLinkInfo->callee.get()->executable()
+ && structure == callLinkInfo->callee.get()->structure()) {
+
+ shouldLink = true;
+
+ ASSERT(executable->hasJITCodeForCall());
+ codePtr = executable->generatedJITCodeForCall().addressForCall();
+ if (!callee->executable()->isHostFunction()) {
+ calleeCodeBlock = &jsCast<FunctionExecutable*>(executable)->generatedBytecodeForCall();
+ if (callFrame->argumentCountIncludingThis() < static_cast<size_t>(calleeCodeBlock->numParameters())) {
+ shouldLink = false;
+ codePtr = executable->generatedJITCodeWithArityCheckFor(CodeForCall);
+ }
+ }
+ } else if (callee->isHostFunction())
+ codePtr = executable->generatedJITCodeForCall().addressForCall();
+ else {
+ // Need to clear the code block before compilation, because compilation can GC.
+ callFrame->setCodeBlock(0);
+
+ FunctionExecutable* functionExecutable = jsCast<FunctionExecutable*>(executable);
+ JSScope* scopeChain = callee->scope();
+ JSObject* error = functionExecutable->compileFor(callFrame, scopeChain, CodeForCall);
+ if (error) {
+ callFrame->vm().exception = error;
+ return 0;
+ }
+
+ codePtr = functionExecutable->generatedJITCodeWithArityCheckFor(CodeForCall);
+ }
+
+ if (shouldLink) {
+ ASSERT(codePtr);
+ JIT::compileClosureCall(vm, callLinkInfo, callerCodeBlock, calleeCodeBlock, structure, executable, codePtr);
+ callLinkInfo->hasSeenClosure = true;
+ } else
+ JIT::linkSlowCall(callerCodeBlock, callLinkInfo);
+
+ return codePtr.executableAddress();
+}
+
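vm_lazyLinkClosureCall links a specialized closure-call stub only when the incoming callee matches the previously seen one on both executable and structure (and passes the arity check); otherwise it leaves the site on the slow path. A standalone sketch of just that decision (simplified types, hypothetical names):

    struct CalleeIdentity {
        const void* executable { nullptr };
        const void* structure { nullptr };
    };

    enum class LinkDecision { CompileClosureStub, LinkSlowCall };

    static LinkDecision decideClosureLink(const CalleeIdentity& cached,
                                          const CalleeIdentity& incoming,
                                          bool arityMatches)
    {
        if (cached.executable == incoming.executable
            && cached.structure == incoming.structure
            && arityMatches)
            return LinkDecision::CompileClosureStub; // JIT::compileClosureCall path
        return LinkDecision::LinkSlowCall;           // JIT::linkSlowCall path
    }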
DEFINE_STUB_FUNCTION(void*, vm_lazyLinkConstruct)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2258,7 +2396,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_push_activation)
{
STUB_INIT_STACK_FRAME(stackFrame);
- JSActivation* activation = JSActivation::create(stackFrame.callFrame->globalData(), stackFrame.callFrame, stackFrame.callFrame->codeBlock());
+ JSActivation* activation = JSActivation::create(stackFrame.callFrame->vm(), stackFrame.callFrame, stackFrame.callFrame->codeBlock());
stackFrame.callFrame->setScope(activation);
return activation;
}
@@ -2277,7 +2415,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
ASSERT(callType != CallTypeJS);
if (callType != CallTypeHost) {
ASSERT(callType == CallTypeNone);
- return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createNotAFunctionError(callFrame->callerFrame(), callee));
+ ErrorWithExecAndCalleeFunctor functor = ErrorWithExecAndCalleeFunctor(createNotAFunctionError, callee);
+ return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS, functor);
}
EncodedJSValue returnValue;
@@ -2286,7 +2425,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
returnValue = callData.native.function(callFrame);
}
- if (stackFrame.globalData->exception)
+ if (stackFrame.vm->exception)
return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
return returnValue;
@@ -2296,7 +2435,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_arguments)
{
STUB_INIT_STACK_FRAME(stackFrame);
- Arguments* arguments = Arguments::create(*stackFrame.globalData, stackFrame.callFrame);
+ Arguments* arguments = Arguments::create(*stackFrame.vm, stackFrame.callFrame);
return JSValue::encode(JSValue(arguments));
}
@@ -2305,7 +2444,7 @@ DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
STUB_INIT_STACK_FRAME(stackFrame);
ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
- jsCast<JSActivation*>(stackFrame.args[0].jsValue())->tearOff(*stackFrame.globalData);
+ jsCast<JSActivation*>(stackFrame.args[0].jsValue())->tearOff(*stackFrame.vm);
}
DEFINE_STUB_FUNCTION(void, op_tear_off_arguments)
@@ -2326,7 +2465,7 @@ DEFINE_STUB_FUNCTION(void, op_profile_will_call)
{
STUB_INIT_STACK_FRAME(stackFrame);
- if (Profiler* profiler = stackFrame.globalData->enabledProfiler())
+ if (LegacyProfiler* profiler = stackFrame.vm->enabledProfiler())
profiler->willExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
}
@@ -2334,7 +2473,7 @@ DEFINE_STUB_FUNCTION(void, op_profile_did_call)
{
STUB_INIT_STACK_FRAME(stackFrame);
- if (Profiler* profiler = stackFrame.globalData->enabledProfiler())
+ if (LegacyProfiler* profiler = stackFrame.vm->enabledProfiler())
profiler->didExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
}
@@ -2403,7 +2542,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_construct_NotJSConstruct)
ASSERT(constructType != ConstructTypeJS);
if (constructType != ConstructTypeHost) {
ASSERT(constructType == ConstructTypeNone);
- return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createNotAConstructorError(callFrame->callerFrame(), callee));
+ ErrorWithExecAndCalleeFunctor functor = ErrorWithExecAndCalleeFunctor(createNotAConstructorError, callee);
+ return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS, functor);
}
EncodedJSValue returnValue;
@@ -2412,7 +2552,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_construct_NotJSConstruct)
returnValue = constructData.native.function(callFrame);
}
- if (stackFrame.globalData->exception)
+ if (stackFrame.vm->exception)
return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
return returnValue;
@@ -2465,7 +2605,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
// Attempt to optimize.
JITArrayMode arrayMode = jitArrayModeForStructure(object->structure());
if (arrayMode != byValInfo.arrayMode) {
- JIT::compileGetByVal(&callFrame->globalData(), callFrame->codeBlock(), &byValInfo, STUB_RETURN_ADDRESS, arrayMode);
+ JIT::compileGetByVal(&callFrame->vm(), callFrame->codeBlock(), &byValInfo, STUB_RETURN_ADDRESS, arrayMode);
didOptimize = true;
}
}
@@ -2558,7 +2698,7 @@ static void putByVal(CallFrame* callFrame, JSValue baseValue, JSValue subscript,
if (baseValue.isObject()) {
JSObject* object = asObject(baseValue);
if (object->canSetIndexQuickly(i))
- object->setIndexQuickly(callFrame->globalData(), i, value);
+ object->setIndexQuickly(callFrame->vm(), i, value);
else
object->methodTable()->putByIndex(object, callFrame, i, value, callFrame->codeBlock()->isStrictMode());
} else
@@ -2568,7 +2708,7 @@ static void putByVal(CallFrame* callFrame, JSValue baseValue, JSValue subscript,
baseValue.put(callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot);
} else {
Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
- if (!callFrame->globalData().exception) { // Don't put to an object if toString threw an exception.
+ if (!callFrame->vm().exception) { // Don't put to an object if toString threw an exception.
PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
baseValue.put(callFrame, property, value, slot);
}
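putByVal dispatches on the subscript: an integer index on an object that accepts a quick store takes the fast path, and everything else falls back to a generic property write (skipping the write if stringifying the subscript threw). A minimal sketch of that dispatch under simplified assumptions:

    #include <map>
    #include <string>

    struct SimpleObject {
        std::map<unsigned, int> indexedStorage;
        std::map<std::string, int> namedStorage;

        // Stand-in for canSetIndexQuickly(): dense stores always allowed here.
        bool canSetIndexQuickly(unsigned) const { return true; }
    };

    static void putByVal(SimpleObject& object, unsigned index, int value)
    {
        if (object.canSetIndexQuickly(index))
            object.indexedStorage[index] = value;               // fast path
        else
            object.namedStorage[std::to_string(index)] = value; // generic path
    }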
@@ -2599,7 +2739,7 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val)
// Attempt to optimize.
JITArrayMode arrayMode = jitArrayModeForStructure(object->structure());
if (arrayMode != byValInfo.arrayMode) {
- JIT::compilePutByVal(&callFrame->globalData(), callFrame->codeBlock(), &byValInfo, STUB_RETURN_ADDRESS, arrayMode);
+ JIT::compilePutByVal(&callFrame->vm(), callFrame->codeBlock(), &byValInfo, STUB_RETURN_ADDRESS, arrayMode);
didOptimize = true;
}
}
@@ -2726,21 +2866,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base_strict_put)
VM_THROW_EXCEPTION();
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_ensure_property_exists)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- JSValue base = stackFrame.callFrame->r(stackFrame.args[0].int32()).jsValue();
- JSObject* object = asObject(base);
- PropertySlot slot(object);
- ASSERT(stackFrame.callFrame->codeBlock()->isStrictMode());
- if (!object->getPropertySlot(stackFrame.callFrame, stackFrame.args[1].identifier(), slot)) {
- stackFrame.globalData->exception = createErrorForInvalidGlobalAssignment(stackFrame.callFrame, stackFrame.args[1].identifier().string());
- VM_THROW_EXCEPTION();
- }
-
- return JSValue::encode(base);
-}
-
DEFINE_STUB_FUNCTION(EncodedJSValue, op_div)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2757,7 +2882,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_div)
return JSValue::encode(result);
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_dec)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_dec)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2843,21 +2968,6 @@ DEFINE_STUB_FUNCTION(int, op_jtrue)
return result;
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_inc)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue v = stackFrame.args[0].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- double number = v.toNumber(callFrame);
- CHECK_FOR_EXCEPTION_AT_END();
-
- callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(number + 1);
- return JSValue::encode(jsNumber(number));
-}
-
DEFINE_STUB_FUNCTION(int, op_eq)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2969,7 +3079,7 @@ DEFINE_STUB_FUNCTION(int, op_eq_strings)
return string1->value(stackFrame.callFrame) == string2->value(stackFrame.callFrame);
#else
UNUSED_PARAM(args);
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
return 0;
#endif
}
@@ -3061,21 +3171,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_mod)
return JSValue::encode(result);
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_dec)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue v = stackFrame.args[0].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- double number = v.toNumber(callFrame);
- CHECK_FOR_EXCEPTION_AT_END();
-
- callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(number - 1);
- return JSValue::encode(jsNumber(number));
-}
-
DEFINE_STUB_FUNCTION(EncodedJSValue, op_urshift)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -3111,11 +3206,11 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_regexp)
RegExp* regExp = stackFrame.args[0].regExp();
if (!regExp->isValid()) {
- stackFrame.globalData->exception = createSyntaxError(callFrame, "Invalid flags supplied to RegExp constructor.");
+ stackFrame.vm->exception = createSyntaxError(callFrame, "Invalid flags supplied to RegExp constructor.");
VM_THROW_EXCEPTION();
}
- return RegExpObject::create(*stackFrame.globalData, stackFrame.callFrame->lexicalGlobalObject(), stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), regExp);
+ return RegExpObject::create(*stackFrame.vm, stackFrame.callFrame->lexicalGlobalObject(), stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), regExp);
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitor)
@@ -3150,7 +3245,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_eval)
return JSValue::encode(JSValue());
JSValue result = eval(callFrame);
- if (stackFrame.globalData->exception)
+ if (stackFrame.vm->exception)
return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
return JSValue::encode(result);
@@ -3159,7 +3254,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_eval)
DEFINE_STUB_FUNCTION(void*, op_throw)
{
STUB_INIT_STACK_FRAME(stackFrame);
- ExceptionHandler handler = jitThrow(stackFrame.globalData, stackFrame.callFrame, stackFrame.args[0].jsValue(), STUB_RETURN_ADDRESS);
+ ExceptionHandler handler = jitThrow(stackFrame.vm, stackFrame.callFrame, stackFrame.args[0].jsValue(), STUB_RETURN_ADDRESS);
STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
return handler.callFrame;
}
@@ -3265,7 +3360,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_nstricteq)
return JSValue::encode(jsBoolean(result));
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_jsnumber)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_number)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -3285,7 +3380,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_in)
JSValue baseVal = stackFrame.args[1].jsValue();
if (!baseVal.isObject()) {
- stackFrame.globalData->exception = createInvalidParamError(stackFrame.callFrame, "in", baseVal);
+ stackFrame.vm->exception = createInvalidParameterError(stackFrame.callFrame, "in", baseVal);
VM_THROW_EXCEPTION();
}
@@ -3314,19 +3409,6 @@ DEFINE_STUB_FUNCTION(void, op_push_name_scope)
callFrame->setScope(scope);
}
-DEFINE_STUB_FUNCTION(void, op_jmp_scopes)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- unsigned count = stackFrame.args[0].int32();
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSScope* tmp = callFrame->scope();
- while (count--)
- tmp = tmp->next();
- callFrame->setScope(tmp);
-}
-
DEFINE_STUB_FUNCTION(void, op_put_by_index)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -3420,7 +3502,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_val)
}
if (!result && callFrame->codeBlock()->isStrictMode())
- stackFrame.globalData->exception = createTypeError(stackFrame.callFrame, "Unable to delete property.");
+ stackFrame.vm->exception = createTypeError(stackFrame.callFrame, "Unable to delete property.");
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(jsBoolean(result));
@@ -3444,9 +3526,9 @@ DEFINE_STUB_FUNCTION(void, op_put_getter_setter)
ASSERT(getter.isObject() || setter.isObject());
if (!getter.isUndefined())
- accessor->setGetter(callFrame->globalData(), asObject(getter));
+ accessor->setGetter(callFrame->vm(), asObject(getter));
if (!setter.isUndefined())
- accessor->setSetter(callFrame->globalData(), asObject(setter));
+ accessor->setSetter(callFrame->vm(), asObject(setter));
baseObj->putDirectAccessor(callFrame, stackFrame.args[1].identifier(), accessor, Accessor);
}
@@ -3455,11 +3537,11 @@ DEFINE_STUB_FUNCTION(void, op_throw_static_error)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- String message = stackFrame.args[0].jsValue().toString(callFrame)->value(callFrame);
+ String message = errorDescriptionForValue(callFrame, stackFrame.args[0].jsValue())->value(callFrame);
if (stackFrame.args[1].asInt32)
- stackFrame.globalData->exception = createReferenceError(callFrame, message);
+ stackFrame.vm->exception = createReferenceError(callFrame, message);
else
- stackFrame.globalData->exception = createTypeError(callFrame, message);
+ stackFrame.vm->exception = createTypeError(callFrame, message);
VM_THROW_EXCEPTION_AT_END();
}
@@ -3474,14 +3556,14 @@ DEFINE_STUB_FUNCTION(void, op_debug)
int lastLine = stackFrame.args[2].int32();
int column = stackFrame.args[3].int32();
- stackFrame.globalData->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID), firstLine, lastLine, column);
+ stackFrame.vm->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID), firstLine, lastLine, column);
}
DEFINE_STUB_FUNCTION(void*, vm_throw)
{
STUB_INIT_STACK_FRAME(stackFrame);
- JSGlobalData* globalData = stackFrame.globalData;
- ExceptionHandler handler = jitThrow(globalData, stackFrame.callFrame, globalData->exception, globalData->exceptionLocation);
+ VM* vm = stackFrame.vm;
+ ExceptionHandler handler = jitThrow(vm, stackFrame.callFrame, vm->exception, vm->exceptionLocation);
STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
return handler.callFrame;
}
@@ -3494,48 +3576,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, to_object)
return JSValue::encode(stackFrame.args[0].jsValue().toObject(callFrame));
}
-MacroAssemblerCodeRef JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerator generator)
-{
- CTIStubMap::AddResult entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef());
- if (entry.isNewEntry)
- entry.iterator->value = generator(globalData);
- return entry.iterator->value;
-}
-
-NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, NativeFunction constructor)
-{
- if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(function))
- return nativeExecutable;
-
- NativeExecutable* nativeExecutable = NativeExecutable::create(*globalData, JIT::compileCTINativeCall(globalData, function), function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), constructor, NoIntrinsic);
- weakAdd(*m_hostFunctionStubMap, function, PassWeak<NativeExecutable>(nativeExecutable));
- return nativeExecutable;
-}
-
-NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic)
-{
- if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(function))
- return nativeExecutable;
-
- MacroAssemblerCodeRef code;
- if (generator) {
- if (globalData->canUseJIT())
- code = generator(globalData);
- else
- code = MacroAssemblerCodeRef();
- } else
- code = JIT::compileCTINativeCall(globalData, function);
-
- NativeExecutable* nativeExecutable = NativeExecutable::create(*globalData, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), callHostFunctionAsConstructor, intrinsic);
- weakAdd(*m_hostFunctionStubMap, function, PassWeak<NativeExecutable>(nativeExecutable));
- return nativeExecutable;
-}
-
-void JITThunks::clearHostFunctionStubs()
-{
- m_hostFunctionStubMap.clear();
-}
-
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
index c03bc929e..51873507e 100644
--- a/Source/JavaScriptCore/jit/JITStubs.h
+++ b/Source/JavaScriptCore/jit/JITStubs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
* Copyright (C) Research In Motion Limited 2010. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,264 +36,257 @@
#include "MacroAssemblerCodeRef.h"
#include "Register.h"
#include "ResolveOperation.h"
-#include "ThunkGenerators.h"
-#include <wtf/HashMap.h>
namespace JSC {
#if ENABLE(JIT)
- struct StructureStubInfo;
-
- class ArrayAllocationProfile;
- class CodeBlock;
- class ExecutablePool;
- class FunctionExecutable;
- class Identifier;
- class JSGlobalData;
- class JSGlobalObject;
- class JSObject;
- class JSPropertyNameIterator;
- class JSStack;
- class JSValue;
- class JSValueEncodedAsPointer;
- class NativeExecutable;
- class Profiler;
- class PropertySlot;
- class PutPropertySlot;
- class RegExp;
- class Structure;
-
- template <typename T> class Weak;
-
- union JITStubArg {
- void* asPointer;
- EncodedJSValue asEncodedJSValue;
- int32_t asInt32;
-
- JSValue jsValue() { return JSValue::decode(asEncodedJSValue); }
- JSObject* jsObject() { return static_cast<JSObject*>(asPointer); }
- Register* reg() { return static_cast<Register*>(asPointer); }
- Identifier& identifier() { return *static_cast<Identifier*>(asPointer); }
- int32_t int32() { return asInt32; }
- CodeBlock* codeBlock() { return static_cast<CodeBlock*>(asPointer); }
- FunctionExecutable* function() { return static_cast<FunctionExecutable*>(asPointer); }
- RegExp* regExp() { return static_cast<RegExp*>(asPointer); }
- JSPropertyNameIterator* propertyNameIterator() { return static_cast<JSPropertyNameIterator*>(asPointer); }
- JSGlobalObject* globalObject() { return static_cast<JSGlobalObject*>(asPointer); }
- JSString* jsString() { return static_cast<JSString*>(asPointer); }
- Structure* structure() { return static_cast<Structure*>(asPointer); }
- ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
- ResolveOperations* resolveOperations() { return static_cast<ResolveOperations*>(asPointer); }
- PutToBaseOperation* putToBaseOperation() { return static_cast<PutToBaseOperation*>(asPointer); }
- ArrayAllocationProfile* arrayAllocationProfile() { return static_cast<ArrayAllocationProfile*>(asPointer); }
- };
+struct StructureStubInfo;
+
+class ArrayAllocationProfile;
+class CodeBlock;
+class ExecutablePool;
+class FunctionExecutable;
+class Identifier;
+class VM;
+class JSGlobalObject;
+class JSObject;
+class JSPropertyNameIterator;
+class JSStack;
+class JSValue;
+class JSValueEncodedAsPointer;
+class LegacyProfiler;
+class NativeExecutable;
+class PropertySlot;
+class PutPropertySlot;
+class RegExp;
+class Structure;
+
+template <typename T> class Weak;
+
+union JITStubArg {
+ void* asPointer;
+ EncodedJSValue asEncodedJSValue;
+ int32_t asInt32;
+
+ JSValue jsValue() { return JSValue::decode(asEncodedJSValue); }
+ JSObject* jsObject() { return static_cast<JSObject*>(asPointer); }
+ Register* reg() { return static_cast<Register*>(asPointer); }
+ Identifier& identifier() { return *static_cast<Identifier*>(asPointer); }
+ int32_t int32() { return asInt32; }
+ CodeBlock* codeBlock() { return static_cast<CodeBlock*>(asPointer); }
+ FunctionExecutable* function() { return static_cast<FunctionExecutable*>(asPointer); }
+ RegExp* regExp() { return static_cast<RegExp*>(asPointer); }
+ JSPropertyNameIterator* propertyNameIterator() { return static_cast<JSPropertyNameIterator*>(asPointer); }
+ JSGlobalObject* globalObject() { return static_cast<JSGlobalObject*>(asPointer); }
+ JSString* jsString() { return static_cast<JSString*>(asPointer); }
+ Structure* structure() { return static_cast<Structure*>(asPointer); }
+ ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
+ ResolveOperations* resolveOperations() { return static_cast<ResolveOperations*>(asPointer); }
+ PutToBaseOperation* putToBaseOperation() { return static_cast<PutToBaseOperation*>(asPointer); }
+ ArrayAllocationProfile* arrayAllocationProfile() { return static_cast<ArrayAllocationProfile*>(asPointer); }
+};
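JITStubArg packs each stub argument into one untagged union slot; no runtime tag is needed because the stub knows statically which accessor applies to each argument index. A minimal sketch of the same idea (StubArg is illustrative, not the real layout):

    #include <cstdint>

    union StubArg {
        void* asPointer;
        int64_t asEncodedValue;
        int32_t asInt32;

        int32_t int32() const { return asInt32; }
        template<typename T> T* pointer() const { return static_cast<T*>(asPointer); }
    };

    // Usage: if the caller wrote args[0].asInt32, the stub reads args[0].int32();
    // reading a member other than the one last written would be undefined.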
- struct TrampolineStructure {
- MacroAssemblerCodePtr ctiStringLengthTrampoline;
- MacroAssemblerCodePtr ctiVirtualCallLink;
- MacroAssemblerCodePtr ctiVirtualConstructLink;
- MacroAssemblerCodePtr ctiVirtualCall;
- MacroAssemblerCodePtr ctiVirtualConstruct;
- MacroAssemblerCodePtr ctiNativeCall;
- MacroAssemblerCodePtr ctiNativeConstruct;
- };
-
#if !OS(WINDOWS) && CPU(X86_64)
- struct JITStackFrame {
- void* reserved; // Unused
- JITStubArg args[6];
- void* padding[2]; // Maintain 32-byte stack alignment (possibly overkill).
-
- void* code;
- JSStack* stack;
- CallFrame* callFrame;
- void* unused1;
- void* unused2;
- JSGlobalData* globalData;
-
- void* savedRBX;
- void* savedR15;
- void* savedR14;
- void* savedR13;
- void* savedR12;
- void* savedRBP;
- void* savedRIP;
-
- // When JIT code makes a call, it pushes its return address just below the rest of the stack.
- ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
- };
+struct JITStackFrame {
+ void* reserved; // Unused
+ JITStubArg args[6];
+ void* padding[2]; // Maintain 32-byte stack alignment (possibly overkill).
+
+ void* code;
+ JSStack* stack;
+ CallFrame* callFrame;
+ void* unused1;
+ void* unused2;
+ VM* vm;
+
+ void* savedRBX;
+ void* savedR15;
+ void* savedR14;
+ void* savedR13;
+ void* savedR12;
+ void* savedRBP;
+ void* savedRIP;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
+};
#elif OS(WINDOWS) && CPU(X86_64)
- struct JITStackFrame {
- void* shadow[4]; // Shadow space reserved for a callee's parameters home addresses
- void* reserved; // Unused, also maintains the 16-bytes stack alignment
- JITStubArg args[6];
-
- void* savedRBX;
- void* savedR15;
- void* savedR14;
- void* savedR13;
- void* savedR12;
- void* savedRBP;
- void* savedRIP;
-
- // Home addresses for our register passed parameters
- // http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
- void* code;
- JSStack* stack;
- CallFrame* callFrame;
- void* unused1;
-
- // Passed on the stack
- void* unused2;
- JSGlobalData* globalData;
-
- // When JIT code makes a call, it pushes its return address just below the rest of the stack.
- ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
- };
+struct JITStackFrame {
+ void* shadow[4]; // Shadow space reserved for a callee's parameter home addresses
+ void* reserved; // Unused, also maintains the 16-bytes stack alignment
+ JITStubArg args[6];
+
+ void* savedRBX;
+ void* savedR15;
+ void* savedR14;
+ void* savedR13;
+ void* savedR12;
+ void* savedRBP;
+ void* savedRIP;
+
+ // Home addresses for our register-passed parameters
+ // http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
+ void* code;
+ JSStack* stack;
+ CallFrame* callFrame;
+ void* unused1;
+
+ // Passed on the stack
+ void* unused2;
+ VM* vm;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
+};
#elif CPU(X86)
#if COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
#pragma pack(push)
#pragma pack(4)
#endif // COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
- struct JITStackFrame {
- void* reserved; // Unused
- JITStubArg args[6];
+struct JITStackFrame {
+ void* reserved; // Unused
+ JITStubArg args[6];
#if USE(JSVALUE32_64)
- void* padding[2]; // Maintain 16-byte stack alignment.
+ void* padding[2]; // Maintain 16-byte stack alignment.
#endif
- void* savedEBX;
- void* savedEDI;
- void* savedESI;
- void* savedEBP;
- void* savedEIP;
-
- void* code;
- JSStack* stack;
- CallFrame* callFrame;
- void* unused1;
- void* unused2;
- JSGlobalData* globalData;
+ void* savedEBX;
+ void* savedEDI;
+ void* savedESI;
+ void* savedEBP;
+ void* savedEIP;
+
+ void* code;
+ JSStack* stack;
+ CallFrame* callFrame;
+ void* unused1;
+ void* unused2;
+ VM* vm;
- // When JIT code makes a call, it pushes its return address just below the rest of the stack.
- ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
- };
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
+};
#if COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
#pragma pack(pop)
#endif // COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
#elif CPU(ARM_THUMB2)
- struct JITStackFrame {
- JITStubArg reserved; // Unused
- JITStubArg args[6];
-
- ReturnAddressPtr thunkReturnAddress;
-
- void* preservedReturnAddress;
- void* preservedR4;
- void* preservedR5;
- void* preservedR6;
- void* preservedR7;
- void* preservedR8;
- void* preservedR9;
- void* preservedR10;
- void* preservedR11;
-
- // These arguments passed in r1..r3 (r0 contained the entry code pointed, which is not preserved)
- JSStack* stack;
- CallFrame* callFrame;
-
- // These arguments passed on the stack.
- void* unused1;
- JSGlobalData* globalData;
+struct JITStackFrame {
+ JITStubArg reserved; // Unused
+ JITStubArg args[6];
+
+ ReturnAddressPtr thunkReturnAddress;
+
+ void* preservedReturnAddress;
+ void* preservedR4;
+ void* preservedR5;
+ void* preservedR6;
+ void* preservedR7;
+ void* preservedR8;
+ void* preservedR9;
+ void* preservedR10;
+ void* preservedR11;
+
+ // These arguments are passed in r1..r3 (r0 contained the entry code pointer, which is not preserved)
+ JSStack* stack;
+ CallFrame* callFrame;
+
+ // These arguments are passed on the stack.
+ void* unused1;
+ VM* vm;
- ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
- };
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+};
#elif CPU(ARM_TRADITIONAL)
#if COMPILER(MSVC)
#pragma pack(push)
#pragma pack(4)
#endif // COMPILER(MSVC)
- struct JITStackFrame {
- JITStubArg padding; // Unused
- JITStubArg args[7];
-
- ReturnAddressPtr thunkReturnAddress;
-
- void* preservedR4;
- void* preservedR5;
- void* preservedR6;
- void* preservedR8;
- void* preservedR9;
- void* preservedR10;
- void* preservedR11;
- void* preservedLink;
-
- JSStack* stack;
- CallFrame* callFrame;
- void* unused1;
-
- // These arguments passed on the stack.
- void* unused2;
- JSGlobalData* globalData;
-
- // When JIT code makes a call, it pushes its return address just below the rest of the stack.
- ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
- };
+struct JITStackFrame {
+ JITStubArg padding; // Unused
+ JITStubArg args[7];
+
+ ReturnAddressPtr thunkReturnAddress;
+
+ void* preservedR4;
+ void* preservedR5;
+ void* preservedR6;
+ void* preservedR8;
+ void* preservedR9;
+ void* preservedR10;
+ void* preservedR11;
+ void* preservedLink;
+
+ JSStack* stack;
+ CallFrame* callFrame;
+ void* unused1;
+
+ // These arguments are passed on the stack.
+ void* unused2;
+ VM* vm;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+};
#if COMPILER(MSVC)
#pragma pack(pop)
#endif // COMPILER(MSVC)
#elif CPU(MIPS)
- struct JITStackFrame {
- JITStubArg reserved; // Unused
- JITStubArg args[6];
+struct JITStackFrame {
+ JITStubArg reserved; // Unused
+ JITStubArg args[6];
#if USE(JSVALUE32_64)
- void* padding; // Make the overall stack length 8-byte aligned.
+ void* padding; // Make the overall stack length 8-byte aligned.
#endif
- void* preservedGP; // store GP when using PIC code
- void* preservedS0;
- void* preservedS1;
- void* preservedS2;
- void* preservedS3;
- void* preservedS4;
- void* preservedReturnAddress;
+ void* preservedGP; // store GP when using PIC code
+ void* preservedS0;
+ void* preservedS1;
+ void* preservedS2;
+ void* preservedS3;
+ void* preservedS4;
+ void* preservedReturnAddress;
- ReturnAddressPtr thunkReturnAddress;
+ ReturnAddressPtr thunkReturnAddress;
- // These arguments passed in a1..a3 (a0 contained the entry code pointed, which is not preserved)
- JSStack* stack;
- CallFrame* callFrame;
- void* unused1;
+ // These arguments are passed in a1..a3 (a0 contained the entry code pointer, which is not preserved)
+ JSStack* stack;
+ CallFrame* callFrame;
+ void* unused1;
- // These arguments passed on the stack.
- void* unused2;
- JSGlobalData* globalData;
+ // These arguments are passed on the stack.
+ void* unused2;
+ VM* vm;
- ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
- };
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+};
#elif CPU(SH4)
- struct JITStackFrame {
- JITStubArg padding; // Unused
- JITStubArg args[6];
-
- ReturnAddressPtr thunkReturnAddress;
- void* savedR10;
- void* savedR11;
- void* savedR13;
- void* savedRPR;
- void* savedR14;
- void* savedTimeoutReg;
-
- JSStack* stack;
- CallFrame* callFrame;
- JSValue* exception;
- void* unused1;
- JSGlobalData* globalData;
-
- ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
- };
+struct JITStackFrame {
+ JITStubArg padding; // Unused
+ JITStubArg args[6];
+
+ ReturnAddressPtr thunkReturnAddress;
+
+ void* savedR8;
+ void* savedR9;
+ void* savedR10;
+ void* savedR11;
+ void* savedR13;
+ void* savedRPR;
+ void* savedR14;
+
+ // These arguments are passed in r5, r6 and r7.
+ JSStack* stack;
+ CallFrame* callFrame;
+ JSValue* exception;
+
+ // These arguments are passed on the stack.
+ void* unused1;
+ VM* vm;
+
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+};
#else
#error "JITStackFrame not defined for this platform."
#endif
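Each JITStackFrame variant above must byte-for-byte mirror what that platform's hand-written trampoline pushed, so plain C++ member access can read machine-stack slots; returnAddressSlot() works because the call instruction pushed its return address one word below the frame. A sketch of that trick under simplified assumptions (seven saved registers, as in the x86-64 layout):

    struct MirroredFrame {
        void* args[6];
        void* savedRegisters[7];

        // One machine word below `this` is where the call pushed the return
        // address, mirroring JITStackFrame::returnAddressSlot().
        void** returnAddressSlot() { return reinterpret_cast<void**>(this) - 1; }
    };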
@@ -304,208 +297,160 @@ namespace JSC {
#define STUB_ARGS (args)
#if CPU(X86)
- #if COMPILER(MSVC)
- #define JIT_STUB __fastcall
- #elif COMPILER(GCC)
- #define JIT_STUB __attribute__ ((fastcall))
- #elif COMPILER(SUNCC)
- #define JIT_STUB
- #else
- #error "JIT_STUB function calls require fastcall conventions on x86, add appropriate directive/attribute here for your compiler!"
- #endif
+#if COMPILER(MSVC)
+#define JIT_STUB __fastcall
+#elif COMPILER(GCC)
+#define JIT_STUB __attribute__ ((fastcall))
+#elif COMPILER(SUNCC)
+#define JIT_STUB
+#else
+#error "JIT_STUB function calls require fastcall conventions on x86, add appropriate directive/attribute here for your compiler!"
+#endif
#else
- #define JIT_STUB
+#define JIT_STUB
#endif
- extern "C" void ctiVMThrowTrampoline();
- extern "C" void ctiOpThrowNotCaught();
- extern "C" EncodedJSValue ctiTrampoline(void* code, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, JSGlobalData*);
+extern "C" void ctiVMThrowTrampoline();
+extern "C" void ctiOpThrowNotCaught();
+extern "C" EncodedJSValue ctiTrampoline(void* code, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, VM*);
#if ENABLE(DFG_JIT)
- extern "C" void ctiTrampolineEnd();
+extern "C" void ctiTrampolineEnd();
- inline bool returnAddressIsInCtiTrampoline(ReturnAddressPtr returnAddress)
- {
- return returnAddress.value() >= bitwise_cast<void*>(&ctiTrampoline)
- && returnAddress.value() < bitwise_cast<void*>(&ctiTrampolineEnd);
- }
+inline bool returnAddressIsInCtiTrampoline(ReturnAddressPtr returnAddress)
+{
+ return returnAddress.value() >= bitwise_cast<void*>(&ctiTrampoline)
+ && returnAddress.value() < bitwise_cast<void*>(&ctiTrampolineEnd);
+}
#endif
- class JITThunks {
- public:
- JITThunks(JSGlobalData*);
- ~JITThunks();
-
- static void tryCacheGetByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&, StructureStubInfo* stubInfo);
- static void tryCachePutByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot&, StructureStubInfo* stubInfo, bool direct);
-
- MacroAssemblerCodePtr ctiStringLengthTrampoline() { return m_trampolineStructure.ctiStringLengthTrampoline; }
- MacroAssemblerCodePtr ctiVirtualCallLink() { return m_trampolineStructure.ctiVirtualCallLink; }
- MacroAssemblerCodePtr ctiVirtualConstructLink() { return m_trampolineStructure.ctiVirtualConstructLink; }
- MacroAssemblerCodePtr ctiVirtualCall() { return m_trampolineStructure.ctiVirtualCall; }
- MacroAssemblerCodePtr ctiVirtualConstruct() { return m_trampolineStructure.ctiVirtualConstruct; }
- MacroAssemblerCodePtr ctiNativeCall()
- {
-#if ENABLE(LLINT)
- if (!m_executableMemory)
- return MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_call_trampoline);
-#endif
- return m_trampolineStructure.ctiNativeCall;
- }
- MacroAssemblerCodePtr ctiNativeConstruct()
- {
-#if ENABLE(LLINT)
- if (!m_executableMemory)
- return MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_construct_trampoline);
-#endif
- return m_trampolineStructure.ctiNativeConstruct;
- }
-
- MacroAssemblerCodeRef ctiStub(JSGlobalData*, ThunkGenerator);
-
- NativeExecutable* hostFunctionStub(JSGlobalData*, NativeFunction, NativeFunction constructor);
- NativeExecutable* hostFunctionStub(JSGlobalData*, NativeFunction, ThunkGenerator, Intrinsic);
-
- void clearHostFunctionStubs();
-
- private:
- typedef HashMap<ThunkGenerator, MacroAssemblerCodeRef> CTIStubMap;
- CTIStubMap m_ctiStubMap;
- typedef HashMap<NativeFunction, Weak<NativeExecutable> > HostFunctionStubMap;
- OwnPtr<HostFunctionStubMap> m_hostFunctionStubMap;
- RefPtr<ExecutableMemoryHandle> m_executableMemory;
-
- TrampolineStructure m_trampolineStructure;
- };
+void performPlatformSpecificJITAssertions(VM*);
extern "C" {
- EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_bitand(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_bitor(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_bitxor(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_call_eval(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_check_has_instance(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_create_this(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_del_by_id(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_del_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_div(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_get_by_id(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_get_by_id_custom_stub(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_get_by_id_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_get_by_id_getter_stub(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_get_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_get_by_val_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_get_by_val_string(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_in(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_instanceof(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_is_boolean(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_is_function(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_is_number(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_is_object(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_is_string(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_is_undefined(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_less(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_lesseq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_greater(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_greatereq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_lshift(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_mod(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_mul(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_negate(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_not(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_nstricteq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_post_dec(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_post_inc(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_pre_dec(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_pre_inc(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_resolve(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_resolve_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_resolve_base_strict_put(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_ensure_property_exists(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_resolve_with_this(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_put_to_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_rshift(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_strcat(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_stricteq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_sub(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_to_jsnumber(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_to_primitive(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_typeof(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_urshift(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_to_object(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- JSObject* JIT_STUB cti_op_new_array_with_size(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- JSObject* JIT_STUB cti_op_new_array_buffer(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- JSObject* JIT_STUB cti_op_new_func(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- JSObject* JIT_STUB cti_op_new_object(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_push_name_scope(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_push_with_scope(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- JSObject* JIT_STUB cti_op_put_by_id_transition_realloc(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- int JIT_STUB cti_op_eq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- int JIT_STUB cti_op_eq_strings(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- int JIT_STUB cti_op_jless(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- int JIT_STUB cti_op_jlesseq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- int JIT_STUB cti_op_jgreater(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- int JIT_STUB cti_op_jgreatereq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- int JIT_STUB cti_op_jtrue(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void* JIT_STUB cti_op_load_varargs(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- int JIT_STUB cti_timeout_check(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- int JIT_STUB cti_has_property(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_jmp_scopes(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_pop_scope(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_profile_did_call(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_profile_will_call(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_put_by_id(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_put_by_id_direct(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_put_by_id_direct_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_put_by_id_direct_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_put_by_val_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_put_getter_setter(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_init_global_const_check(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_throw_static_error(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_bitand(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_bitor(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_bitxor(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_call_eval(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_check_has_instance(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_create_this(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_del_by_id(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_del_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_div(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_get_by_id(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_get_by_id_custom_stub(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_get_by_id_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_get_by_id_getter_stub(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_get_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_get_by_val_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_get_by_val_string(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_in(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_instanceof(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_is_boolean(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_is_function(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_is_number(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_is_object(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_is_string(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_is_undefined(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_less(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_lesseq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_greater(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_greatereq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_lshift(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_mod(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_mul(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_negate(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_not(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_nstricteq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_dec(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_inc(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_resolve(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_resolve_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_resolve_base_strict_put(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_resolve_with_this(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_put_to_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_rshift(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_strcat(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_stricteq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_sub(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_to_number(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_to_primitive(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_typeof(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_op_urshift(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+EncodedJSValue JIT_STUB cti_to_object(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+JSObject* JIT_STUB cti_op_new_array_with_size(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+JSObject* JIT_STUB cti_op_new_array_buffer(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+JSObject* JIT_STUB cti_op_new_func(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+JSObject* JIT_STUB cti_op_new_object(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_push_name_scope(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_push_with_scope(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+JSObject* JIT_STUB cti_op_put_by_id_transition_realloc(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+int JIT_STUB cti_op_eq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+int JIT_STUB cti_op_eq_strings(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+int JIT_STUB cti_op_jless(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+int JIT_STUB cti_op_jlesseq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+int JIT_STUB cti_op_jgreater(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+int JIT_STUB cti_op_jgreatereq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+int JIT_STUB cti_op_jtrue(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void* JIT_STUB cti_op_load_varargs(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_handle_watchdog_timer(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+int JIT_STUB cti_has_property(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_pop_scope(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_profile_did_call(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_profile_will_call(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_put_by_id(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_put_by_id_direct(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_put_by_id_direct_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_put_by_id_direct_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_put_by_val_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_put_getter_setter(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_init_global_const_check(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_op_throw_static_error(STUB_ARGS_DECLARATION) WTF_INTERNAL;
#if ENABLE(DFG_JIT)
- void JIT_STUB cti_optimize(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void JIT_STUB cti_optimize(STUB_ARGS_DECLARATION) WTF_INTERNAL;
#endif
- void* JIT_STUB cti_op_call_arityCheck(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void* JIT_STUB cti_op_construct_arityCheck(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void* JIT_STUB cti_op_call_jitCompile(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void* JIT_STUB cti_op_construct_jitCompile(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void* JIT_STUB cti_op_switch_char(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void* JIT_STUB cti_op_throw(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void* JIT_STUB cti_stack_check(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void* JIT_STUB cti_vm_lazyLinkConstruct(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void* JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION) REFERENCED_FROM_ASM WTF_INTERNAL;
+void* JIT_STUB cti_op_call_arityCheck(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void* JIT_STUB cti_op_construct_arityCheck(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void* JIT_STUB cti_op_call_jitCompile(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void* JIT_STUB cti_op_construct_jitCompile(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void* JIT_STUB cti_op_switch_char(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void* JIT_STUB cti_op_throw(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void* JIT_STUB cti_stack_check(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void* JIT_STUB cti_vm_lazyLinkClosureCall(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void* JIT_STUB cti_vm_lazyLinkConstruct(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+void* JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION) REFERENCED_FROM_ASM WTF_INTERNAL;
} // extern "C"
#elif ENABLE(LLINT_C_LOOP)
struct JITStackFrame {
- JSGlobalData* globalData;
+ VM* vm;
};
#endif // ENABLE(LLINT_C_LOOP)
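Each cti_* declaration above is a C++ slow-path entry point that JIT-generated machine code calls directly; the return type is whatever the generated caller expects back in the return register(s): an EncodedJSValue, a raw code pointer to jump to, or nothing. A heavily simplified sketch of the pattern follows; the real STUB_ARGS_DECLARATION macro and JITStackFrame layout are platform-specific, so the struct and names here are illustrative only.

    // Illustrative stand-in for the platform-specific stub ABI.
    struct StackFrameSketch {
        void* args[6];   // operands the JIT spilled before calling out
        void* callFrame; // the current CallFrame
        void* vm;        // the owning VM
    };

    // A hypothetical slow-path stub: decode operands, do the generic work,
    // and hand the result back in the return register.
    extern "C" long long cti_sketch_op_add(StackFrameSketch* frame)
    {
        long long lhs = reinterpret_cast<long long>(frame->args[0]);
        long long rhs = reinterpret_cast<long long>(frame->args[1]);
        return lhs + rhs; // real stubs return an EncodedJSValue
    }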
diff --git a/Source/JavaScriptCore/jit/JITThunks.cpp b/Source/JavaScriptCore/jit/JITThunks.cpp
new file mode 100644
index 000000000..e11774be0
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITThunks.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITThunks.h"
+
+#if ENABLE(JIT)
+
+#include "Executable.h"
+#include "JIT.h"
+#include "VM.h"
+#include "Operations.h"
+
+namespace JSC {
+
+JITThunks::JITThunks()
+ : m_hostFunctionStubMap(adoptPtr(new HostFunctionStubMap))
+{
+}
+
+JITThunks::~JITThunks()
+{
+}
+
+MacroAssemblerCodePtr JITThunks::ctiNativeCall(VM* vm)
+{
+#if ENABLE(LLINT)
+ if (!vm->canUseJIT())
+ return MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_call_trampoline);
+#endif
+ return ctiStub(vm, nativeCallGenerator).code();
+}
+
+MacroAssemblerCodePtr JITThunks::ctiNativeConstruct(VM* vm)
+{
+#if ENABLE(LLINT)
+ if (!vm->canUseJIT())
+ return MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_construct_trampoline);
+#endif
+ return ctiStub(vm, nativeConstructGenerator).code();
+}
+
+MacroAssemblerCodeRef JITThunks::ctiStub(VM* vm, ThunkGenerator generator)
+{
+ CTIStubMap::AddResult entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef());
+ if (entry.isNewEntry)
+ entry.iterator->value = generator(vm);
+ return entry.iterator->value;
+}
+
+NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, NativeFunction constructor)
+{
+ if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(std::make_pair(function, constructor)))
+ return nativeExecutable;
+
+ NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, JIT::compileCTINativeCall(vm, function), function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), constructor, NoIntrinsic);
+ weakAdd(*m_hostFunctionStubMap, std::make_pair(function, constructor), PassWeak<NativeExecutable>(nativeExecutable));
+ return nativeExecutable;
+}
+
+NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic)
+{
+ if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(std::make_pair(function, &callHostFunctionAsConstructor)))
+ return nativeExecutable;
+
+ MacroAssemblerCodeRef code;
+ if (generator) {
+ if (vm->canUseJIT())
+ code = generator(vm);
+ else
+ code = MacroAssemblerCodeRef();
+ } else
+ code = JIT::compileCTINativeCall(vm, function);
+
+ NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), callHostFunctionAsConstructor, intrinsic);
+ weakAdd(*m_hostFunctionStubMap, std::make_pair(function, &callHostFunctionAsConstructor), PassWeak<NativeExecutable>(nativeExecutable));
+ return nativeExecutable;
+}
+
+void JITThunks::clearHostFunctionStubs()
+{
+ m_hostFunctionStubMap.clear();
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
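JITThunks::ctiStub above leans on HashMap::add returning an AddResult whose isNewEntry flag says whether the slot was freshly created, so each ThunkGenerator runs at most once per VM. The same lazy-memoization shape with standard containers, as a sketch (std::unordered_map stands in for WTF::HashMap, and CodeRef/Generator for MacroAssemblerCodeRef/ThunkGenerator):

    #include <unordered_map>

    using CodeRef = const void*;     // stand-in for MacroAssemblerCodeRef
    using Generator = CodeRef (*)(); // stand-in for ThunkGenerator

    // Lazily generate and cache one code blob per generator, mirroring the
    // add-then-fill-if-new pattern in JITThunks::ctiStub.
    CodeRef cachedStub(std::unordered_map<Generator, CodeRef>& map, Generator generator)
    {
        auto result = map.insert({generator, nullptr}); // like HashMap::add
        if (result.second)                              // like AddResult::isNewEntry
            result.first->second = generator();         // run the generator once
        return result.first->second;
    }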
diff --git a/Source/JavaScriptCore/jit/JITThunks.h b/Source/JavaScriptCore/jit/JITThunks.h
new file mode 100644
index 000000000..769583b1d
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITThunks.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITThunks_h
+#define JITThunks_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+#include "CallData.h"
+#include "Intrinsic.h"
+#include "LowLevelInterpreter.h"
+#include "MacroAssemblerCodeRef.h"
+#include "ThunkGenerator.h"
+#include "Weak.h"
+#include "WeakInlines.h"
+#include <wtf/HashMap.h>
+#include <wtf/OwnPtr.h>
+#include <wtf/RefPtr.h>
+
+namespace JSC {
+
+class VM;
+class NativeExecutable;
+
+class JITThunks {
+public:
+ JITThunks();
+ ~JITThunks();
+
+ MacroAssemblerCodePtr ctiNativeCall(VM*);
+ MacroAssemblerCodePtr ctiNativeConstruct(VM*);
+
+ MacroAssemblerCodeRef ctiStub(VM*, ThunkGenerator);
+
+ NativeExecutable* hostFunctionStub(VM*, NativeFunction, NativeFunction constructor);
+ NativeExecutable* hostFunctionStub(VM*, NativeFunction, ThunkGenerator, Intrinsic);
+
+ void clearHostFunctionStubs();
+
+private:
+ typedef HashMap<ThunkGenerator, MacroAssemblerCodeRef> CTIStubMap;
+ CTIStubMap m_ctiStubMap;
+ typedef HashMap<std::pair<NativeFunction, NativeFunction>, Weak<NativeExecutable> > HostFunctionStubMap;
+ OwnPtr<HostFunctionStubMap> m_hostFunctionStubMap;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITThunks_h
+
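The header keys host function stubs by the (call, construct) NativeFunction pair and holds each NativeExecutable through a Weak<> handle, so the garbage collector may reclaim a stub nothing else references; the next request simply rebuilds it. A rough analogue with standard library types, as a sketch (Exec and the shared/weak pointers stand in for NativeExecutable and JSC's Weak<>):

    #include <map>
    #include <memory>
    #include <utility>

    struct Exec { /* stand-in for NativeExecutable */ };
    using Key = std::pair<void*, void*>; // the (call, construct) function pair

    // Weak caching: entries do not keep the executable alive, mirroring
    // HashMap<pair, Weak<NativeExecutable>> above.
    std::shared_ptr<Exec> getOrCreate(std::map<Key, std::weak_ptr<Exec>>& cache, const Key& key)
    {
        if (auto existing = cache[key].lock())
            return existing;                   // cache hit, object still alive
        auto fresh = std::make_shared<Exec>(); // rebuild after a miss or collection
        cache[key] = fresh;
        return fresh;
    }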
diff --git a/Source/JavaScriptCore/jit/JITWriteBarrier.h b/Source/JavaScriptCore/jit/JITWriteBarrier.h
index ee73b702f..9da1ea782 100644
--- a/Source/JavaScriptCore/jit/JITWriteBarrier.h
+++ b/Source/JavaScriptCore/jit/JITWriteBarrier.h
@@ -30,12 +30,13 @@
#include "MacroAssembler.h"
#include "SlotVisitor.h"
+#include "UnusedPointer.h"
#include "WriteBarrier.h"
namespace JSC {
class JSCell;
-class JSGlobalData;
+class VM;
// Needs to be even to appease some of the backends.
#define JITWriteBarrierFlag ((void*)2)
@@ -69,14 +70,14 @@ public:
}
void clear() { clear(0); }
- void clearToMaxUnsigned() { clear(reinterpret_cast<void*>(-1)); }
+ void clearToUnusedPointer() { clear(reinterpret_cast<void*>(unusedPointer)); }
protected:
JITWriteBarrierBase()
{
}
- void set(JSGlobalData&, CodeLocationDataLabelPtr location, JSCell* owner, JSCell* value)
+ void set(VM&, CodeLocationDataLabelPtr location, JSCell* owner, JSCell* value)
{
Heap::writeBarrier(owner, value);
m_location = location;
@@ -90,8 +91,7 @@ protected:
if (!m_location || m_location.executableAddress() == JITWriteBarrierFlag)
return 0;
void* result = static_cast<JSCell*>(MacroAssembler::readPointer(m_location));
- // We use -1 to indicate a "safe" empty value in the instruction stream
- if (result == (void*)-1)
+ if (result == reinterpret_cast<void*>(unusedPointer))
return 0;
return static_cast<JSCell*>(result);
}
@@ -116,15 +116,15 @@ public:
{
}
- void set(JSGlobalData& globalData, CodeLocationDataLabelPtr location, JSCell* owner, T* value)
+ void set(VM& vm, CodeLocationDataLabelPtr location, JSCell* owner, T* value)
{
validateCell(owner);
validateCell(value);
- JITWriteBarrierBase::set(globalData, location, owner, value);
+ JITWriteBarrierBase::set(vm, location, owner, value);
}
- void set(JSGlobalData& globalData, JSCell* owner, T* value)
+ void set(VM& vm, JSCell* owner, T* value)
{
- set(globalData, location(), owner, value);
+ set(vm, location(), owner, value);
}
T* get() const
{
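JITWriteBarrier now spells the empty-slot marker through the named unusedPointer constant from UnusedPointer.h instead of a bare -1, so the magic value is defined once and searchable. The idea in isolation, as a sketch (the concrete bit pattern below is an assumption for illustration, not quoted from UnusedPointer.h):

    #include <cstdint>

    // One distinctive, never-dereferenced bit pattern marks an empty pointer
    // slot patched into the instruction stream (value chosen for illustration).
    const uintptr_t unusedPointer = 0xd1e7beef;

    // Mirrors JITWriteBarrierBase::get(): the sentinel reads back as "no cell".
    inline void* cellOrNull(void* raw)
    {
        if (raw == reinterpret_cast<void*>(unusedPointer))
            return nullptr;
        return raw;
    }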
diff --git a/Source/JavaScriptCore/jit/JSInterfaceJIT.h b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
index 7afdc06dc..0cf7589dd 100644
--- a/Source/JavaScriptCore/jit/JSInterfaceJIT.h
+++ b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -29,11 +29,10 @@
#include "BytecodeConventions.h"
#include "JITCode.h"
#include "JITStubs.h"
+#include "JSCJSValue.h"
#include "JSStack.h"
#include "JSString.h"
-#include "JSValue.h"
#include "MacroAssembler.h"
-#include <wtf/AlwaysInline.h>
#include <wtf/Vector.h>
#if ENABLE(JIT)
@@ -67,7 +66,6 @@ namespace JSC {
static const RegisterID bucketCounterRegister = X86Registers::r10;
#endif
- static const RegisterID timeoutCheckRegister = X86Registers::r12;
static const RegisterID callFrameRegister = X86Registers::r13;
static const RegisterID tagTypeNumberRegister = X86Registers::r14;
static const RegisterID tagMaskRegister = X86Registers::r15;
@@ -118,7 +116,6 @@ namespace JSC {
// Update ctiTrampoline in JITStubs.cpp if these values are changed!
static const RegisterID callFrameRegister = ARMRegisters::r5;
- static const RegisterID timeoutCheckRegister = ARMRegisters::r6;
static const FPRegisterID fpRegT0 = ARMRegisters::d0;
static const FPRegisterID fpRegT1 = ARMRegisters::d1;
@@ -145,16 +142,18 @@ namespace JSC {
static const RegisterID regT3 = MIPSRegisters::s2;
static const RegisterID callFrameRegister = MIPSRegisters::s0;
- static const RegisterID timeoutCheckRegister = MIPSRegisters::s1;
static const FPRegisterID fpRegT0 = MIPSRegisters::f4;
static const FPRegisterID fpRegT1 = MIPSRegisters::f6;
static const FPRegisterID fpRegT2 = MIPSRegisters::f8;
static const FPRegisterID fpRegT3 = MIPSRegisters::f10;
#elif CPU(SH4)
- static const RegisterID timeoutCheckRegister = SH4Registers::r8;
static const RegisterID callFrameRegister = SH4Registers::fp;
+#if ENABLE(VALUE_PROFILER)
+ static const RegisterID bucketCounterRegister = SH4Registers::r8;
+#endif
+
static const RegisterID regT0 = SH4Registers::r0;
static const RegisterID regT1 = SH4Registers::r1;
static const RegisterID regT2 = SH4Registers::r2;
@@ -163,19 +162,17 @@ namespace JSC {
static const RegisterID regT5 = SH4Registers::r5;
static const RegisterID regT6 = SH4Registers::r6;
static const RegisterID regT7 = SH4Registers::r7;
- static const RegisterID firstArgumentRegister =regT4;
+ static const RegisterID firstArgumentRegister = regT4;
static const RegisterID returnValueRegister = SH4Registers::r0;
static const RegisterID cachedResultRegister = SH4Registers::r0;
- static const FPRegisterID fpRegT0 = SH4Registers::fr0;
- static const FPRegisterID fpRegT1 = SH4Registers::fr2;
- static const FPRegisterID fpRegT2 = SH4Registers::fr4;
- static const FPRegisterID fpRegT3 = SH4Registers::fr6;
- static const FPRegisterID fpRegT4 = SH4Registers::fr8;
- static const FPRegisterID fpRegT5 = SH4Registers::fr10;
- static const FPRegisterID fpRegT6 = SH4Registers::fr12;
- static const FPRegisterID fpRegT7 = SH4Registers::fr14;
+ static const FPRegisterID fpRegT0 = SH4Registers::dr0;
+ static const FPRegisterID fpRegT1 = SH4Registers::dr2;
+ static const FPRegisterID fpRegT2 = SH4Registers::dr4;
+ static const FPRegisterID fpRegT3 = SH4Registers::dr6;
+ static const FPRegisterID fpRegT4 = SH4Registers::dr8;
+ static const FPRegisterID fpRegT5 = SH4Registers::dr10;
#else
#error "JIT not supported on this platform."
#endif
@@ -197,11 +194,25 @@ namespace JSC {
#endif
#if USE(JSVALUE64)
+ Jump emitJumpIfNotJSCell(RegisterID);
Jump emitJumpIfImmediateNumber(RegisterID reg);
Jump emitJumpIfNotImmediateNumber(RegisterID reg);
void emitFastArithImmToInt(RegisterID reg);
+ void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
#endif
+ Jump emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType);
+
+ void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+ void emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
+ void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry);
+ void emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
+
+ void preserveReturnAddressAfterCall(RegisterID);
+ void restoreReturnAddressBeforeReturn(RegisterID);
+ void restoreReturnAddressBeforeReturn(Address);
+ void restoreArgumentReference();
+
inline Address payloadFor(int index, RegisterID base = callFrameRegister);
inline Address intPayloadFor(int index, RegisterID base = callFrameRegister);
inline Address intTagFor(int index, RegisterID base = callFrameRegister);
@@ -209,9 +220,6 @@ namespace JSC {
};
struct ThunkHelpers {
- static unsigned stringImplFlagsOffset() { return StringImpl::flagsOffset(); }
- static unsigned stringImpl8BitFlag() { return StringImpl::flagIs8Bit(); }
- static unsigned stringImplDataOffset() { return StringImpl::dataOffset(); }
static unsigned jsStringLengthOffset() { return OBJECT_OFFSETOF(JSString, m_length); }
static unsigned jsStringValueOffset() { return OBJECT_OFFSETOF(JSString, m_value); }
};
@@ -276,6 +284,11 @@ namespace JSC {
#endif
#if USE(JSVALUE64)
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotJSCell(RegisterID reg)
+ {
+ return branchTest64(NonZero, reg, tagMaskRegister);
+ }
+
ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfImmediateNumber(RegisterID reg)
{
return branchTest64(NonZero, reg, tagTypeNumberRegister);
@@ -316,6 +329,13 @@ namespace JSC {
{
}
+ // The operand is an int32_t and must already be zero-extended if the register is 64-bit.
+ ALWAYS_INLINE void JSInterfaceJIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ or64(tagTypeNumberRegister, dest);
+ }
#endif
#if USE(JSVALUE64)
@@ -337,12 +357,122 @@ namespace JSC {
}
#endif
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType type)
+ {
+ loadPtr(Address(baseReg, JSCell::structureOffset()), scratchReg);
+ return branch8(NotEqual, Address(scratchReg, Structure::typeInfoTypeOffset()), TrustedImm32(type));
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+ {
+ loadPtr(Address(from, entry * sizeof(Register)), to);
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
+ {
+#if USE(JSVALUE32_64)
+ storePtr(from, payloadFor(entry, callFrameRegister));
+#else
+ store64(from, addressFor(entry, callFrameRegister));
+#endif
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
+ {
+ storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
+ {
+#if USE(JSVALUE32_64)
+ store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
+ store32(from, payloadFor(entry, callFrameRegister));
+#else
+ store64(from, addressFor(entry, callFrameRegister));
+#endif
+ }
+
inline JSInterfaceJIT::Address JSInterfaceJIT::addressFor(int virtualRegisterIndex, RegisterID base)
{
ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)));
}
+#if CPU(ARM)
+
+ ALWAYS_INLINE void JSInterfaceJIT::preserveReturnAddressAfterCall(RegisterID reg)
+ {
+ move(linkRegister, reg);
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+ {
+ move(reg, linkRegister);
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(Address address)
+ {
+ loadPtr(address, linkRegister);
+ }
+#elif CPU(SH4)
+
+ ALWAYS_INLINE void JSInterfaceJIT::preserveReturnAddressAfterCall(RegisterID reg)
+ {
+ m_assembler.stspr(reg);
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+ {
+ m_assembler.ldspr(reg);
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(Address address)
+ {
+ loadPtrLinkReg(address);
+ }
+
+#elif CPU(MIPS)
+
+ ALWAYS_INLINE void JSInterfaceJIT::preserveReturnAddressAfterCall(RegisterID reg)
+ {
+ move(returnAddressRegister, reg);
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+ {
+ move(reg, returnAddressRegister);
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(Address address)
+ {
+ loadPtr(address, returnAddressRegister);
+ }
+
+#else // CPU(X86) || CPU(X86_64)
+
+ ALWAYS_INLINE void JSInterfaceJIT::preserveReturnAddressAfterCall(RegisterID reg)
+ {
+ pop(reg);
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+ {
+ push(reg);
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(Address address)
+ {
+ push(address);
+ }
+
+#endif
+
+ ALWAYS_INLINE void JSInterfaceJIT::restoreArgumentReference()
+ {
+ move(stackPointerRegister, firstArgumentRegister);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+ }
+
} // namespace JSC
#endif // ENABLE(JIT)
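The preserveReturnAddressAfterCall / restoreReturnAddressBeforeReturn pair added above papers over an architectural split: x86 keeps the return address on the machine stack (pop/push), while ARM, MIPS, and SH4 keep it in a link register that any call clobbers. Typical use inside a generated thunk, mirroring generateSlowCaseFor later in this patch (a sketch built from this file's API, not additional API):

    // Bracket a slow-path C call so the caller's return address survives on
    // every architecture; the returned Call is linked to its target later
    // through a LinkBuffer.
    static JSInterfaceJIT::Call emitBracketedSlowPathCall(JSInterfaceJIT& jit)
    {
        jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // pop on x86, LR move elsewhere
        jit.restoreArgumentReference();                            // arg0 = stack pointer
        JSInterfaceJIT::Call call = jit.call();
        jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3); // push back / reload LR
        return call;
    }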
diff --git a/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.h b/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.h
index b4f35c724..457cbb286 100644
--- a/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.h
+++ b/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.h
@@ -50,6 +50,13 @@ public:
{
}
+ MacroAssembler::Label sourceLabel() const
+ {
+ MacroAssembler::Label label;
+ label.m_label.m_offset = m_source;
+ return label;
+ }
+
void setDestination(MacroAssembler::Label destination)
{
m_destination = destination.m_label.m_offset;
diff --git a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
index 9c7fbce81..9a0e0a30e 100644
--- a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -55,10 +55,10 @@ namespace JSC {
m_failures.append(emitLoadJSCell(src, dst));
}
- void loadJSStringArgument(JSGlobalData& globalData, int argument, RegisterID dst)
+ void loadJSStringArgument(VM& vm, int argument, RegisterID dst)
{
loadCellArgument(argument, dst);
- m_failures.append(branchPtr(NotEqual, Address(dst, JSCell::structureOffset()), TrustedImmPtr(globalData.stringStructure.get())));
+ m_failures.append(branchPtr(NotEqual, Address(dst, JSCell::structureOffset()), TrustedImmPtr(vm.stringStructure.get())));
}
void loadInt32Argument(int argument, RegisterID dst, Jump& failTarget)
@@ -130,9 +130,9 @@ namespace JSC {
ret();
}
- MacroAssemblerCodeRef finalize(JSGlobalData& globalData, MacroAssemblerCodePtr fallback, const char* thunkKind)
+ MacroAssemblerCodeRef finalize(VM& vm, MacroAssemblerCodePtr fallback, const char* thunkKind)
{
- LinkBuffer patchBuffer(globalData, this, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(vm, this, GLOBAL_THUNK_ID);
patchBuffer.link(m_failures, CodeLocationLabel(fallback));
for (unsigned i = 0; i < m_calls.size(); i++)
patchBuffer.link(m_calls[i].first, m_calls[i].second);
@@ -145,6 +145,15 @@ namespace JSC {
{
m_calls.append(std::make_pair(call(), function));
}
+
+ void callDoubleToDoublePreservingReturn(FunctionPtr function)
+ {
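+ // On x86 the return address already sits on the machine stack and survives
+ // the helper call; on link-register targets the call would clobber LR, so
+ // it is stashed in regT3 for the duration of the call.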
+ if (!isX86())
+ preserveReturnAddressAfterCall(regT3);
+ callDoubleToDouble(function);
+ if (!isX86())
+ restoreReturnAddressBeforeReturn(regT3);
+ }
private:
diff --git a/Source/JavaScriptCore/jit/ThunkGenerator.h b/Source/JavaScriptCore/jit/ThunkGenerator.h
new file mode 100644
index 000000000..a9d7e04ee
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ThunkGenerator.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ThunkGenerator_h
+#define ThunkGenerator_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+namespace JSC {
+class VM;
+class MacroAssemblerCodeRef;
+
+typedef MacroAssemblerCodeRef (*ThunkGenerator)(VM*);
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // ThunkGenerator_h
+
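ThunkGenerator.h reduces a thunk generator to a plain function pointer from VM* to MacroAssemblerCodeRef, which is exactly what lets JITThunks key its stub cache on the generator itself. Usage shape, using only names from this patch (a sketch, not additional API):

    // charCodeAtThunkGenerator matches the ThunkGenerator signature, so its
    // address keys the cache; the generator body runs at most once per VM.
    MacroAssemblerCodeRef warmUpCharCodeAt(VM* vm, JITThunks& thunks)
    {
        return thunks.ctiStub(vm, charCodeAtThunkGenerator);
    }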
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
index cbfc1eb0f..9684df2d0 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.cpp
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,18 +27,399 @@
#include "ThunkGenerators.h"
#include "CodeBlock.h"
-#include <wtf/InlineASM.h>
+#include "Operations.h"
#include "SpecializedThunkJIT.h"
+#include <wtf/InlineASM.h>
+#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>
#if ENABLE(JIT)
namespace JSC {
-static void stringCharLoad(SpecializedThunkJIT& jit, JSGlobalData* globalData)
+static JSInterfaceJIT::Call generateSlowCaseFor(VM* vm, JSInterfaceJIT& jit)
+{
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT2, JSInterfaceJIT::regT2);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT2, JSStack::ScopeChain);
+
+ // Also initialize ReturnPC and CodeBlock, like a JS function would.
+ jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
+ jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
+ jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
+
+ jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
+ jit.restoreArgumentReference();
+ JSInterfaceJIT::Call callNotJSFunction = jit.call();
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::callFrameRegister);
+ jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
+ jit.ret();
+
+ return callNotJSFunction;
+}
+
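+// A "link" trampoline is the first-call slow path: it validates that the
+// callee is a JSFunction, finishes canonical frame setup, calls the lazy
+// linker (which compiles the target and patches the call site), and then
+// jumps to the freshly linked code.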
+static MacroAssemblerCodeRef linkForGenerator(VM* vm, FunctionPtr lazyLink, FunctionPtr notJSFunction, const char* name)
+{
+ JSInterfaceJIT jit;
+
+ JSInterfaceJIT::JumpList slowCase;
+
+#if USE(JSVALUE64)
+ slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
+ slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
+#else // USE(JSVALUE64)
+ slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
+ slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
+#endif // USE(JSVALUE64)
+
+ // Finish canonical initialization before JS function call.
+ jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ // Also initialize ReturnPC for use by lazy linking and exceptions.
+ jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
+ jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
+
+ jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
+ jit.restoreArgumentReference();
+ JSInterfaceJIT::Call callLazyLink = jit.call();
+ jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
+ jit.jump(JSInterfaceJIT::regT0);
+
+ slowCase.link(&jit);
+ JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);
+
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ patchBuffer.link(callLazyLink, lazyLink);
+ patchBuffer.link(callNotJSFunction, notJSFunction);
+
+ return FINALIZE_CODE(patchBuffer, ("link %s trampoline", name));
+}
+
+MacroAssemblerCodeRef linkCallGenerator(VM* vm)
+{
+ return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkCall), FunctionPtr(cti_op_call_NotJSFunction), "call");
+}
+
+MacroAssemblerCodeRef linkConstructGenerator(VM* vm)
+{
+ return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkConstruct), FunctionPtr(cti_op_construct_NotJSConstruct), "construct");
+}
+
+MacroAssemblerCodeRef linkClosureCallGenerator(VM* vm)
+{
+ return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkClosureCall), FunctionPtr(cti_op_call_NotJSFunction), "closure call");
+}
+
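+// A "virtual" trampoline serves call sites that cannot be linked to a single
+// callee: every call re-reads the callee's executable, compiles it on first
+// use, and jumps through the arity-checked entry point for the given kind.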
+static MacroAssemblerCodeRef virtualForGenerator(VM* vm, FunctionPtr compile, FunctionPtr notJSFunction, const char* name, CodeSpecializationKind kind)
+{
+ JSInterfaceJIT jit;
+
+ JSInterfaceJIT::JumpList slowCase;
+
+#if USE(JSVALUE64)
+ slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
+#else // USE(JSVALUE64)
+ slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
+#endif // USE(JSVALUE64)
+ slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
+
+ // Finish canonical initialization before JS function call.
+ jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
+ JSInterfaceJIT::Jump hasCodeBlock1 = jit.branch32(JSInterfaceJIT::GreaterThanOrEqual, JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfNumParametersFor(kind)), JSInterfaceJIT::TrustedImm32(0));
+ jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
+ jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
+ jit.restoreArgumentReference();
+ JSInterfaceJIT::Call callCompile = jit.call();
+ jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
+ jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
+
+ hasCodeBlock1.link(&jit);
+ jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfJITCodeWithArityCheckFor(kind)), JSInterfaceJIT::regT0);
+ jit.jump(JSInterfaceJIT::regT0);
+
+ slowCase.link(&jit);
+ JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);
+
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ patchBuffer.link(callCompile, compile);
+ patchBuffer.link(callNotJSFunction, notJSFunction);
+
+ return FINALIZE_CODE(patchBuffer, ("virtual %s trampoline", name));
+}
+
+MacroAssemblerCodeRef virtualCallGenerator(VM* vm)
+{
+ return virtualForGenerator(vm, FunctionPtr(cti_op_call_jitCompile), FunctionPtr(cti_op_call_NotJSFunction), "call", CodeForCall);
+}
+
+MacroAssemblerCodeRef virtualConstructGenerator(VM* vm)
+{
+ return virtualForGenerator(vm, FunctionPtr(cti_op_construct_jitCompile), FunctionPtr(cti_op_construct_NotJSConstruct), "construct", CodeForConstruct);
+}
+
+MacroAssemblerCodeRef stringLengthTrampolineGenerator(VM* vm)
+{
+ JSInterfaceJIT jit;
+
+#if USE(JSVALUE64)
+ // Check that regT0 holds a string cell.
+ JSInterfaceJIT::Jump failureCases1 = jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0);
+ JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
+ JSInterfaceJIT::NotEqual, JSInterfaceJIT::Address(
+ JSInterfaceJIT::regT0, JSCell::structureOffset()),
+ JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));
+
+ // Checks out okay! Get the length from the JSString.
+ jit.load32(
+ JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
+ JSInterfaceJIT::regT0);
+
+ JSInterfaceJIT::Jump failureCases3 = jit.branch32(
+ JSInterfaceJIT::LessThan, JSInterfaceJIT::regT0, JSInterfaceJIT::TrustedImm32(0));
+
+ // regT0 contains a positive, zero-extended 64-bit value, so no sign extension is needed here.
+ jit.emitFastArithIntToImmNoCheck(JSInterfaceJIT::regT0, JSInterfaceJIT::regT0);
+
+#else // USE(JSVALUE64)
+ // regT0 holds payload, regT1 holds tag
+
+ JSInterfaceJIT::Jump failureCases1 = jit.branch32(
+ JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1,
+ JSInterfaceJIT::TrustedImm32(JSValue::CellTag));
+ JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
+ JSInterfaceJIT::NotEqual,
+ JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSCell::structureOffset()),
+ JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));
+
+ // Checks out okay! Get the length from the JSString.
+ jit.load32(
+ JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
+ JSInterfaceJIT::regT2);
+
+ JSInterfaceJIT::Jump failureCases3 = jit.branch32(
+ JSInterfaceJIT::Above, JSInterfaceJIT::regT2, JSInterfaceJIT::TrustedImm32(INT_MAX));
+ jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::regT0);
+ jit.move(JSInterfaceJIT::TrustedImm32(JSValue::Int32Tag), JSInterfaceJIT::regT1);
+#endif // USE(JSVALUE64)
+
+ jit.ret();
+
+ JSInterfaceJIT::Call failureCases1Call = jit.makeTailRecursiveCall(failureCases1);
+ JSInterfaceJIT::Call failureCases2Call = jit.makeTailRecursiveCall(failureCases2);
+ JSInterfaceJIT::Call failureCases3Call = jit.makeTailRecursiveCall(failureCases3);
+
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+
+ patchBuffer.link(failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
+
+ return FINALIZE_CODE(patchBuffer, ("string length trampoline"));
+}
+
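+// The native-call thunk bridges from the JS calling convention to a host C++
+// function: it records the top call frame, moves ExecState* into the first
+// argument register of each target ABI, calls through the executable, and
+// checks for a pending exception before returning.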
+static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind)
+{
+ int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);
+
+ JSInterfaceJIT jit;
+
+ jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
+ jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
+
+#if CPU(X86)
+ // Load the caller frame's scope chain into this call frame so that
+ // whatever we call can reach its VM.
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ jit.peek(JSInterfaceJIT::regT1);
+ jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);
+
+ // Calling convention: f(ecx, edx, ...);
+ // Host function signature: f(ExecState*);
+ jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
+
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.
+
+ // call the function
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
+ jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));
+
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister);
+
+#elif CPU(X86_64)
+ // Load the caller frame's scope chain into this call frame so that
+ // whatever we call can reach its VM.
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ jit.peek(JSInterfaceJIT::regT1);
+ jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);
+
+#if !OS(WINDOWS)
+ // Calling convention: f(edi, esi, edx, ecx, ...);
+ // Host function signature: f(ExecState*);
+ jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);
+
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.
+
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
+ jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
+
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+#else
+ // Calling convention: f(ecx, edx, r8, r9, ...);
+ // Host function signature: f(ExecState*);
+ jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
+
+ // Leave space for the callee parameter home addresses and align the stack.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
+ jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
+
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+#endif
+
+#elif CPU(ARM)
+ // Load the caller frame's scope chain into this call frame so that
+ // whatever we call can reach its VM.
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
+ jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
+
+ // Calling convention: f(r0 == regT0, r1 == regT1, ...);
+ // Host function signature: f(ExecState*);
+ jit.move(JSInterfaceJIT::callFrameRegister, ARMRegisters::r0);
+
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
+ jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ jit.loadPtr(JSInterfaceJIT::Address(ARMRegisters::r1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
+ jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));
+
+ jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
+
+#elif CPU(SH4)
+ // Load the caller frame's scope chain into this call frame so that
+ // whatever we call can reach its VM.
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
+ jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
+
+ // Calling convention: f(r0 == regT4, r1 == regT5, ...);
+ // Host function signature: f(ExecState*);
+ jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT4);
+
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT5);
+ jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT5, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
+
+ jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction), JSInterfaceJIT::regT0);
+ jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
+
+#elif CPU(MIPS)
+ // Load the caller frame's scope chain into this call frame so that
+ // whatever we call can reach its VM.
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
+ jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
+
+ // Calling convention: f(a0, a1, a2, a3);
+ // Host function signature: f(ExecState*);
+
+ // Allocate 16 bytes of stack space (8-byte aligned): the MIPS calling
+ // convention reserves (unused) home slots for the four argument registers.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
+
+ // Set up arg0.
+ jit.move(JSInterfaceJIT::callFrameRegister, MIPSRegisters::a0);
+
+ // Call
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
+ jit.loadPtr(JSInterfaceJIT::Address(MIPSRegisters::a2, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));
+
+ // Restore stack space
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
+
+ jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
+#else
+#error "JIT not supported on this platform."
+ UNUSED_PARAM(executableOffsetToFunction);
+ breakpoint();
+#endif
+
+ // Check for an exception
+#if USE(JSVALUE64)
+ jit.load64(&(vm->exception), JSInterfaceJIT::regT2);
+ JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
+#else
+ JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
+ JSInterfaceJIT::NotEqual,
+ JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(&vm->exception) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+ JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
+#endif
+
+ // Return.
+ jit.ret();
+
+ // Handle an exception
+ exceptionHandler.link(&jit);
+
+ // Grab the return address.
+ jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1);
+
+ jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2);
+ jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
+ jit.poke(JSInterfaceJIT::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+
+ jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
+ // Set the return address.
+ jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), JSInterfaceJIT::regT1);
+ jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT1);
+
+ jit.ret();
+
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data()));
+}
+
+MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
+{
+ return nativeForGenerator(vm, CodeForCall);
+}
+
+MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
+{
+ return nativeForGenerator(vm, CodeForConstruct);
+}
+
+static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
// load string
- jit.loadJSStringArgument(*globalData, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);
+ jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);
// Load string length to regT2, and start the process of loading the data pointer into regT0
jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
@@ -55,9 +436,9 @@ static void stringCharLoad(SpecializedThunkJIT& jit, JSGlobalData* globalData)
SpecializedThunkJIT::JumpList is16Bit;
SpecializedThunkJIT::JumpList cont8Bit;
// Load the string flags
- jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::stringImplFlagsOffset()), SpecializedThunkJIT::regT2);
- jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::stringImplDataOffset()), SpecializedThunkJIT::regT0);
- is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
+ jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
+ jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
+ is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
cont8Bit.append(jit.jump());
is16Bit.link(&jit);
@@ -65,51 +446,51 @@ static void stringCharLoad(SpecializedThunkJIT& jit, JSGlobalData* globalData)
cont8Bit.link(&jit);
}
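
The fast path stringCharLoad() emits is equivalent to the C++ below; MiniStringImpl is a stand-in for WTF::StringImpl, and the flag value is assumed for illustration:

    #include <cstdint>

    struct MiniStringImpl {
        unsigned length;                         // jsStringLengthOffset()
        uint32_t flags;                          // StringImpl::flagsOffset()
        const void* data;                        // StringImpl::dataOffset()
        static const uint32_t is8Bit = 1u << 0;  // StringImpl::flagIs8Bit(), value assumed
    };

    // Bounds-check the index, then pick the 8-bit or 16-bit load.
    int charAtFastPath(const MiniStringImpl& s, unsigned index)
    {
        if (index >= s.length)
            return -1;                                          // appendFailure path
        if (s.flags & MiniStringImpl::is8Bit)
            return static_cast<const uint8_t*>(s.data)[index];  // load8, TimesOne
        return static_cast<const uint16_t*>(s.data)[index];     // load16, TimesTwo
    }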
-static void charToString(SpecializedThunkJIT& jit, JSGlobalData* globalData, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
+static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
- jit.move(MacroAssembler::TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), scratch);
+ jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}
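
charToString() bails out for code points at or above 0x100 and otherwise indexes the VM's single-character string cache, also failing on a null entry. Roughly (the stub types are illustrative, not JSC declarations):

    struct JSStringStub;                              // opaque stand-in for JSString
    struct SmallStringsStub {
        JSStringStub* singleCharacterStrings[0x100];  // one slot per Latin-1 code point
    };

    JSStringStub* charToStringEquivalent(const SmallStringsStub& strings, unsigned ch)
    {
        if (ch >= 0x100)
            return nullptr;                         // branch32 AboveOrEqual -> failure
        return strings.singleCharacterStrings[ch];  // null -> branchTestPtr Zero -> failure
    }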
-MacroAssemblerCodeRef charCodeAtThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
SpecializedThunkJIT jit(1);
- stringCharLoad(jit, globalData);
+ stringCharLoad(jit, vm);
jit.returnInt32(SpecializedThunkJIT::regT0);
- return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall(), "charCodeAt");
+ return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charCodeAt");
}
-MacroAssemblerCodeRef charAtThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
SpecializedThunkJIT jit(1);
- stringCharLoad(jit, globalData);
- charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
+ stringCharLoad(jit, vm);
+ charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall(), "charAt");
+ return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charAt");
}
-MacroAssemblerCodeRef fromCharCodeThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
SpecializedThunkJIT jit(1);
// load char code
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
- charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
+ charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall(), "fromCharCode");
+ return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "fromCharCode");
}
-MacroAssemblerCodeRef sqrtThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
SpecializedThunkJIT jit(1);
if (!jit.supportsFloatingPointSqrt())
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall(), "sqrt");
+ return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "sqrt");
}
@@ -126,7 +507,7 @@ double jsRound(double d)
}
}
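
jsRound, whose tail appears in the hunk above, is the helper behind ECMAScript Math.round semantics: halfway cases round toward positive infinity, unlike C99 round(), which rounds them away from zero. A self-contained sketch of the same behavior:

    #include <cassert>
    #include <cmath>

    // ECMAScript Math.round: halfway cases go toward +infinity.
    double jsRoundEquivalent(double d)
    {
        double up = std::ceil(d);
        return (up - d > 0.5) ? up - 1.0 : up;
    }

    int main()
    {
        assert(jsRoundEquivalent(2.5) == 3.0);    // same result as C round()
        assert(jsRoundEquivalent(-2.5) == -2.0);  // C round() would give -3
        assert(jsRoundEquivalent(-2.6) == -3.0);
        return 0;
    }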
-
+
#if CPU(X86_64) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
@@ -163,6 +544,28 @@ double jsRound(double d)
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
+#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)
+
+#define defineUnaryDoubleOpWrapper(function) \
+ asm( \
+ ".text\n" \
+ ".align 2\n" \
+ ".globl " SYMBOL_STRING(function##Thunk) "\n" \
+ HIDE_SYMBOL(function##Thunk) "\n" \
+ ".thumb\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
+ SYMBOL_STRING(function##Thunk) ":" "\n" \
+ "push {lr}\n" \
+ "vmov r0, r1, d0\n" \
+ "blx " GLOBAL_REFERENCE(function) "\n" \
+ "vmov d0, r0, r1\n" \
+ "pop {lr}\n" \
+ "bx lr\n" \
+ ); \
+ extern "C" { \
+ MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
+ } \
+ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
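
This iOS Thumb-2 wrapper exists because the call boundary used here passes the double operand in the core-register pair r0/r1 while the JIT keeps it in the VFP register d0; the two vmov instructions marshal the value across, and lr is pushed and popped around the blx. The bit-level marshalling, modelled portably (assuming the little-endian layout where r0 carries the low word):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // d0 -> {r0, r1}: the double crosses the call boundary as two 32-bit halves.
    static void toRegPair(double d, uint32_t& r0, uint32_t& r1)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        r0 = static_cast<uint32_t>(bits);        // low half
        r1 = static_cast<uint32_t>(bits >> 32);  // high half
    }

    // {r0, r1} -> d0: reassemble the result after the call returns.
    static double fromRegPair(uint32_t r0, uint32_t r1)
    {
        uint64_t bits = (static_cast<uint64_t>(r1) << 32) | r0;
        double d;
        std::memcpy(&d, &bits, sizeof bits);
        return d;
    }

    int main()
    {
        uint32_t r0, r1;
        toRegPair(2.25, r0, r1);
        std::printf("floor(2.25) = %g\n", std::floor(fromRegPair(r0, r1)));
        return 0;
    }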
#else
#define defineUnaryDoubleOpWrapper(function) \
@@ -180,12 +583,12 @@ static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;
-MacroAssemblerCodeRef floorThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
SpecializedThunkJIT jit(1);
MacroAssembler::Jump nonIntJump;
if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
jit.returnInt32(SpecializedThunkJIT::regT0);
nonIntJump.link(&jit);
@@ -202,40 +605,40 @@ MacroAssemblerCodeRef floorThunkGenerator(JSGlobalData* globalData)
intResult = jit.jump();
slowPath.link(&jit);
}
- jit.callDoubleToDouble(UnaryDoubleOpWrapper(floor));
+ jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
if (jit.supportsFloatingPointTruncate())
intResult.link(&jit);
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall(), "floor");
+ return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "floor");
}
-MacroAssemblerCodeRef ceilThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
SpecializedThunkJIT jit(1);
if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
MacroAssembler::Jump nonIntJump;
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
jit.returnInt32(SpecializedThunkJIT::regT0);
nonIntJump.link(&jit);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
- jit.callDoubleToDouble(UnaryDoubleOpWrapper(ceil));
+ jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
SpecializedThunkJIT::JumpList doubleResult;
jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall(), "ceil");
+ return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "ceil");
}
-MacroAssemblerCodeRef roundThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
SpecializedThunkJIT jit(1);
if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
MacroAssembler::Jump nonIntJump;
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
jit.returnInt32(SpecializedThunkJIT::regT0);
@@ -255,47 +658,47 @@ MacroAssemblerCodeRef roundThunkGenerator(JSGlobalData* globalData)
intResult = jit.jump();
slowPath.link(&jit);
}
- jit.callDoubleToDouble(UnaryDoubleOpWrapper(jsRound));
+ jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
if (jit.supportsFloatingPointTruncate())
intResult.link(&jit);
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall(), "round");
+ return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "round");
}
-MacroAssemblerCodeRef expThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
if (!UnaryDoubleOpWrapper(exp))
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
SpecializedThunkJIT jit(1);
if (!jit.supportsFloatingPoint())
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
- jit.callDoubleToDouble(UnaryDoubleOpWrapper(exp));
+ jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall(), "exp");
+ return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "exp");
}
-MacroAssemblerCodeRef logThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
if (!UnaryDoubleOpWrapper(log))
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
SpecializedThunkJIT jit(1);
if (!jit.supportsFloatingPoint())
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
- jit.callDoubleToDouble(UnaryDoubleOpWrapper(log));
+ jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall(), "log");
+ return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "log");
}
-MacroAssemblerCodeRef absThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
SpecializedThunkJIT jit(1);
if (!jit.supportsFloatingPointAbs())
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
MacroAssembler::Jump nonIntJump;
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
@@ -308,14 +711,14 @@ MacroAssemblerCodeRef absThunkGenerator(JSGlobalData* globalData)
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
jit.returnDouble(SpecializedThunkJIT::fpRegT1);
- return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall(), "abs");
+ return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "abs");
}
-MacroAssemblerCodeRef powThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
SpecializedThunkJIT jit(2);
if (!jit.supportsFloatingPoint())
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
@@ -360,7 +763,40 @@ MacroAssemblerCodeRef powThunkGenerator(JSGlobalData* globalData)
} else
jit.appendFailure(nonIntExponent);
- return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall(), "pow");
+ return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "pow");
+}
+
+MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
+{
+ SpecializedThunkJIT jit(2);
+ MacroAssembler::Jump nonIntArg0Jump;
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
+ SpecializedThunkJIT::Label doneLoadingArg0(&jit);
+ MacroAssembler::Jump nonIntArg1Jump;
+ jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
+ SpecializedThunkJIT::Label doneLoadingArg1(&jit);
+ jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+
+ if (jit.supportsFloatingPointTruncate()) {
+ nonIntArg0Jump.link(&jit);
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
+ jit.xor32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0);
+ jit.jump(doneLoadingArg0);
+ } else
+ jit.appendFailure(nonIntArg0Jump);
+
+ if (jit.supportsFloatingPointTruncate()) {
+ nonIntArg1Jump.link(&jit);
+ jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
+ jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
+ jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT1);
+ jit.jump(doneLoadingArg1);
+ } else
+ jit.appendFailure(nonIntArg1Jump);
+
+ return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "imul");
}
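
imulThunkGenerator fast-paths ES6 Math.imul: both operands are taken as int32 and multiplied with wraparound modulo 2^32; non-integer arguments are truncated from double, and an operand whose truncation fails (e.g. NaN or an infinity, whose ToInt32 is 0) is replaced by zero via the xor32. The core arithmetic, as a sketch:

    #include <cassert>
    #include <cstdint>

    // Math.imul: 32-bit multiply with wraparound, result reinterpreted as signed.
    int32_t imulEquivalent(int32_t a, int32_t b)
    {
        // Unsigned multiply avoids signed-overflow UB while producing the same bits.
        return static_cast<int32_t>(static_cast<uint32_t>(a) * static_cast<uint32_t>(b));
    }

    int main()
    {
        assert(imulEquivalent(3, 4) == 12);
        assert(imulEquivalent(0x7fffffff, 2) == -2);  // wraps to 0xfffffffe
        return 0;
    }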
}
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.h b/Source/JavaScriptCore/jit/ThunkGenerators.h
index b251f6be8..a4b0fc4b3 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.h
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,26 +26,34 @@
#ifndef ThunkGenerators_h
#define ThunkGenerators_h
+#include "ThunkGenerator.h"
+
#if ENABLE(JIT)
namespace JSC {
- class ExecutablePool;
- class JSGlobalData;
- class NativeExecutable;
- class MacroAssemblerCodeRef;
- typedef MacroAssemblerCodeRef (*ThunkGenerator)(JSGlobalData*);
- MacroAssemblerCodeRef charCodeAtThunkGenerator(JSGlobalData*);
- MacroAssemblerCodeRef charAtThunkGenerator(JSGlobalData*);
- MacroAssemblerCodeRef fromCharCodeThunkGenerator(JSGlobalData*);
- MacroAssemblerCodeRef absThunkGenerator(JSGlobalData*);
- MacroAssemblerCodeRef ceilThunkGenerator(JSGlobalData*);
- MacroAssemblerCodeRef expThunkGenerator(JSGlobalData*);
- MacroAssemblerCodeRef floorThunkGenerator(JSGlobalData*);
- MacroAssemblerCodeRef logThunkGenerator(JSGlobalData*);
- MacroAssemblerCodeRef roundThunkGenerator(JSGlobalData*);
- MacroAssemblerCodeRef sqrtThunkGenerator(JSGlobalData*);
- MacroAssemblerCodeRef powThunkGenerator(JSGlobalData*);
+MacroAssemblerCodeRef linkCallGenerator(VM*);
+MacroAssemblerCodeRef linkConstructGenerator(VM*);
+MacroAssemblerCodeRef linkClosureCallGenerator(VM*);
+MacroAssemblerCodeRef virtualCallGenerator(VM*);
+MacroAssemblerCodeRef virtualConstructGenerator(VM*);
+MacroAssemblerCodeRef stringLengthTrampolineGenerator(VM*);
+MacroAssemblerCodeRef nativeCallGenerator(VM*);
+MacroAssemblerCodeRef nativeConstructGenerator(VM*);
+
+MacroAssemblerCodeRef charCodeAtThunkGenerator(VM*);
+MacroAssemblerCodeRef charAtThunkGenerator(VM*);
+MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM*);
+MacroAssemblerCodeRef absThunkGenerator(VM*);
+MacroAssemblerCodeRef ceilThunkGenerator(VM*);
+MacroAssemblerCodeRef expThunkGenerator(VM*);
+MacroAssemblerCodeRef floorThunkGenerator(VM*);
+MacroAssemblerCodeRef logThunkGenerator(VM*);
+MacroAssemblerCodeRef roundThunkGenerator(VM*);
+MacroAssemblerCodeRef sqrtThunkGenerator(VM*);
+MacroAssemblerCodeRef powThunkGenerator(VM*);
+MacroAssemblerCodeRef imulThunkGenerator(VM*);
+
}
-#endif
+#endif // ENABLE(JIT)
#endif // ThunkGenerators_h
diff --git a/Source/JavaScriptCore/jit/UnusedPointer.h b/Source/JavaScriptCore/jit/UnusedPointer.h
new file mode 100644
index 000000000..af41248d6
--- /dev/null
+++ b/Source/JavaScriptCore/jit/UnusedPointer.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UnusedPointer_h
+#define UnusedPointer_h
+
+namespace JSC {
+
+static const uintptr_t unusedPointer = 0xd1e7beef;
+
+} // namespace JSC
+
+using JSC::unusedPointer;
+
+#endif // UnusedPointer_h
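
JSC::unusedPointer is a poison constant (0xd1e7beef reads as "diet beef") for pointer-sized slots that no longer reference a live object, so stale entries are easy to spot in a debugger and cannot be mistaken for valid cells. A hypothetical use; CacheEntry and the helpers below are illustrative, not JSC code:

    #include <cstdint>

    static const uintptr_t unusedPointerStub = 0xd1e7beef;  // mirrors JSC::unusedPointer

    struct CacheEntry { uintptr_t structure; };             // hypothetical JIT cache slot

    void clearEntry(CacheEntry& e)      { e.structure = unusedPointerStub; }
    bool isCleared(const CacheEntry& e) { return e.structure == unusedPointerStub; }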