author    Peter Varga <pvarga@inf.u-szeged.hu>    2012-02-24 13:51:58 +0100
committer Qt by Nokia <qt-info@nokia.com>         2012-03-01 12:03:31 +0100
commit    6ea7510e6eb4d425c0b639cb3f95556324342f89 (patch)
tree      057e88aa2ee48a2d96e19faf658ba714646df32f /src/3rdparty/v8/src
parent    19f67820a0ca91f98d3a4a8f1cde53f0f5ccfdb5 (diff)
download  qtjsbackend-6ea7510e6eb4d425c0b639cb3f95556324342f89.tar.gz
Updated V8 from git://github.com/v8/v8.git to 06e55bc22bcb8ddb0a602e54e11971576f2d9d8a
* Implement VirtualMemory on FreeBSD to fix build (issue 1807).
* Fix error handling in Date.prototype.toISOString (issue 1792).
* Crankshaft for MIPS
* Bug fixes (mostly for MIPS)
* Use placement-new for zone-allocation in the Lithium classes.

Change-Id: I70ceacc4f7010cec58f73eb1826cbc06dd31149e
Reviewed-by: Simon Hausmann <simon.hausmann@nokia.com>
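
The last bullet refers to the zone-allocation pattern used throughout the Lithium classes. A minimal sketch of that pattern, with a simplified bump-pointer Zone standing in for V8's (not the real implementation):

    #include <cstddef>
    #include <cstdio>

    // Simplified bump-pointer arena; V8's Zone is the real counterpart.
    class Zone {
     public:
      void* New(size_t size) {
        size = (size + 7) & ~static_cast<size_t>(7);   // 8-byte alignment
        if (pos_ + size > sizeof(buf_)) return NULL;   // real code would grow
        void* result = buf_ + pos_;
        pos_ += size;
        return result;
      }
     private:
      char buf_[1 << 16];
      size_t pos_ = 0;
    };

    class LInstruction {
     public:
      // Placement-new: route allocation into the zone instead of the C++ heap.
      void* operator new(size_t size, Zone* zone) { return zone->New(size); }
      // Matching placement-delete (only called if a constructor throws).
      void operator delete(void*, Zone*) {}
      void operator delete(void*) {}  // zone memory is reclaimed wholesale
      virtual ~LInstruction() {}
    };

    class LGoto : public LInstruction {
     public:
      explicit LGoto(int block) : block_(block) {}
      int block() const { return block_; }
     private:
      int block_;
    };

    int main() {
      Zone zone;
      LInstruction* instr = new (&zone) LGoto(42);  // allocated inside the zone
      std::printf("zone-allocated instruction at %p\n", static_cast<void*>(instr));
      // Never `delete instr`: the whole zone is dropped at once when it dies.
      return 0;
    }
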
Diffstat (limited to 'src/3rdparty/v8/src')
-rwxr-xr-x  src/3rdparty/v8/src/SConscript | 3
-rw-r--r--  src/3rdparty/v8/src/api.cc | 234
-rw-r--r--  src/3rdparty/v8/src/api.h | 3
-rw-r--r--  src/3rdparty/v8/src/arm/code-stubs-arm.cc | 134
-rw-r--r--  src/3rdparty/v8/src/arm/code-stubs-arm.h | 11
-rw-r--r--  src/3rdparty/v8/src/arm/full-codegen-arm.cc | 292
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-arm.cc | 61
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-arm.h | 22
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-codegen-arm.cc | 9
-rw-r--r--  src/3rdparty/v8/src/arm/macro-assembler-arm.cc | 67
-rw-r--r--  src/3rdparty/v8/src/arm/macro-assembler-arm.h | 37
-rw-r--r--  src/3rdparty/v8/src/arm/stub-cache-arm.cc | 1134
-rw-r--r--  src/3rdparty/v8/src/ast-inl.h | 5
-rw-r--r--  src/3rdparty/v8/src/ast.cc | 22
-rw-r--r--  src/3rdparty/v8/src/ast.h | 69
-rw-r--r--  src/3rdparty/v8/src/atomicops_internals_mips_gcc.h | 48
-rw-r--r--  src/3rdparty/v8/src/code-stubs.h | 21
-rw-r--r--  src/3rdparty/v8/src/compiler.cc | 17
-rw-r--r--  src/3rdparty/v8/src/compiler.h | 16
-rw-r--r--  src/3rdparty/v8/src/contexts.cc | 35
-rw-r--r--  src/3rdparty/v8/src/contexts.h | 4
-rw-r--r--  src/3rdparty/v8/src/date.js | 14
-rw-r--r--  src/3rdparty/v8/src/debug-debugger.js | 3
-rw-r--r--  src/3rdparty/v8/src/debug.cc | 32
-rw-r--r--  src/3rdparty/v8/src/deoptimizer.h | 15
-rw-r--r--  src/3rdparty/v8/src/execution.cc | 31
-rw-r--r--  src/3rdparty/v8/src/execution.h | 8
-rw-r--r--  src/3rdparty/v8/src/factory.cc | 22
-rw-r--r--  src/3rdparty/v8/src/flag-definitions.h | 11
-rw-r--r--  src/3rdparty/v8/src/full-codegen.cc | 90
-rw-r--r--  src/3rdparty/v8/src/full-codegen.h | 117
-rw-r--r--  src/3rdparty/v8/src/globals.h | 6
-rw-r--r--  src/3rdparty/v8/src/handles.cc | 18
-rw-r--r--  src/3rdparty/v8/src/handles.h | 3
-rw-r--r--  src/3rdparty/v8/src/hashmap.cc | 6
-rw-r--r--  src/3rdparty/v8/src/hashmap.h | 5
-rw-r--r--  src/3rdparty/v8/src/heap-inl.h | 65
-rw-r--r--  src/3rdparty/v8/src/heap.cc | 46
-rw-r--r--  src/3rdparty/v8/src/heap.h | 17
-rw-r--r--  src/3rdparty/v8/src/hydrogen-instructions.cc | 89
-rw-r--r--  src/3rdparty/v8/src/hydrogen-instructions.h | 101
-rw-r--r--  src/3rdparty/v8/src/hydrogen.cc | 98
-rw-r--r--  src/3rdparty/v8/src/ia32/assembler-ia32.cc | 20
-rw-r--r--  src/3rdparty/v8/src/ia32/assembler-ia32.h | 2
-rw-r--r--  src/3rdparty/v8/src/ia32/code-stubs-ia32.cc | 205
-rw-r--r--  src/3rdparty/v8/src/ia32/code-stubs-ia32.h | 10
-rw-r--r--  src/3rdparty/v8/src/ia32/full-codegen-ia32.cc | 460
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc | 9
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-ia32.cc | 439
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-ia32.h | 32
-rw-r--r--  src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc | 96
-rw-r--r--  src/3rdparty/v8/src/ia32/macro-assembler-ia32.h | 51
-rw-r--r--  src/3rdparty/v8/src/ia32/stub-cache-ia32.cc | 1092
-rw-r--r--  src/3rdparty/v8/src/incremental-marking-inl.h | 62
-rw-r--r--  src/3rdparty/v8/src/incremental-marking.cc | 40
-rw-r--r--  src/3rdparty/v8/src/incremental-marking.h | 21
-rw-r--r--  src/3rdparty/v8/src/isolate-inl.h | 4
-rw-r--r--  src/3rdparty/v8/src/isolate.cc | 7
-rw-r--r--  src/3rdparty/v8/src/isolate.h | 14
-rw-r--r--  src/3rdparty/v8/src/macros.py | 2
-rw-r--r--  src/3rdparty/v8/src/mark-compact.cc | 13
-rw-r--r--  src/3rdparty/v8/src/messages.cc | 2
-rw-r--r--  src/3rdparty/v8/src/messages.js | 1
-rw-r--r--  src/3rdparty/v8/src/mips/builtins-mips.cc | 85
-rw-r--r--  src/3rdparty/v8/src/mips/code-stubs-mips.cc | 159
-rw-r--r--  src/3rdparty/v8/src/mips/code-stubs-mips.h | 11
-rw-r--r--  src/3rdparty/v8/src/mips/deoptimizer-mips.cc | 735
-rw-r--r--  src/3rdparty/v8/src/mips/full-codegen-mips.cc | 286
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-codegen-mips.cc | 4628
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-codegen-mips.h | 383
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc | 309
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.h | 83
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-mips.cc | 2203
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-mips.h | 2217
-rw-r--r--  src/3rdparty/v8/src/mips/macro-assembler-mips.cc | 64
-rw-r--r--  src/3rdparty/v8/src/mips/macro-assembler-mips.h | 42
-rw-r--r--  src/3rdparty/v8/src/mips/stub-cache-mips.cc | 1112
-rw-r--r--  src/3rdparty/v8/src/objects-inl.h | 88
-rw-r--r--  src/3rdparty/v8/src/objects-printer.cc | 2
-rw-r--r--  src/3rdparty/v8/src/objects.cc | 192
-rw-r--r--  src/3rdparty/v8/src/objects.h | 72
-rw-r--r--  src/3rdparty/v8/src/parser.cc | 47
-rw-r--r--  src/3rdparty/v8/src/parser.h | 10
-rw-r--r--  src/3rdparty/v8/src/platform-linux.cc | 4
-rw-r--r--  src/3rdparty/v8/src/preparser-api.cc | 2
-rw-r--r--  src/3rdparty/v8/src/preparser.cc | 4
-rw-r--r--  src/3rdparty/v8/src/preparser.h | 6
-rw-r--r--  src/3rdparty/v8/src/prettyprinter.cc | 3
-rw-r--r--  src/3rdparty/v8/src/runtime.cc | 461
-rw-r--r--  src/3rdparty/v8/src/runtime.h | 11
-rwxr-xr-x [-rw-r--r--]  src/3rdparty/v8/src/scanner.cc | 93
-rw-r--r--  src/3rdparty/v8/src/scanner.h | 249
-rw-r--r--  src/3rdparty/v8/src/scopeinfo.cc | 28
-rw-r--r--  src/3rdparty/v8/src/scopeinfo.h | 1
-rw-r--r--  src/3rdparty/v8/src/scopes.cc | 81
-rw-r--r--  src/3rdparty/v8/src/scopes.h | 14
-rw-r--r--  src/3rdparty/v8/src/stub-cache.cc | 354
-rw-r--r--  src/3rdparty/v8/src/stub-cache.h | 215
-rw-r--r--  src/3rdparty/v8/src/v8natives.js | 15
-rw-r--r--  src/3rdparty/v8/src/variables.cc | 3
-rw-r--r--  src/3rdparty/v8/src/variables.h | 8
-rw-r--r--  src/3rdparty/v8/src/version.cc | 4
-rw-r--r--  src/3rdparty/v8/src/x64/code-stubs-x64.cc | 114
-rw-r--r--  src/3rdparty/v8/src/x64/code-stubs-x64.h | 10
-rw-r--r--  src/3rdparty/v8/src/x64/full-codegen-x64.cc | 284
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-codegen-x64.cc | 9
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-x64.cc | 61
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-x64.h | 22
-rw-r--r--  src/3rdparty/v8/src/x64/macro-assembler-x64.cc | 102
-rw-r--r--  src/3rdparty/v8/src/x64/macro-assembler-x64.h | 57
-rw-r--r--  src/3rdparty/v8/src/x64/stub-cache-x64.cc | 1095
111 files changed, 14021 insertions, 7640 deletions
diff --git a/src/3rdparty/v8/src/SConscript b/src/3rdparty/v8/src/SConscript
index be4a8f0..42de36b 100755
--- a/src/3rdparty/v8/src/SConscript
+++ b/src/3rdparty/v8/src/SConscript
@@ -172,6 +172,9 @@ SOURCES = {
mips/frames-mips.cc
mips/full-codegen-mips.cc
mips/ic-mips.cc
+ mips/lithium-codegen-mips.cc
+ mips/lithium-gap-resolver-mips.cc
+ mips/lithium-mips.cc
mips/macro-assembler-mips.cc
mips/regexp-macro-assembler-mips.cc
mips/stub-cache-mips.cc
diff --git a/src/3rdparty/v8/src/api.cc b/src/3rdparty/v8/src/api.cc
index a16ef65..40a89e2 100644
--- a/src/3rdparty/v8/src/api.cc
+++ b/src/3rdparty/v8/src/api.cc
@@ -1123,7 +1123,6 @@ void FunctionTemplate::SetNamedInstancePropertyHandler(
NamedPropertyQuery query,
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
- bool is_fallback,
Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate,
@@ -1142,7 +1141,6 @@ void FunctionTemplate::SetNamedInstancePropertyHandler(
if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
- obj->set_is_fallback(i::Smi::FromInt(is_fallback));
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
@@ -1287,33 +1285,6 @@ void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
query,
remover,
enumerator,
- false,
- data);
-}
-
-
-void ObjectTemplate::SetFallbackPropertyHandler(NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter,
- setter,
- query,
- remover,
- enumerator,
- true,
data);
}
@@ -1436,45 +1407,6 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
}
-bool ObjectTemplate::HasExternalResource()
-{
- if (IsDeadCheck(Utils::OpenHandle(this)->GetIsolate(),
- "v8::ObjectTemplate::HasExternalResource()")) {
- return 0;
- }
- return !Utils::OpenHandle(this)->has_external_resource()->IsUndefined();
-}
-
-
-void ObjectTemplate::SetHasExternalResource(bool value)
-{
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetHasExternalResource()")) {
- return;
- }
- ENTER_V8(isolate);
- if (value) {
- EnsureConstructor(this);
- }
- if (value) {
- Utils::OpenHandle(this)->set_has_external_resource(i::Smi::FromInt(1));
- } else {
- Utils::OpenHandle(this)->set_has_external_resource(Utils::OpenHandle(this)->GetHeap()->undefined_value());
- }
-}
-
-
-void ObjectTemplate::MarkAsUseUserObjectComparison()
-{
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::MarkAsUseUserObjectComparison()")) {
- return;
- }
- ENTER_V8(isolate);
- EnsureConstructor(this);
- Utils::OpenHandle(this)->set_use_user_object_comparison(i::Smi::FromInt(1));
-}
-
// --- S c r i p t D a t a ---
@@ -1525,8 +1457,7 @@ ScriptData* ScriptData::New(const char* data, int length) {
Local<Script> Script::New(v8::Handle<String> source,
v8::ScriptOrigin* origin,
v8::ScriptData* pre_data,
- v8::Handle<String> script_data,
- v8::Script::CompileFlags compile_flags) {
+ v8::Handle<String> script_data) {
i::Isolate* isolate = i::Isolate::Current();
ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
LOG_API(isolate, "Script::New");
@@ -1563,8 +1494,7 @@ Local<Script> Script::New(v8::Handle<String> source,
NULL,
pre_data_impl,
Utils::OpenHandle(*script_data),
- i::NOT_NATIVES_CODE,
- compile_flags);
+ i::NOT_NATIVES_CODE);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
return Local<Script>(ToApi<Script>(result));
@@ -1572,23 +1502,21 @@ Local<Script> Script::New(v8::Handle<String> source,
Local<Script> Script::New(v8::Handle<String> source,
- v8::Handle<Value> file_name,
- v8::Script::CompileFlags compile_flags) {
+ v8::Handle<Value> file_name) {
ScriptOrigin origin(file_name);
- return New(source, &origin, 0, Handle<String>(), compile_flags);
+ return New(source, &origin);
}
Local<Script> Script::Compile(v8::Handle<String> source,
v8::ScriptOrigin* origin,
v8::ScriptData* pre_data,
- v8::Handle<String> script_data,
- v8::Script::CompileFlags compile_flags) {
+ v8::Handle<String> script_data) {
i::Isolate* isolate = i::Isolate::Current();
ON_BAILOUT(isolate, "v8::Script::Compile()", return Local<Script>());
LOG_API(isolate, "Script::Compile");
ENTER_V8(isolate);
- Local<Script> generic = New(source, origin, pre_data, script_data, compile_flags);
+ Local<Script> generic = New(source, origin, pre_data, script_data);
if (generic.IsEmpty())
return generic;
i::Handle<i::Object> obj = Utils::OpenHandle(*generic);
@@ -1604,18 +1532,13 @@ Local<Script> Script::Compile(v8::Handle<String> source,
Local<Script> Script::Compile(v8::Handle<String> source,
v8::Handle<Value> file_name,
- v8::Handle<String> script_data,
- v8::Script::CompileFlags compile_flags) {
+ v8::Handle<String> script_data) {
ScriptOrigin origin(file_name);
- return Compile(source, &origin, 0, script_data, compile_flags);
+ return Compile(source, &origin, 0, script_data);
}
Local<Value> Script::Run() {
- return Run(Handle<Object>());
-}
-
-Local<Value> Script::Run(Handle<Object> qml) {
i::Isolate* isolate = i::Isolate::Current();
ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
LOG_API(isolate, "Script::Run");
@@ -1634,11 +1557,10 @@ Local<Value> Script::Run(Handle<Object> qml) {
fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj), isolate);
}
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> qmlglobal = Utils::OpenHandle(*qml);
i::Handle<i::Object> receiver(
isolate->context()->global_proxy(), isolate);
i::Handle<i::Object> result =
- i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception, false, qmlglobal);
+ i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
raw_result = *result;
}
@@ -2316,12 +2238,6 @@ bool Value::IsRegExp() const {
return obj->IsJSRegExp();
}
-bool Value::IsError() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsError()")) return false;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(HEAP->Error_symbol());
-}
-
Local<String> Value::ToString() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
@@ -3717,57 +3633,6 @@ int String::Utf8Length() const {
}
-uint32_t String::Hash() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::Hash()")) return 0;
- return str->Hash();
-}
-
-
-String::CompleteHashData String::CompleteHash() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::CompleteHash()")) return CompleteHashData();
- CompleteHashData result;
- result.length = str->length();
- result.hash = str->Hash();
- if (str->IsSeqString())
- result.symbol_id = i::SeqString::cast(*str)->symbol_id();
- return result;
-}
-
-
-uint32_t String::ComputeHash(uint16_t *string, int length) {
- return i::HashSequentialString<i::uc16>(string, length) >> i::String::kHashShift;
-}
-
-
-uint32_t String::ComputeHash(char *string, int length) {
- return i::HashSequentialString<char>(string, length) >> i::String::kHashShift;
-}
-
-
-uint16_t String::GetCharacter(int index)
-{
- i::Handle<i::String> str = Utils::OpenHandle(this);
- return str->Get(index);
-}
-
-
-bool String::Equals(uint16_t *string, int length) {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::Equals()")) return 0;
- return str->SlowEqualsExternal(string, length);
-}
-
-
-bool String::Equals(char *string, int length)
-{
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::Equals()")) return 0;
- return str->SlowEqualsExternal(string, length);
-}
-
-
int String::WriteUtf8(char* buffer,
int capacity,
int* nchars_ref,
@@ -4084,34 +3949,6 @@ void v8::Object::SetPointerInInternalField(int index, void* value) {
}
-void v8::Object::SetExternalResource(v8::Object::ExternalResource *resource) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (CanBeEncodedAsSmi(resource)) {
- obj->SetExternalResourceObject(EncodeAsSmi(resource));
- } else {
- obj->SetExternalResourceObject(*isolate->factory()->NewForeign(static_cast<i::Address>((void *)resource)));
- }
- if (!obj->IsSymbol()) {
- isolate->heap()->external_string_table()->AddObject(*obj);
- }
-}
-
-
-v8::Object::ExternalResource *v8::Object::GetExternalResource() {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Object* value = obj->GetExternalResourceObject();
- if (value->IsSmi()) {
- return reinterpret_cast<v8::Object::ExternalResource*>(i::Internals::GetExternalPointerFromSmi(value));
- } else if (value->IsForeign()) {
- return reinterpret_cast<v8::Object::ExternalResource*>(i::Foreign::cast(value)->address());
- } else {
- return NULL;
- }
-}
-
-
// --- E n v i r o n m e n t ---
@@ -4364,42 +4201,6 @@ v8::Local<v8::Context> Context::GetCalling() {
}
-v8::Local<v8::Object> Context::GetCallingQmlGlobal() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetCallingQmlGlobal()")) {
- return Local<Object>();
- }
-
- i::Context *context = isolate->context();
- if (!context->qml_global()->IsUndefined()) {
- i::Handle<i::Object> qmlglobal(context->qml_global());
- return Utils::ToLocal(i::Handle<i::JSObject>::cast(qmlglobal));
- }
-
- i::JavaScriptFrameIterator it;
- if (it.done()) return Local<Object>();
- context = i::Context::cast(it.frame()->context());
- if (!context->qml_global()->IsUndefined()) {
- i::Handle<i::Object> qmlglobal(context->qml_global());
- return Utils::ToLocal(i::Handle<i::JSObject>::cast(qmlglobal));
- } else {
- return Local<Object>();
- }
-}
-
-v8::Local<v8::Value> Context::GetCallingScriptData()
-{
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetCallingScriptData()")) {
- return Local<Object>();
- }
-
- i::JavaScriptFrameIterator it;
- if (it.done()) return Local<Object>();
- i::Handle<i::Script> script(i::Script::cast(i::JSFunction::cast(it.frame()->function())->shared()->script()));
- return Utils::ToLocal(i::Handle<i::Object>(script->data()));
-}
-
v8::Local<v8::Object> Context::Global() {
if (IsDeadCheck(i::Isolate::Current(), "v8::Context::Global()")) {
return Local<v8::Object>();
@@ -4498,7 +4299,7 @@ static Local<External> ExternalNewImpl(void* data) {
}
static void* ExternalValueImpl(i::Handle<i::Object> obj) {
- return reinterpret_cast<void*>(i::Foreign::cast(*obj)->address());
+ return reinterpret_cast<void*>(i::Foreign::cast(*obj)->foreign_address());
}
@@ -4524,7 +4325,7 @@ void* v8::Object::SlowGetPointerFromInternalField(int index) {
if (value->IsSmi()) {
return i::Internals::GetExternalPointerFromSmi(value);
} else if (value->IsForeign()) {
- return reinterpret_cast<void*>(i::Foreign::cast(value)->address());
+ return reinterpret_cast<void*>(i::Foreign::cast(value)->foreign_address());
} else {
return NULL;
}
@@ -5077,7 +4878,7 @@ void V8::RemoveMessageListeners(MessageCallback that) {
NeanderObject listener(i::JSObject::cast(listeners.get(i)));
i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
- if (callback_obj->address() == FUNCTION_ADDR(that)) {
+ if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
listeners.set(i, isolate->heap()->undefined_value());
}
}
@@ -5130,17 +4931,6 @@ void V8::SetFailedAccessCheckCallbackFunction(
isolate->SetFailedAccessCheckCallback(callback);
}
-
-void V8::SetUserObjectComparisonCallbackFunction(
- UserObjectComparisonCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetUserObjectComparisonCallbackFunction()")) {
- return;
- }
- isolate->SetUserObjectComparisonCallback(callback);
-}
-
-
void V8::AddObjectGroup(Persistent<Value>* objects,
size_t length,
RetainedObjectInfo* info) {
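
Taken together, the api.cc changes drop the QML-specific entry points (the extra qml/compile_flags parameters on Script::New/Compile/Run, GetCallingQmlGlobal, fallback property handlers, external resources, user object comparison, the String hash helpers) and follow the Foreign::address() to foreign_address() rename. A minimal embedding sketch against the trimmed signatures, assuming the 3.x-era API this tree ships (error checks elided):

    #include <v8.h>

    int main() {
      v8::HandleScope handle_scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);

      // Script::Compile and Script::Run take no QML arguments anymore.
      v8::Local<v8::String> source = v8::String::New("6 * 7");
      v8::Local<v8::Script> script =
          v8::Script::Compile(source, v8::String::New("test.js"));
      v8::Local<v8::Value> result = script->Run();

      int answer = result->Int32Value();
      context.Dispose();
      return answer == 42 ? 0 : 1;
    }
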
diff --git a/src/3rdparty/v8/src/api.h b/src/3rdparty/v8/src/api.h
index 07723cb..f41c96e 100644
--- a/src/3rdparty/v8/src/api.h
+++ b/src/3rdparty/v8/src/api.h
@@ -115,7 +115,8 @@ void NeanderObject::set(int offset, v8::internal::Object* value) {
template <typename T> static inline T ToCData(v8::internal::Object* obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
return reinterpret_cast<T>(
- reinterpret_cast<intptr_t>(v8::internal::Foreign::cast(obj)->address()));
+ reinterpret_cast<intptr_t>(
+ v8::internal::Foreign::cast(obj)->foreign_address()));
}
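
The double cast in ToCData (pointer to intptr_t to T) exists because directly reinterpret_casting a data pointer to a function-pointer type is only conditionally supported in C++. A standalone sketch of the same round-trip; Address here is a local stand-in for v8::internal::Address:

    #include <cassert>
    #include <stdint.h>

    typedef unsigned char* Address;  // stand-in for v8::internal::Address

    template <typename T>
    Address FromCData(T callback) {
      // Function pointer -> integer -> byte pointer.
      return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(callback));
    }

    template <typename T>
    T ToCData(Address addr) {
      static_assert(sizeof(T) == sizeof(Address), "pointer sizes must match");
      // Byte pointer -> integer -> function pointer, mirroring api.h above.
      return reinterpret_cast<T>(reinterpret_cast<intptr_t>(addr));
    }

    static int Answer() { return 42; }

    int main() {
      typedef int (*Callback)();
      Address stored = FromCData<Callback>(&Answer);
      Callback roundtripped = ToCData<Callback>(stored);
      assert(roundtripped() == 42);
      return 0;
    }
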
diff --git a/src/3rdparty/v8/src/arm/code-stubs-arm.cc b/src/3rdparty/v8/src/arm/code-stubs-arm.cc
index 1e1aebd..7a541f5 100644
--- a/src/3rdparty/v8/src/arm/code-stubs-arm.cc
+++ b/src/3rdparty/v8/src/arm/code-stubs-arm.cc
@@ -172,10 +172,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Copy the qml global object from the surrounding context.
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
-
// Initialize the rest of the slots to undefined.
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
@@ -1569,37 +1565,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
- {
- Label not_user_equal, user_equal;
- __ and_(r2, r1, Operand(r0));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &not_user_equal);
-
- __ CompareObjectType(r0, r2, r4, JS_OBJECT_TYPE);
- __ b(ne, &not_user_equal);
-
- __ CompareObjectType(r1, r3, r4, JS_OBJECT_TYPE);
- __ b(ne, &not_user_equal);
-
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
- __ b(eq, &user_equal);
-
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitField2Offset));
- __ and_(r3, r3, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r3, Operand(1 << Map::kUseUserObjectComparison));
- __ b(ne, &not_user_equal);
-
- __ bind(&user_equal);
-
- __ Push(r0, r1);
- __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
- __ bind(&not_user_equal);
- }
-
-
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
@@ -5178,7 +5143,8 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
// Index is not a smi.
@@ -5254,7 +5220,8 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
@@ -5279,7 +5246,8 @@ void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
char_code_at_generator_.GenerateSlow(masm, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}
@@ -6646,18 +6614,10 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
- __ CompareObjectType(r0, r2, r3, JS_OBJECT_TYPE);
+ __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
__ b(ne, &miss);
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
- __ b(eq, &miss);
- __ CompareObjectType(r1, r2, r3, JS_OBJECT_TYPE);
+ __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
__ b(ne, &miss);
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
- __ b(eq, &miss);
ASSERT(GetCondition() == eq);
__ sub(r0, r0, Operand(r1));
@@ -6793,84 +6753,6 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* StringDictionaryLookupStub::TryGenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- String* name,
- Register scratch0) {
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch0;
- // Capacity is smi 2^n.
- __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
- __ sub(index, index, Operand(1));
- __ and_(index, index, Operand(
- Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- Register entity_name = scratch0;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- Register tmp = properties;
- __ add(tmp, properties, Operand(index, LSL, 1));
- __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- ASSERT(!tmp.is(entity_name));
- __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
- __ cmp(entity_name, tmp);
- __ b(eq, done);
-
- if (i != kInlinedProbes - 1) {
- // Stop if found the property.
- __ cmp(entity_name, Operand(Handle<String>(name)));
- __ b(eq, miss);
-
- // Check if the entry name is not a symbol.
- __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ ldrb(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ tst(entity_name, Operand(kIsSymbolMask));
- __ b(eq, miss);
-
- // Restore the properties.
- __ ldr(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- }
- }
-
- const int spill_mask =
- (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
- r2.bit() | r1.bit() | r0.bit());
-
- __ stm(db_w, sp, spill_mask);
- __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ mov(r1, Operand(Handle<String>(name)));
- StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
- MaybeObject* result = masm->TryCallStub(&stub);
- if (result->IsFailure()) return result;
- __ tst(r0, Operand(r0));
- __ ldm(ia_w, sp, spill_mask);
-
- __ b(eq, done);
- __ b(ne, miss);
- return result;
-}
-
-
// Probe the string dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
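
For reference, the probing scheme the removed stub inlines can be modeled in a few lines of plain C++. This is a simplified stand-in (triangular offsets in place of StringDictionary::GetProbeOffset, strings in place of symbols, and it assumes the table never fills up), not V8's dictionary:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Capacity is a power of two; probe i inspects
    //     index = (hash + (i + i*i)/2) & (capacity - 1)
    // Triangular offsets visit every slot of a power-of-two table, so an
    // empty slot on the probe path ("undefined" in V8) proves absence.
    struct ToyDictionary {
      explicit ToyDictionary(uint32_t capacity_pow2) : slots(capacity_pow2) {}
      std::vector<std::string> slots;  // "" marks an empty slot

      static uint32_t Hash(const std::string& s) {
        uint32_t h = 0;
        for (size_t i = 0; i < s.size(); i++) h = h * 31 + (uint8_t)s[i];
        return h;
      }

      void Insert(const std::string& name) {  // assumes a free slot exists
        uint32_t mask = (uint32_t)slots.size() - 1;
        for (uint32_t i = 0; ; i++) {
          uint32_t index = (Hash(name) + (i + i * i) / 2) & mask;
          if (slots[index].empty() || slots[index] == name) {
            slots[index] = name;
            return;
          }
        }
      }

      // Negative lookup: the removed stub inlines only the first
      // kInlinedProbes iterations and falls back to the generic stub.
      bool Contains(const std::string& name) const {
        uint32_t mask = (uint32_t)slots.size() - 1;
        for (uint32_t i = 0; i < slots.size(); i++) {
          uint32_t index = (Hash(name) + (i + i * i) / 2) & mask;
          if (slots[index].empty()) return false;  // definitely absent
          if (slots[index] == name) return true;   // found the property
        }
        return false;
      }
    };

    int main() {
      ToyDictionary dict(16);
      dict.Insert("length");
      dict.Insert("name");
      assert(dict.Contains("length"));
      assert(!dict.Contains("missing"));
      return 0;
    }
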
diff --git a/src/3rdparty/v8/src/arm/code-stubs-arm.h b/src/3rdparty/v8/src/arm/code-stubs-arm.h
index 647fc8d..b846864 100644
--- a/src/3rdparty/v8/src/arm/code-stubs-arm.h
+++ b/src/3rdparty/v8/src/arm/code-stubs-arm.h
@@ -807,17 +807,6 @@ class StringDictionaryLookupStub: public CodeStub {
Handle<String> name,
Register scratch0);
- // TODO(kmillikin): Eliminate this function when the stub cache is fully
- // handlified.
- MUST_USE_RESULT static MaybeObject* TryGenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- String* name,
- Register scratch0);
-
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
diff --git a/src/3rdparty/v8/src/arm/full-codegen-arm.cc b/src/3rdparty/v8/src/arm/full-codegen-arm.cc
index b8e3f30..d0c663b 100644
--- a/src/3rdparty/v8/src/arm/full-codegen-arm.cc
+++ b/src/3rdparty/v8/src/arm/full-codegen-arm.cc
@@ -47,11 +47,6 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-static unsigned GetPropertyId(Property* property) {
- return property->id();
-}
-
-
// A patch site is a location in the code which it is possible to patch. This
// class has a number of methods to emit the code which is patchable and the
// method EmitPatchInfo to record a marker back to the patchable code. This
@@ -182,13 +177,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Possibly allocate a local context.
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
+ if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in r1.
__ push(r1);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+ FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -400,7 +394,7 @@ void FullCodeGenerator::TestContext::Plug(Variable* var) const {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
// For simplicity we always test the accumulator register.
codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -423,7 +417,7 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -458,7 +452,7 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -517,7 +511,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -584,7 +578,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -687,7 +681,7 @@ void FullCodeGenerator::SetVar(Variable* var,
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
bool should_normalize,
Label* if_true,
Label* if_false) {
@@ -698,13 +692,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
Label skip;
if (should_normalize) __ b(&skip);
-
- ForwardBailoutStack* current = forward_bailout_stack_;
- while (current != NULL) {
- PrepareForBailout(current->expr(), state);
- current = current->parent();
- }
-
+ PrepareForBailout(expr, TOS_REG);
if (should_normalize) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
@@ -1194,9 +1182,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ bind(&fast);
}
- __ ldr(r0, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF || var->is_qml_global())
+ RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
@@ -1281,10 +1269,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object (receiver) in r0.
- __ ldr(r0, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, var->is_qml_global()?RelocInfo::CODE_TARGET:RelocInfo::CODE_TARGET_CONTEXT);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(r0);
break;
}
@@ -1735,7 +1723,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1743,7 +1731,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1921,7 +1909,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(r2, Operand(var->name()));
- __ ldr(r1, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ ldr(r1, GlobalObjectOperand());
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
@@ -2201,8 +2189,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
- int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
@@ -2221,13 +2208,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
FLAG_harmony_scoping ? kStrictMode : strict_mode_flag();
__ mov(r1, Operand(Smi::FromInt(strict_mode)));
__ push(r1);
- // Push the qml mode flag.
- __ mov(r1, Operand(Smi::FromInt(is_qml_mode())));
- __ push(r1);
- __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
- ? Runtime::kResolvePossiblyDirectEvalNoLookup
- : Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
}
@@ -2261,28 +2243,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForStackValue(args->at(i));
}
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- Label done;
- Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
- Label slow;
- EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
- // Push the function and resolve eval.
- __ push(r0);
- EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
- __ jmp(&done);
- __ bind(&slow);
- }
-
// Push a copy of the function (found below the arguments) and
// resolve eval.
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ push(r1);
- EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
- __ bind(&done);
+ EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in r0 (function) and
// r1 (receiver). Touch up the stack with the right values.
@@ -2300,9 +2265,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
context()->DropAndPlug(1, r0);
} else if (proxy != NULL && proxy->var()->IsUnallocated()) {
// Push global object as receiver for the call IC.
- __ ldr(r0, proxy->var()->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ ldr(r0, GlobalObjectOperand());
__ push(r0);
- EmitCallWithIC(expr, proxy->name(), proxy->var()->is_qml_global()?RelocInfo::CODE_TARGET:RelocInfo::CODE_TARGET_CONTEXT);
+ EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
// Call to a lookup slot (dynamically introduced variable).
Label slow, done;
@@ -2406,7 +2371,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2418,7 +2384,7 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ tst(r0, Operand(kSmiTagMask));
Split(eq, if_true, if_false, fall_through);
@@ -2426,7 +2392,8 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2438,7 +2405,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ tst(r0, Operand(kSmiTagMask | 0x80000000));
Split(eq, if_true, if_false, fall_through);
@@ -2446,7 +2413,8 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2471,14 +2439,15 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ b(lt, if_false);
__ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(le, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2492,14 +2461,15 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2515,7 +2485,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ne, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2523,8 +2493,8 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
-
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2603,12 +2573,13 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
__ jmp(if_true);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2622,14 +2593,15 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2643,14 +2615,15 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2664,7 +2637,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2672,8 +2645,8 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -2696,14 +2669,15 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
@@ -2719,14 +2693,15 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
__ pop(r1);
__ cmp(r0, r1);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
// ArgumentsAccessStub expects the key in edx and the formal
@@ -2740,9 +2715,8 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label exit;
// Get the number of formal parameters.
__ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -2762,7 +2736,8 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
Label done, null, function, non_function_constructor;
@@ -2822,7 +2797,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// Conditionally generate a log call.
// Args:
// 0 (literal string): The type of logging (corresponds to the flags).
@@ -2830,6 +2805,7 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
// 1 (string): Format string. Access the string at argument index 2
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
@@ -2843,9 +2819,8 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
@@ -2897,9 +2872,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2909,9 +2885,10 @@ void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2922,9 +2899,9 @@ void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
-
VisitForAccumulatorValue(args->at(0)); // Load the object.
Label done;
@@ -2940,8 +2917,9 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2951,9 +2929,9 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
-
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
__ pop(r1); // r0 = value. r1 = object.
@@ -2979,9 +2957,9 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
-
// Load the argument on the stack and call the stub.
VisitForStackValue(args->at(0));
@@ -2991,9 +2969,9 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
-
VisitForAccumulatorValue(args->at(0));
Label done;
@@ -3009,9 +2987,9 @@ void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
-
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
@@ -3056,9 +3034,9 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
-
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
@@ -3105,9 +3083,9 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
-
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3117,9 +3095,9 @@ void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
-
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3129,10 +3107,11 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3140,10 +3119,11 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3151,10 +3131,11 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3162,8 +3143,9 @@ void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
// Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3171,7 +3153,8 @@ void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
int arg_count = args->length() - 2; // 2 ~ receiver and function.
@@ -3190,8 +3173,9 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3201,7 +3185,8 @@ void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3298,9 +3283,9 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
-
ASSERT_NE(NULL, args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
@@ -3349,7 +3334,8 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
Register right = r0;
@@ -3389,7 +3375,8 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
@@ -3401,14 +3388,15 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
__ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
__ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3423,12 +3411,12 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop;
-
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(0));
@@ -3715,7 +3703,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// but "delete this" is allowed.
ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
if (var->IsUnallocated()) {
- __ ldr(r2, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
__ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
__ Push(r2, r1, r0);
@@ -3756,18 +3744,35 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Unary NOT has no side effects so it's only necessary to visit the
// subexpression. Match the optimizing compiler by not branching.
VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
} else {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
-
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+ if (context()->IsStackValue()) __ push(r0);
+ __ jmp(&done);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+ if (context()->IsStackValue()) __ push(r0);
+ __ bind(&done);
}
break;
}
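
The unary NOT handling above relies on a classic trick: in a test context the operand is compiled with the true and false targets exchanged, and only value contexts materialize an actual boolean. A hedged, toy illustration of the label swap (invented names, not V8 code):

    #include <cstdio>

    struct Label { const char* name; };

    // Toy emitter: "branch to t if the condition holds, else to f".
    static void VisitForControl(const char* cond, Label* t, Label* f) {
      std::printf("test %s -> true:%s false:%s\n", cond, t->name, f->name);
    }

    // NOT in a test context: no code is emitted for the negation itself;
    // the operand is simply compiled with the branch targets swapped.
    static void VisitNotForControl(const char* operand, Label* t, Label* f) {
      VisitForControl(operand, f, t);  // labels exchanged
    }

    int main() {
      Label if_true = {"L_true"}, if_false = {"L_false"};
      VisitNotForControl("x == 0", &if_true, &if_false);
      return 0;
    }
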
@@ -4001,7 +4006,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
- __ ldr(r0, proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(proxy->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
@@ -4026,12 +4031,13 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
context()->Plug(r0);
} else {
// This expression cannot throw a reference error at the top level.
- VisitInCurrentContext(expr);
+ VisitInDuplicateContext(expr);
}
}
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
Handle<String> check) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -4041,9 +4047,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
&if_true, &if_false, &fall_through);
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(expr);
+ VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (check->Equals(isolate()->heap()->number_symbol())) {
__ JumpIfSmi(r0, if_true);
@@ -4130,7 +4136,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
Split(eq, if_true, if_false, fall_through);
@@ -4140,7 +4146,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
// The stub returns 0 for true.
__ tst(r0, r0);
Split(eq, if_true, if_false, fall_through);
@@ -4190,7 +4196,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Handle<Code> ic = CompareIC::GetUninitialized(op);
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand(0));
Split(cond, if_true, if_false, fall_through);
}
@@ -4213,7 +4219,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex;
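
Every PrepareForBailoutBeforeSplit call site in this file changes the same way: the expression replaces the TOS_REG state constant, so the deopt point can be recorded against that node's own AST id. A small sketch of the idea, with invented names standing in for the real bookkeeping:

    #include <map>

    typedef int AstId;

    struct Expr {
      AstId id() const { return id_; }
      AstId id_;
    };

    enum State { NO_REGISTERS, TOS_REG };

    class Bailouts {
     public:
      // New-style entry point: the AST node identifies the deopt point,
      // and the expected-state constant is derived internally instead of
      // being threaded through by every caller.
      void PrepareBeforeSplit(const Expr* expr) { Record(expr->id(), TOS_REG); }

     private:
      void Record(AstId id, State s) { points_[id] = s; }
      std::map<AstId, State> points_;
    };
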
diff --git a/src/3rdparty/v8/src/arm/lithium-arm.cc b/src/3rdparty/v8/src/arm/lithium-arm.cc
index 943bc82..ffb7457 100644
--- a/src/3rdparty/v8/src/arm/lithium-arm.cc
+++ b/src/3rdparty/v8/src/arm/lithium-arm.cc
@@ -750,7 +750,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
instr->MarkAsCall();
instr = AssignPointerMap(instr);
- if (hinstr->HasSideEffects()) {
+ if (hinstr->HasObservableSideEffects()) {
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -762,7 +762,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
  // Thus we still need to attach an environment to this call even if
  // the call sequence cannot deoptimize eagerly.
bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
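
The rename from HasSideEffects to HasObservableSideEffects narrows when a call must be followed by a simulate: only effects JavaScript can observe force one. A toy version of the needs_environment computation above, under assumed stand-in types:

    // Stand-in for a hydrogen instruction: it may have side effects that
    // JavaScript can observe (heap writes, calls back into JS) or only
    // internal ones that no deopt needs to reconstruct.
    struct HInstr {
      bool observable_side_effects;
      bool HasObservableSideEffects() const { return observable_side_effects; }
    };

    enum CanDeoptimize { CANNOT_DEOPTIMIZE_EAGERLY, CAN_DEOPTIMIZE_EAGERLY };

    // Mirrors the logic in MarkAsCall: a call needs an environment either
    // because it can deoptimize eagerly, or because no simulate follows it
    // (nothing observable happened) and lazy bailout must reuse this one.
    static bool NeedsEnvironment(const HInstr* h, CanDeoptimize cd) {
      return cd == CAN_DEOPTIMIZE_EAGERLY || !h->HasObservableSideEffects();
    }
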
@@ -820,28 +821,6 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
}
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineAsRegister(new LBitI(op, left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
-}
-
-
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
if (instr->representation().IsTagged()) {
@@ -1128,7 +1107,7 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGlobalObject(context, instr->qml_global()));
+ return DefineAsRegister(new LGlobalObject(context));
}
@@ -1198,7 +1177,7 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallGlobal(instr->qml_global()), r0), instr);
+ return MarkAsCall(DefineFixed(new LCallGlobal, r0), instr);
}
@@ -1243,8 +1222,24 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
}
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
- return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineAsRegister(new LBitI(left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), r1);
+ LOperand* right = UseFixed(instr->right(), r0);
+ LArithmeticT* result = new LArithmeticT(instr->op(), left, right);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+ }
}
@@ -1255,16 +1250,6 @@ LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
}
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
- return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
- return DoBit(Token::BIT_XOR, instr);
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
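
The per-operator builders DoBitAnd/DoBitOr/DoBitXor collapse into the single DoBitwise above because the hydrogen instruction now carries its Token operator as data. A compileable toy sketch of that dispatch shape (simplified enums, not the real classes):

    #include <cassert>

    enum Token { BIT_AND, BIT_OR, BIT_XOR };

    // One hydrogen node for all three bitwise operators; the operator is
    // data on the instruction instead of being encoded in its class.
    struct HBitwise {
      Token op() const { return op_; }
      Token op_;
    };

    struct LInstruction { Token op; };

    // Single builder replacing DoBitAnd/DoBitOr/DoBitXor: the lithium
    // instruction just reads the operator back from its hydrogen node.
    static LInstruction DoBitwise(const HBitwise* instr) {
      LInstruction result = { instr->op() };
      return result;
    }

    int main() {
      HBitwise h = { BIT_XOR };
      assert(DoBitwise(&h).op == BIT_XOR);
      return 0;
    }
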
diff --git a/src/3rdparty/v8/src/arm/lithium-arm.h b/src/3rdparty/v8/src/arm/lithium-arm.h
index a7e1704..1452e53 100644
--- a/src/3rdparty/v8/src/arm/lithium-arm.h
+++ b/src/3rdparty/v8/src/arm/lithium-arm.h
@@ -796,18 +796,15 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
- LBitI(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
+ LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- Token::Value op() const { return op_; }
+ Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
- Token::Value op_;
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
};
@@ -1303,17 +1300,13 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LGlobalObject(LOperand* context, bool qml_global) {
+ explicit LGlobalObject(LOperand* context) {
inputs_[0] = context;
- qml_global_ = qml_global;
}
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
LOperand* context() { return InputAt(0); }
- bool qml_global() { return qml_global_; }
- private:
- bool qml_global_;
};
@@ -1400,16 +1393,10 @@ class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- explicit LCallGlobal(bool qml_global) : qml_global_(qml_global) {}
-
virtual void PrintDataTo(StringStream* stream);
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
-
- bool qml_global() { return qml_global_; }
- private:
- bool qml_global_;
};
@@ -2202,7 +2189,6 @@ class LChunkBuilder BASE_EMBEDDED {
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
diff --git a/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc b/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
index 2e1e6fa..4cf7df4 100644
--- a/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
+++ b/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
@@ -190,13 +190,12 @@ bool LCodeGen::GeneratePrologue() {
// Possibly allocate a local context.
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
+ if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is in r1.
__ push(r1);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+ FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -2827,7 +2826,7 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(cp, instr->qml_global()?Context::QML_GLOBAL_INDEX:Context::GLOBAL_INDEX));
+ __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
}
@@ -3281,7 +3280,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- RelocInfo::Mode mode = instr->qml_global()?RelocInfo::CODE_TARGET:RelocInfo::CODE_TARGET_CONTEXT;
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(r2, Operand(instr->name()));
diff --git a/src/3rdparty/v8/src/arm/macro-assembler-arm.cc b/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
index cf4258c..90bad75 100644
--- a/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
+++ b/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
@@ -1094,7 +1094,7 @@ void MacroAssembler::InvokeFunction(Register fun,
}
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind) {
@@ -1102,7 +1102,7 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- mov(r1, Operand(Handle<JSFunction>(function)));
+ mov(r1, Operand(function));
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -2084,41 +2084,19 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
}
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Handle<Code> code(Code::cast(result));
- Call(code, RelocInfo::CODE_TARGET, kNoASTId, cond);
- return result;
-}
-
-
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
- return result;
-}
-
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
- ExternalReference function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
+ int stack_space) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
const int kNextOffset = 0;
@@ -2181,14 +2159,10 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
mov(pc, lr);
bind(&promote_scheduled_exception);
- MaybeObject* result
- = TryTailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
- if (result->IsFailure()) {
- return result;
- }
+ TailCallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0,
+ 1);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
@@ -2200,8 +2174,6 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
ExternalReference::delete_handle_scope_extensions(isolate()), 1);
mov(r0, r4);
jmp(&leave_exit_frame);
-
- return result;
}
@@ -2633,17 +2605,6 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
}
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r0, Operand(num_arguments));
- return TryJumpToExternalReference(ext);
-}
-
-
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
@@ -2664,18 +2625,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
- const ExternalReference& builtin) {
-#if defined(__thumb__)
- // Thumb mode builtin.
- ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
-#endif
- mov(r1, Operand(builtin));
- CEntryStub stub(1);
- return TryTailCallStub(&stub);
-}
-
-
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
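
Each deleted Try* entry point implemented the same retry-after-GC protocol: refuse to allocate and hand a MaybeObject* failure back to the caller. Once call sites hold Handles, allocation and GC are safe at these points, so the plain void versions above suffice. A compressed, hypothetical sketch of the two protocols:

    #include <cstddef>

    struct Object {};
    struct Failure : Object {};

    // Old protocol: a tagged pair of "result" or "retry after GC".
    struct MaybeObject {
      Object* value;     // non-null on success
      Failure* failure;  // non-null when allocation must be retried
      bool IsFailure() const { return failure != NULL; }
    };

    // Old shape: every step that might allocate had to thread a possible
    // failure back to the caller by hand.
    static MaybeObject TryCallStub(bool can_allocate) {
      if (!can_allocate) { MaybeObject m = { NULL, new Failure }; return m; }
      MaybeObject m = { new Object, NULL };
      return m;
    }

    // New shape: with handles keeping pointers GC-safe, the helper may
    // simply allocate, so there is no failure path to propagate.
    static void CallStub() { /* allocate freely; handles stay valid */ }
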
diff --git a/src/3rdparty/v8/src/arm/macro-assembler-arm.h b/src/3rdparty/v8/src/arm/macro-assembler-arm.h
index 5947e6a..8f6803e 100644
--- a/src/3rdparty/v8/src/arm/macro-assembler-arm.h
+++ b/src/3rdparty/v8/src/arm/macro-assembler-arm.h
@@ -520,7 +520,7 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
- void InvokeFunction(JSFunction* function,
+ void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind);
@@ -926,20 +926,9 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub, Condition cond = al);
- // Call a code stub and return the code object called. Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry
- // after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub, Condition cond = al);
-
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
- // Tail call a code stub (jump) and return the code object called. Try to
- // generate the code if necessary. Do not perform a GC but instead return
- // a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
- Condition cond = al);
-
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
@@ -958,12 +947,6 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
- // Tail call of a runtime routine (jump). Try to generate the code if
- // necessary. Do not perform a GC but instead return a retry after GC
- // failure.
- MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size);
-
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
@@ -1012,18 +995,15 @@ class MacroAssembler: public Assembler {
void GetCFunctionDoubleResult(const DoubleRegister dst);
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Restores context.
- // stack_space - space to be unwound on exit (includes the call js
- // arguments space and the additional space allocated for the fast call).
- MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
- int stack_space);
+  // Calls an API function. Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions. Restores context.
+  // stack_space - space to be unwound on exit (includes the call js
+  // arguments space and the additional space allocated for the fast call).
+ void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
- MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
@@ -1326,11 +1306,6 @@ static inline MemOperand GlobalObjectOperand() {
}
-static inline MemOperand QmlGlobalObjectOperand() {
- return ContextOperand(cp, Context::QML_GLOBAL_INDEX);
-}
-
-
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
diff --git a/src/3rdparty/v8/src/arm/stub-cache-arm.cc b/src/3rdparty/v8/src/arm/stub-cache-arm.cc
index f9a10c4..bf6f085 100644
--- a/src/3rdparty/v8/src/arm/stub-cache-arm.cc
+++ b/src/3rdparty/v8/src/arm/stub-cache-arm.cc
@@ -149,68 +149,6 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateDictionaryNegativeLookup(
- MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register scratch0,
- Register scratch1) {
- ASSERT(name->IsSymbol());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
- __ b(ne, miss_label);
-
- // Check that receiver is a JSObject.
- __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(lt, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- // Check that the properties array is a dictionary.
- __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- Register tmp = properties;
- __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
- __ cmp(map, tmp);
- __ b(ne, miss_label);
-
- // Restore the temporarily used register.
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-
- MaybeObject* result = StringDictionaryLookupStub::TryGenerateNegativeLookup(
- masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
- if (result->IsFailure()) return result;
-
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- return result;
-}
-
-
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
@@ -294,7 +232,10 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
__ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -302,8 +243,8 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
__ cmp(prototype, ip);
__ b(ne, miss);
// Get the global function with the given index.
- JSFunction* function =
- JSFunction::cast(isolate->global_context()->get(index));
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->global_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -566,12 +507,12 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
- JSObject* holder_obj) {
+ Handle<JSObject> holder_obj) {
__ push(name);
- InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
Register scratch = name;
- __ mov(scratch, Operand(Handle<Object>(interceptor)));
+ __ mov(scratch, Operand(interceptor));
__ push(scratch);
__ push(receiver);
__ push(holder);
@@ -580,11 +521,12 @@ static void PushInterceptorArguments(MacroAssembler* masm,
}
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
ExternalReference ref =
@@ -597,6 +539,7 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
__ CallStub(&stub);
}
+
static const int kFastApiCallArguments = 3;
// Reserves space for the extra arguments to FastHandleApiCall in the
@@ -618,10 +561,9 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
}
-static MaybeObject* GenerateFastApiDirectCall(
- MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
+static void GenerateFastApiDirectCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
// ----------- S t a t e -------------
// -- sp[0] : holder (set by CheckPrototypes)
// -- sp[4] : callee js function
@@ -632,18 +574,18 @@ static MaybeObject* GenerateFastApiDirectCall(
// -- sp[(argc + 4) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
- JSFunction* function = optimization.constant_function();
- __ mov(r5, Operand(Handle<JSFunction>(function)));
+ Handle<JSFunction> function = optimization.constant_function();
+ __ mov(r5, Operand(function));
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
- Object* call_data = optimization.api_call_info()->data();
- Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (masm->isolate()->heap()->InNewSpace(call_data)) {
- __ Move(r0, api_call_info_handle);
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data());
+ if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+ __ Move(r0, api_call_info);
__ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
} else {
- __ Move(r6, Handle<Object>(call_data));
+ __ Move(r6, call_data);
}
// Store js function and call data.
__ stm(ib, sp, r5.bit() | r6.bit());
@@ -652,10 +594,6 @@ static MaybeObject* GenerateFastApiDirectCall(
// (refer to layout above).
__ add(r2, sp, Operand(2 * kPointerSize));
- Object* callback = optimization.api_call_info()->callback();
- Address api_function_address = v8::ToCData<Address>(callback);
- ApiFunction fun(api_function_address);
-
const int kApiStackSpace = 4;
FrameScope frame_scope(masm, StackFrame::MANUAL);
@@ -676,16 +614,15 @@ static MaybeObject* GenerateFastApiDirectCall(
__ mov(ip, Operand(0));
__ str(ip, MemOperand(r0, 3 * kPointerSize));
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
ExternalReference ref = ExternalReference(&fun,
ExternalReference::DIRECT_API_CALL,
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
- return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+
+ __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
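
A recurring idiom in the handlified code above: values travel as Handle<T>, and the raw pointer is recovered with unary * only at the moment a raw-object predicate such as InNewSpace needs it. A simplified sketch of that pattern (this Handle is a stand-in, not V8's):

    // Minimal handle: an indirection the GC could update, so the handle
    // stays valid across allocation.
    template <typename T>
    class Handle {
     public:
      explicit Handle(T* object) : slot_(object) {}
      T* operator*() const { return slot_; }   // raw pointer, use briefly
      T* operator->() const { return slot_; }
     private:
      T* slot_;  // in V8 this slot would live in a HandleScope block
    };

    struct HeapObject { bool in_new_space; };

    static bool InNewSpace(HeapObject* raw) { return raw->in_new_space; }

    static bool NeedsRuntimeLoad(Handle<HeapObject> call_data) {
      // Dereference only for the raw-pointer predicate; keep passing the
      // handle everywhere else.
      return InNewSpace(*call_data);
    }
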
@@ -700,86 +637,63 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name_(name),
extra_ic_state_(extra_ic_state) {}
- MaybeObject* Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
+ void Compile(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
-
CallOptimization optimization(lookup);
-
if (optimization.is_constant_call()) {
- return CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
+ CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+ holder, lookup, name, optimization, miss);
} else {
- CompileRegular(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- name,
- holder,
- miss);
- return masm->isolate()->heap()->undefined_value();
+ CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+ name, holder, miss);
}
}
private:
- MaybeObject* CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
+ void CompileCacheable(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<String> name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
-
Counters* counters = masm->isolate()->counters();
-
int depth1 = kInvalidProtoDepth;
int depth2 = kInvalidProtoDepth;
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 =
- optimization.GetPrototypeDepthOfExpectedType(object,
- interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 =
- optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
- lookup->holder());
- }
- can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
- (depth2 != kInvalidProtoDepth);
+ !lookup->holder()->IsGlobalObject()) {
+ depth1 = optimization.GetPrototypeDepthOfExpectedType(
+ object, interceptor_holder);
+ if (depth1 == kInvalidProtoDepth) {
+ depth2 = optimization.GetPrototypeDepthOfExpectedType(
+ interceptor_holder, Handle<JSObject>(lookup->holder()));
+ }
+ can_do_fast_api_call =
+ depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
}
__ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
+ scratch1, scratch2);
if (can_do_fast_api_call) {
__ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
@@ -792,9 +706,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver,
- interceptor_holder, scratch1,
- scratch2, scratch3, name, depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -807,10 +721,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, scratch3, name, depth2, miss);
+ Handle<JSObject>(lookup->holder()),
+ scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -821,10 +736,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- MaybeObject* result = GenerateFastApiDirectCall(masm,
- optimization,
- arguments_.immediate());
- if (result->IsFailure()) return result;
+ GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -845,66 +757,53 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm);
}
-
- return masm->isolate()->heap()->undefined_value();
}
void CompileRegular(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
- JSObject* interceptor_holder,
+ Handle<String> name,
+ Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name,
- miss_label);
+ scratch1, scratch2, scratch3,
+ name, miss_label);
// Call a runtime function to load the interceptor property.
FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
-
- PushInterceptorArguments(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
-
+ PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
5);
-
// Restore the name_ register.
__ pop(name_);
-
// Leave the internal frame.
}
void LoadWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
- JSObject* holder_obj,
+ Handle<JSObject> holder_obj,
Register scratch,
Label* interceptor_succeeded) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(holder, name_);
-
CompileCallLoadPropertyWithInterceptor(masm,
receiver,
holder,
name_,
holder_obj);
-
__ pop(name_); // Restore the name.
__ pop(receiver); // Restore the holder.
}
-
// If interceptor returns no-result sentinel, call the constant function.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
__ cmp(r0, scratch);
@@ -938,30 +837,6 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCell(
- MacroAssembler* masm,
- GlobalObject* global,
- String* name,
- Register scratch,
- Label* miss) {
- Object* probe;
- { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
- ASSERT(cell->value()->IsTheHole());
- __ mov(scratch, Operand(Handle<Object>(cell)));
- __ ldr(scratch,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- __ b(ne, miss);
- return cell;
-}
-
-
// Calls GenerateCheckPropertyCell for each global object in the prototype chain
// from object to (but not including) holder.
static void GenerateCheckPropertyCells(MacroAssembler* masm,
@@ -984,34 +859,6 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCells(
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- Register scratch,
- Label* miss) {
- JSObject* current = object;
- while (current != holder) {
- if (current->IsGlobalObject()) {
- // Returns a cell or a failure.
- MaybeObject* result = TryGenerateCheckPropertyCell(
- masm,
- GlobalObject::cast(current),
- name,
- scratch,
- miss);
- if (result->IsFailure()) return result;
- }
- ASSERT(current->IsJSObject());
- current = JSObject::cast(current->GetPrototype());
- }
- return NULL;
-}
-
-
// Convert and store int passed in register ival to IEEE 754 single precision
// floating point value at memory location (dst + 4 * wordoffset).
// If VFP3 is available, use it for the conversion.
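
The helper this comment describes converts an integer to a single-precision float and stores it at byte offset 4 * wordoffset from dst. A portable C++ sketch of the software path only (the VFP3 path is a single hardware instruction):

    #include <cstdint>
    #include <cstring>

    // Stores ival as an IEEE 754 single at byte offset 4 * wordoffset
    // from dst, matching the layout described in the comment above.
    static void StoreIntAsFloat(uint8_t* dst, int wordoffset, int32_t ival) {
      float f = static_cast<float>(ival);    // the actual conversion
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));  // reinterpret without aliasing UB
      std::memcpy(dst + 4 * wordoffset, &bits, sizeof(bits));
    }
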
@@ -1226,150 +1073,6 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-Register StubCompiler::CheckPrototypes(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- String* name,
- int save_at_depth,
- Label* miss) {
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- JSObject* current = object;
- while (current != holder) {
- depth++;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- ASSERT(current->GetPrototype()->IsJSObject());
- JSObject* prototype = JSObject::cast(current->GetPrototype());
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
- Object* lookup_result = NULL; // Initialization to please compiler.
- if (!maybe_lookup_result->ToObject(&lookup_result)) {
- set_failure(Failure::cast(maybe_lookup_result));
- return reg;
- }
- name = String::cast(lookup_result);
- }
- ASSERT(current->property_dictionary()->FindEntry(name) ==
- StringDictionary::kNotFound);
-
- MaybeObject* negative_lookup =
- TryGenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
- if (negative_lookup->IsFailure()) {
- set_failure(Failure::cast(negative_lookup));
- return reg;
- }
-
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // from now the object is in holder_reg
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else if (heap()->InNewSpace(prototype)) {
- // Get the map of the current object.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Operand(Handle<Map>(current->map())));
-
- // Branch on the result of the map check.
- __ b(ne, miss);
-
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- // Restore scratch register to be the map of the object. In the
- // new space case below, we load the prototype from the map in
- // the scratch register.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
-
- reg = holder_reg; // from now the object is in holder_reg
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // Check the map of the current object.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Operand(Handle<Map>(current->map())));
- // Branch on the result of the map check.
- __ b(ne, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- __ mov(reg, Operand(Handle<JSObject>(prototype)));
- }
-
- if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
-
- // Check the holder map.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Operand(Handle<Map>(current->map())));
- __ b(ne, miss);
-
- // Log the check depth.
- LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
-
- // Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // If we've skipped any global objects, it's not enough to verify
- // that their maps haven't changed. We also need to check that the
- // property cell for the property is still empty.
- MaybeObject* result = TryGenerateCheckPropertyCells(masm(),
- object,
- holder,
- name,
- scratch1,
- miss);
- if (result->IsFailure()) set_failure(Failure::cast(result));
-
- // Return the register containing the holder.
- return reg;
-}
-
-
void StubCompiler::GenerateLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -1412,43 +1115,37 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
}
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
- name, miss);
+ Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+ scratch2, scratch3, name, miss);
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
__ push(receiver);
__ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
- Handle<AccessorInfo> callback_handle(callback);
- if (heap()->InNewSpace(callback_handle->data())) {
- __ Move(scratch3, callback_handle);
+ if (heap()->InNewSpace(callback->data())) {
+ __ Move(scratch3, callback);
__ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
} else {
- __ Move(scratch3, Handle<Object>(callback_handle->data()));
+ __ Move(scratch3, Handle<Object>(callback->data()));
}
__ Push(reg, scratch3, name_reg);
__ mov(r0, sp); // r0 = Handle<String>
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
-
const int kApiStackSpace = 1;
-
FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
@@ -1457,28 +1154,26 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
__ str(scratch2, MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
const int kStackUnwindSpace = 4;
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
ExternalReference ref =
ExternalReference(&fun,
ExternalReference::DIRECT_GETTER_CALL,
masm()->isolate());
- return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+ __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
+ Handle<String> name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1494,9 +1189,9 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo() &&
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
- compile_followup_inline = true;
+ lookup->GetCallbackObject()->IsAccessorInfo()) {
+ compile_followup_inline =
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
}
}
@@ -1513,14 +1208,12 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Requires a frame to make GC aware of pushed pointers.
{
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
// CALLBACKS case needs a receiver to be passed into C++ callback.
__ Push(receiver, holder_reg, name_reg);
} else {
__ Push(holder_reg, name_reg);
}
-
      // Invoke an interceptor. Note: map checks from receiver to
      // interceptor's holder have been compiled before (see a caller
      // of this method).
@@ -1529,7 +1222,6 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
holder_reg,
name_reg,
interceptor_holder);
-
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
@@ -1545,16 +1237,14 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
__ pop(receiver);
}
-
// Leave the internal frame.
}
-
  // Check that the maps from interceptor's holder to lookup's holder
  // haven't changed, and load lookup's holder into the |holder| register.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
- lookup->holder(),
+ Handle<JSObject>(lookup->holder()),
scratch1,
scratch2,
scratch3,
@@ -1573,15 +1263,14 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
- ASSERT(callback != NULL);
+ Handle<AccessorInfo> callback(
+ AccessorInfo::cast(lookup->GetCallbackObject()));
ASSERT(callback->getter() != NULL);
// Tail call to runtime.
// Important invariant in CALLBACKS case: the code above must be
// structured to never clobber |receiver| register.
- __ Move(scratch2, Handle<AccessorInfo>(callback));
+ __ Move(scratch2, callback);
// holder_reg is either receiver or scratch1.
if (!receiver.is(holder_reg)) {
ASSERT(scratch1.is(holder_reg));
@@ -1626,9 +1315,9 @@ void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
@@ -1641,7 +1330,7 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
  // If the object is the holder, then we know it's a global object,
  // which can only happen for contextual calls. In this case,
  // the receiver cannot be a smi.
- if (object != holder) {
+ if (!object.is_identical_to(holder)) {
__ JumpIfSmi(r0, miss);
}
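
The rewritten check above compares what the two handles designate rather than the handle objects themselves, which is what is_identical_to is for. A toy version:

    template <typename T>
    class Handle {
     public:
      explicit Handle(T* object) : slot_(object) {}
      // Two distinct handles are "identical" when they designate the same
      // heap object; comparing the Handle objects directly would be wrong.
      bool is_identical_to(const Handle<T>& other) const {
        return slot_ == other.slot_;
      }
     private:
      T* slot_;
    };
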
@@ -1650,15 +1339,16 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
}
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Label* miss) {
// Get the value from the cell.
- __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(r3, Operand(cell));
__ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (heap()->InNewSpace(function)) {
+ if (heap()->InNewSpace(*function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
@@ -1672,11 +1362,10 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
__ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ cmp(r4, r3);
- __ b(ne, miss);
} else {
- __ cmp(r1, Operand(Handle<JSFunction>(function)));
- __ b(ne, miss);
+ __ cmp(r1, Operand(function));
}
+ __ b(ne, miss);
}
@@ -1689,20 +1378,6 @@ void CallStubCompiler::GenerateMissBranch() {
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* CallStubCompiler::TryGenerateMissBranch() {
- MaybeObject* maybe_obj =
- isolate()->stub_cache()->TryComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- Object* obj;
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
- return obj;
-}
-
-
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
int index,
@@ -1737,11 +1412,12 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1751,14 +1427,12 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss;
-
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
Register receiver = r1;
-
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
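
The bail-out convention changes as well: instead of returning the heap's undefined value, the handlified compilers return a null Handle<Code> to mean "no specialized stub; take the regular call path". A simplified sketch of that sentinel:

    #include <cassert>
    #include <cstddef>

    struct Code {};

    template <typename T>
    class Handle {
     public:
      Handle() : slot_(NULL) {}
      explicit Handle(T* object) : slot_(object) {}
      static Handle<T> null() { return Handle<T>(); }  // the sentinel
      bool is_null() const { return slot_ == NULL; }
     private:
      T* slot_;
    };

    // Callers test is_null() instead of comparing against undefined.
    static Handle<Code> CompileSpecialized(bool applicable) {
      if (!applicable) return Handle<Code>::null();  // regular call path
      return Handle<Code>(new Code);
    }

    int main() {
      assert(CompileSpecialized(false).is_null());
      return 0;
    }
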
@@ -1767,8 +1441,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), receiver,
- holder, r3, r0, r4, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
+ name, &miss);
if (argc == 0) {
// Nothing to do, just return the length.
@@ -1777,10 +1451,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ Ret();
} else {
Label call_builtin;
-
Register elements = r3;
Register end_elements = r5;
-
// Get the elements array of the object.
__ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1921,19 +1593,19 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1943,25 +1615,22 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss, return_undefined, call_builtin;
-
Register receiver = r1;
Register elements = r3;
-
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object),
- receiver, holder, elements, r4, r0, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
+ r4, r0, name, &miss);
// Get the elements array of the object.
__ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -2010,20 +1679,19 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2033,10 +1701,9 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
-
Label miss;
Label name_miss;
Label index_out_of_range;
@@ -2047,17 +1714,16 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
-
- GenerateNameCheck(Handle<String>(name), &name_miss);
+ GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
r0,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
- r1, r3, r4, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r1, r3, r4, name, &miss);
Register receiver = r1;
Register index = r4;
@@ -2070,20 +1736,20 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharCodeAtGenerator char_code_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_code_at_generator.GenerateFast(masm());
+ StringCharCodeAtGenerator generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_code_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -2094,22 +1760,21 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ bind(&miss);
// Restore function name in r2.
- __ Move(r2, Handle<String>(name));
+ __ Move(r2, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2119,31 +1784,28 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
-
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
-
if (kind_ == Code::CALL_IC &&
(CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
-
- GenerateNameCheck(Handle<String>(name), &name_miss);
+ GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
r0,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
- r1, r3, r4, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r1, r3, r4, name, &miss);
Register receiver = r0;
Register index = r4;
@@ -2157,21 +1819,21 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_at_generator.GenerateFast(masm());
+ StringCharAtGenerator generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -2182,22 +1844,21 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ bind(&miss);
// Restore function name in r2.
- __ Move(r2, Handle<String>(name));
+ __ Move(r2, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2210,22 +1871,23 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2241,13 +1903,13 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Convert the smi code to uint16.
__ and_(code, code, Operand(Smi::FromInt(0xffff)));
- StringCharFromCodeGenerator char_from_code_generator(code, r0);
- char_from_code_generator.GenerateFast(masm());
+ StringCharFromCodeGenerator generator(code, r0);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_from_code_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
@@ -2256,19 +1918,19 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
__ bind(&miss);
// r2: function name.
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
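
[Note: in every custom call generator the cell parameter selects one of two dispatch shapes, and the handlified is_null() test replaces the old NULL-pointer test. A condensed sketch of the shared shape, assuming the helpers named in the hunks above:

if (cell.is_null()) {
  // Ordinary monomorphic call: the receiver is on the stack, so validate
  // its map and prototype chain directly.
  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
  __ JumpIfSmi(r1, &miss);
  CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
                  name, &miss);
} else {
  // Call through a global property cell: revalidate the cell instead.
  // Note the dereference in *function: a handle compares equal to a raw
  // object pointer only after being explicitly dereferenced.
  ASSERT(cell->value() == *function);
  GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
                              &miss);
  GenerateLoadFunctionFromCell(cell, function, &miss);
}
]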
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2278,31 +1940,28 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// -----------------------------------
if (!CpuFeatures::IsSupported(VFP3)) {
- return heap()->undefined_value();
+ return Handle<Code>::null();
}
CpuFeatures::Scope scope_vfp3(VFP3);
-
const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss, slow;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
-
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2404,19 +2063,19 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ bind(&miss);
// r2: function name.
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2426,25 +2085,22 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// -----------------------------------
const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
- GenerateNameCheck(Handle<String>(name), &miss);
-
- if (cell == NULL) {
+ GenerateNameCheck(name, &miss);
+ if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
-
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2505,36 +2161,34 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
__ bind(&miss);
// r2: function name.
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
Counters* counters = isolate()->counters();
ASSERT(optimization.is_simple_api_call());
// Bail out if object is a global object as we don't want to
// repatch it to global receiver.
- if (object->IsGlobalObject()) return heap()->undefined_value();
- if (cell != NULL) return heap()->undefined_value();
- if (!object->IsJSObject()) return heap()->undefined_value();
+ if (object->IsGlobalObject()) return Handle<Code>::null();
+ if (!cell.is_null()) return Handle<Code>::null();
+ if (!object->IsJSObject()) return Handle<Code>::null();
int depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+ Handle<JSObject>::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return Handle<Code>::null();
Label miss, miss_before_stack_reserved;
-
- GenerateNameCheck(Handle<String>(name), &miss_before_stack_reserved);
+ GenerateNameCheck(name, &miss_before_stack_reserved);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -2549,45 +2203,41 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
ReserveSpaceForFastApiCall(masm(), r0);
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
depth, &miss);
- MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
+ GenerateFastApiDirectCall(masm(), optimization, argc);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
__ bind(&miss_before_stack_reserved);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSFunction> function,
+ Handle<String> name,
CheckType check) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, NULL, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the receiver from the stack
const int argc = arguments().immediate();
@@ -2601,16 +2251,14 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Make sure that it's okay not to patch the on stack receiver
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
__ IncrementCounter(masm()->isolate()->counters()->call_const(),
1, r0, r3);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+ name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -2621,28 +2269,25 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
+ if (function->IsBuiltin() || function->shared()->strict_mode()) {
// Check that the object is a two-byte string or a symbol.
__ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
__ b(ge, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, r4, name, &miss);
- }
- break;
-
- case NUMBER_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r3, r1, r4, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case NUMBER_CHECK:
+ if (function->IsBuiltin() || function->shared()->strict_mode()) {
Label fast;
// Check that the object is a smi or a heap number.
__ JumpIfSmi(r1, &fast);
@@ -2652,18 +2297,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, r4, name, &miss);
- }
- break;
- }
-
- case BOOLEAN_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r3, r1, r4, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case BOOLEAN_CHECK:
+ if (function->IsBuiltin() || function->shared()->strict_mode()) {
Label fast;
// Check that the object is a boolean.
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
@@ -2676,14 +2321,15 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, r4, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r3, r1, r4, name, &miss);
+ } else {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
}
break;
- }
-
- default:
- UNREACHABLE();
}
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
@@ -2693,29 +2339,25 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
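
[Note: the STRING_CHECK/NUMBER_CHECK/BOOLEAN_CHECK rewrites above invert the condition, not the behavior. By De Morgan's law the old bail-out-first form

if (!function->IsBuiltin() && !function_info->strict_mode()) {
  __ jmp(&miss);  // value receiver would need boxing
} else {
  // type check + prototype-chain check
}

is equivalent to the new positive form

if (function->IsBuiltin() || function->shared()->strict_mode()) {
  // type check + prototype-chain check
} else {
  __ jmp(&miss);  // value receiver would need boxing
}

which also inlines function->shared() at each use, removing the raw SharedFunctionInfo* local that a GC in handlified code could otherwise have invalidated. With all four CheckType values handled, the default: UNREACHABLE() arm can be dropped as well.]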
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
-
Label miss;
-
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
-
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
@@ -2723,66 +2365,47 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
- MaybeObject* result = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- r1,
- r3,
- r4,
- r0,
- &miss);
- if (result->IsFailure()) {
- return result;
- }
+ compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
+ &miss);
// Move returned value, the function to call, to r1.
__ mov(r1, r0);
// Restore receiver.
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
- GenerateCallFunction(masm(), Handle<Object>(object), arguments(), &miss,
- extra_state_);
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(INTERCEPTOR, name);
+ return GetCode(INTERCEPTOR, name);
}
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
-
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, cell, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
-
GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
GenerateLoadFunctionFromCell(cell, function, &miss);
// Patch the receiver on the stack with the global proxy if
@@ -2798,7 +2421,6 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Jump to the cached code (tail call).
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
- Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
@@ -2813,11 +2435,10 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(NORMAL, name);
+ return GetCode(NORMAL, name);
}
@@ -3050,29 +2671,24 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
}
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
Label miss;
-
- MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
- callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name,
+ &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return TryGetCode(CALLBACKS, name);
+ return GetCode(CALLBACKS, name);
}
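
[Note: the deleted miss.Unuse() was part of the old failure-propagation protocol. Returning early left &miss linked but never bound, which would trip the assertion in Label's destructor, so the label had to be reset by hand — roughly:

MaybeObject* result = GenerateLoadCallback(/* ... */, &miss);
if (result->IsFailure()) {
  miss.Unuse();   // &miss is linked but will never be bound; reset it so
  return result;  // Label::~Label()'s ASSERT(!is_linked()) stays satisfied.
}

Handlified, GenerateLoadCallback cannot fail, every path reaches __ bind(&miss), and the guard disappears.]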
@@ -3096,9 +2712,9 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
}
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
@@ -3108,21 +2724,13 @@ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(object,
- holder,
- &lookup,
- r0,
- r2,
- r3,
- r1,
- r4,
- name,
+ GenerateLoadInterceptor(object, holder, &lookup, r0, r2, r3, r1, r4, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return TryGetCode(INTERCEPTOR, name);
+ return GetCode(INTERCEPTOR, name);
}
@@ -3197,11 +2805,11 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -3210,20 +2818,15 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
Label miss;
// Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
- MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
- r4, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name,
+ &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return TryGetCode(CALLBACKS, name);
+ return GetCode(CALLBACKS, name);
}
@@ -3252,9 +2855,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -3263,25 +2867,17 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
Label miss;
// Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- r1,
- r0,
- r2,
- r3,
- r4,
- name,
+ GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return TryGetCode(INTERCEPTOR, name);
+ return GetCode(INTERCEPTOR, name);
}
@@ -3501,7 +3097,8 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
}
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+ Handle<JSFunction> function) {
// ----------- S t a t e -------------
// -- r0 : argc
// -- r1 : constructor
@@ -3547,12 +3144,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// r2: initial map
// r7: undefined
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(r3,
- r4,
- r5,
- r6,
- &generic_stub_call,
- SIZE_IN_WORDS);
+ __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
// map and properties and elements are set to empty fixed array.
@@ -3584,7 +3176,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// r7: undefined
// Fill the initialized properties with a constant value or a passed argument
// depending on the this.x = ...; assignment in the function.
- SharedFunctionInfo* shared = function->shared();
+ Handle<SharedFunctionInfo> shared(function->shared());
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
Label not_passed, next;
@@ -4043,9 +3635,9 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
__ bind(&miss_force_generic);
- Code* stub = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+ Handle<Code> stub =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(stub, RelocInfo::CODE_TARGET);
}
@@ -4419,9 +4011,9 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
__ Ret();
__ bind(&miss_force_generic);
- Code* stub = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+ Handle<Code> stub =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(stub, RelocInfo::CODE_TARGET);
}
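
[Note: both miss-handler hunks above swap a raw-pointer lookup for a generated accessor. The difference matters under a moving collector: builtin(Builtins::kName) returns a bare Code*, while the accessor wraps the builtins-table slot itself in a handle, so the reference stays valid if the code object is relocated. The accessor is generated along these lines — a sketch of the idiom, not the verbatim declaration:

Handle<Code> Builtins::KeyedLoadIC_MissForceGeneric() {
  // The handle points at the table slot, not at the Code object, so when
  // the GC updates the slot it updates the handle's referent for free.
  Code** slot = reinterpret_cast<Code**>(
      builtin_address(kKeyedLoadIC_MissForceGeneric));
  return Handle<Code>(slot);
}
]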
diff --git a/src/3rdparty/v8/src/ast-inl.h b/src/3rdparty/v8/src/ast-inl.h
index 217c71f..f8b460d 100644
--- a/src/3rdparty/v8/src/ast-inl.h
+++ b/src/3rdparty/v8/src/ast-inl.h
@@ -126,11 +126,6 @@ StrictModeFlag FunctionLiteral::strict_mode_flag() const {
}
-bool FunctionLiteral::qml_mode() const {
- return scope()->is_qml_mode();
-}
-
-
} } // namespace v8::internal
#endif // V8_AST_INL_H_
diff --git a/src/3rdparty/v8/src/ast.cc b/src/3rdparty/v8/src/ast.cc
index 0dc3c1c..d5282d2 100644
--- a/src/3rdparty/v8/src/ast.cc
+++ b/src/3rdparty/v8/src/ast.cc
@@ -48,16 +48,19 @@ AST_NODE_LIST(DECL_ACCEPT)
// ----------------------------------------------------------------------------
// Implementation of other node functionality.
-Assignment* ExpressionStatement::StatementAsSimpleAssignment() {
- return (expression()->AsAssignment() != NULL &&
- !expression()->AsAssignment()->is_compound())
- ? expression()->AsAssignment()
- : NULL;
+
+bool Expression::IsSmiLiteral() {
+ return AsLiteral() != NULL && AsLiteral()->handle()->IsSmi();
+}
+
+
+bool Expression::IsStringLiteral() {
+ return AsLiteral() != NULL && AsLiteral()->handle()->IsString();
}
-CountOperation* ExpressionStatement::StatementAsCountOperation() {
- return expression()->AsCountOperation();
+bool Expression::IsNullLiteral() {
+ return AsLiteral() != NULL && AsLiteral()->handle()->IsNull();
}
@@ -764,11 +767,6 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
is_monomorphic_ = oracle->CallIsMonomorphic(this);
Property* property = expression()->AsProperty();
if (property == NULL) {
- if (VariableProxy *proxy = expression()->AsVariableProxy()) {
- if (proxy->var()->is_qml_global())
- return;
- }
-
// Function call. Specialize for monomorphic calls.
if (is_monomorphic_) target_ = oracle->GetCallTarget(this);
} else {
diff --git a/src/3rdparty/v8/src/ast.h b/src/3rdparty/v8/src/ast.h
index 8920b50..295257a 100644
--- a/src/3rdparty/v8/src/ast.h
+++ b/src/3rdparty/v8/src/ast.h
@@ -118,7 +118,6 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
#define DECLARE_NODE_TYPE(type) \
virtual void Accept(AstVisitor* v); \
virtual AstNode::Type node_type() const { return AstNode::k##type; } \
- virtual type* As##type() { return this; }
class AstNode: public ZoneObject {
@@ -153,7 +152,8 @@ class AstNode: public ZoneObject {
// Type testing & conversion functions overridden by concrete subclasses.
#define DECLARE_NODE_FUNCTIONS(type) \
- virtual type* As##type() { return NULL; }
+ bool Is##type() { return node_type() == AstNode::k##type; } \
+ type* As##type() { return Is##type() ? reinterpret_cast<type*>(this) : NULL; }
AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
#undef DECLARE_NODE_FUNCTIONS
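
[Note: DECLARE_NODE_FUNCTIONS now synthesizes both the predicate and the cast from the single node_type() virtual, instead of requiring every concrete class to override a virtual As##type(). Expanded for one node class (Literal), the macro yields:

bool IsLiteral() { return node_type() == AstNode::kLiteral; }
Literal* AsLiteral() {
  // Safe downcast: node_type() uniquely identifies the concrete class.
  return IsLiteral() ? reinterpret_cast<Literal*>(this) : NULL;
}

One virtual dispatch replaces a per-class virtual override, and DECLARE_NODE_TYPE correspondingly drops the As##type() definition it used to emit (the removal in the preceding hunk).]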
@@ -196,9 +196,6 @@ class Statement: public AstNode {
virtual Statement* AsStatement() { return this; }
- virtual Assignment* StatementAsSimpleAssignment() { return NULL; }
- virtual CountOperation* StatementAsCountOperation() { return NULL; }
-
bool IsEmpty() { return AsEmptyStatement() != NULL; }
void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
@@ -264,7 +261,6 @@ class Expression: public AstNode {
virtual Expression* AsExpression() { return this; }
- virtual bool IsTrivial() { return false; }
virtual bool IsValidLeftHandSide() { return false; }
// Helpers for ToBoolean conversion.
@@ -276,33 +272,24 @@ class Expression: public AstNode {
// names because [] for string objects is handled only by keyed ICs.
virtual bool IsPropertyName() { return false; }
- // Mark the expression as being compiled as an expression
- // statement. This is used to transform postfix increments to
- // (faster) prefix increments.
- virtual void MarkAsStatement() { /* do nothing */ }
-
// True iff the result can be safely overwritten (to avoid allocation).
// False for operations that can return one of their operands.
virtual bool ResultOverwriteAllowed() { return false; }
// True iff the expression is a literal represented as a smi.
- virtual bool IsSmiLiteral() { return false; }
+ bool IsSmiLiteral();
// True iff the expression is a string literal.
- virtual bool IsStringLiteral() { return false; }
+ bool IsStringLiteral();
// True iff the expression is the null literal.
- virtual bool IsNullLiteral() { return false; }
+ bool IsNullLiteral();
// Type feedback information for assignments and properties.
virtual bool IsMonomorphic() {
UNREACHABLE();
return false;
}
- virtual bool IsArrayLength() {
- UNREACHABLE();
- return false;
- }
virtual SmallMapList* GetReceiverTypes() {
UNREACHABLE();
return NULL;
@@ -368,16 +355,6 @@ class Block: public BreakableStatement {
DECLARE_NODE_TYPE(Block)
- virtual Assignment* StatementAsSimpleAssignment() {
- if (statements_.length() != 1) return NULL;
- return statements_[0]->StatementAsSimpleAssignment();
- }
-
- virtual CountOperation* StatementAsCountOperation() {
- if (statements_.length() != 1) return NULL;
- return statements_[0]->StatementAsCountOperation();
- }
-
virtual bool IsInlineable() const;
void AddStatement(Statement* statement) { statements_.Add(statement); }
@@ -612,9 +589,6 @@ class ExpressionStatement: public Statement {
virtual bool IsInlineable() const;
- virtual Assignment* StatementAsSimpleAssignment();
- virtual CountOperation* StatementAsCountOperation();
-
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() const { return expression_; }
@@ -895,11 +869,6 @@ class Literal: public Expression {
DECLARE_NODE_TYPE(Literal)
- virtual bool IsTrivial() { return true; }
- virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
- virtual bool IsStringLiteral() { return handle_->IsString(); }
- virtual bool IsNullLiteral() { return handle_->IsNull(); }
-
// Check if this literal is identical to the other literal.
bool IsIdenticalTo(const Literal* other) const {
return handle_.is_identical_to(other->handle_);
@@ -1114,12 +1083,6 @@ class VariableProxy: public Expression {
return var_ == NULL ? true : var_->IsValidLeftHandSide();
}
- virtual bool IsTrivial() {
- // Reading from a mutable variable is a side effect, but the
- // variable for 'this' is immutable.
- return is_this_ || is_trivial_;
- }
-
virtual bool IsInlineable() const;
bool IsVariable(Handle<String> n) {
@@ -1187,7 +1150,7 @@ class Property: public Expression {
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- virtual bool IsArrayLength() { return is_array_length_; }
+ bool IsArrayLength() { return is_array_length_; }
private:
Expression* obj_;
@@ -1325,8 +1288,17 @@ class UnaryOperation: public Expression {
Token::Value op,
Expression* expression,
int pos)
- : Expression(isolate), op_(op), expression_(expression), pos_(pos) {
+ : Expression(isolate),
+ op_(op),
+ expression_(expression),
+ pos_(pos),
+ materialize_true_id_(AstNode::kNoNumber),
+ materialize_false_id_(AstNode::kNoNumber) {
ASSERT(Token::IsUnaryOp(op));
+ if (op == Token::NOT) {
+ materialize_true_id_ = GetNextId(isolate);
+ materialize_false_id_ = GetNextId(isolate);
+ }
}
DECLARE_NODE_TYPE(UnaryOperation)
@@ -1339,10 +1311,18 @@ class UnaryOperation: public Expression {
Expression* expression() const { return expression_; }
virtual int position() const { return pos_; }
+ int MaterializeTrueId() { return materialize_true_id_; }
+ int MaterializeFalseId() { return materialize_false_id_; }
+
private:
Token::Value op_;
Expression* expression_;
int pos_;
+
+ // For unary not (Token::NOT), the AST ids where true and false will
+ // actually be materialized, respectively.
+ int materialize_true_id_;
+ int materialize_false_id_;
};
@@ -1653,7 +1633,6 @@ class FunctionLiteral: public Expression {
bool is_anonymous() const { return is_anonymous_; }
bool strict_mode() const { return strict_mode_flag() == kStrictMode; }
StrictModeFlag strict_mode_flag() const;
- bool qml_mode() const;
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
diff --git a/src/3rdparty/v8/src/atomicops_internals_mips_gcc.h b/src/3rdparty/v8/src/atomicops_internals_mips_gcc.h
index 5113de2..9498fd7 100644
--- a/src/3rdparty/v8/src/atomicops_internals_mips_gcc.h
+++ b/src/3rdparty/v8/src/atomicops_internals_mips_gcc.h
@@ -30,7 +30,7 @@
#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace v8 {
namespace internal {
@@ -48,16 +48,19 @@ namespace internal {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- Atomic32 prev;
- __asm__ __volatile__("1:\n"
- "ll %0, %1\n" // prev = *ptr
+ Atomic32 prev, tmp;
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %0, %5\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
- "nop\n" // delay slot nop
- "sc %2, %1\n" // *ptr = new_value (with atomic check)
+ "move %2, %4\n" // tmp = new_value
+ "sc %2, %1\n" // *ptr = tmp (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
- : "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
+ ".set pop\n"
+ : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
: "Ir" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
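
[Note: two things change in this loop. The .set push/noreorder/pop bracket stops the assembler from rescheduling instructions into the branch delay slots the code fills by hand. The actual bug fix is the new tmp register: MIPS sc overwrites its source register with a success flag (1 = stored, 0 = reservation lost), so the value to store must be re-copied on every iteration — the old code reused new_value itself ("+&r"), so a failed sc would retry storing the flag value rather than the intended one. In C-like pseudocode, with sc() standing in for the store-conditional instruction:

Atomic32 prev, tmp;
do {
  prev = *ptr;                   // ll   %0, 0(ptr)  -- opens the reservation
  if (prev != old_value) break;  // bne  %0, %3, 2f
  tmp = new_value;               // move %2, %4      -- fresh copy each pass
  tmp = sc(ptr, tmp);            // sc   %2, 0(ptr)  -- 1 if stored, 0 if lost
} while (tmp == 0);              // beqz %2, 1b
return prev;
]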
@@ -68,12 +71,15 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 temp, old;
- __asm__ __volatile__("1:\n"
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
"ll %1, %2\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
+ ".set pop\n"
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory");
@@ -87,13 +93,15 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp, temp2;
- __asm__ __volatile__("1:\n"
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
"ll %0, %2\n" // temp = *ptr
- "addu %0, %3\n" // temp = temp + increment
- "move %1, %0\n" // temp2 = temp
- "sc %0, %2\n" // *ptr = temp (with atomic check)
- "beqz %0, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
+ "addu %1, %0, %3\n" // temp2 = temp + increment
+ "sc %1, %2\n" // *ptr = temp2 (with atomic check)
+ "beqz %1, 1b\n" // start again on atomic error
+ "addu %1, %0, %3\n" // temp2 = temp + increment
+ ".set pop\n"
: "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
: "Ir" (increment), "m" (*ptr)
: "memory");
@@ -103,6 +111,7 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
+ ATOMICOPS_COMPILER_BARRIER();
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
ATOMICOPS_COMPILER_BARRIER();
return res;
@@ -117,16 +126,19 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
ATOMICOPS_COMPILER_BARRIER();
- return x;
+ Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ ATOMICOPS_COMPILER_BARRIER();
+ return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
ATOMICOPS_COMPILER_BARRIER();
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ ATOMICOPS_COMPILER_BARRIER();
+ return res;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -134,7 +146,7 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
}
inline void MemoryBarrier() {
- ATOMICOPS_COMPILER_BARRIER();
+ __asm__ __volatile__("sync" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
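
[Note: taken together, the changes in this file restore the distinction between the two fence kinds. ATOMICOPS_COMPILER_BARRIER() is now a pure compiler fence — an empty asm with a "memory" clobber, which only forbids the compiler from reordering or caching memory accesses across it and emits no instruction — while MemoryBarrier() emits the real MIPS sync for hardware ordering. Barrier_AtomicIncrement and the Acquire/Release CAS variants now bracket the operation with compiler barriers on both sides instead of one:

#define ATOMICOPS_COMPILER_BARRIER() \
  __asm__ __volatile__("" : : : "memory")       // compiler fence: no opcode

inline void MemoryBarrier() {
  __asm__ __volatile__("sync" : : : "memory");  // hardware fence: MIPS sync
}
]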
diff --git a/src/3rdparty/v8/src/code-stubs.h b/src/3rdparty/v8/src/code-stubs.h
index 4380764..d49ff1d 100644
--- a/src/3rdparty/v8/src/code-stubs.h
+++ b/src/3rdparty/v8/src/code-stubs.h
@@ -58,10 +58,8 @@ namespace internal {
V(FastNewContext) \
V(FastNewBlockContext) \
V(FastCloneShallowArray) \
- V(RevertToNumber) \
V(ToBoolean) \
V(ToNumber) \
- V(CounterOp) \
V(ArgumentsAccess) \
V(RegExpConstructResult) \
V(NumberToString) \
@@ -71,7 +69,8 @@ namespace internal {
V(KeyedStoreElement) \
V(DebuggerStatement) \
V(StringDictionaryLookup) \
- V(ElementsTransitionAndStore)
+ V(ElementsTransitionAndStore) \
+ V(StoreArrayLiteralElement)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@@ -326,7 +325,7 @@ class FastNewContextStub : public CodeStub {
static const int kMaximumSlots = 64;
explicit FastNewContextStub(int slots) : slots_(slots) {
- ASSERT(slots_ >= 0 && slots <= kMaximumSlots);
+ ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
}
void Generate(MacroAssembler* masm);
@@ -1064,6 +1063,20 @@ class ElementsTransitionAndStoreStub : public CodeStub {
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
};
+
+class StoreArrayLiteralElementStub : public CodeStub {
+ public:
+ explicit StoreArrayLiteralElementStub() {}
+
+ private:
+ Major MajorKey() { return StoreArrayLiteralElement; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
+};
+
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_
diff --git a/src/3rdparty/v8/src/compiler.cc b/src/3rdparty/v8/src/compiler.cc
index cabca74..88db467 100644
--- a/src/3rdparty/v8/src/compiler.cc
+++ b/src/3rdparty/v8/src/compiler.cc
@@ -447,8 +447,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
v8::Extension* extension,
ScriptDataImpl* input_pre_data,
Handle<Object> script_data,
- NativesFlag natives,
- v8::Script::CompileFlags compile_flags) {
+ NativesFlag natives) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
isolate->counters()->total_load_size()->Increment(source_length);
@@ -499,7 +498,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
// Create a script object describing the script to be compiled.
Handle<Script> script = FACTORY->NewScript(source);
- if (natives == NATIVES_CODE || compile_flags & v8::Script::NativeMode) {
+ if (natives == NATIVES_CODE) {
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
}
if (!script_name.is_null()) {
@@ -516,7 +515,6 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
info.MarkAsGlobal();
info.SetExtension(extension);
info.SetPreParseData(pre_data);
- if (compile_flags & v8::Script::QmlMode) info.MarkAsQmlMode();
result = MakeFunctionInfo(&info);
if (extension == NULL && !result.is_null()) {
compilation_cache->PutScript(source, result);
@@ -536,8 +534,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
Handle<Context> context,
bool is_global,
- StrictModeFlag strict_mode,
- bool qml_mode) {
+ StrictModeFlag strict_mode) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
isolate->counters()->total_eval_size()->Increment(source_length);
@@ -562,7 +559,6 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
info.MarkAsEval();
if (is_global) info.MarkAsGlobal();
info.SetStrictModeFlag(strict_mode);
- if (qml_mode) info.MarkAsQmlMode();
info.SetCallingContext(context);
result = MakeFunctionInfo(&info);
if (!result.is_null()) {
@@ -610,12 +606,6 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
info->SetStrictModeFlag(strict_mode);
shared->set_strict_mode_flag(strict_mode);
- // After parsing we know function's qml mode. Remember it.
- if (info->function()->qml_mode()) {
- shared->set_qml_mode(true);
- info->MarkAsQmlMode();
- }
-
// Compile the code.
if (!MakeCode(info)) {
if (!isolate->has_pending_exception()) {
@@ -761,7 +751,6 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
*lit->this_property_assignments());
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
function_info->set_strict_mode_flag(lit->strict_mode_flag());
- function_info->set_qml_mode(lit->qml_mode());
function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
}
diff --git a/src/3rdparty/v8/src/compiler.h b/src/3rdparty/v8/src/compiler.h
index 054e3b9..bedf5ee 100644
--- a/src/3rdparty/v8/src/compiler.h
+++ b/src/3rdparty/v8/src/compiler.h
@@ -57,7 +57,6 @@ class CompilationInfo BASE_EMBEDDED {
return StrictModeFlagField::decode(flags_);
}
bool is_in_loop() const { return IsInLoop::decode(flags_); }
- bool is_qml_mode() const { return IsQmlMode::decode(flags_); }
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
Handle<Code> code() const { return code_; }
@@ -86,9 +85,6 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(is_lazy());
flags_ |= IsInLoop::encode(true);
}
- void MarkAsQmlMode() {
- flags_ |= IsQmlMode::encode(true);
- }
void MarkAsNative() {
flags_ |= IsNative::encode(true);
}
@@ -196,9 +192,6 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(strict_mode_flag() == kNonStrictMode);
SetStrictModeFlag(shared_info_->strict_mode_flag());
}
- if (!shared_info_.is_null() && shared_info_->qml_mode()) {
- MarkAsQmlMode();
- }
}
void SetMode(Mode mode) {
@@ -225,8 +218,7 @@ class CompilationInfo BASE_EMBEDDED {
// If compiling for debugging produce just full code matching the
// initial mode setting.
class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
- // Qml mode
- class IsQmlMode: public BitField<bool, 9, 1> {};
+
unsigned flags_;
@@ -291,15 +283,13 @@ class Compiler : public AllStatic {
v8::Extension* extension,
ScriptDataImpl* pre_data,
Handle<Object> script_data,
- NativesFlag is_natives_code,
- v8::Script::CompileFlags = v8::Script::Default);
+ NativesFlag is_natives_code);
// Compile a String source within a context for Eval.
static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
Handle<Context> context,
bool is_global,
- StrictModeFlag strict_mode,
- bool qml_mode);
+ StrictModeFlag strict_mode);
// Compile from function info (used for lazy compilation). Returns true on
// success and false if the compilation resulted in a stack overflow.
diff --git a/src/3rdparty/v8/src/contexts.cc b/src/3rdparty/v8/src/contexts.cc
index 3129af0..b25ffac 100644
--- a/src/3rdparty/v8/src/contexts.cc
+++ b/src/3rdparty/v8/src/contexts.cc
@@ -103,9 +103,6 @@ Handle<Object> Context::Lookup(Handle<String> name,
PrintF(")\n");
}
- Handle<JSObject> qml_global;
- Handle<JSObject> qml_global_global;
-
do {
if (FLAG_trace_contexts) {
PrintF(" - looking in context %p", reinterpret_cast<void*>(*context));
@@ -113,11 +110,6 @@ Handle<Object> Context::Lookup(Handle<String> name,
PrintF("\n");
}
- if (qml_global.is_null() && !context->qml_global()->IsUndefined()) {
- qml_global = Handle<JSObject>(context->qml_global(), isolate);
- qml_global_global = Handle<JSObject>(context->global(), isolate);
- }
-
// 1. Check global objects, subjects of with, and extension objects.
if (context->IsGlobalContext() ||
context->IsWithContext() ||
@@ -236,33 +228,6 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
} while (follow_context_chain);
- if (!qml_global.is_null()) {
- if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0) {
- *attributes = qml_global_global->GetLocalPropertyAttribute(*name);
- } else {
- *attributes = qml_global_global->GetPropertyAttribute(*name);
- }
-
- if (*attributes != ABSENT) {
- *attributes = ABSENT;
- } else {
- if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0) {
- *attributes = qml_global->GetLocalPropertyAttribute(*name);
- } else {
- *attributes = qml_global->GetPropertyAttribute(*name);
- }
-
- if (*attributes != ABSENT) {
- // property found
- if (FLAG_trace_contexts) {
- PrintF("=> found property in qml global object %p\n",
- reinterpret_cast<void*>(*qml_global));
- }
- return qml_global;
- }
- }
- }
-
if (FLAG_trace_contexts) {
PrintF("=> no property/slot found\n");
}
diff --git a/src/3rdparty/v8/src/contexts.h b/src/3rdparty/v8/src/contexts.h
index c3cfeee..7021ff8 100644
--- a/src/3rdparty/v8/src/contexts.h
+++ b/src/3rdparty/v8/src/contexts.h
@@ -218,7 +218,6 @@ class Context: public FixedArray {
// (with contexts), or the variable name (catch contexts), the serialized
// scope info (block contexts).
EXTENSION_INDEX,
- QML_GLOBAL_INDEX,
GLOBAL_INDEX,
MIN_CONTEXT_SLOTS,
@@ -322,9 +321,6 @@ class Context: public FixedArray {
}
void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); }
- JSObject *qml_global() { return reinterpret_cast<JSObject *>(get(QML_GLOBAL_INDEX)); }
- void set_qml_global(JSObject *qml_global) { set(QML_GLOBAL_INDEX, qml_global); }
-
// Returns a JSGlobalProxy object or null.
JSObject* global_proxy();
void set_global_proxy(JSObject* global);
diff --git a/src/3rdparty/v8/src/date.js b/src/3rdparty/v8/src/date.js
index ccefce5..8d7d5d8 100644
--- a/src/3rdparty/v8/src/date.js
+++ b/src/3rdparty/v8/src/date.js
@@ -294,8 +294,8 @@ function TimeInYear(year) {
}
-var ymd_from_time_cache = [$NaN, $NaN, $NaN];
-var ymd_from_time_cached_time = $NaN;
+var ymd_from_time_cache = [1970, 0, 1];
+var ymd_from_time_cached_time = 0;
function YearFromTime(t) {
if (t !== ymd_from_time_cached_time) {
@@ -351,13 +351,12 @@ function MakeDay(year, month, date) {
date = TO_INTEGER_MAP_MINUS_ZERO(date);
if (year < kMinYear || year > kMaxYear ||
- month < kMinMonth || month > kMaxMonth ||
- date < kMinDate || date > kMaxDate) {
+ month < kMinMonth || month > kMaxMonth) {
return $NaN;
}
- // Now we rely on year, month and date being SMIs.
- return %DateMakeDay(year, month, date);
+ // Now we rely on year and month being SMIs.
+ return %DateMakeDay(year, month) + date - 1;
}
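
[Note: the date-of-month range check can be dropped because day-of-month overflow is now handled arithmetically. The runtime call only maps (year, month) to the day number of the first of that month, and the "+ date - 1" term carries any excess into the following months — exactly the Day(t) + dt - 1 normalization ES5 15.9.1.12 prescribes. A worked example:

MakeDay(2011, 1, 31)                  // requested "February 31, 2011"
  = %DateMakeDay(2011, 1) + 31 - 1    // day number of 2011-02-01, plus 30
  = day(2011-02-01) + 30
  = day(2011-03-03)                   // Feb 2011 has 28 days, so it spills
]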
@@ -978,9 +977,10 @@ function PadInt(n, digits) {
}
+// ECMA 262 - 15.9.5.43
function DateToISOString() {
var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
+ if (NUMBER_IS_NAN(t)) throw MakeRangeError("invalid_time_value", []);
var year = this.getUTCFullYear();
var year_string;
if (year >= 0 && year <= 9999) {
diff --git a/src/3rdparty/v8/src/debug-debugger.js b/src/3rdparty/v8/src/debug-debugger.js
index 6f80a8b..d254ee5 100644
--- a/src/3rdparty/v8/src/debug-debugger.js
+++ b/src/3rdparty/v8/src/debug-debugger.js
@@ -477,8 +477,7 @@ ScriptBreakPoint.prototype.clear = function () {
// break points set in this script.
function UpdateScriptBreakPoints(script) {
for (var i = 0; i < script_break_points.length; i++) {
- if ((script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName ||
- script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptRegExp) &&
+ if (script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName &&
script_break_points[i].matchesScript(script)) {
script_break_points[i].set(script);
}
diff --git a/src/3rdparty/v8/src/debug.cc b/src/3rdparty/v8/src/debug.cc
index d32574b..88149d8 100644
--- a/src/3rdparty/v8/src/debug.cc
+++ b/src/3rdparty/v8/src/debug.cc
@@ -1131,17 +1131,6 @@ Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) {
return Handle<DebugInfo>(DebugInfo::cast(shared->debug_info()));
}
-static bool ContainsLineBreak(String *string, int from, int to)
-{
- ASSERT(from >= 0);
- ASSERT(from <= to);
- const int end = (string->length() < to) ? string->length() : to;
- for (int pos = from; pos < end; ++pos) {
- if (string->Get(pos) == '\n')
- return true;
- }
- return false;
-}
void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
Handle<Object> break_point_object,
@@ -1162,22 +1151,12 @@ void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
// Find the break point and change it.
BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
it.FindBreakLocationFromPosition(*source_position);
+ it.SetBreakPoint(break_point_object);
- bool acceptBreak = true;
- if (!FLAG_breakpoint_relocation) {
- if (String *sourceStr = String::cast(shared->GetSourceCode())) {
- acceptBreak = !ContainsLineBreak(sourceStr, *source_position, it.position());
- }
- }
-
- if (acceptBreak) {
- it.SetBreakPoint(break_point_object);
+ *source_position = it.position();
- *source_position = it.position();
-
- // At least one active break point now.
- ASSERT(debug_info->GetBreakPointCount() > 0);
- }
+ // At least one active break point now.
+ ASSERT(debug_info->GetBreakPointCount() > 0);
}
@@ -2642,7 +2621,8 @@ void Debugger::CallCEventCallback(v8::DebugEvent event,
v8::Debug::ClientData* client_data) {
Handle<Foreign> callback_obj(Handle<Foreign>::cast(event_listener_));
v8::Debug::EventCallback2 callback =
- FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->address());
+ FUNCTION_CAST<v8::Debug::EventCallback2>(
+ callback_obj->foreign_address());
EventDetailsImpl event_details(
event,
Handle<JSObject>::cast(exec_state),
diff --git a/src/3rdparty/v8/src/deoptimizer.h b/src/3rdparty/v8/src/deoptimizer.h
index 33580a1..284676c 100644
--- a/src/3rdparty/v8/src/deoptimizer.h
+++ b/src/3rdparty/v8/src/deoptimizer.h
@@ -369,7 +369,20 @@ class FrameDescription {
}
double GetDoubleFrameSlot(unsigned offset) {
- return *reinterpret_cast<double*>(GetFrameSlotPointer(offset));
+ intptr_t* ptr = GetFrameSlotPointer(offset);
+#if V8_TARGET_ARCH_MIPS
+ // Prevent gcc from using load-double (mips ldc1) on (possibly)
+ // non-64-bit aligned double. Uses two lwc1 instructions.
+ union conversion {
+ double d;
+ uint32_t u[2];
+ } c;
+ c.u[0] = *reinterpret_cast<uint32_t*>(ptr);
+ c.u[1] = *(reinterpret_cast<uint32_t*>(ptr) + 1);
+ return c.d;
+#else
+ return *reinterpret_cast<double*>(ptr);
+#endif
}
void SetFrameSlot(unsigned offset, intptr_t value) {
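
[Note: the union here is the classic way to force two 32-bit loads. MIPS ldc1 traps on addresses that are not 8-byte aligned, and deoptimizer frame slots are only guaranteed word alignment, so letting gcc fuse the read into one load-double is unsafe. A memcpy-based sketch with the same effect, assuming the surrounding FrameDescription API:

double GetDoubleFrameSlot(unsigned offset) {
  intptr_t* ptr = GetFrameSlotPointer(offset);
  double d;
  // memcpy (from <string.h>) carries no alignment promise, so the compiler
  // must emit alignment-safe loads (two 32-bit words on MIPS), never ldc1.
  memcpy(&d, ptr, sizeof(d));
  return d;
}
]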
diff --git a/src/3rdparty/v8/src/execution.cc b/src/3rdparty/v8/src/execution.cc
index 4261ac2..29955fa 100644
--- a/src/3rdparty/v8/src/execution.cc
+++ b/src/3rdparty/v8/src/execution.cc
@@ -71,8 +71,7 @@ static Handle<Object> Invoke(bool is_construct,
Handle<Object> receiver,
int argc,
Handle<Object> args[],
- bool* has_pending_exception,
- Handle<Object> qml) {
+ bool* has_pending_exception) {
Isolate* isolate = function->GetIsolate();
// Entering JavaScript.
@@ -103,12 +102,6 @@ static Handle<Object> Invoke(bool is_construct,
// make the current one is indeed a global object.
ASSERT(function->context()->global()->IsGlobalObject());
- Handle<JSObject> oldqml;
- if (!qml.is_null()) {
- oldqml = Handle<JSObject>(function->context()->qml_global());
- function->context()->set_qml_global(JSObject::cast(*qml));
- }
-
{
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
@@ -125,9 +118,6 @@ static Handle<Object> Invoke(bool is_construct,
CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
}
- if (!qml.is_null())
- function->context()->set_qml_global(*oldqml);
-
#ifdef DEBUG
value->Verify();
#endif
@@ -156,18 +146,7 @@ Handle<Object> Execution::Call(Handle<Object> callable,
int argc,
Handle<Object> argv[],
bool* pending_exception,
- bool convert_receiver)
-{
- return Call(callable, receiver, argc, argv, pending_exception, convert_receiver, Handle<Object>());
-}
-
-Handle<Object> Execution::Call(Handle<Object> callable,
- Handle<Object> receiver,
- int argc,
- Handle<Object> argv[],
- bool* pending_exception,
- bool convert_receiver,
- Handle<Object> qml) {
+ bool convert_receiver) {
*pending_exception = false;
if (!callable->IsJSFunction()) {
@@ -191,7 +170,7 @@ Handle<Object> Execution::Call(Handle<Object> callable,
if (*pending_exception) return callable;
}
- return Invoke(false, func, receiver, argc, argv, pending_exception, qml);
+ return Invoke(false, func, receiver, argc, argv, pending_exception);
}
@@ -200,7 +179,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func,
Handle<Object> argv[],
bool* pending_exception) {
return Invoke(true, func, Isolate::Current()->global(), argc, argv,
- pending_exception, Handle<Object>());
+ pending_exception);
}
@@ -219,7 +198,7 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
*caught_exception = false;
Handle<Object> result = Invoke(false, func, receiver, argc, args,
- caught_exception, Handle<Object>());
+ caught_exception);
if (*caught_exception) {
ASSERT(catcher.HasCaught());
diff --git a/src/3rdparty/v8/src/execution.h b/src/3rdparty/v8/src/execution.h
index 532e5d8..f2d17d0 100644
--- a/src/3rdparty/v8/src/execution.h
+++ b/src/3rdparty/v8/src/execution.h
@@ -65,14 +65,6 @@ class Execution : public AllStatic {
bool* pending_exception,
bool convert_receiver = false);
- static Handle<Object> Call(Handle<Object> callable,
- Handle<Object> receiver,
- int argc,
- Handle<Object> argv[],
- bool* pending_exception,
- bool convert_receiver,
- Handle<Object> qml);
-
// Construct object from function, the caller supplies an array of
// arguments. Arguments are Object* type. After function returns,
// pointers in 'args' might be invalid.
diff --git a/src/3rdparty/v8/src/factory.cc b/src/3rdparty/v8/src/factory.cc
index 76ca69d..15f640e 100644
--- a/src/3rdparty/v8/src/factory.cc
+++ b/src/3rdparty/v8/src/factory.cc
@@ -1152,24 +1152,15 @@ Handle<JSFunction> Factory::CreateApiFunction(
Handle<Code> construct_stub = isolate()->builtins()->JSConstructStubApi();
int internal_field_count = 0;
- bool has_external_resource = false;
- bool use_user_object_comparison = false;
-
if (!obj->instance_template()->IsUndefined()) {
Handle<ObjectTemplateInfo> instance_template =
Handle<ObjectTemplateInfo>(
ObjectTemplateInfo::cast(obj->instance_template()));
internal_field_count =
Smi::cast(instance_template->internal_field_count())->value();
- has_external_resource =
- !instance_template->has_external_resource()->IsUndefined();
- use_user_object_comparison =
- !instance_template->use_user_object_comparison()->IsUndefined();
}
int instance_size = kPointerSize * internal_field_count;
- if (has_external_resource) instance_size += kPointerSize;
-
InstanceType type = INVALID_TYPE;
switch (instance_type) {
case JavaScriptObject:
@@ -1204,16 +1195,6 @@ Handle<JSFunction> Factory::CreateApiFunction(
Handle<Map> map = Handle<Map>(result->initial_map());
- // Mark as having external data object if needed
- if (has_external_resource) {
- map->set_has_external_resource(true);
- }
-
- // Mark as using user object comparison if needed
- if (use_user_object_comparison) {
- map->set_use_user_object_comparison(true);
- }
-
// Mark as undetectable if needed.
if (obj->undetectable()) {
map->set_is_undetectable();
@@ -1232,9 +1213,6 @@ Handle<JSFunction> Factory::CreateApiFunction(
// Set interceptor information in the map.
if (!obj->named_property_handler()->IsUndefined()) {
map->set_has_named_interceptor();
- InterceptorInfo *nph = InterceptorInfo::cast(obj->named_property_handler());
- bool is_fallback = nph->is_fallback()->IsUndefined()?false:nph->is_fallback()->value();
- map->set_named_interceptor_is_fallback(is_fallback);
}
if (!obj->indexed_property_handler()->IsUndefined()) {
map->set_has_indexed_interceptor();
diff --git a/src/3rdparty/v8/src/flag-definitions.h b/src/3rdparty/v8/src/flag-definitions.h
index fe64a96..9f11616 100644
--- a/src/3rdparty/v8/src/flag-definitions.h
+++ b/src/3rdparty/v8/src/flag-definitions.h
@@ -114,11 +114,7 @@ DEFINE_bool(clever_optimizations,
"Optimize object size, Array shift, DOM strings and string +")
// Flags for Crankshaft.
-#ifdef V8_TARGET_ARCH_MIPS
- DEFINE_bool(crankshaft, false, "use crankshaft")
-#else
- DEFINE_bool(crankshaft, true, "use crankshaft")
-#endif
+DEFINE_bool(crankshaft, true, "use crankshaft")
DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
DEFINE_bool(build_lithium, true, "use lithium chunk builder")
@@ -233,7 +229,6 @@ DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_bool(debugger_auto_break, true,
"automatically set the debug break flag when debugger commands are "
"in the queue")
-DEFINE_bool(breakpoint_relocation, true, "relocate breakpoints to the next executable line")
DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
// execution.cc
@@ -327,7 +322,8 @@ DEFINE_bool(strict_mode, true, "allow strict mode directives")
// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
-DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
+DEFINE_bool(check_icache, false,
+ "Check icache flushes in ARM and MIPS simulator")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
@@ -434,7 +430,6 @@ DEFINE_bool(print_json_ast, false, "print source AST as JSON")
DEFINE_bool(print_builtin_json_ast, false,
"print source AST for builtins as JSON")
DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
-DEFINE_bool(verify_stack_height, false, "verify stack height tracing on ia32")
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
diff --git a/src/3rdparty/v8/src/full-codegen.cc b/src/3rdparty/v8/src/full-codegen.cc
index f099d25..e6fac19 100644
--- a/src/3rdparty/v8/src/full-codegen.cc
+++ b/src/3rdparty/v8/src/full-codegen.cc
@@ -410,27 +410,24 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(Register reg) const {
void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
__ push(reg);
- codegen()->increment_stack_height();
}
void FullCodeGenerator::TestContext::Plug(Register reg) const {
// For simplicity we always test the accumulator register.
__ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
void FullCodeGenerator::EffectContext::PlugTOS() const {
__ Drop(1);
- codegen()->decrement_stack_height();
}
void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
__ pop(result_register());
- codegen()->decrement_stack_height();
}
@@ -441,8 +438,7 @@ void FullCodeGenerator::StackValueContext::PlugTOS() const {
void FullCodeGenerator::TestContext::PlugTOS() const {
// For simplicity we always test the accumulator register.
__ pop(result_register());
- codegen()->decrement_stack_height();
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -513,7 +509,7 @@ void FullCodeGenerator::VisitDeclarations(
// Batch declare global functions and variables.
if (global_count > 0) {
Handle<FixedArray> array =
- isolate()->factory()->NewFixedArray(3 * global_count, TENURED);
+ isolate()->factory()->NewFixedArray(2 * global_count, TENURED);
for (int j = 0, i = 0; i < length; i++) {
Declaration* decl = declarations->at(i);
Variable* var = decl->proxy()->var();
@@ -537,7 +533,6 @@ void FullCodeGenerator::VisitDeclarations(
}
array->set(j++, *function);
}
- array->set(j++, Smi::FromInt(var->is_qml_global()));
}
}
// Invoke the platform-dependent code generator to do the actual
@@ -657,14 +652,13 @@ FullCodeGenerator::InlineFunctionGenerator
}
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
- ZoneList<Expression*>* args = node->arguments();
- const Runtime::Function* function = node->function();
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
+ const Runtime::Function* function = expr->function();
ASSERT(function != NULL);
ASSERT(function->intrinsic_type == Runtime::INLINE);
InlineFunctionGenerator generator =
FindInlineFunctionGenerator(function->function_id);
- ((*this).*(generator))(args);
+ ((*this).*(generator))(expr);
}
@@ -681,11 +675,25 @@ void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
}
+void FullCodeGenerator::VisitInDuplicateContext(Expression* expr) {
+ if (context()->IsEffect()) {
+ VisitForEffect(expr);
+ } else if (context()->IsAccumulatorValue()) {
+ VisitForAccumulatorValue(expr);
+ } else if (context()->IsStackValue()) {
+ VisitForStackValue(expr);
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ VisitForControl(expr, test->true_label(), test->false_label(),
+ test->fall_through());
+ }
+}
+
+
void FullCodeGenerator::VisitComma(BinaryOperation* expr) {
Comment cmnt(masm_, "[ Comma");
VisitForEffect(expr->left());
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitInCurrentContext(expr->right());
+ VisitInDuplicateContext(expr->right());
}
@@ -707,7 +715,6 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
}
PrepareForBailoutForId(right_id, NO_REGISTERS);
__ bind(&eval_right);
- ForwardBailoutToChild(expr);
} else if (context()->IsAccumulatorValue()) {
VisitForAccumulatorValue(left);
@@ -715,7 +722,6 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
// case we need it.
__ push(result_register());
Label discard, restore;
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
if (is_logical_and) {
DoTest(left, &discard, &restore, &restore);
} else {
@@ -734,7 +740,6 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
// case we need it.
__ push(result_register());
Label discard;
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
if (is_logical_and) {
DoTest(left, &discard, &done, &discard);
} else {
@@ -756,7 +761,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
__ bind(&eval_right);
}
- VisitInCurrentContext(right);
+ VisitInDuplicateContext(right);
__ bind(&done);
}
@@ -783,34 +788,6 @@ void FullCodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
}
-void FullCodeGenerator::ForwardBailoutToChild(Expression* expr) {
- if (!info_->HasDeoptimizationSupport()) return;
- ASSERT(context()->IsTest());
- ASSERT(expr == forward_bailout_stack_->expr());
- forward_bailout_pending_ = forward_bailout_stack_;
-}
-
-
-void FullCodeGenerator::VisitInCurrentContext(Expression* expr) {
- if (context()->IsTest()) {
- ForwardBailoutStack stack(expr, forward_bailout_pending_);
- ForwardBailoutStack* saved = forward_bailout_stack_;
- forward_bailout_pending_ = NULL;
- forward_bailout_stack_ = &stack;
- Visit(expr);
- forward_bailout_stack_ = saved;
- } else {
- ASSERT(forward_bailout_pending_ == NULL);
- Visit(expr);
- State state = context()->IsAccumulatorValue() ? TOS_REG : NO_REGISTERS;
- PrepareForBailout(expr, state);
- // Forwarding bailouts to children is a one shot operation. It should have
- // been processed at this point.
- ASSERT(forward_bailout_pending_ == NULL);
- }
-}
-
-
void FullCodeGenerator::VisitBlock(Block* stmt) {
Comment cmnt(masm_, "[ Block");
NestedBlock nested_block(this, stmt);
@@ -980,7 +957,6 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
VisitForStackValue(stmt->expression());
PushFunctionArgumentForContextAllocation();
__ CallRuntime(Runtime::kPushWithContext, 2);
- decrement_stack_height();
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
{ WithOrCatch body(this);
@@ -1150,13 +1126,10 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
// Try block code. Sets up the exception handler chain.
__ bind(&try_handler_setup);
{
- const int delta = StackHandlerConstants::kSize / kPointerSize;
TryCatch try_block(this);
__ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
- increment_stack_height(delta);
Visit(stmt->try_block());
__ PopTryHandler();
- decrement_stack_height(delta);
}
__ bind(&done);
}
@@ -1188,7 +1161,6 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// cooked before GC.
Label finally_entry;
Label try_handler_setup;
- const int original_stack_height = stack_height();
  // Set up the try-handler chain. Jump to the try-handler setup and
  // try-block code, using a call to put the try-handler
@@ -1210,7 +1182,6 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// Finally block implementation.
Finally finally_block(this);
EnterFinallyBlock();
- set_stack_height(original_stack_height + Finally::kElementCount);
Visit(stmt->finally_block());
ExitFinallyBlock(); // Return to the calling code.
}
@@ -1218,13 +1189,10 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
__ bind(&try_handler_setup);
{
// Setup try handler (stack pointer registers).
- const int delta = StackHandlerConstants::kSize / kPointerSize;
TryFinally try_block(this, &finally_entry);
__ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
- set_stack_height(original_stack_height + delta);
Visit(stmt->try_block());
__ PopTryHandler();
- set_stack_height(original_stack_height);
}
// Execute the finally block on the way out. Clobber the unpredictable
// value in the accumulator with one that's safe for GC. The finally
@@ -1254,7 +1222,6 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
__ bind(&true_case);
SetExpressionPosition(expr->then_expression(),
expr->then_expression_position());
- int start_stack_height = stack_height();
if (context()->IsTest()) {
const TestContext* for_test = TestContext::cast(context());
VisitForControl(expr->then_expression(),
@@ -1262,17 +1229,15 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
for_test->false_label(),
NULL);
} else {
- VisitInCurrentContext(expr->then_expression());
+ VisitInDuplicateContext(expr->then_expression());
__ jmp(&done);
}
PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
__ bind(&false_case);
- set_stack_height(start_stack_height);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
SetExpressionPosition(expr->else_expression(),
expr->else_expression_position());
- VisitInCurrentContext(expr->else_expression());
+ VisitInDuplicateContext(expr->else_expression());
// If control flow falls through Visit, merge it with true case here.
if (!context()->IsTest()) {
__ bind(&done);
@@ -1309,11 +1274,8 @@ void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
- // Throw has no effect on the stack height or the current expression context.
- // Usually the expression context is null, because throw is a statement.
VisitForStackValue(expr->exception());
__ CallRuntime(Runtime::kThrow, 1);
- decrement_stack_height();
// Never returns here.
}
@@ -1333,7 +1295,7 @@ bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
Expression *sub_expr;
Handle<String> check;
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
- EmitLiteralCompareTypeof(sub_expr, check);
+ EmitLiteralCompareTypeof(expr, sub_expr, check);
return true;
}
diff --git a/src/3rdparty/v8/src/full-codegen.h b/src/3rdparty/v8/src/full-codegen.h
index 78e6089..799f96d 100644
--- a/src/3rdparty/v8/src/full-codegen.h
+++ b/src/3rdparty/v8/src/full-codegen.h
@@ -83,12 +83,9 @@ class FullCodeGenerator: public AstVisitor {
scope_(NULL),
nesting_stack_(NULL),
loop_depth_(0),
- stack_height_(0),
context_(NULL),
bailout_entries_(0),
- stack_checks_(2), // There's always at least one.
- forward_bailout_stack_(NULL),
- forward_bailout_pending_(NULL) {
+ stack_checks_(2) { // There's always at least one.
}
static bool MakeCode(CompilationInfo* info);
@@ -276,27 +273,8 @@ class FullCodeGenerator: public AstVisitor {
}
};
- // The forward bailout stack keeps track of the expressions that can
- // bail out to just before the control flow is split in a child
- // node. The stack elements are linked together through the parent
- // link when visiting expressions in test contexts after requesting
- // bailout in child forwarding.
- class ForwardBailoutStack BASE_EMBEDDED {
- public:
- ForwardBailoutStack(Expression* expr, ForwardBailoutStack* parent)
- : expr_(expr), parent_(parent) { }
-
- Expression* expr() const { return expr_; }
- ForwardBailoutStack* parent() const { return parent_; }
-
- private:
- Expression* const expr_;
- ForwardBailoutStack* const parent_;
- };
-
// Type of a member function that generates inline code for a native function.
- typedef void (FullCodeGenerator::*InlineFunctionGenerator)
- (ZoneList<Expression*>*);
+ typedef void (FullCodeGenerator::*InlineFunctionGenerator)(CallRuntime* expr);
static const InlineFunctionGenerator kInlineFunctionGenerators[];
@@ -357,23 +335,22 @@ class FullCodeGenerator: public AstVisitor {
// need the write barrier if location is CONTEXT.
MemOperand VarOperand(Variable* var, Register scratch);
- // Forward the bailout responsibility for the given expression to
- // the next child visited (which must be in a test context).
- void ForwardBailoutToChild(Expression* expr);
-
void VisitForEffect(Expression* expr) {
EffectContext context(this);
- VisitInCurrentContext(expr);
+ Visit(expr);
+ PrepareForBailout(expr, NO_REGISTERS);
}
void VisitForAccumulatorValue(Expression* expr) {
AccumulatorValueContext context(this);
- VisitInCurrentContext(expr);
+ Visit(expr);
+ PrepareForBailout(expr, TOS_REG);
}
void VisitForStackValue(Expression* expr) {
StackValueContext context(this);
- VisitInCurrentContext(expr);
+ Visit(expr);
+ PrepareForBailout(expr, NO_REGISTERS);
}
void VisitForControl(Expression* expr,
@@ -381,9 +358,14 @@ class FullCodeGenerator: public AstVisitor {
Label* if_false,
Label* fall_through) {
TestContext context(this, expr, if_true, if_false, fall_through);
- VisitInCurrentContext(expr);
+ Visit(expr);
+ // For test contexts, we prepare for bailout before branching, not at
+ // the end of the entire expression. This happens as part of visiting
+ // the expression.
}
+ void VisitInDuplicateContext(Expression* expr);
+
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void DeclareGlobals(Handle<FixedArray> pairs);
int DeclareGlobalsFlags();
@@ -395,7 +377,9 @@ class FullCodeGenerator: public AstVisitor {
// Platform-specific code for comparing the type of a value with
// a given literal string.
- void EmitLiteralCompareTypeof(Expression* expr, Handle<String> check);
+ void EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
+ Handle<String> check);
// Platform-specific code for equality comparison with a nil-like value.
void EmitLiteralCompareNil(CompareOperation* expr,
@@ -415,7 +399,7 @@ class FullCodeGenerator: public AstVisitor {
// canonical JS true value so we will insert a (dead) test against true at
// the actual bailout target from the optimized code. If not
// should_normalize, the true and false labels are ignored.
- void PrepareForBailoutBeforeSplit(State state,
+ void PrepareForBailoutBeforeSplit(Expression* expr,
bool should_normalize,
Label* if_true,
Label* if_false);
@@ -450,7 +434,7 @@ class FullCodeGenerator: public AstVisitor {
void EmitInlineRuntimeCall(CallRuntime* expr);
#define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
- void Emit##name(ZoneList<Expression*>* arguments);
+ void Emit##name(CallRuntime* expr);
INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
#undef EMIT_INLINE_RUNTIME_CALL
@@ -466,13 +450,8 @@ class FullCodeGenerator: public AstVisitor {
Label* done);
void EmitVariableLoad(VariableProxy* proxy);
- enum ResolveEvalFlag {
- SKIP_CONTEXT_LOOKUP,
- PERFORM_CONTEXT_LOOKUP
- };
-
// Expects the arguments and the function already pushed.
- void EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, int arg_count);
+ void EmitResolvePossiblyDirectEval(int arg_count);
// Platform-specific support for allocating a new closure based on
// the given function info.
@@ -539,35 +518,6 @@ class FullCodeGenerator: public AstVisitor {
loop_depth_--;
}
-#if defined(V8_TARGET_ARCH_IA32)
- int stack_height() { return stack_height_; }
- void set_stack_height(int depth) { stack_height_ = depth; }
- void increment_stack_height() { stack_height_++; }
- void increment_stack_height(int delta) { stack_height_ += delta; }
- void decrement_stack_height() {
- if (FLAG_verify_stack_height) {
- ASSERT(stack_height_ > 0);
- }
- stack_height_--;
- }
- void decrement_stack_height(int delta) {
- stack_height_-= delta;
- if (FLAG_verify_stack_height) {
- ASSERT(stack_height_ >= 0);
- }
- }
- // Call this function only if FLAG_verify_stack_height is true.
- void verify_stack_height(); // Generates a runtime check of esp - ebp.
-#else
- int stack_height() { return 0; }
- void set_stack_height(int depth) {}
- void increment_stack_height() {}
- void increment_stack_height(int delta) {}
- void decrement_stack_height() {}
- void decrement_stack_height(int delta) {}
- void verify_stack_height() {}
-#endif // V8_TARGET_ARCH_IA32
-
MacroAssembler* masm() { return masm_; }
class ExpressionContext;
@@ -583,7 +533,6 @@ class FullCodeGenerator: public AstVisitor {
StrictModeFlag strict_mode_flag() {
return function()->strict_mode_flag();
}
- bool is_qml_mode() { return function()->qml_mode(); }
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return scope_; }
@@ -612,7 +561,6 @@ class FullCodeGenerator: public AstVisitor {
void VisitComma(BinaryOperation* expr);
void VisitLogicalExpression(BinaryOperation* expr);
void VisitArithmeticExpression(BinaryOperation* expr);
- void VisitInCurrentContext(Expression* expr);
void VisitForTypeofValue(Expression* expr);
@@ -631,10 +579,6 @@ class FullCodeGenerator: public AstVisitor {
virtual ~ExpressionContext() {
codegen_->set_new_context(old_);
- if (FLAG_verify_stack_height) {
- ASSERT_EQ(expected_stack_height_, codegen()->stack_height());
- codegen()->verify_stack_height();
- }
}
Isolate* isolate() const { return codegen_->isolate(); }
@@ -688,7 +632,6 @@ class FullCodeGenerator: public AstVisitor {
FullCodeGenerator* codegen() const { return codegen_; }
MacroAssembler* masm() const { return masm_; }
MacroAssembler* masm_;
- int expected_stack_height_; // The expected stack height esp - ebp on exit.
private:
const ExpressionContext* old_;
@@ -698,9 +641,7 @@ class FullCodeGenerator: public AstVisitor {
class AccumulatorValueContext : public ExpressionContext {
public:
explicit AccumulatorValueContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) {
- expected_stack_height_ = codegen->stack_height();
- }
+ : ExpressionContext(codegen) { }
virtual void Plug(bool flag) const;
virtual void Plug(Register reg) const;
@@ -721,9 +662,7 @@ class FullCodeGenerator: public AstVisitor {
class StackValueContext : public ExpressionContext {
public:
explicit StackValueContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) {
- expected_stack_height_ = codegen->stack_height() + 1;
- }
+ : ExpressionContext(codegen) { }
virtual void Plug(bool flag) const;
virtual void Plug(Register reg) const;
@@ -752,9 +691,7 @@ class FullCodeGenerator: public AstVisitor {
condition_(condition),
true_label_(true_label),
false_label_(false_label),
- fall_through_(fall_through) {
- expected_stack_height_ = codegen->stack_height();
- }
+ fall_through_(fall_through) { }
static const TestContext* cast(const ExpressionContext* context) {
ASSERT(context->IsTest());
@@ -791,10 +728,7 @@ class FullCodeGenerator: public AstVisitor {
class EffectContext : public ExpressionContext {
public:
explicit EffectContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) {
- expected_stack_height_ = codegen->stack_height();
- }
-
+ : ExpressionContext(codegen) { }
virtual void Plug(bool flag) const;
virtual void Plug(Register reg) const;
@@ -818,12 +752,9 @@ class FullCodeGenerator: public AstVisitor {
Label return_label_;
NestedStatement* nesting_stack_;
int loop_depth_;
- int stack_height_;
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
ZoneList<BailoutEntry> stack_checks_;
- ForwardBailoutStack* forward_bailout_stack_;
- ForwardBailoutStack* forward_bailout_pending_;
friend class NestedStatement;
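Since EMIT_INLINE_RUNTIME_CALL now declares each generator as taking the CallRuntime node itself, a generator recovers its argument list from the node rather than receiving a ZoneList directly. A minimal hypothetical generator following the new signature (the name EmitExample and its body are illustrative assumptions, not code from this commit):

    void FullCodeGenerator::EmitExample(CallRuntime* expr) {
      // Arguments now come from the AST node instead of a bare
      // ZoneList<Expression*>* parameter.
      ZoneList<Expression*>* args = expr->arguments();
      ASSERT(args->length() == 1);
      VisitForAccumulatorValue(args->at(0));
      context()->Plug(result_register());
    }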
diff --git a/src/3rdparty/v8/src/globals.h b/src/3rdparty/v8/src/globals.h
index 26a0e5f..f85f286 100644
--- a/src/3rdparty/v8/src/globals.h
+++ b/src/3rdparty/v8/src/globals.h
@@ -364,11 +364,7 @@ template <typename T, class P = FreeStoreAllocationPolicy> class List;
// The Strict Mode (ECMA-262 5th edition, 4.2.2).
enum StrictModeFlag {
kNonStrictMode,
- kStrictMode,
- // This value is never used, but is needed to prevent GCC 4.5 from failing
- // to compile when we assert that a flag is either kNonStrictMode or
- // kStrictMode.
- kInvalidStrictFlag
+ kStrictMode
};
diff --git a/src/3rdparty/v8/src/handles.cc b/src/3rdparty/v8/src/handles.cc
index 790d224..db8ce4d 100644
--- a/src/3rdparty/v8/src/handles.cc
+++ b/src/3rdparty/v8/src/handles.cc
@@ -269,11 +269,9 @@ Handle<Object> SetProperty(Handle<JSReceiver> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool skip_fallback_interceptor) {
+ StrictModeFlag strict_mode) {
CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetProperty(*key, *value, attributes, strict_mode,
- skip_fallback_interceptor),
+ object->SetProperty(*key, *value, attributes, strict_mode),
Object);
}
@@ -518,8 +516,9 @@ static void ClearWrapperCache(Persistent<v8::Value> handle, void*) {
Handle<Object> cache = Utils::OpenHandle(*handle);
JSValue* wrapper = JSValue::cast(*cache);
Foreign* foreign = Script::cast(wrapper->value())->wrapper();
- ASSERT(foreign->address() == reinterpret_cast<Address>(cache.location()));
- foreign->set_address(0);
+ ASSERT(foreign->foreign_address() ==
+ reinterpret_cast<Address>(cache.location()));
+ foreign->set_foreign_address(0);
Isolate* isolate = Isolate::Current();
isolate->global_handles()->Destroy(cache.location());
isolate->counters()->script_wrappers()->Decrement();
@@ -527,10 +526,10 @@ static void ClearWrapperCache(Persistent<v8::Value> handle, void*) {
Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
- if (script->wrapper()->address() != NULL) {
+ if (script->wrapper()->foreign_address() != NULL) {
// Return the script wrapper directly from the cache.
return Handle<JSValue>(
- reinterpret_cast<JSValue**>(script->wrapper()->address()));
+ reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
}
Isolate* isolate = Isolate::Current();
// Construct a new script wrapper.
@@ -546,7 +545,8 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
Handle<Object> handle = isolate->global_handles()->Create(*result);
isolate->global_handles()->MakeWeak(handle.location(), NULL,
&ClearWrapperCache);
- script->wrapper()->set_address(reinterpret_cast<Address>(handle.location()));
+ script->wrapper()->set_foreign_address(
+ reinterpret_cast<Address>(handle.location()));
return result;
}
diff --git a/src/3rdparty/v8/src/handles.h b/src/3rdparty/v8/src/handles.h
index c359cb3..06e47fc 100644
--- a/src/3rdparty/v8/src/handles.h
+++ b/src/3rdparty/v8/src/handles.h
@@ -190,8 +190,7 @@ Handle<Object> SetProperty(Handle<JSReceiver> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool skip_fallback_interceptor = false);
+ StrictModeFlag strict_mode);
Handle<Object> SetProperty(Handle<Object> object,
Handle<Object> key,
diff --git a/src/3rdparty/v8/src/hashmap.cc b/src/3rdparty/v8/src/hashmap.cc
index 1422afd..37748a3 100644
--- a/src/3rdparty/v8/src/hashmap.cc
+++ b/src/3rdparty/v8/src/hashmap.cc
@@ -39,12 +39,6 @@ namespace internal {
Allocator HashMap::DefaultAllocator;
-HashMap::HashMap() {
- allocator_ = NULL;
- match_ = NULL;
-}
-
-
HashMap::HashMap(MatchFun match,
Allocator* allocator,
uint32_t initial_capacity) {
diff --git a/src/3rdparty/v8/src/hashmap.h b/src/3rdparty/v8/src/hashmap.h
index 5c13212..4e6a454 100644
--- a/src/3rdparty/v8/src/hashmap.h
+++ b/src/3rdparty/v8/src/hashmap.h
@@ -50,11 +50,6 @@ class HashMap {
typedef bool (*MatchFun) (void* key1, void* key2);
- // Dummy constructor. This constructor doesn't set up the hash
- // map properly so don't use it unless you have good reason (e.g.,
- // you know that the HashMap will never be used).
- HashMap();
-
// initial_capacity is the size of the initial hash map;
// it must be a power of 2 (and thus must not be 0).
explicit HashMap(MatchFun match,
diff --git a/src/3rdparty/v8/src/heap-inl.h b/src/3rdparty/v8/src/heap-inl.h
index bca57cb..aaf2927 100644
--- a/src/3rdparty/v8/src/heap-inl.h
+++ b/src/3rdparty/v8/src/heap-inl.h
@@ -105,7 +105,6 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
String* answer = String::cast(result);
answer->set_length(str.length());
answer->set_hash_field(hash_field);
- SeqString::cast(answer)->set_symbol_id(0);
ASSERT_EQ(size, answer->Size());
@@ -139,7 +138,6 @@ MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
String* answer = String::cast(result);
answer->set_length(str.length());
answer->set_hash_field(hash_field);
- SeqString::cast(answer)->set_symbol_id(0);
ASSERT_EQ(size, answer->Size());
@@ -222,36 +220,21 @@ MaybeObject* Heap::NumberFromUint32(uint32_t value) {
}
-void Heap::FinalizeExternalString(HeapObject* string) {
- ASSERT(string->IsExternalString() || string->map()->has_external_resource());
-
- if (string->IsExternalString()) {
- v8::String::ExternalStringResourceBase** resource_addr =
- reinterpret_cast<v8::String::ExternalStringResourceBase**>(
- reinterpret_cast<byte*>(string) +
- ExternalString::kResourceOffset -
- kHeapObjectTag);
-
- // Dispose of the C++ object if it has not already been disposed.
- if (*resource_addr != NULL) {
- (*resource_addr)->Dispose();
- }
-
- // Clear the resource pointer in the string.
- *resource_addr = NULL;
- } else {
- JSObject *object = JSObject::cast(string);
- Object *value = object->GetExternalResourceObject();
- v8::Object::ExternalResource *resource = 0;
- if (value->IsSmi()) {
- resource = reinterpret_cast<v8::Object::ExternalResource*>(Internals::GetExternalPointerFromSmi(value));
- } else if (value->IsForeign()) {
- resource = reinterpret_cast<v8::Object::ExternalResource*>(Foreign::cast(value)->address());
- }
- if (resource) {
- resource->Dispose();
- }
+void Heap::FinalizeExternalString(String* string) {
+ ASSERT(string->IsExternalString());
+ v8::String::ExternalStringResourceBase** resource_addr =
+ reinterpret_cast<v8::String::ExternalStringResourceBase**>(
+ reinterpret_cast<byte*>(string) +
+ ExternalString::kResourceOffset -
+ kHeapObjectTag);
+
+ // Dispose of the C++ object if it has not already been disposed.
+ if (*resource_addr != NULL) {
+ (*resource_addr)->Dispose();
}
+
+ // Clear the resource pointer in the string.
+ *resource_addr = NULL;
}
@@ -570,16 +553,6 @@ void ExternalStringTable::AddString(String* string) {
}
-void ExternalStringTable::AddObject(HeapObject* object) {
- ASSERT(object->map()->has_external_resource());
- if (heap_->InNewSpace(object)) {
- new_space_strings_.Add(object);
- } else {
- old_space_strings_.Add(object);
- }
-}
-
-
void ExternalStringTable::Iterate(ObjectVisitor* v) {
if (!new_space_strings_.is_empty()) {
Object** start = &new_space_strings_[0];
@@ -608,14 +581,14 @@ void ExternalStringTable::Verify() {
}
-void ExternalStringTable::AddOldObject(HeapObject* object) {
- ASSERT(object->IsExternalString() || object->map()->has_external_resource());
- ASSERT(!heap_->InNewSpace(object));
- old_space_strings_.Add(object);
+void ExternalStringTable::AddOldString(String* string) {
+ ASSERT(string->IsExternalString());
+ ASSERT(!heap_->InNewSpace(string));
+ old_space_strings_.Add(string);
}
-void ExternalStringTable::ShrinkNewObjects(int position) {
+void ExternalStringTable::ShrinkNewStrings(int position) {
new_space_strings_.Rewind(position);
if (FLAG_verify_heap) {
Verify();
diff --git a/src/3rdparty/v8/src/heap.cc b/src/3rdparty/v8/src/heap.cc
index 881a876..bded80b 100644
--- a/src/3rdparty/v8/src/heap.cc
+++ b/src/3rdparty/v8/src/heap.cc
@@ -1099,18 +1099,18 @@ void Heap::Scavenge() {
}
-HeapObject* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
- Object** p) {
+String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
+ Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
- heap->FinalizeExternalString(HeapObject::cast(*p));
+ heap->FinalizeExternalString(String::cast(*p));
return NULL;
}
// String is still reachable.
- return HeapObject::cast(first_word.ToForwardingAddress());
+ return String::cast(first_word.ToForwardingAddress());
}
@@ -1128,11 +1128,11 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
for (Object** p = start; p < end; ++p) {
ASSERT(InFromSpace(*p));
- HeapObject* target = updater_func(this, p);
+ String* target = updater_func(this, p);
if (target == NULL) continue;
- ASSERT(target->IsExternalString() || target->map()->has_external_resource());
+ ASSERT(target->IsExternalString());
if (InNewSpace(target)) {
// String is still in new space. Update the table entry.
@@ -1140,12 +1140,12 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
++last;
} else {
// String got promoted. Move it to the old string list.
- external_string_table_.AddOldObject(target);
+ external_string_table_.AddOldString(target);
}
}
ASSERT(last <= end);
- external_string_table_.ShrinkNewObjects(static_cast<int>(last - start));
+ external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}
@@ -2629,12 +2629,10 @@ MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- Object* result;
- { MaybeObject* maybe_result = Allocate(foreign_map(), space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- Foreign::cast(result)->set_address(address);
+ Foreign* result;
+ MaybeObject* maybe_result = Allocate(foreign_map(), space);
+ if (!maybe_result->To(&result)) return maybe_result;
+ result->set_foreign_address(address);
return result;
}
@@ -4009,7 +4007,6 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
String* answer = String::cast(result);
answer->set_length(chars);
answer->set_hash_field(hash_field);
- SeqString::cast(answer)->set_symbol_id(0);
ASSERT_EQ(size, answer->Size());
@@ -4052,7 +4049,6 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
HeapObject::cast(result)->set_map(ascii_string_map());
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
- SeqString::cast(result)->set_symbol_id(0);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
return result;
}
@@ -4088,7 +4084,6 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
HeapObject::cast(result)->set_map(string_map());
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
- SeqString::cast(result)->set_symbol_id(0);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
return result;
}
@@ -4355,7 +4350,6 @@ MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
context->set_previous(function->context());
context->set_extension(NULL);
context->set_global(function->context()->global());
- context->set_qml_global(function->context()->qml_global());
return context;
}
@@ -4376,7 +4370,6 @@ MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
context->set_previous(previous);
context->set_extension(name);
context->set_global(previous->global());
- context->set_qml_global(previous->qml_global());
context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
return context;
}
@@ -4395,7 +4388,6 @@ MaybeObject* Heap::AllocateWithContext(JSFunction* function,
context->set_previous(previous);
context->set_extension(extension);
context->set_global(previous->global());
- context->set_qml_global(previous->qml_global());
return context;
}
@@ -4414,7 +4406,6 @@ MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
context->set_previous(previous);
context->set_extension(scope_info);
context->set_global(previous->global());
- context->set_qml_global(previous->qml_global());
return context;
}
@@ -6371,19 +6362,6 @@ void ExternalStringTable::CleanUp() {
void ExternalStringTable::TearDown() {
- for (int i = 0; i < new_space_strings_.length(); ++i) {
- if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
- HeapObject *object = HeapObject::cast(new_space_strings_[i]);
- if (!object->IsExternalString())
- heap_->FinalizeExternalString(object);
- }
- for (int i = 0; i < old_space_strings_.length(); ++i) {
- if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
- HeapObject *object = HeapObject::cast(old_space_strings_[i]);
- if (!object->IsExternalString())
- heap_->FinalizeExternalString(object);
- }
-
new_space_strings_.Free();
old_space_strings_.Free();
}
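With the table reduced to genuine external strings, finalization is just a Dispose() call on the string's resource. For intuition, a sketch of the embedder-side resource whose Dispose() ends up being invoked (an assumed subclass of the long-standing v8::String::ExternalAsciiStringResource interface, not part of this commit):

    // Assumed embedder resource; Heap::FinalizeExternalString() calls
    // Dispose() at most once and then clears the string's resource pointer.
    class BufferResource : public v8::String::ExternalAsciiStringResource {
     public:
      BufferResource(const char* data, size_t length)
          : data_(data), length_(length) {}
      virtual const char* data() const { return data_; }
      virtual size_t length() const { return length_; }
      // The inherited Dispose() deletes this object; override it if the
      // underlying buffer needs separate cleanup.
     private:
      const char* data_;
      size_t length_;
    };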
diff --git a/src/3rdparty/v8/src/heap.h b/src/3rdparty/v8/src/heap.h
index 6166cde..7c0b0ea 100644
--- a/src/3rdparty/v8/src/heap.h
+++ b/src/3rdparty/v8/src/heap.h
@@ -190,7 +190,6 @@ inline Heap* _inline_get_heap_();
V(string_symbol, "string") \
V(String_symbol, "String") \
V(Date_symbol, "Date") \
- V(Error_symbol, "Error") \
V(this_symbol, "this") \
V(to_string_symbol, "toString") \
V(char_at_symbol, "CharAt") \
@@ -246,8 +245,8 @@ class Isolate;
class WeakObjectRetainer;
-typedef HeapObject* (*ExternalStringTableUpdaterCallback)(Heap* heap,
- Object** pointer);
+typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
+ Object** pointer);
class StoreBufferRebuilder {
public:
@@ -332,14 +331,10 @@ typedef void (*ScavengingCallback)(Map* map,
// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
// finalize them.
-// The ExternalStringTable can contain both strings and objects with
-// external resources. It was not renamed to make the patch simpler.
class ExternalStringTable {
public:
// Registers an external string.
inline void AddString(String* string);
- // Registers an external object.
- inline void AddObject(HeapObject* string);
inline void Iterate(ObjectVisitor* v);
@@ -357,10 +352,10 @@ class ExternalStringTable {
inline void Verify();
- inline void AddOldObject(HeapObject* string);
+ inline void AddOldString(String* string);
// Notifies the table that only a prefix of the new list is valid.
- inline void ShrinkNewObjects(int position);
+ inline void ShrinkNewStrings(int position);
  // To speed up scavenge collections, new space strings are kept
// separate from old space strings.
@@ -856,7 +851,7 @@ class Heap {
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
- inline void FinalizeExternalString(HeapObject* string);
+ inline void FinalizeExternalString(String* string);
// Allocates an uninitialized object. The memory is non-executable if the
// hardware and OS allow.
@@ -1661,7 +1656,7 @@ class Heap {
// Performs a minor collection in new generation.
void Scavenge();
- static HeapObject* UpdateNewSpaceReferenceInExternalStringTableEntry(
+ static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
Heap* heap,
Object** pointer);
diff --git a/src/3rdparty/v8/src/hydrogen-instructions.cc b/src/3rdparty/v8/src/hydrogen-instructions.cc
index ac9728c..ae62367 100644
--- a/src/3rdparty/v8/src/hydrogen-instructions.cc
+++ b/src/3rdparty/v8/src/hydrogen-instructions.cc
@@ -126,7 +126,9 @@ void Range::AddConstant(int32_t value) {
bool may_overflow = false; // Overflow is ignored here.
lower_ = AddWithoutOverflow(lower_, value, &may_overflow);
upper_ = AddWithoutOverflow(upper_, value, &may_overflow);
+#ifdef DEBUG
Verify();
+#endif
}
@@ -173,7 +175,9 @@ bool Range::AddAndCheckOverflow(Range* other) {
lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow);
upper_ = AddWithoutOverflow(upper_, other->upper(), &may_overflow);
KeepOrder();
+#ifdef DEBUG
Verify();
+#endif
return may_overflow;
}
@@ -183,7 +187,9 @@ bool Range::SubAndCheckOverflow(Range* other) {
lower_ = SubWithoutOverflow(lower_, other->upper(), &may_overflow);
upper_ = SubWithoutOverflow(upper_, other->lower(), &may_overflow);
KeepOrder();
+#ifdef DEBUG
Verify();
+#endif
return may_overflow;
}
@@ -197,9 +203,11 @@ void Range::KeepOrder() {
}
+#ifdef DEBUG
void Range::Verify() const {
ASSERT(lower_ <= upper_);
}
+#endif
bool Range::MulAndCheckOverflow(Range* other) {
@@ -210,7 +218,9 @@ bool Range::MulAndCheckOverflow(Range* other) {
int v4 = MulWithoutOverflow(upper_, other->upper(), &may_overflow);
lower_ = Min(Min(v1, v2), Min(v3, v4));
upper_ = Max(Max(v1, v2), Max(v3, v4));
+#ifdef DEBUG
Verify();
+#endif
return may_overflow;
}
@@ -234,25 +244,6 @@ const char* HType::ToString() {
}
-const char* HType::ToShortString() {
- switch (type_) {
- case kTagged: return "t";
- case kTaggedPrimitive: return "p";
- case kTaggedNumber: return "n";
- case kSmi: return "m";
- case kHeapNumber: return "h";
- case kString: return "s";
- case kBoolean: return "b";
- case kNonPrimitive: return "r";
- case kJSArray: return "a";
- case kJSObject: return "o";
- case kUninitialized: return "z";
- }
- UNREACHABLE();
- return "Unreachable code";
-}
-
-
HType HType::TypeFromValue(Handle<Object> value) {
HType result = HType::Tagged();
if (value->IsSmi()) {
@@ -564,7 +555,7 @@ void HInstruction::InsertAfter(HInstruction* previous) {
// followed by a simulate instruction, we need to insert after the
// simulate instruction instead.
HInstruction* next = previous->next_;
- if (previous->HasSideEffects() && next != NULL) {
+ if (previous->HasObservableSideEffects() && next != NULL) {
ASSERT(next->IsSimulate());
previous = next;
next = previous->next_;
@@ -604,7 +595,7 @@ void HInstruction::Verify() {
// Verify that instructions that may have side-effects are followed
// by a simulate instruction.
- if (HasSideEffects() && !IsOsrEntry()) {
+ if (HasObservableSideEffects() && !IsOsrEntry()) {
ASSERT(next()->IsSimulate());
}
@@ -655,11 +646,6 @@ void HCallNamed::PrintDataTo(StringStream* stream) {
}
-void HGlobalObject::PrintDataTo(StringStream* stream) {
- stream->Add("qml_global: %s ", qml_global()?"true":"false");
- HUnaryOperation::PrintDataTo(stream);
-}
-
void HCallGlobal::PrintDataTo(StringStream* stream) {
stream->Add("%o ", *name());
HUnaryCall::PrintDataTo(stream);
@@ -1257,28 +1243,17 @@ void HBinaryOperation::PrintDataTo(StringStream* stream) {
}
-Range* HBitAnd::InferRange() {
+Range* HBitwise::InferRange() {
+ if (op() == Token::BIT_XOR) return HValue::InferRange();
int32_t left_mask = (left()->range() != NULL)
? left()->range()->Mask()
: 0xffffffff;
int32_t right_mask = (right()->range() != NULL)
? right()->range()->Mask()
: 0xffffffff;
- int32_t result_mask = left_mask & right_mask;
- return (result_mask >= 0)
- ? new Range(0, result_mask)
- : HValue::InferRange();
-}
-
-
-Range* HBitOr::InferRange() {
- int32_t left_mask = (left()->range() != NULL)
- ? left()->range()->Mask()
- : 0xffffffff;
- int32_t right_mask = (right()->range() != NULL)
- ? right()->range()->Mask()
- : 0xffffffff;
- int32_t result_mask = left_mask | right_mask;
+ int32_t result_mask = (op() == Token::BIT_AND)
+ ? left_mask & right_mask
+ : left_mask | right_mask;
return (result_mask >= 0)
? new Range(0, result_mask)
: HValue::InferRange();
@@ -1790,21 +1765,6 @@ HType HAdd::CalculateInferredType() {
}
-HType HBitAnd::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HBitXor::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HBitOr::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
HType HBitNot::CalculateInferredType() {
return HType::TaggedNumber();
}
@@ -1815,21 +1775,6 @@ HType HUnaryMathOperation::CalculateInferredType() {
}
-HType HShl::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HShr::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HSar::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
HType HStringCharFromCode::CalculateInferredType() {
return HType::String();
}
diff --git a/src/3rdparty/v8/src/hydrogen-instructions.h b/src/3rdparty/v8/src/hydrogen-instructions.h
index ebf0030..101d62a 100644
--- a/src/3rdparty/v8/src/hydrogen-instructions.h
+++ b/src/3rdparty/v8/src/hydrogen-instructions.h
@@ -67,10 +67,8 @@ class LChunkBuilder;
V(ArgumentsLength) \
V(ArgumentsObject) \
V(ArrayLiteral) \
- V(BitAnd) \
+ V(Bitwise) \
V(BitNot) \
- V(BitOr) \
- V(BitXor) \
V(BlockEntry) \
V(BoundsCheck) \
V(Branch) \
@@ -183,6 +181,7 @@ class LChunkBuilder;
V(Calls) \
V(InobjectFields) \
V(BackingStoreFields) \
+ V(ElementsKind) \
V(ArrayElements) \
V(DoubleArrayElements) \
V(SpecializedArrayElements) \
@@ -246,7 +245,9 @@ class Range: public ZoneObject {
return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
}
void KeepOrder();
+#ifdef DEBUG
void Verify() const;
+#endif
void StackUpon(Range* other) {
Intersect(other);
@@ -406,7 +407,6 @@ class HType {
static HType TypeFromValue(Handle<Object> value);
const char* ToString();
- const char* ToShortString();
private:
enum Type {
@@ -621,8 +621,14 @@ class HValue: public ZoneObject {
void SetAllSideEffects() { flags_ |= AllSideEffects(); }
void ClearAllSideEffects() { flags_ &= ~AllSideEffects(); }
bool HasSideEffects() const { return (flags_ & AllSideEffects()) != 0; }
+ bool HasObservableSideEffects() const {
+ return (flags_ & ObservableSideEffects()) != 0;
+ }
int ChangesFlags() const { return flags_ & ChangesFlagsMask(); }
+ int ObservableChangesFlags() const {
+ return flags_ & ChangesFlagsMask() & ObservableSideEffects();
+ }
Range* range() const { return range_; }
bool HasRange() const { return range_ != NULL; }
@@ -702,6 +708,12 @@ class HValue: public ZoneObject {
return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
}
+ // A flag mask of all side effects that can make observable changes in
+ // an executing program (i.e. are not safe to repeat, move or remove);
+ static int ObservableSideEffects() {
+ return ChangesFlagsMask() & ~(1 << kChangesElementsKind);
+ }
+
// Remove the matching use from the use list if present. Returns the
// removed list node or NULL.
HUseListNode* RemoveUse(HValue* value, int index);
@@ -1410,30 +1422,19 @@ class HOuterContext: public HUnaryOperation {
class HGlobalObject: public HUnaryOperation {
public:
- explicit HGlobalObject(HValue* context) : HUnaryOperation(context), qml_global_(false) {
+ explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
- virtual void PrintDataTo(StringStream* stream);
-
DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
- bool qml_global() { return qml_global_; }
- void set_qml_global(bool v) { qml_global_ = v; }
-
protected:
- virtual bool DataEquals(HValue* other) {
- HGlobalObject* o = HGlobalObject::cast(other);
- return o->qml_global_ == qml_global_;
- }
-
- private:
- bool qml_global_;
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1612,7 +1613,7 @@ class HCallFunction: public HUnaryCall {
class HCallGlobal: public HUnaryCall {
public:
HCallGlobal(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name), qml_global_(false) {
+ : HUnaryCall(context, argument_count), name_(name) {
}
virtual void PrintDataTo(StringStream* stream);
@@ -1624,14 +1625,10 @@ class HCallGlobal: public HUnaryCall {
return Representation::Tagged();
}
- bool qml_global() { return qml_global_; }
- void set_qml_global(bool v) { qml_global_ = v; }
-
DECLARE_CONCRETE_INSTRUCTION(CallGlobal)
private:
Handle<String> name_;
- bool qml_global_;
};
@@ -1754,7 +1751,7 @@ class HElementsKind: public HUnaryOperation {
explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
+ SetFlag(kDependsOnElementsKind);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -1881,6 +1878,7 @@ class HLoadElements: public HUnaryOperation {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kDependsOnMaps);
+ SetFlag(kDependsOnElementsKind);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -3043,52 +3041,30 @@ class HDiv: public HArithmeticBinaryOperation {
};
-class HBitAnd: public HBitwiseBinaryOperation {
- public:
- HBitAnd(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
-
- virtual bool IsCommutative() const { return true; }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(BitAnd)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange();
-};
-
-
-class HBitXor: public HBitwiseBinaryOperation {
+class HBitwise: public HBitwiseBinaryOperation {
public:
- HBitXor(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
-
- virtual bool IsCommutative() const { return true; }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(BitXor)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
+ HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right), op_(op) {
+ ASSERT(op == Token::BIT_AND ||
+ op == Token::BIT_OR ||
+ op == Token::BIT_XOR);
+ }
-class HBitOr: public HBitwiseBinaryOperation {
- public:
- HBitOr(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
+ Token::Value op() const { return op_; }
virtual bool IsCommutative() const { return true; }
- virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(BitOr)
+ DECLARE_CONCRETE_INSTRUCTION(Bitwise)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) {
+ return op() == HBitwise::cast(other)->op();
+ }
virtual Range* InferRange();
+
+ private:
+ Token::Value op_;
};
@@ -3098,7 +3074,6 @@ class HShl: public HBitwiseBinaryOperation {
: HBitwiseBinaryOperation(context, left, right) { }
virtual Range* InferRange();
- virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(Shl)
@@ -3113,7 +3088,6 @@ class HShr: public HBitwiseBinaryOperation {
: HBitwiseBinaryOperation(context, left, right) { }
virtual Range* InferRange();
- virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(Shr)
@@ -3128,7 +3102,6 @@ class HSar: public HBitwiseBinaryOperation {
: HBitwiseBinaryOperation(context, left, right) { }
virtual Range* InferRange();
- virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(Sar)
@@ -3930,7 +3903,7 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
transitioned_map_(transitioned_map) {
SetOperandAt(0, object);
SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
+ SetFlag(kChangesElementsKind);
set_representation(Representation::Tagged());
}
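With the three per-operator classes folded into a single HBitwise, construction sites pass the token explicitly, and GVN only merges two instances when DataEquals() sees matching op() values. A sketch of the resulting construction pattern (hypothetical call site, following the new(zone()) convention used elsewhere in this diff):

    // One instruction class for all three operators; the token selects
    // the behaviour, including the BIT_XOR fallback in InferRange().
    HInstruction* and_instr =
        new(zone()) HBitwise(Token::BIT_AND, context, left, right);
    HInstruction* xor_instr =
        new(zone()) HBitwise(Token::BIT_XOR, context, left, right);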
diff --git a/src/3rdparty/v8/src/hydrogen.cc b/src/3rdparty/v8/src/hydrogen.cc
index 0a7fad1..a7a022d 100644
--- a/src/3rdparty/v8/src/hydrogen.cc
+++ b/src/3rdparty/v8/src/hydrogen.cc
@@ -1346,6 +1346,7 @@ class HGlobalValueNumberer BASE_EMBEDDED {
explicit HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
: graph_(graph),
info_(info),
+ removed_side_effects_(false),
block_side_effects_(graph->blocks()->length()),
loop_side_effects_(graph->blocks()->length()),
visited_on_paths_(graph->zone(), graph->blocks()->length()) {
@@ -1357,7 +1358,8 @@ class HGlobalValueNumberer BASE_EMBEDDED {
ASSERT(!info_->isolate()->heap()->allow_allocation(true));
}
- void Analyze();
+ // Returns true if values with side effects are removed.
+ bool Analyze();
private:
int CollectSideEffectsOnPathsToDominatedBlock(HBasicBlock* dominator,
@@ -1377,6 +1379,7 @@ class HGlobalValueNumberer BASE_EMBEDDED {
HGraph* graph_;
CompilationInfo* info_;
+ bool removed_side_effects_;
// A map of block IDs to their side effects.
ZoneList<int> block_side_effects_;
@@ -1390,13 +1393,14 @@ class HGlobalValueNumberer BASE_EMBEDDED {
};
-void HGlobalValueNumberer::Analyze() {
+bool HGlobalValueNumberer::Analyze() {
ComputeBlockSideEffects();
if (FLAG_loop_invariant_code_motion) {
LoopInvariantCodeMotion();
}
HValueMap* map = new(zone()) HValueMap();
AnalyzeBlock(graph_->entry_block(), map);
+ return removed_side_effects_;
}
@@ -1530,11 +1534,12 @@ void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
HInstruction* next = instr->next();
int flags = instr->ChangesFlags();
if (flags != 0) {
- ASSERT(!instr->CheckFlag(HValue::kUseGVN));
// Clear all instructions in the map that are affected by side effects.
map->Kill(flags);
TraceGVN("Instruction %d kills\n", instr->id());
- } else if (instr->CheckFlag(HValue::kUseGVN)) {
+ }
+ if (instr->CheckFlag(HValue::kUseGVN)) {
+ ASSERT(!instr->HasObservableSideEffects());
HValue* other = map->Lookup(instr);
if (other != NULL) {
ASSERT(instr->Equals(other) && other->Equals(instr));
@@ -1543,6 +1548,7 @@ void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
instr->Mnemonic(),
other->id(),
other->Mnemonic());
+ if (instr->HasSideEffects()) removed_side_effects_ = true;
instr->DeleteAndReplaceWith(other);
} else {
map->Add(instr);
@@ -2108,12 +2114,12 @@ void TestContext::ReturnValue(HValue* value) {
void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
ASSERT(!instr->IsControlInstruction());
owner()->AddInstruction(instr);
- if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
}
void EffectContext::ReturnControl(HControlInstruction* instr, int ast_id) {
- ASSERT(!instr->HasSideEffects());
+ ASSERT(!instr->HasObservableSideEffects());
HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, empty_true);
@@ -2131,12 +2137,12 @@ void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
}
owner()->AddInstruction(instr);
owner()->Push(instr);
- if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
}
void ValueContext::ReturnControl(HControlInstruction* instr, int ast_id) {
- ASSERT(!instr->HasSideEffects());
+ ASSERT(!instr->HasObservableSideEffects());
if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
return owner()->Bailout("bad value context for arguments object value");
}
@@ -2161,7 +2167,7 @@ void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
builder->AddInstruction(instr);
// We expect a simulate after every expression with side effects, though
// this one isn't actually needed (and wouldn't work if it were targeted).
- if (instr->HasSideEffects()) {
+ if (instr->HasObservableSideEffects()) {
builder->Push(instr);
builder->AddSimulate(ast_id);
builder->Pop();
@@ -2171,7 +2177,7 @@ void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
void TestContext::ReturnControl(HControlInstruction* instr, int ast_id) {
- ASSERT(!instr->HasSideEffects());
+ ASSERT(!instr->HasObservableSideEffects());
HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, empty_true);
@@ -2373,7 +2379,13 @@ HGraph* HGraphBuilder::CreateGraph() {
if (FLAG_use_gvn) {
HPhase phase("Global value numbering", graph());
HGlobalValueNumberer gvn(graph(), info());
- gvn.Analyze();
+ bool removed_side_effects = gvn.Analyze();
+ // Trigger a second analysis pass to further eliminate duplicate values that
+ // could only be discovered by removing side-effect-generating instructions
+ // during the first pass.
+ if (FLAG_smi_only_arrays && removed_side_effects) {
+ gvn.Analyze();
+ }
}
if (FLAG_use_range) {
@@ -3185,7 +3197,6 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
} else {
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- if (variable->is_qml_global()) global_object->set_qml_global(true);
AddInstruction(global_object);
HLoadGlobalGeneric* instr =
new(zone()) HLoadGlobalGeneric(context,
@@ -3547,7 +3558,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
// The HSimulate for the store should not see the stored value in
// effect contexts (it is not materialized at expr->id() in the
// unoptimized code).
- if (instr->HasSideEffects()) {
+ if (instr->HasObservableSideEffects()) {
if (ast_context()->IsEffect()) {
AddSimulate(expr->id());
} else {
@@ -3620,7 +3631,7 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
}
@@ -3641,11 +3652,10 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
instr->set_position(position);
AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
} else {
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- if (var->is_qml_global()) global_object->set_qml_global(true);
AddInstruction(global_object);
HStoreGlobalGeneric* instr =
new(zone()) HStoreGlobalGeneric(context,
@@ -3655,8 +3665,8 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
function_strict_mode_flag());
instr->set_position(position);
AddInstruction(instr);
- ASSERT(instr->HasSideEffects());
- if (instr->HasSideEffects()) AddSimulate(ast_id);
+ ASSERT(instr->HasObservableSideEffects());
+ if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
}
}
@@ -3713,7 +3723,9 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HStoreContextSlot* instr =
new(zone()) HStoreContextSlot(context, var->index(), Top());
AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId());
+ }
break;
}
@@ -3739,7 +3751,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
load = BuildLoadNamedGeneric(obj, prop);
}
PushAndAdd(load);
- if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
+ if (load->HasObservableSideEffects()) AddSimulate(expr->CompoundLoadId());
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
@@ -3747,14 +3759,14 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HInstruction* instr = BuildBinaryOperation(operation, left, right);
PushAndAdd(instr);
- if (instr->HasSideEffects()) AddSimulate(operation->id());
+ if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
HInstruction* store = BuildStoreNamed(obj, instr, prop);
AddInstruction(store);
// Drop the simulated receiver and value. Return the value.
Drop(2);
Push(instr);
- if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
} else {
@@ -3779,7 +3791,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HInstruction* instr = BuildBinaryOperation(operation, left, right);
PushAndAdd(instr);
- if (instr->HasSideEffects()) AddSimulate(operation->id());
+ if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
expr->RecordTypeFeedback(oracle());
HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
@@ -3877,7 +3889,9 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
HStoreContextSlot* instr =
new(zone()) HStoreContextSlot(context, var->index(), Top());
AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId());
+ }
return ast_context()->ReturnValue(Pop());
}
@@ -4150,7 +4164,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
if (num_untransitionable_maps == 1) {
HInstruction* instr = AddInstruction(BuildMonomorphicElementAccess(
object, key, val, untransitionable_map, is_store));
- *has_side_effects |= instr->HasSideEffects();
+ *has_side_effects |= instr->HasObservableSideEffects();
instr->set_position(position);
return is_store ? NULL : instr;
}
@@ -4237,7 +4251,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
Push(access);
}
- *has_side_effects |= access->HasSideEffects();
+ *has_side_effects |= access->HasObservableSideEffects();
if (position != -1) {
access->set_position(position);
}
@@ -4258,7 +4272,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
access = AddInstruction(BuildExternalArrayElementAccess(
external_elements, checked_key, val, elements_kind, is_store));
}
- *has_side_effects |= access->HasSideEffects();
+ *has_side_effects |= access->HasObservableSideEffects();
access->set_position(position);
if (!is_store) {
Push(access);
@@ -4303,7 +4317,7 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
}
instr->set_position(position);
AddInstruction(instr);
- *has_side_effects = instr->HasSideEffects();
+ *has_side_effects = instr->HasObservableSideEffects();
return instr;
}
@@ -4370,7 +4384,7 @@ void HGraphBuilder::VisitProperty(Property* expr) {
CHECK_ALIVE(VisitForValue(expr->obj()));
HInstruction* instr = NULL;
- if (expr->IsArrayLength()) {
+ if (expr->AsProperty()->IsArrayLength()) {
HValue* array = Pop();
AddInstruction(new(zone()) HCheckNonSmi(array));
HInstruction* mapcheck =
@@ -4902,7 +4916,7 @@ bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr,
AddInstruction(square_root);
// MathPowHalf doesn't have side effects so there's no need for
// an environment simulation here.
- ASSERT(!square_root->HasSideEffects());
+ ASSERT(!square_root->HasObservableSideEffects());
result = new(zone()) HDiv(context, double_one, square_root);
} else if (exponent == 2.0) {
result = new(zone()) HMul(context, left, left);
@@ -5035,7 +5049,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
return;
}
- if (CallStubCompiler::HasCustomCallGenerator(*expr->target()) ||
+ if (CallStubCompiler::HasCustomCallGenerator(expr->target()) ||
expr->check_type() != RECEIVER_MAP_CHECK) {
// When the target has a custom call IC generator, use the IC,
// because it is likely to generate better code. Also use the IC
@@ -5108,13 +5122,11 @@ void HGraphBuilder::VisitCall(Call* expr) {
} else {
HValue* context = environment()->LookupContext();
HGlobalObject* receiver = new(zone()) HGlobalObject(context);
- if (var->is_qml_global()) receiver->set_qml_global(true);
AddInstruction(receiver);
PushAndAdd(new(zone()) HPushArgument(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
call = new(zone()) HCallGlobal(context, var->name(), argument_count);
- if (var->is_qml_global()) static_cast<HCallGlobal*>(call)->set_qml_global(true);
Drop(argument_count);
}
@@ -5367,7 +5379,7 @@ void HGraphBuilder::VisitNot(UnaryOperation* expr) {
materialize_true));
if (materialize_false->HasPredecessor()) {
- materialize_false->SetJoinId(expr->expression()->id());
+ materialize_false->SetJoinId(expr->MaterializeFalseId());
set_current_block(materialize_false);
Push(graph()->GetConstantFalse());
} else {
@@ -5375,7 +5387,7 @@ void HGraphBuilder::VisitNot(UnaryOperation* expr) {
}
if (materialize_true->HasPredecessor()) {
- materialize_true->SetJoinId(expr->expression()->id());
+ materialize_true->SetJoinId(expr->MaterializeTrueId());
set_current_block(materialize_true);
Push(graph()->GetConstantTrue());
} else {
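Using dedicated MaterializeTrueId()/MaterializeFalseId() instead of the operand's id gives each materialization join its own AST id, so the simulates recorded at the two joins no longer share one id. A sketch of the assumed AST shape backing this (hypothetical field names, not verbatim from ast.h):

    class UnaryOperation /* : public Expression */ {
     public:
      int MaterializeTrueId() const { return materialize_true_id_; }
      int MaterializeFalseId() const { return materialize_false_id_; }
     private:
      int materialize_true_id_;   // ids reserved for Token::NOT joins
      int materialize_false_id_;
    };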
@@ -5488,7 +5500,9 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
HStoreContextSlot* instr =
new(zone()) HStoreContextSlot(context, var->index(), after);
AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId());
+ }
break;
}
@@ -5517,7 +5531,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
load = BuildLoadNamedGeneric(obj, prop);
}
PushAndAdd(load);
- if (load->HasSideEffects()) AddSimulate(expr->CountId());
+ if (load->HasObservableSideEffects()) AddSimulate(expr->CountId());
after = BuildIncrement(returns_original_input, expr);
input = Pop();
@@ -5530,7 +5544,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
// necessary.
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
- if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
} else {
// Keyed property.
@@ -5623,13 +5637,9 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
instr = new(zone()) HDiv(context, left, right);
break;
case Token::BIT_XOR:
- instr = new(zone()) HBitXor(context, left, right);
- break;
case Token::BIT_AND:
- instr = new(zone()) HBitAnd(context, left, right);
- break;
case Token::BIT_OR:
- instr = new(zone()) HBitOr(context, left, right);
+ instr = new(zone()) HBitwise(expr->op(), context, left, right);
break;
case Token::SAR:
instr = new(zone()) HSar(context, left, right);
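Folding HBitXor, HBitAnd and HBitOr into a single HBitwise(op, ...) removes three near-identical classes; the token is stored and dispatched on at code-generation and constant-folding time. A standalone analogue of the pattern:

    #include <cstdint>

    enum class BitOp { kAnd, kOr, kXor };

    struct Bitwise {  // one class where there used to be three
      BitOp op;
      int32_t Fold(int32_t l, int32_t r) const {
        switch (op) {
          case BitOp::kAnd: return l & r;
          case BitOp::kOr:  return l | r;
          case BitOp::kXor: return l ^ r;
        }
        return 0;  // unreachable for valid op values
      }
    };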
@@ -6084,7 +6094,7 @@ void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
HStoreContextSlot* store =
new HStoreContextSlot(context, var->index(), value);
AddInstruction(store);
- if (store->HasSideEffects()) AddSimulate(proxy->id());
+ if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
} else {
environment()->Bind(var, value);
}
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32.cc b/src/3rdparty/v8/src/ia32/assembler-ia32.cc
index 66a9884..c6030f9 100644
--- a/src/3rdparty/v8/src/ia32/assembler-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/assembler-ia32.cc
@@ -628,26 +628,6 @@ void Assembler::movzx_w(Register dst, const Operand& src) {
}
-void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
- ASSERT(CpuFeatures::IsEnabled(CMOV));
- EnsureSpace ensure_space(this);
- UNIMPLEMENTED();
- USE(cc);
- USE(dst);
- USE(imm32);
-}
-
-
-void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
- ASSERT(CpuFeatures::IsEnabled(CMOV));
- EnsureSpace ensure_space(this);
- UNIMPLEMENTED();
- USE(cc);
- USE(dst);
- USE(handle);
-}
-
-
void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32.h b/src/3rdparty/v8/src/ia32/assembler-ia32.h
index 4dfde5f..fbd04bb 100644
--- a/src/3rdparty/v8/src/ia32/assembler-ia32.h
+++ b/src/3rdparty/v8/src/ia32/assembler-ia32.h
@@ -713,8 +713,6 @@ class Assembler : public AssemblerBase {
void movzx_w(Register dst, const Operand& src);
// Conditional moves
- void cmov(Condition cc, Register dst, int32_t imm32);
- void cmov(Condition cc, Register dst, Handle<Object> handle);
void cmov(Condition cc, Register dst, Register src) {
cmov(cc, dst, Operand(src));
}
diff --git a/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc b/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
index e73753e..50cddca 100644
--- a/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
@@ -144,11 +144,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
- // Copy the qml global object from the previous context.
- __ mov(ebx, Operand(esi, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::QML_GLOBAL_INDEX)), ebx);
-
-
// Initialize the rest of the slots to undefined.
__ mov(ebx, factory->undefined_value());
for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
@@ -4020,39 +4015,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
- {
- Label not_user_equal, user_equal;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &not_user_equal);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &not_user_equal);
-
- __ CmpObjectType(eax, JS_OBJECT_TYPE, ebx);
- __ j(not_equal, &not_user_equal);
-
- __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &not_user_equal);
-
- __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &user_equal);
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &user_equal);
-
- __ jmp(&not_user_equal);
-
- __ bind(&user_equal);
-
- __ pop(ebx); // Return address.
- __ push(eax);
- __ push(edx);
- __ push(ebx);
- __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
- __ bind(&not_user_equal);
- }
-
// Identical objects can be compared fast, but there are some tricky cases
// for NaN and undefined.
{
@@ -5227,7 +5189,8 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
// Index is not a smi.
@@ -5312,7 +5275,8 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
@@ -5339,7 +5303,8 @@ void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
char_code_at_generator_.GenerateSlow(masm, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}
@@ -6530,14 +6495,8 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
__ j(not_equal, &miss, Label::kNear);
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &miss, Label::kNear);
__ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
__ j(not_equal, &miss, Label::kNear);
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
__ sub(eax, edx);
@@ -6643,69 +6602,6 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* StringDictionaryLookupStub::TryGenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- String* name,
- Register r0) {
- ASSERT(name->IsSymbol());
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = r0;
- // Capacity is smi 2^n.
- __ mov(index, FieldOperand(properties, kCapacityOffset));
- __ dec(index);
- __ and_(index,
- Immediate(Smi::FromInt(name->Hash() +
- StringDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
- Register entity_name = r0;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
- __ j(equal, done);
-
- // Stop if found the property.
- __ cmp(entity_name, Handle<String>(name));
- __ j(equal, miss);
-
- // Check if the entry name is not a symbol.
- __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- kIsSymbolMask);
- __ j(zero, miss);
- }
-
- StringDictionaryLookupStub stub(properties,
- r0,
- r0,
- StringDictionaryLookupStub::NEGATIVE_LOOKUP);
- __ push(Immediate(Handle<Object>(name)));
- __ push(Immediate(name->Hash()));
- MaybeObject* result = masm->TryCallStub(&stub);
- if (result->IsFailure()) return result;
- __ test(r0, r0);
- __ j(not_zero, miss);
- __ jmp(done);
- return result;
-}
-
-
// Probe the string dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found, leaving the
// index into the dictionary in |r0|. Jump to the |miss| label
@@ -6891,6 +6787,8 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// ElementsTransitionGenerator::GenerateDoubleToObject
{ eax, edx, esi, EMIT_REMEMBERED_SET},
{ edx, eax, edi, EMIT_REMEMBERED_SET},
+ // StoreArrayLiteralElementStub::Generate
+ { ebx, eax, ecx, EMIT_REMEMBERED_SET},
// Null termination.
{ no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
};
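The new { ebx, eax, ecx } entry matches the RecordWrite call emitted by StoreArrayLiteralElementStub later in this patch, assuming the list's {object, value, address, action} field order:

    // From the stub body below:
    //   __ RecordWrite(ebx, ecx, eax, kDontSaveFPRegs, EMIT_REMEMBERED_SET, ...);
    //      object=ebx, address=ecx, value=eax
    // => table entry { ebx, eax, ecx, EMIT_REMEMBERED_SET }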
@@ -7133,6 +7031,93 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : element value to store
+ // -- ebx : array literal
+ // -- edi : map of array literal
+ // -- ecx : element index as smi
+ // -- edx : array literal index in function
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label slow_elements_from_double;
+ Label fast_elements;
+
+ if (!FLAG_trace_elements_transitions) {
+ __ CheckFastElements(edi, &double_elements);
+
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ __ JumpIfSmi(eax, &smi_element);
+ __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
+
+ // Storing into the array literal requires an elements transition. Call into
+ // the runtime.
+ }
+
+ __ bind(&slow_elements);
+ __ pop(edi); // Pop the return address; it is pushed back below so the
+ // tail call returns to the right place.
+ __ push(ebx);
+ __ push(ecx);
+ __ push(eax);
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(edx);
+ __ push(edi); // Put the return address back so that the tail
+ // call returns to the right place.
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ if (!FLAG_trace_elements_transitions) {
+ // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+
+ __ push(edx);
+ __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(eax,
+ edx,
+ ecx,
+ edi,
+ xmm0,
+ &slow_elements_from_double,
+ false);
+ __ pop(edx);
+ __ jmp(&element_done);
+
+ __ bind(&slow_elements_from_double);
+ __ pop(edx);
+ __ jmp(&slow_elements);
+
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
+ __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+ __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
+ FixedArrayBase::kHeaderSize));
+ __ mov(Operand(ecx, 0), eax);
+ // Update the write barrier for the array store.
+ __ RecordWrite(ebx, ecx, eax,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ jmp(&element_done);
+
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
+ __ bind(&smi_element);
+ __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+ __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
+ FixedArrayBase::kHeaderSize), eax);
+ // Fall through
+ __ bind(&element_done);
+ __ ret(0);
+ }
+}
+
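The stub selects one of three store paths from the literal's elements kind and falls back to the runtime when an elements transition is required (or whenever transition tracing is enabled, in which case every store takes the slow path). A standalone sketch of the decision the labels implement:

    enum class Kind { kFastSmiOnly, kFast, kFastDouble };

    // Mirrors smi_element / fast_elements / double_elements / slow_elements:
    // smi values store raw, objects need a write barrier, and an object
    // stored into a smi-only array must transition via the runtime.
    bool NeedsRuntimeTransition(Kind kind, bool value_is_smi) {
      if (kind == Kind::kFastDouble) return false;  // unboxed double store
      if (value_is_smi) return false;               // raw store, no barrier
      return kind == Kind::kFastSmiOnly;            // object into smi-only array
    }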
#undef __
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/ia32/code-stubs-ia32.h b/src/3rdparty/v8/src/ia32/code-stubs-ia32.h
index 8775344..692cbcf 100644
--- a/src/3rdparty/v8/src/ia32/code-stubs-ia32.h
+++ b/src/3rdparty/v8/src/ia32/code-stubs-ia32.h
@@ -428,16 +428,6 @@ class StringDictionaryLookupStub: public CodeStub {
Handle<String> name,
Register r0);
- // TODO(kmillikin): Eliminate this function when the stub cache is fully
- // handlified.
- MUST_USE_RESULT static MaybeObject* TryGenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- String* name,
- Register r0);
-
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
diff --git a/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc b/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
index 9cee4a3..0af30f0 100644
--- a/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
@@ -44,11 +44,6 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-static unsigned GetPropertyId(Property* property) {
- return property->id();
-}
-
-
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -169,22 +164,16 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
}
- set_stack_height(2 + scope()->num_stack_slots());
- if (FLAG_verify_stack_height) {
- verify_stack_height();
- }
-
bool function_in_register = true;
// Possibly allocate a local context.
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
+ if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in edi.
__ push(edi);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+ FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -373,15 +362,6 @@ void FullCodeGenerator::EmitReturnSequence() {
}
-void FullCodeGenerator::verify_stack_height() {
- ASSERT(FLAG_verify_stack_height);
- __ sub(ebp, Immediate(kPointerSize * stack_height()));
- __ cmp(ebp, esp);
- __ Assert(equal, "Full codegen stack height not as expected.");
- __ add(ebp, Immediate(kPointerSize * stack_height()));
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
}
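The deleted verify_stack_height() asserted at selected points that the statically tracked operand-stack height matched the machine stack; on ia32 the check amounted to:

    // esp == ebp - kPointerSize * stack_height()
    //
    // The increment/decrement_stack_height() calls removed throughout the
    // rest of this file maintained stack_height() for that check.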
@@ -398,14 +378,13 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
MemOperand operand = codegen()->VarOperand(var, result_register());
// Memory operands can be pushed directly.
__ push(operand);
- codegen()->increment_stack_height();
}
void FullCodeGenerator::TestContext::Plug(Variable* var) const {
// For simplicity we always test the accumulator register.
codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -452,12 +431,11 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
} else {
__ push(Immediate(lit));
}
- codegen()->increment_stack_height();
}
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -490,7 +468,6 @@ void FullCodeGenerator::EffectContext::DropAndPlug(int count,
Register reg) const {
ASSERT(count > 0);
__ Drop(count);
- codegen()->decrement_stack_height(count);
}
@@ -500,7 +477,6 @@ void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
ASSERT(count > 0);
__ Drop(count);
__ Move(result_register(), reg);
- codegen()->decrement_stack_height(count);
}
@@ -509,7 +485,6 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
ASSERT(count > 0);
if (count > 1) __ Drop(count - 1);
__ mov(Operand(esp, 0), reg);
- codegen()->decrement_stack_height(count - 1);
}
@@ -519,9 +494,8 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
- codegen()->decrement_stack_height(count);
}
@@ -555,7 +529,6 @@ void FullCodeGenerator::StackValueContext::Plug(
__ bind(materialize_false);
__ push(Immediate(isolate()->factory()->false_value()));
__ bind(&done);
- codegen()->increment_stack_height();
}
@@ -583,12 +556,11 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
? isolate()->factory()->true_value()
: isolate()->factory()->false_value();
__ push(Immediate(value));
- codegen()->increment_stack_height();
}
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -681,7 +653,7 @@ void FullCodeGenerator::SetVar(Variable* var,
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
bool should_normalize,
Label* if_true,
Label* if_false) {
@@ -692,13 +664,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
Label skip;
if (should_normalize) __ jmp(&skip, Label::kNear);
-
- ForwardBailoutStack* current = forward_bailout_stack_;
- while (current != NULL) {
- PrepareForBailout(current->expr(), state);
- current = current->parent();
- }
-
+ PrepareForBailout(expr, TOS_REG);
if (should_normalize) {
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, NULL);
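Taking the expression instead of a State value lets the function record a single bailout point for the value in the accumulator, replacing the deleted walk over the forward-bailout stack:

    // Before: for each pending forward bailout F:
    //           PrepareForBailout(F.expr(), state);
    // After:  PrepareForBailout(expr, TOS_REG);
    //         one record, for the expression whose boolean result is split on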
@@ -785,18 +751,14 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
- increment_stack_height(3);
if (function != NULL) {
VisitForStackValue(function);
} else if (binding_needs_init) {
__ push(Immediate(isolate()->factory()->the_hole_value()));
- increment_stack_height();
} else {
__ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
- increment_stack_height();
}
__ CallRuntime(Runtime::kDeclareContextSlot, 4);
- decrement_stack_height(4);
break;
}
}
@@ -821,7 +783,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
- int switch_clause_stack_height = stack_height();
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -886,7 +847,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ jmp(default_clause->body_target());
}
- set_stack_height(switch_clause_stack_height);
// Compile all the case bodies.
for (int i = 0; i < clauses->length(); i++) {
Comment cmnt(masm_, "[ Case body");
@@ -928,7 +888,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
__ push(eax);
- increment_stack_height();
// Check for proxies.
Label call_runtime;
@@ -1026,8 +985,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax); // Fixed array length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.
- // 1 ~ The object has already been pushed.
- increment_stack_height(ForIn::kElementCount - 1);
// Generate code for doing the condition check.
__ bind(&loop);
__ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index.
@@ -1089,7 +1046,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(loop_statement.break_label());
__ add(esp, Immediate(5 * kPointerSize));
- decrement_stack_height(ForIn::kElementCount);
// Exit and decrement the loop depth.
__ bind(&exit);
decrement_loop_depth();
@@ -1180,10 +1136,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// All extension objects were empty and it is safe to use a global
// load IC call.
- __ mov(eax, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ mov(eax, GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF || var->is_qml_global())
+ RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
__ call(ic, mode);
@@ -1264,10 +1220,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object in eax.
- __ mov(eax, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ mov(eax, GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ call(ic, var->is_qml_global()?RelocInfo::CODE_TARGET:RelocInfo::CODE_TARGET_CONTEXT);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(eax);
break;
}
@@ -1411,7 +1367,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (!result_saved) {
__ push(eax); // Save result on the stack
result_saved = true;
- increment_stack_height();
}
switch (property->kind()) {
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1436,7 +1391,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Fall through.
case ObjectLiteral::Property::PROTOTYPE:
__ push(Operand(esp, 0)); // Duplicate receiver.
- increment_stack_height();
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
@@ -1445,20 +1399,16 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
__ Drop(3);
}
- decrement_stack_height(3);
break;
case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
__ push(Operand(esp, 0)); // Duplicate receiver.
- increment_stack_height();
VisitForStackValue(key);
__ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
Smi::FromInt(1) :
Smi::FromInt(0)));
- increment_stack_height();
VisitForStackValue(value);
__ CallRuntime(Runtime::kDefineAccessor, 4);
- decrement_stack_height(4);
break;
default: UNREACHABLE();
}
@@ -1533,67 +1483,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(eax);
result_saved = true;
- increment_stack_height();
}
VisitForAccumulatorValue(subexpr);
// Store the subexpression value in the array's elements.
__ mov(ebx, Operand(esp, 0)); // Copy of array literal.
__ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
- __ CheckFastElements(edi, &double_elements);
-
- // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
- __ JumpIfSmi(result_register(), &smi_element);
- __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- __ push(Operand(esp, 0)); // Copy of array literal.
- __ push(Immediate(Smi::FromInt(i)));
- __ push(result_register());
- __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
- __ push(Immediate(Smi::FromInt(strict_mode_flag()))); // Strict mode.
- __ CallRuntime(Runtime::kSetProperty, 5);
- __ jmp(&element_done);
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
__ mov(ecx, Immediate(Smi::FromInt(i)));
- __ StoreNumberToDoubleElements(result_register(),
- ebx,
- ecx,
- edx,
- xmm0,
- &slow_elements,
- false);
- __ jmp(&element_done);
-
- // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ mov(FieldOperand(ebx, offset), result_register());
- // Update the write barrier for the array store.
- __ RecordWriteField(ebx, offset, result_register(), ecx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&element_done);
-
- // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
- // FAST_ELEMENTS, and value is Smi.
- __ bind(&smi_element);
- __ mov(FieldOperand(ebx, offset), result_register());
- // Fall through
-
- __ bind(&element_done);
+ __ mov(edx, Immediate(Smi::FromInt(expr->literal_index())));
+ StoreArrayLiteralElementStub stub;
+ __ CallStub(&stub);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
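At the CallStub site the caller has set up exactly the register state the stub's header comment documents, with a bailout point kept per element:

    // eax = result_register()            element value
    // ebx = Operand(esp, 0)              array literal (still on the stack)
    // edi = the literal's map
    // ecx = Smi::FromInt(i)              element index
    // edx = Smi::FromInt(literal_index)  literal's slot in the literals array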
@@ -1611,9 +1510,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
// on the left-hand side.
if (!expr->target()->IsValidLeftHandSide()) {
- ASSERT(expr->target()->AsThrow() != NULL);
- VisitInCurrentContext(expr->target()); // Throw does not plug the context
- context()->Plug(eax);
+ VisitForEffect(expr->target());
return;
}
@@ -1638,7 +1535,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// We need the receiver both on the stack and in the accumulator.
VisitForAccumulatorValue(property->obj());
__ push(result_register());
- increment_stack_height();
} else {
VisitForStackValue(property->obj());
}
@@ -1649,7 +1545,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(property->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
- increment_stack_height();
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1681,7 +1576,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
Token::Value op = expr->binary_op();
__ push(eax); // Left operand goes on the stack.
- increment_stack_height();
VisitForAccumulatorValue(expr->value());
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
@@ -1731,14 +1625,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
ASSERT(!key->handle()->IsSmi());
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, prop->id());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1751,7 +1645,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// stack. Right operand is in eax.
Label smi_case, done, stub_call;
__ pop(edx);
- decrement_stack_height();
__ mov(ecx, eax);
__ or_(eax, edx);
JumpPatchSite patch_site(masm_);
@@ -1843,7 +1736,6 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(edx);
- decrement_stack_height();
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
@@ -1856,9 +1748,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
// Invalid left-hand sides are rewritten to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
- ASSERT(expr->AsThrow() != NULL);
- VisitInCurrentContext(expr); // Throw does not plug the context
- context()->Plug(eax);
+ VisitForEffect(expr);
return;
}
@@ -1882,11 +1772,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
}
case NAMED_PROPERTY: {
__ push(eax); // Preserve value.
- increment_stack_height();
VisitForAccumulatorValue(prop->obj());
__ mov(edx, eax);
__ pop(eax); // Restore value.
- decrement_stack_height();
__ mov(ecx, prop->key()->AsLiteral()->handle());
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
@@ -1896,14 +1784,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
}
case KEYED_PROPERTY: {
__ push(eax); // Preserve value.
- increment_stack_height();
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(ecx, eax);
__ pop(edx);
- decrement_stack_height();
__ pop(eax); // Restore value.
- decrement_stack_height();
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
@@ -1921,7 +1806,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(ecx, var->name());
- __ mov(edx, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ mov(edx, GlobalObjectOperand());
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
@@ -2030,7 +1915,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(edx, Operand(esp, 0));
} else {
__ pop(edx);
- decrement_stack_height();
}
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
@@ -2044,7 +1928,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(eax);
__ Drop(1);
- decrement_stack_height();
}
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
@@ -2066,12 +1949,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
__ pop(ecx);
- decrement_stack_height();
if (expr->ends_initialization_block()) {
__ mov(edx, Operand(esp, 0)); // Leave receiver on the stack for later.
} else {
__ pop(edx);
- decrement_stack_height();
}
// Record source code position before IC call.
SetSourcePosition(expr->position());
@@ -2087,7 +1968,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ push(edx);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(eax);
- decrement_stack_height();
}
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2107,7 +1987,6 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ pop(edx);
- decrement_stack_height();
EmitKeyedPropertyLoad(expr);
context()->Plug(eax);
}
@@ -2134,7 +2013,6 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- decrement_stack_height(arg_count + 1);
context()->Plug(eax);
}
@@ -2149,7 +2027,6 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
__ pop(ecx);
__ push(eax);
__ push(ecx);
- increment_stack_height();
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2168,7 +2045,6 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- decrement_stack_height(arg_count + 1);
context()->DropAndPlug(1, eax); // Drop the key still on the stack.
}
@@ -2212,14 +2088,11 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- decrement_stack_height(arg_count + 1);
context()->DropAndPlug(1, eax);
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
- int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ push(Operand(esp, arg_count * kPointerSize));
@@ -2236,12 +2109,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
FLAG_harmony_scoping ? kStrictMode : strict_mode_flag();
__ push(Immediate(Smi::FromInt(strict_mode)));
- // Push the qml mode flag
- __ push(Immediate(Smi::FromInt(is_qml_mode())));
-
- __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
- ? Runtime::kResolvePossiblyDirectEvalNoLookup
- : Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
}
@@ -2267,33 +2135,15 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForStackValue(callee);
// Reserved receiver slot.
__ push(Immediate(isolate()->factory()->undefined_value()));
- increment_stack_height();
// Push the arguments.
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly in
- // generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- Label done;
- Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
- Label slow;
- EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
- // Push the function and resolve eval.
- __ push(eax);
- EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
- __ jmp(&done);
- __ bind(&slow);
- }
-
// Push a copy of the function (found below the arguments) and
// resolve eval.
__ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
- __ bind(&done);
+ EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in eax (function) and
// edx (receiver). Touch up the stack with the right values.
@@ -2307,14 +2157,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- decrement_stack_height(arg_count + 1); // Function is left on the stack.
context()->DropAndPlug(1, eax);
} else if (proxy != NULL && proxy->var()->IsUnallocated()) {
// Push global object as receiver for the call IC.
- __ push(proxy->var()->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
- increment_stack_height();
- EmitCallWithIC(expr, proxy->name(), proxy->var()->is_qml_global()?RelocInfo::CODE_TARGET:RelocInfo::CODE_TARGET_CONTEXT);
+ __ push(GlobalObjectOperand());
+ EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
// Call to a lookup slot (dynamically introduced variable).
@@ -2332,7 +2180,6 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ CallRuntime(Runtime::kLoadContextSlot, 2);
__ push(eax); // Function.
__ push(edx); // Receiver.
- increment_stack_height(2);
// If fast case code has been generated, emit code to push the function
// and receiver and have the slow path jump around this code.
@@ -2340,8 +2187,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Label call;
__ jmp(&call, Label::kNear);
__ bind(&done);
- // Push function. Stack height already incremented in slow case
- // above.
+ // Push function.
__ push(eax);
// The receiver is implicitly the global receiver. Indicate this by
// passing the hole to the call function stub.
@@ -2374,7 +2220,6 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Load global receiver object.
__ mov(ebx, GlobalObjectOperand());
__ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
- increment_stack_height();
// Emit function call.
EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
}
@@ -2415,13 +2260,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Handle<Code> construct_builtin =
isolate()->builtins()->JSConstructCall();
__ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
- decrement_stack_height(arg_count + 1);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2433,7 +2277,7 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ test(eax, Immediate(kSmiTagMask));
Split(zero, if_true, if_false, fall_through);
@@ -2441,7 +2285,8 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2453,7 +2298,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ test(eax, Immediate(kSmiTagMask | 0x80000000));
Split(zero, if_true, if_false, fall_through);
@@ -2461,7 +2306,8 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2485,14 +2331,15 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ j(below, if_false);
__ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2506,14 +2353,15 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2529,7 +2377,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(not_zero, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2537,7 +2385,8 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2614,12 +2463,13 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ jmp(if_true);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2633,14 +2483,15 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2654,14 +2505,15 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2675,7 +2527,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2683,8 +2535,8 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -2707,14 +2559,15 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
@@ -2729,16 +2582,16 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
&if_true, &if_false, &fall_through);
__ pop(ebx);
- decrement_stack_height();
__ cmp(eax, ebx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
// ArgumentsAccessStub expects the key in edx and the formal
@@ -2752,8 +2605,8 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label exit;
// Get the number of formal parameters.
@@ -2775,7 +2628,8 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
Label done, null, function, non_function_constructor;
@@ -2835,7 +2689,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// Conditionally generate a log call.
// Args:
// 0 (literal string): The type of logging (corresponds to the flags).
@@ -2843,12 +2697,12 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
// 1 (string): Format string. Access the string at argument index 2
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
- decrement_stack_height(2);
}
// Finally, we're expected to leave a value on the top of the stack.
__ mov(eax, isolate()->factory()->undefined_value());
@@ -2856,8 +2710,8 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
@@ -2907,34 +2761,35 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallStub(&stub);
- decrement_stack_height(3);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
VisitForStackValue(args->at(3));
__ CallStub(&stub);
- decrement_stack_height(4);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -2952,8 +2807,9 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2964,18 +2820,17 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
} else {
__ CallRuntime(Runtime::kMath_pow, 2);
}
- decrement_stack_height(2);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
__ pop(ebx); // eax = value. ebx = object.
- decrement_stack_height();
Label done;
// If the object is a smi, return the value.
@@ -2998,7 +2853,8 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
// Load the argument on the stack and call the stub.
@@ -3006,12 +2862,12 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
NumberToStringStub stub;
__ CallStub(&stub);
- decrement_stack_height();
context()->Plug(eax);
}
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3029,7 +2885,8 @@ void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
@@ -3041,7 +2898,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
Register result = edx;
__ pop(object);
- decrement_stack_height();
Label need_conversion;
Label index_out_of_range;
@@ -3077,7 +2933,8 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
@@ -3090,7 +2947,6 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
Register result = eax;
__ pop(object);
- decrement_stack_height();
Label need_conversion;
Label index_out_of_range;
@@ -3127,7 +2983,8 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
VisitForStackValue(args->at(0));
@@ -3135,12 +2992,12 @@ void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
StringAddStub stub(NO_STRING_ADD_FLAGS);
__ CallStub(&stub);
- decrement_stack_height(2);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
VisitForStackValue(args->at(0));
@@ -3148,58 +3005,58 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
StringCompareStub stub;
__ CallStub(&stub);
- decrement_stack_height(2);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
- decrement_stack_height();
context()->Plug(eax);
}
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
- decrement_stack_height();
context()->Plug(eax);
}
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
- decrement_stack_height();
context()->Plug(eax);
}
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
// Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
- decrement_stack_height();
context()->Plug(eax);
}
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
int arg_count = args->length() - 2; // 2 ~ receiver and function.
@@ -3214,25 +3071,25 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
__ InvokeFunction(edi, count, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- decrement_stack_height(arg_count + 1);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpConstructResultStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallStub(&stub);
- decrement_stack_height(3);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3320,12 +3177,12 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ CallRuntime(Runtime::kSwapElements, 3);
__ bind(&done);
- decrement_stack_height(3);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3373,7 +3230,8 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
Register right = eax;
@@ -3406,12 +3264,12 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
__ mov(eax, Immediate(isolate()->factory()->true_value()));
__ bind(&done);
- decrement_stack_height();
context()->Plug(eax);
}
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3429,14 +3287,15 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
__ test(FieldOperand(eax, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(zero, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3451,11 +3310,12 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// We will leave the separator on the stack until the end of the function.
VisitForStackValue(args->at(1));
@@ -3710,7 +3570,6 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ add(esp, Immediate(3 * kPointerSize));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- decrement_stack_height();
context()->Plug(eax);
}
@@ -3730,7 +3589,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Prepare for calling JS runtime function.
__ mov(eax, GlobalObjectOperand());
__ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
- increment_stack_height();
}
// Push the arguments ("left-to-right").
@@ -3752,11 +3610,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
}
- decrement_stack_height(arg_count);
- if (expr->is_jsruntime()) {
- decrement_stack_height();
- }
-
context()->Plug(eax);
}
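Every decrement_stack_height/increment_stack_height deletion in this file belongs to one change: the full code generator stops tracking the operand stack depth by hand, so stub and runtime calls no longer need matching bookkeeping. A plausible sketch of the retired helpers (the names come from the deleted calls; the body is an assumption):

#include <cassert>

class StackHeightTracker {
 public:
  StackHeightTracker() : stack_height_(0) {}
  void increment_stack_height(int delta = 1) { stack_height_ += delta; }
  void decrement_stack_height(int delta = 1) {
    stack_height_ -= delta;
    assert(stack_height_ >= 0);  // every pop must match an earlier push
  }
 private:
  int stack_height_;  // expression stack depth, in slots
};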
@@ -3773,7 +3626,6 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- decrement_stack_height(2);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -3781,7 +3633,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// but "delete this" is allowed.
ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
if (var->IsUnallocated()) {
- __ push(var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
__ push(Immediate(Smi::FromInt(kNonStrictMode)));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
@@ -3821,18 +3673,41 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Unary NOT has no side effects so it's only necessary to visit the
// subexpression. Match the optimizing compiler by not branching.
VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
} else {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
-
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ if (context()->IsAccumulatorValue()) {
+ __ mov(eax, isolate()->factory()->true_value());
+ } else {
+ __ push(isolate()->factory()->true_value());
+ }
+ __ jmp(&done, Label::kNear);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ if (context()->IsAccumulatorValue()) {
+ __ mov(eax, isolate()->factory()->false_value());
+ } else {
+ __ push(isolate()->factory()->false_value());
+ }
+ __ bind(&done);
}
break;
}
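Two cases replace the old generic path for unary NOT: in a test context the recursive visit simply receives the labels in swapped order, and in value contexts both outcomes are materialized so each gets its own bailout id. A sketch of the swapped-label trick with stand-in declarations:

struct Label {};

// Assumed visitor, standing in for FullCodeGenerator::VisitForControl.
void VisitForControl(int operand, Label* if_true, Label* if_false,
                     Label* fall_through);

void VisitNotForControl(int operand, Label* if_true, Label* if_false,
                        Label* fall_through) {
  // Branching to if_false when the operand is true is exactly NOT,
  // so the negation itself emits no instructions.
  VisitForControl(operand, if_false, if_true, fall_through);
}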
@@ -3843,7 +3718,6 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForTypeofValue(expr->expression());
}
__ CallRuntime(Runtime::kTypeof, 1);
- decrement_stack_height();
context()->Plug(eax);
break;
}
@@ -3897,10 +3771,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
// as the left-hand side.
if (!expr->expression()->IsValidLeftHandSide()) {
- ASSERT(expr->expression()->AsThrow() != NULL);
- VisitInCurrentContext(expr->expression());
- // Visiting Throw does not plug the context.
- context()->Plug(eax);
+ VisitForEffect(expr->expression());
return;
}
@@ -3925,20 +3796,17 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
__ push(Immediate(Smi::FromInt(0)));
- increment_stack_height();
}
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the accumulator.
VisitForAccumulatorValue(prop->obj());
__ push(eax);
- increment_stack_height();
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
- increment_stack_height();
EmitKeyedPropertyLoad(prop);
}
}
@@ -3969,7 +3837,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
switch (assign_type) {
case VARIABLE:
__ push(eax);
- increment_stack_height();
break;
case NAMED_PROPERTY:
__ mov(Operand(esp, kPointerSize), eax);
@@ -4043,7 +3910,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(ecx, prop->key()->AsLiteral()->handle());
__ pop(edx);
- decrement_stack_height();
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
@@ -4061,8 +3927,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(ecx);
__ pop(edx);
- decrement_stack_height();
- decrement_stack_height();
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
@@ -4089,7 +3953,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
- __ mov(eax, proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ mov(eax, GlobalObjectOperand());
__ mov(ecx, Immediate(proxy->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  // Use a regular load, not a contextual load, to avoid a reference
  // error.
@@ -4114,12 +3978,13 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
context()->Plug(eax);
} else {
// This expression cannot throw a reference error at the top level.
- VisitInCurrentContext(expr);
+ VisitInDuplicateContext(expr);
}
}
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
Handle<String> check) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -4129,9 +3994,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
&if_true, &if_false, &fall_through);
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(expr);
+ VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (check->Equals(isolate()->heap()->number_symbol())) {
__ JumpIfSmi(eax, if_true);
@@ -4215,8 +4080,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- decrement_stack_height(2);
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
break;
@@ -4225,8 +4089,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
- decrement_stack_height(2);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ test(eax, eax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
@@ -4259,7 +4122,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
UNREACHABLE();
}
__ pop(edx);
- decrement_stack_height();
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
@@ -4279,7 +4141,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ test(eax, eax);
Split(cc, if_true, if_false, fall_through);
}
@@ -4302,7 +4164,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Handle<Object> nil_value = nil == kNullValue ?
isolate()->factory()->null_value() :
isolate()->factory()->undefined_value();
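All the PrepareForBailoutBeforeSplit call sites in this file change the same way: the helper takes the expression being tested instead of a bare TOS_REG marker. A hedged sketch of the resulting contract (stand-in declarations; the real body lives in full-codegen.cc):

class Expression;
enum State { NO_REGISTERS, TOS_REG };
void PrepareForBailout(Expression* expr, State state);  // assumed helper

void PrepareForBailoutBeforeSplit(Expression* expr, bool should_normalize) {
  // The node carries its own AST id, so the deopt point is keyed by
  // expr directly, with the test value implicitly in the accumulator.
  PrepareForBailout(expr, TOS_REG);
  (void)should_normalize;  // result normalization omitted in this sketch
}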
diff --git a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
index b381227..d4cbbce 100644
--- a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
@@ -211,13 +211,12 @@ bool LCodeGen::GeneratePrologue() {
// Possibly allocate a local context.
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
+ if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in edi.
__ push(edi);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+ FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -2662,7 +2661,7 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ mov(result, Operand(context, Context::SlotOffset(instr->qml_global()?Context::QML_GLOBAL_INDEX:Context::GLOBAL_INDEX)));
+ __ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX)));
}
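With the QML global slot gone, the load always targets the stock GLOBAL_INDEX slot. For orientation, a sketch of how such a slot offset is conventionally computed (an assumption for illustration, not part of this diff):

static const int kPointerSize = 4;     // ia32
static const int kHeapObjectTag = 1;   // tagged pointers are biased by one
static const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length

// Contexts are laid out like FixedArrays, so a slot lives at a fixed
// header offset plus its index, corrected for the pointer tag.
static int SlotOffset(int index) {
  return kFixedArrayHeaderSize + index * kPointerSize - kHeapObjectTag;
}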
@@ -3132,7 +3131,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- RelocInfo::Mode mode = instr->qml_global()?RelocInfo::CODE_TARGET:RelocInfo::CODE_TARGET_CONTEXT;
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(ecx, instr->name());
diff --git a/src/3rdparty/v8/src/ia32/lithium-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-ia32.cc
index d09d55f..227d0b5 100644
--- a/src/3rdparty/v8/src/ia32/lithium-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/lithium-ia32.cc
@@ -110,22 +110,17 @@ void LInstruction::PrintTo(StringStream* stream) {
}
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+void LInstruction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- for (int i = 0; i < inputs_.length(); i++) {
+ for (int i = 0; i < InputCount(); i++) {
if (i > 0) stream->Add(" ");
- inputs_[i]->PrintTo(stream);
+ InputAt(i)->PrintTo(stream);
}
}
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- for (int i = 0; i < results_.length(); i++) {
- if (i > 0) stream->Add(" ");
- results_[i]->PrintTo(stream);
- }
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+ if (HasResult()) result()->PrintTo(stream);
}
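The printers move from the LTemplateInstruction<R, I, T> template to the LInstruction base class, walking operands through the virtual accessors instead of the template's member arrays; one out-of-line definition replaces an instantiation per signature. A reduced sketch with stand-in types:

class StringStream { public: void Add(const char* format, ...); };
class LOperand { public: void PrintTo(StringStream* stream); };

class LInstruction {
 public:
  virtual int InputCount() = 0;
  virtual LOperand* InputAt(int i) = 0;
  virtual ~LInstruction() {}

  void PrintDataTo(StringStream* stream) {  // no template context needed
    stream->Add("= ");
    for (int i = 0; i < InputCount(); i++) {
      if (i > 0) stream->Add(" ");
      InputAt(i)->PrintTo(stream);
    }
  }
};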
@@ -459,7 +454,7 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new LInstructionGap(block);
+ LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
instructions_.Add(gap);
@@ -534,7 +529,7 @@ Representation LChunk::LookupLiteralRepresentation(
LChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new LChunk(info(), graph());
+ chunk_ = new(zone()) LChunk(info(), graph());
HPhase phase("Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -570,14 +565,14 @@ LRegister* LChunkBuilder::ToOperand(Register reg) {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
}
LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- XMMRegister::ToAllocationIndex(reg));
+ return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ XMMRegister::ToAllocationIndex(reg));
}
@@ -592,30 +587,30 @@ LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
return Use(value,
- new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
}
LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
}
LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::NONE));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
}
LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+ LUnallocated::USED_AT_START));
}
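From here on, every bare new in the chunk builder becomes new(zone()): lithium objects are carved out of the compile-time arena and released wholesale with it, never deleted individually. A minimal sketch of the machinery this relies on (growth and limit checks omitted; the shape is an assumption, matching a conventional bump allocator):

#include <cstddef>

class Zone {
 public:
  explicit Zone(char* buffer) : position_(buffer) {}
  void* New(size_t size) {
    char* result = position_;
    position_ += (size + kAlignment - 1) & ~(kAlignment - 1);
    return result;  // freed only when the whole zone is dropped
  }
 private:
  static const size_t kAlignment = 8;
  char* position_;
};

inline void* operator new(size_t size, Zone* zone) {
  return zone->New(size);
}
// Allocation then reads as in the hunks: instr = new(zone()) LDeoptimize;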
@@ -650,7 +645,7 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new LUnallocated(LUnallocated::ANY));
+ : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
}
@@ -675,14 +670,15 @@ LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
template<int I, int T>
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::NONE));
+ return Define(instr, new(zone()) LUnallocated(LUnallocated::NONE));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
@@ -690,14 +686,16 @@ template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
LTemplateInstruction<1, I, T>* instr,
int index) {
- return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
@@ -750,7 +748,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
instr->MarkAsCall();
instr = AssignPointerMap(instr);
- if (hinstr->HasSideEffects()) {
+ if (hinstr->HasObservableSideEffects()) {
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -762,7 +760,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
// Thus we still need to attach environment to this call even if
// call sequence can not deoptimize eagerly.
bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
@@ -779,13 +778,14 @@ LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new LPointerMap(position_));
+ instr->set_pointer_map(new(zone()) LPointerMap(position_));
return instr;
}
LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
allocator_->RecordTemporary(operand);
return operand;
}
@@ -806,40 +806,17 @@ LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new LLabel(instr->block());
+ return new(zone()) LLabel(instr->block());
}
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new LDeoptimize);
+ return AssignEnvironment(new(zone()) LDeoptimize);
}
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new LArithmeticT(op, context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
+ return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -852,7 +829,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new LArithmeticT(op, context, left, right);
+ LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -886,7 +863,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
}
LInstruction* result =
- DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
return does_deopt ? AssignEnvironment(result) : result;
}
@@ -899,7 +876,7 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(op != Token::MOD);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new LArithmeticD(op, left, right);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
return DefineSameAsFirst(result);
}
@@ -919,7 +896,7 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
LOperand* left_operand = UseFixed(left, edx);
LOperand* right_operand = UseFixed(right, eax);
LArithmeticT* result =
- new LArithmeticT(op, context, left_operand, right_operand);
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1017,12 +994,13 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
- LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer);
+ LEnvironment* result =
+ new(zone()) LEnvironment(hydrogen_env->closure(),
+ ast_id,
+ hydrogen_env->parameter_count(),
+ argument_count_,
+ value_count,
+ outer);
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1031,7 +1009,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument((*argument_index_accumulator)++);
+ op = new(zone()) LArgument((*argument_index_accumulator)++);
} else {
op = UseAny(value);
}
@@ -1043,7 +1021,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
}
@@ -1055,7 +1033,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
- return new LGoto(successor->block_id());
+ return new(zone()) LGoto(successor->block_id());
}
ToBooleanStub::Types expected = instr->expected_input_types();
  // We need a temporary register when we have to access the map *or* we have
  // no type info yet, in which case we handle all cases (including the ones
  // involving maps).
// involving maps).
bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(new LBranch(UseRegister(v), temp));
+ return AssignEnvironment(new(zone()) LBranch(UseRegister(v), temp));
}
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
- return new LCmpMapAndBranch(value);
+ return new(zone()) LCmpMapAndBranch(value);
}
LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- return DefineAsRegister(new LArgumentsLength(Use(length->value())));
+ return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
}
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new LArgumentsElements);
+ return DefineAsRegister(new(zone()) LArgumentsElements);
}
@@ -1088,7 +1066,7 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
LOperand* context = UseFixed(instr->context(), esi);
- LInstanceOf* result = new LInstanceOf(context, left, right);
+ LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1096,7 +1074,7 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new LInstanceOfKnownGlobal(
+ new(zone()) LInstanceOfKnownGlobal(
UseFixed(instr->context(), esi),
UseFixed(instr->left(), InstanceofStub::left()),
FixedTemp(edi));
@@ -1110,11 +1088,11 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* length = UseFixed(instr->length(), ebx);
LOperand* elements = UseFixed(instr->elements(), ecx);
LOperand* temp = FixedTemp(edx);
- LApplyArguments* result = new LApplyArguments(function,
- receiver,
- length,
- elements,
- temp);
+ LApplyArguments* result = new(zone()) LApplyArguments(function,
+ receiver,
+ length,
+ elements,
+ temp);
return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1122,42 +1100,44 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
++argument_count_;
LOperand* argument = UseAny(instr->argument());
- return new LPushArgument(argument);
+ return new(zone()) LPushArgument(argument);
}
LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+ return instr->HasNoUses()
+ ? NULL
+ : DefineAsRegister(new(zone()) LThisFunction);
}
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
}
LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LOuterContext(context));
+ return DefineAsRegister(new(zone()) LOuterContext(context));
}
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGlobalObject(context, instr->qml_global()));
+ return DefineAsRegister(new(zone()) LGlobalObject(context));
}
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGlobalReceiver(global_object));
+ return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
}
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallConstantFunction, eax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
}
@@ -1165,7 +1145,7 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new LInvokeFunction(context, function);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1177,17 +1157,20 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
ASSERT(instr->value()->representation().IsDouble());
LOperand* context = UseAny(instr->context()); // Not actually used.
LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+ LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+ input);
return DefineSameAsFirst(result);
} else if (op == kMathSin || op == kMathCos) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+ LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+ input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* context = UseAny(instr->context()); // Deferred use by MathAbs.
- LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+ LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+ input);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1212,7 +1195,7 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* key = UseFixed(instr->key(), ecx);
argument_count_ -= instr->argument_count();
- LCallKeyed* result = new LCallKeyed(context, key);
+ LCallKeyed* result = new(zone()) LCallKeyed(context, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1220,7 +1203,7 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
- LCallNamed* result = new LCallNamed(context);
+ LCallNamed* result = new(zone()) LCallNamed(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1228,14 +1211,14 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
- LCallGlobal* result = new LCallGlobal(context, instr->qml_global());
+ LCallGlobal* result = new(zone()) LCallGlobal(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallKnownGlobal, eax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
}
@@ -1243,7 +1226,7 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
argument_count_ -= instr->argument_count();
- LCallNew* result = new LCallNew(context, constructor);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1251,7 +1234,7 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
- LCallFunction* result = new LCallFunction(context);
+ LCallFunction* result = new(zone()) LCallFunction(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1259,7 +1242,7 @@ LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
argument_count_ -= instr->argument_count();
LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new LCallRuntime(context), eax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
}
@@ -1278,8 +1261,26 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
}
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
- return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineSameAsFirst(new(zone()) LBitI(left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
+ LArithmeticT* result =
+ new(zone()) LArithmeticT(instr->op(), context, left, right);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+ }
}
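DoBitAnd, DoBitOr, DoBitXor and their shared DoBit helper collapse into this single DoBitwise because the hydrogen side merged the three nodes into one HBitwise that carries its operator, read back here as instr->op(). A sketch of that node with a stand-in base class:

namespace Token { enum Value { BIT_AND, BIT_OR, BIT_XOR }; }
class HBitwiseBinaryOperation {};  // stand-in for the real hierarchy

class HBitwise : public HBitwiseBinaryOperation {
 public:
  explicit HBitwise(Token::Value op) : op_(op) {}
  Token::Value op() const { return op_; }  // consumed by DoBitwise above
 private:
  Token::Value op_;
};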
@@ -1287,21 +1288,11 @@ LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
ASSERT(instr->value()->representation().IsInteger32());
ASSERT(instr->representation().IsInteger32());
LOperand* input = UseRegisterAtStart(instr->value());
- LBitNotI* result = new LBitNotI(input);
+ LBitNotI* result = new(zone()) LBitNotI(input);
return DefineSameAsFirst(result);
}
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
- return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
- return DoBit(Token::BIT_XOR, instr);
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
@@ -1311,7 +1302,7 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LOperand* temp = FixedTemp(edx);
LOperand* dividend = UseFixed(instr->left(), eax);
LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new LDivI(dividend, divisor, temp);
+ LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, eax));
} else {
ASSERT(instr->representation().IsTagged());
@@ -1329,7 +1320,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
- LModI* mod = new LModI(value, UseOrConstant(instr->right()), NULL);
+ LModI* mod =
+ new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
result = DefineSameAsFirst(mod);
} else {
// The temporary operand is necessary to ensure that right is
@@ -1337,7 +1329,7 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
LOperand* temp = FixedTemp(edx);
LOperand* value = UseFixed(instr->left(), eax);
LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new LModI(value, divisor, temp);
+ LModI* mod = new(zone()) LModI(value, divisor, temp);
result = DefineFixed(mod, edx);
}
@@ -1354,7 +1346,7 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
// TODO(fschneider): Allow any register as input registers.
LOperand* left = UseFixedDouble(instr->left(), xmm2);
LOperand* right = UseFixedDouble(instr->right(), xmm1);
- LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+ LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
}
}
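DoMod keeps a fast path when the divisor is a known power of two, avoiding idiv and the fixed edx temp. The arithmetic it exploits, as a standalone sketch (JS % is a truncated-division remainder, so the sign follows the dividend):

#include <cstdint>

int32_t ModPowerOfTwo(int32_t x, int32_t d) {  // d == 1 << k, d > 0
  int32_t r = x & (d - 1);               // correct for non-negative x
  return (x < 0 && r != 0) ? r - d : r;  // e.g. -5 % 4 == -1, not 3
}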
@@ -1370,7 +1362,7 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
temp = TempRegister();
}
- LMulI* mul = new LMulI(left, right, temp);
+ LMulI* mul = new(zone()) LMulI(left, right, temp);
return AssignEnvironment(DefineSameAsFirst(mul));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
@@ -1387,7 +1379,7 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new LSubI(left, right);
+ LSubI* sub = new(zone()) LSubI(left, right);
LInstruction* result = DefineSameAsFirst(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
@@ -1408,7 +1400,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- LAddI* add = new LAddI(left, right);
+ LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineSameAsFirst(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
@@ -1433,7 +1425,7 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LOperand* right = exponent_type.IsDouble() ?
UseFixedDouble(instr->right(), xmm2) :
UseFixed(instr->right(), eax);
- LPower* result = new LPower(left, right);
+ LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1445,7 +1437,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
LOperand* right = UseFixed(instr->right(), eax);
- LCmpT* result = new LCmpT(context, left, right);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1458,7 +1450,7 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return new LCmpIDAndBranch(left, right);
+ return new(zone()) LCmpIDAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -1472,7 +1464,7 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
}
- return new LCmpIDAndBranch(left, right);
+ return new(zone()) LCmpIDAndBranch(left, right);
}
}
@@ -1481,49 +1473,51 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseAtStart(instr->right());
- return new LCmpObjectEqAndBranch(left, right);
+ return new(zone()) LCmpObjectEqAndBranch(left, right);
}
LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
HCompareConstantEqAndBranch* instr) {
- return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
+ return new(zone()) LCmpConstantEqAndBranch(
+ UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
// We only need a temp register for non-strict compare.
LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
- return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
+ return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
}
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
- return new LIsObjectAndBranch(UseRegister(instr->value()), temp);
+ return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
}
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(instr->value()));
+ return new(zone()) LIsSmiAndBranch(Use(instr->value()));
}
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
HIsUndetectableAndBranch* instr) {
  ASSERT(instr->value()->representation().IsTagged());
- return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
- TempRegister());
+ return new(zone()) LIsUndetectableAndBranch(
+ UseRegisterAtStart(instr->value()), TempRegister());
}
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()),
- TempRegister());
+ return new(zone()) LHasInstanceTypeAndBranch(
+ UseRegisterAtStart(instr->value()),
+ TempRegister());
}
@@ -1532,14 +1526,14 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGetCachedArrayIndex(value));
+ return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
}
LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LHasCachedArrayIndexAndBranch(
+ return new(zone()) LHasCachedArrayIndexAndBranch(
UseRegisterAtStart(instr->value()));
}
@@ -1547,7 +1541,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ return new(zone()) LClassOfTestAndBranch(UseTempRegister(instr->value()),
TempRegister(),
TempRegister());
}
@@ -1555,32 +1549,32 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LJSArrayLength(array));
+ return DefineAsRegister(new(zone()) LJSArrayLength(array));
}
LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
HFixedArrayBaseLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LFixedArrayBaseLength(array));
+ return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
}
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LElementsKind(object));
+ return DefineAsRegister(new(zone()) LElementsKind(object));
}
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
- LValueOf* result = new LValueOf(object, TempRegister());
+ LValueOf* result = new(zone()) LValueOf(object, TempRegister());
return AssignEnvironment(DefineSameAsFirst(result));
}
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- return AssignEnvironment(new LBoundsCheck(
+ return AssignEnvironment(new(zone()) LBoundsCheck(
UseRegisterOrConstantAtStart(instr->index()),
UseAtStart(instr->length())));
}
@@ -1596,7 +1590,7 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseFixed(instr->value(), eax);
- return MarkAsCall(new LThrow(context, value), instr);
+ return MarkAsCall(new(zone()) LThrow(context, value), instr);
}
@@ -1619,7 +1613,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new LNumberUntagD(value);
+ LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
@@ -1631,10 +1625,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
(truncating && CpuFeatures::IsSupported(SSE3))
? NULL
: FixedTemp(xmm1);
- LTaggedToI* res = new LTaggedToI(value, xmm_temp);
+ LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
} else {
- return DefineSameAsFirst(new LSmiUntag(value, needs_check));
+ return DefineSameAsFirst(new(zone()) LSmiUntag(value, needs_check));
}
}
} else if (from.IsDouble()) {
@@ -1644,7 +1638,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
// Make sure that temp and result_temp are different registers.
LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new LNumberTagD(value, temp);
+ LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
} else {
ASSERT(to.IsInteger32());
@@ -1653,21 +1647,23 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
}
} else if (from.IsInteger32()) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new LSmiTag(value));
+ return DefineSameAsFirst(new(zone()) LSmiTag(value));
} else {
- LNumberTagI* result = new LNumberTagI(value);
+ LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
} else {
ASSERT(to.IsDouble());
- return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
+ return DefineAsRegister(
+ new(zone()) LInteger32ToDouble(Use(instr->value())));
}
}
UNREACHABLE();
@@ -1677,28 +1673,28 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new LCheckNonSmi(value));
+ return AssignEnvironment(new(zone()) LCheckNonSmi(value));
}
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
- LCheckInstanceType* result = new LCheckInstanceType(value, temp);
+ LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LOperand* temp = TempRegister();
- LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
+ LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value));
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
}
@@ -1710,13 +1706,13 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target())
? UseRegisterAtStart(instr->value())
: UseAtStart(instr->value());
- return AssignEnvironment(new LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckFunction(value));
}
LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- LCheckMap* result = new LCheckMap(value);
+ LCheckMap* result = new(zone()) LCheckMap(value);
return AssignEnvironment(result);
}
@@ -1726,17 +1722,17 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
Representation input_rep = value->representation();
if (input_rep.IsDouble()) {
LOperand* reg = UseRegister(value);
- return DefineAsRegister(new LClampDToUint8(reg));
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg));
} else if (input_rep.IsInteger32()) {
LOperand* reg = UseFixed(value, eax);
- return DefineFixed(new LClampIToUint8(reg), eax);
+ return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
} else {
ASSERT(input_rep.IsTagged());
LOperand* reg = UseFixed(value, eax);
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve xmm1 explicitly.
LOperand* temp = FixedTemp(xmm1);
- LClampTToUint8* result = new LClampTToUint8(reg, temp);
+ LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
return AssignEnvironment(DefineFixed(result, eax));
}
}
@@ -1751,7 +1747,7 @@ LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
LOperand* reg = UseRegister(value);
LOperand* temp_reg =
CpuFeatures::IsSupported(SSE3) ? NULL : TempRegister();
- result = DefineAsRegister(new LDoubleToI(reg, temp_reg));
+ result = DefineAsRegister(new(zone()) LDoubleToI(reg, temp_reg));
} else if (input_rep.IsInteger32()) {
// Canonicalization should already have removed the hydrogen instruction in
// this case, since it is a noop.
@@ -1764,29 +1760,29 @@ LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
// temps. Reserve xmm1 explicitly.
LOperand* xmm_temp =
CpuFeatures::IsSupported(SSE3) ? NULL : FixedTemp(xmm1);
- result = DefineSameAsFirst(new LTaggedToI(reg, xmm_temp));
+ result = DefineSameAsFirst(new(zone()) LTaggedToI(reg, xmm_temp));
}
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new LReturn(UseFixed(instr->value(), eax));
+ return new(zone()) LReturn(UseFixed(instr->value(), eax));
}
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
if (r.IsInteger32()) {
- return DefineAsRegister(new LConstantI);
+ return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
double value = instr->DoubleValue();
LOperand* temp = (BitCast<uint64_t, double>(value) != 0)
? TempRegister()
: NULL;
- return DefineAsRegister(new LConstantD(temp));
+ return DefineAsRegister(new(zone()) LConstantD(temp));
} else if (r.IsTagged()) {
- return DefineAsRegister(new LConstantT);
+ return DefineAsRegister(new(zone()) LConstantT);
} else {
UNREACHABLE();
return NULL;
@@ -1795,7 +1791,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new LLoadGlobalCell;
+ LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
@@ -1805,14 +1801,15 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* global_object = UseFixed(instr->global_object(), eax);
- LLoadGlobalGeneric* result = new LLoadGlobalGeneric(context, global_object);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LStoreGlobalCell* result =
- new LStoreGlobalCell(UseTempRegister(instr->value()),
+ new(zone()) LStoreGlobalCell(UseTempRegister(instr->value()),
TempRegister(),
TempRegister());
return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
@@ -1824,14 +1821,14 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
LOperand* global_object = UseFixed(instr->global_object(), edx);
LOperand* value = UseFixed(instr->value(), eax);
LStoreGlobalGeneric* result =
- new LStoreGlobalGeneric(context, global_object, value);
+ new(zone()) LStoreGlobalGeneric(context, global_object, value);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadContextSlot(context));
+ return DefineAsRegister(new(zone()) LLoadContextSlot(context));
}
@@ -1846,14 +1843,14 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
value = UseRegister(instr->value());
temp = NULL;
}
- return new LStoreContextSlot(context, value, temp);
+ return new(zone()) LStoreContextSlot(context, value, temp);
}
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
ASSERT(instr->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new LLoadNamedField(obj));
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
@@ -1864,12 +1861,12 @@ LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
if (instr->need_generic()) {
LOperand* obj = UseFixed(instr->object(), eax);
LLoadNamedFieldPolymorphic* result =
- new LLoadNamedFieldPolymorphic(context, obj);
+ new(zone()) LLoadNamedFieldPolymorphic(context, obj);
return MarkAsCall(DefineFixed(result, eax), instr);
} else {
LOperand* obj = UseRegisterAtStart(instr->object());
LLoadNamedFieldPolymorphic* result =
- new LLoadNamedFieldPolymorphic(context, obj);
+ new(zone()) LLoadNamedFieldPolymorphic(context, obj);
return AssignEnvironment(DefineAsRegister(result));
}
}
@@ -1878,7 +1875,7 @@ LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), eax);
- LLoadNamedGeneric* result = new LLoadNamedGeneric(context, object);
+ LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1886,21 +1883,21 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
return AssignEnvironment(DefineAsRegister(
- new LLoadFunctionPrototype(UseRegister(instr->function()),
- TempRegister())));
+ new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
+ TempRegister())));
}
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadElements(input));
+ return DefineAsRegister(new(zone()) LLoadElements(input));
}
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadExternalArrayPointer(input));
+ return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
}
@@ -1910,7 +1907,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
ASSERT(instr->key()->representation().IsInteger32());
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+ LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
return AssignEnvironment(DefineAsRegister(result));
}
@@ -1922,7 +1919,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
- new LLoadKeyedFastDoubleElement(elements, key);
+ new(zone()) LLoadKeyedFastDoubleElement(elements, key);
return AssignEnvironment(DefineAsRegister(result));
}
@@ -1942,7 +1939,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
- new LLoadKeyedSpecializedArrayElement(external_pointer,
+ new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer,
key);
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt; make sure it
@@ -1958,7 +1955,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), edx);
LOperand* key = UseFixed(instr->key(), eax);
- LLoadKeyedGeneric* result = new LLoadKeyedGeneric(context, object, key);
+ LLoadKeyedGeneric* result =
+ new(zone()) LLoadKeyedGeneric(context, object, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1978,7 +1976,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+ return AssignEnvironment(new(zone()) LStoreKeyedFastElement(obj, key, val));
}
@@ -1992,7 +1990,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
LOperand* val = UseTempRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- return new LStoreKeyedFastDoubleElement(elements, key, val);
+ return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
}
@@ -2022,9 +2020,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
val = UseRegister(instr->value());
}
- return new LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+ key,
+ val);
}
@@ -2039,7 +2037,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
ASSERT(instr->value()->representation().IsTagged());
LStoreKeyedGeneric* result =
- new LStoreKeyedGeneric(context, object, key, value);
+ new(zone()) LStoreKeyedGeneric(context, object, key, value);
return MarkAsCall(result, instr);
}
@@ -2052,14 +2050,16 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
LTransitionElementsKind* result =
- new LTransitionElementsKind(object, new_map_reg, temp_reg);
+ new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
return DefineSameAsFirst(result);
} else {
LOperand* object = UseFixed(instr->object(), eax);
LOperand* fixed_object_reg = FixedTemp(edx);
LOperand* new_map_reg = FixedTemp(ebx);
LTransitionElementsKind* result =
- new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
+ new(zone()) LTransitionElementsKind(object,
+ new_map_reg,
+ fixed_object_reg);
return MarkAsCall(DefineFixed(result, eax), instr);
}
}
@@ -2087,7 +2087,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
? TempRegister()
: NULL;
- return new LStoreNamedField(obj, val, temp);
+ return new(zone()) LStoreNamedField(obj, val, temp);
}
@@ -2096,7 +2096,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), edx);
LOperand* value = UseFixed(instr->value(), eax);
- LStoreNamedGeneric* result = new LStoreNamedGeneric(context, object, value);
+ LStoreNamedGeneric* result =
+ new(zone()) LStoreNamedGeneric(context, object, value);
return MarkAsCall(result, instr);
}
@@ -2105,7 +2106,7 @@ LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- LStringAdd* string_add = new LStringAdd(context, left, right);
+ LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
return MarkAsCall(DefineFixed(string_add, eax), instr);
}
@@ -2114,7 +2115,8 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
LOperand* context = UseAny(instr->context());
- LStringCharCodeAt* result = new LStringCharCodeAt(context, string, index);
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
@@ -2122,38 +2124,43 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
LOperand* context = UseAny(instr->context());
- LStringCharFromCode* result = new LStringCharFromCode(context, char_code);
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LStringLength(string));
+ return DefineAsRegister(new(zone()) LStringLength(string));
}
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new LArrayLiteral(context), eax), instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LArrayLiteral(context), eax), instr);
}
LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new LObjectLiteral(context), eax), instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LObjectLiteral(context), eax), instr);
}
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new LRegExpLiteral(context), eax), instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new LFunctionLiteral(context), eax), instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), eax), instr);
}
@@ -2161,7 +2168,7 @@ LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseAtStart(instr->object());
LOperand* key = UseOrConstantAtStart(instr->key());
- LDeleteProperty* result = new LDeleteProperty(context, object, key);
+ LDeleteProperty* result = new(zone()) LDeleteProperty(context, object, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2169,13 +2176,13 @@ LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
allocator_->MarkAsOsrEntry();
current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new LOsrEntry);
+ return AssignEnvironment(new(zone()) LOsrEntry);
}
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new LParameter, spill_index);
+ return DefineAsSpilled(new(zone()) LParameter, spill_index);
}
@@ -2185,14 +2192,14 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
- return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+ return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
- LCallStub* result = new LCallStub(context);
+ LCallStub* result = new(zone()) LCallStub(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2210,14 +2217,15 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
LOperand* arguments = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = Use(instr->index());
- LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+ LAccessArgumentsAt* result =
+ new(zone()) LAccessArgumentsAt(arguments, length, index);
return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LOperand* object = UseFixed(instr->value(), eax);
- LToFastProperties* result = new LToFastProperties(object);
+ LToFastProperties* result = new(zone()) LToFastProperties(object);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2225,19 +2233,19 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseAtStart(instr->value());
- LTypeof* result = new LTypeof(context, value);
+ LTypeof* result = new(zone()) LTypeof(context, value);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
+ return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
HIsConstructCallAndBranch* instr) {
- return new LIsConstructCallAndBranch(TempRegister());
+ return new(zone()) LIsConstructCallAndBranch(TempRegister());
}
@@ -2261,7 +2269,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
// lazy bailout instruction to capture the environment.
if (pending_deoptimization_ast_id_ != AstNode::kNoNumber) {
ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
- LLazyBailout* lazy_bailout = new LLazyBailout;
+ LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
LInstruction* result = AssignEnvironment(lazy_bailout);
instruction_pending_deoptimization_environment_->
set_deoptimization_environment(result->environment());
@@ -2276,11 +2284,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
if (instr->is_function_entry()) {
LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(new LStackCheck(context), instr);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
} else {
ASSERT(instr->is_backwards_branch());
LOperand* context = UseAny(instr->context());
- return AssignEnvironment(AssignPointerMap(new LStackCheck(context)));
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
}
}
@@ -2309,7 +2318,7 @@ LInstruction* LChunkBuilder::DoIn(HIn* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* key = UseOrConstantAtStart(instr->key());
LOperand* object = UseOrConstantAtStart(instr->object());
- LIn* result = new LIn(context, key, object);
+ LIn* result = new(zone()) LIn(context, key, object);
return MarkAsCall(DefineFixed(result, eax), instr);
}
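Every hunk in this file follows one pattern: plain new LFoo(...) becomes new(zone()) LFoo(...), so each lithium instruction is placement-allocated in the compilation zone instead of on the C++ heap. Below is a minimal, self-contained sketch of that overload; Zone and ZoneObject here are simplified stand-ins for V8's types (assumed semantics: bump-pointer allocation, bulk reclamation, no per-object delete).

    // Simplified stand-ins for V8's Zone/ZoneObject (assumption: the real
    // zone grows in segments and is discarded wholesale after compilation).
    #include <cstddef>

    class Zone {
     public:
      void* New(size_t size) {
        size = (size + 7) & ~static_cast<size_t>(7);  // keep 8-byte alignment
        void* result = buffer_ + used_;               // bump-pointer allocation
        used_ += size;                                // no per-object free
        return result;
      }
     private:
      alignas(8) unsigned char buffer_[1 << 16];      // sketch: fixed capacity
      size_t used_ = 0;
    };

    class ZoneObject {
     public:
      // new(zone) T(...) selects this overload, so the object lives in the
      // zone and is reclaimed in bulk rather than via delete.
      void* operator new(size_t size, Zone* zone) { return zone->New(size); }
      void operator delete(void*) {}                  // zone memory is bulk-freed
    };

    class LConstantI : public ZoneObject {};

    int main() {
      Zone zone;
      LConstantI* instr = new (&zone) LConstantI();   // what new(zone()) expands to
      (void)instr;  // never deleted individually; the whole zone goes away at once
    }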
diff --git a/src/3rdparty/v8/src/ia32/lithium-ia32.h b/src/3rdparty/v8/src/ia32/lithium-ia32.h
index 22541c8..98487b4 100644
--- a/src/3rdparty/v8/src/ia32/lithium-ia32.h
+++ b/src/3rdparty/v8/src/ia32/lithium-ia32.h
@@ -192,8 +192,8 @@ class LInstruction: public ZoneObject {
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream) = 0;
- virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
enum Opcode {
// Declare a unique enum value for each instruction.
@@ -289,9 +289,6 @@ class LTemplateInstruction: public LInstruction {
int TempCount() { return T; }
LOperand* TempAt(int i) { return temps_[i]; }
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
protected:
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
@@ -799,18 +796,15 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
- LBitI(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
+ LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- Token::Value op() const { return op_; }
+ Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
- Token::Value op_;
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
};
@@ -1338,17 +1332,13 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LGlobalObject(LOperand* context, bool qml_global) {
+ explicit LGlobalObject(LOperand* context) {
inputs_[0] = context;
- qml_global_ = qml_global;
}
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
LOperand* context() { return InputAt(0); }
- bool qml_global() { return qml_global_; }
- private:
- bool qml_global_;
};
@@ -1447,7 +1437,7 @@ class LCallFunction: public LTemplateInstruction<1, 1, 0> {
class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LCallGlobal(LOperand* context, bool qml_global) : qml_global_(qml_global) {
+ explicit LCallGlobal(LOperand* context) {
inputs_[0] = context;
}
@@ -1459,10 +1449,6 @@ class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
LOperand* context() { return inputs_[0]; }
Handle<String> name() const {return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
-
- bool qml_global() { return qml_global_; }
- private:
- bool qml_global_;
};
@@ -2177,6 +2163,7 @@ class LChunkBuilder BASE_EMBEDDED {
: chunk_(NULL),
info_(info),
graph_(graph),
+ isolate_(graph->isolate()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
@@ -2206,6 +2193,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
+ Zone* zone() { return isolate_->zone(); }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2308,7 +2296,6 @@ class LChunkBuilder BASE_EMBEDDED {
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
@@ -2318,6 +2305,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
+ Isolate* isolate_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
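Besides threading a Zone through the builder, the header hunks above strip duplicated state out of the lithium nodes: LBitI drops its op_ field and reads the operator from the paired hydrogen instruction, and the qml_global_ flags disappear from LGlobalObject and LCallGlobal. Here is a sketch of that single-source-of-truth pattern, with illustrative names in place of the real DECLARE_HYDROGEN_ACCESSOR macro:

    // Illustrative stand-ins, not V8's macros: the lithium node derives
    // op() from its hydrogen counterpart instead of caching a copy that
    // could fall out of sync.
    enum class Token { BIT_AND, BIT_OR, BIT_XOR };

    class HBitwise {                    // the hydrogen node owns the operator
     public:
      explicit HBitwise(Token op) : op_(op) {}
      Token op() const { return op_; }
     private:
      Token op_;
    };

    class LBitI {
     public:
      explicit LBitI(HBitwise* hydrogen) : hydrogen_(hydrogen) {}
      Token op() const { return hydrogen_->op(); }  // no duplicate field
     private:
      HBitwise* hydrogen_;              // wired up by the chunk builder in V8
    };

    int main() {
      HBitwise h(Token::BIT_AND);
      LBitI l(&h);
      return l.op() == Token::BIT_AND ? 0 : 1;
    }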
diff --git a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
index dd1ace9..1676a70 100644
--- a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
@@ -1594,33 +1594,12 @@ void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
}
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
- ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
- return result;
-}
-
-
void MacroAssembler::TailCallStub(CodeStub* stub) {
ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
}
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- jmp(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
- return result;
-}
-
-
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
@@ -1674,12 +1653,6 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
}
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
- int num_arguments) {
- return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments) {
// If the expected number of arguments of the runtime function is
@@ -1701,26 +1674,6 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
}
-MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
- int num_arguments) {
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- // Since we did not call the stub, there was no allocation failure.
- // Return some non-failure object.
- return isolate()->heap()->undefined_value();
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(eax, Immediate(num_arguments));
- mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(1);
- return TryCallStub(&ces);
-}
-
-
void MacroAssembler::CallExternalReference(ExternalReference ref,
int num_arguments) {
mov(eax, Immediate(num_arguments));
@@ -1743,17 +1696,6 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
}
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(eax, Immediate(num_arguments));
- return TryJumpToExternalReference(ext);
-}
-
-
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
@@ -1763,14 +1705,6 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
}
-MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- return TryTailCallExternalReference(
- ExternalReference(fid, isolate()), num_arguments, result_size);
-}
-
-
// If true, a Handle<T> returned by value from a function with cdecl calling
// convention will be returned directly as the value of its location_ field,
// in register eax.
@@ -1819,8 +1753,8 @@ void MacroAssembler::PrepareCallApiFunction(int argc) {
}
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
- int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
+ int stack_space) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
ExternalReference limit_address =
@@ -1833,8 +1767,8 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
mov(edi, Operand::StaticVariable(limit_address));
add(Operand::StaticVariable(level_address), Immediate(1));
- // Call the api function!
- call(function->address(), RelocInfo::RUNTIME_ENTRY);
+ // Call the api function.
+ call(function_address, RelocInfo::RUNTIME_ENTRY);
if (!kReturnHandlesDirectly) {
// PrepareCallApiFunction saved a pointer to the output slot into
@@ -1872,11 +1806,8 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
LeaveApiExitFrame();
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- MaybeObject* result =
- TryTailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
- if (result->IsFailure()) {
- return result;
- }
+ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+
bind(&empty_handle);
// It was zero; the result is undefined.
mov(eax, isolate()->factory()->undefined_value());
@@ -1893,8 +1824,6 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
call(eax);
mov(eax, edi);
jmp(&leave_exit_frame);
-
- return result;
}
@@ -1906,15 +1835,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
}
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
- const ExternalReference& ext) {
- // Set the entry point and jump to the C entry runtime stub.
- mov(ebx, Immediate(ext));
- CEntryStub ces(1);
- return TryTailCallStub(&ces);
-}
-
-
void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
// This macro takes the dst register to make the code more readable
// at the call sites. However, the dst register has to be ecx to
@@ -2079,7 +1999,7 @@ void MacroAssembler::InvokeFunction(Register fun,
}
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -2088,7 +2008,7 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- mov(edi, Immediate(Handle<JSFunction>(function)));
+ mov(edi, Immediate(function));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
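The macro-assembler hunks above delete the whole family of Try* helpers (TryCallStub, TryCallRuntime, TryTailCallRuntime, and friends), whose MaybeObject* return values forced every caller to check for a retry-after-GC failure. A sketch of that control-flow change, with std::optional standing in for MaybeObject* (an assumption for illustration only):

    #include <optional>

    struct Code {};

    // Old style: allocation failure is a value that bubbles up through
    // every call site.
    std::optional<Code> TryGetCode() { return Code{}; }
    std::optional<Code> TryCallStub() {
      std::optional<Code> code = TryGetCode();
      if (!code) return std::nullopt;   // caller must propagate the failure
      return code;
    }

    // New (handlified) style: GetCode() may allocate, and GC, internally;
    // there is no failure path left for callers to plumb through.
    Code GetCode() { return Code{}; }
    void CallStub() { (void)GetCode(); }

    int main() {
      (void)TryCallStub();
      CallStub();
    }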
diff --git a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
index de3c3a0..68a14bb 100644
--- a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
+++ b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
@@ -278,7 +278,7 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
- void InvokeFunction(JSFunction* function,
+ void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -632,19 +632,9 @@ class MacroAssembler: public Assembler {
// Call a code stub. Generate the code if necessary.
void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
- // Call a code stub and return the code object called. Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry
- // after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
-
// Tail call a code stub (jump). Generate the code if necessary.
void TailCallStub(CodeStub* stub);
- // Tail call a code stub (jump) and return the code object called. Try to
- // generate the code if necessary. Do not perform a GC but instead return
- // a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
-
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
@@ -652,19 +642,9 @@ class MacroAssembler: public Assembler {
void CallRuntime(const Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
- // Call a runtime function, returning the CodeStub object called.
- // Try to generate the stub code if necessary. Do not perform a GC
- // but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
- int num_arguments);
-
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments);
- // Convenience function: Same as above, but takes the fid instead.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
- int num_arguments);
-
// Convenience function: call an external reference.
void CallExternalReference(ExternalReference ref, int num_arguments);
@@ -675,23 +655,11 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
- // Tail call of a runtime routine (jump). Try to generate the code if
- // necessary. Do not perform a GC but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size);
-
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
- // Convenience function: tail call a runtime routine (jump). Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry after
- // GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
// etc., not pushed. The argument count assumes all arguments are word sized.
@@ -716,19 +684,15 @@ class MacroAssembler: public Assembler {
// stores the pointer to the reserved slot into esi.
void PrepareCallApiFunction(int argc);
- // Calls an API function. Allocates HandleScope, extracts
- // returned value from handle and propagates exceptions.
- // Clobbers ebx, edi and caller-save registers. Restores context.
- // On return removes stack_space * kPointerSize (GCed).
- MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
- int stack_space);
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions. Clobbers ebx, edi and
+ // caller-save registers. Restores context. On return removes
+ // stack_space * kPointerSize (GCed).
+ void CallApiFunctionAndReturn(Address function_address, int stack_space);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext);
- MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
-
// ---------------------------------------------------------------------------
// Utilities
@@ -935,9 +899,6 @@ static inline Operand GlobalObjectOperand() {
return ContextOperand(esi, Context::GLOBAL_INDEX);
}
-static inline Operand QmlGlobalObjectOperand() {
- return ContextOperand(esi, Context::QML_GLOBAL_INDEX);
-}
// Generates an Operand for saving parameters after PrepareCallApiFunction.
Operand ApiParameterOperand(int index);
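CallApiFunctionAndReturn now takes a raw Address rather than an ApiFunction*: as the implementation above notes, the callback is a foreign pointer outside V8's heap, so the GC never moves it and no handle or wrapper is needed. A small sketch with invented names; only the raw-address convention mirrors the diff:

    #include <cstdint>

    using Address = uintptr_t;  // assumption: V8-style byte-address alias

    int MyApiCallback() { return 42; }  // hypothetical embedder callback

    int CallApiFunctionAndReturn(Address function_address) {
      // Generated code would emit 'call function_address'; here we go
      // through a function pointer. The integer round-trip is
      // implementation-defined in ISO C++ but matches common ABIs.
      auto fn = reinterpret_cast<int (*)()>(function_address);
      return fn();
    }

    int main() {
      Address addr = reinterpret_cast<Address>(&MyApiCallback);
      return CallApiFunctionAndReturn(addr) == 42 ? 0 : 1;
    }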
diff --git a/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc b/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
index af53acd..3089a69 100644
--- a/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
@@ -153,59 +153,6 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-static MaybeObject* TryGenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register r0,
- Register r1) {
- ASSERT(name->IsSymbol());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
- __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- __ test_b(FieldOperand(r0, Map::kBitFieldOffset),
- kInterceptorOrAccessCheckNeededMask);
- __ j(not_zero, miss_label);
-
- // Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, miss_label);
-
- // Load properties array.
- Register properties = r0;
- __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Check that the properties array is a dictionary.
- __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->hash_table_map()));
- __ j(not_equal, miss_label);
-
- Label done;
- MaybeObject* result =
- StringDictionaryLookupStub::TryGenerateNegativeLookup(masm,
- miss_label,
- &done,
- properties,
- name,
- r1);
- if (result->IsFailure()) return result;
-
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1);
-
- return result;
-}
-
-
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
@@ -274,14 +221,17 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
// Check we're still in the same context.
__ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
masm->isolate()->global());
__ j(not_equal, miss);
// Get the global function with the given index.
- JSFunction* function =
- JSFunction::cast(masm->isolate()->global_context()->get(index));
+ Handle<JSFunction> function(
+ JSFunction::cast(masm->isolate()->global_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
// Load the prototype from the initial map.
@@ -396,12 +346,12 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
- JSObject* holder_obj) {
+ Handle<JSObject> holder_obj) {
__ push(name);
- InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
Register scratch = name;
- __ mov(scratch, Immediate(Handle<Object>(interceptor)));
+ __ mov(scratch, Immediate(interceptor));
__ push(scratch);
__ push(receiver);
__ push(holder);
@@ -409,11 +359,12 @@ static void PushInterceptorArguments(MacroAssembler* masm,
}
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
@@ -460,9 +411,9 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// Generates call to API function.
-static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
// ----------- S t a t e -------------
// -- esp[0] : return address
// -- esp[4] : object passing the type check
@@ -477,30 +428,25 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
// -- esp[(argc + 4) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
- JSFunction* function = optimization.constant_function();
- __ mov(edi, Immediate(Handle<JSFunction>(function)));
+ Handle<JSFunction> function = optimization.constant_function();
+ __ mov(edi, Immediate(function));
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Pass the additional arguments.
__ mov(Operand(esp, 2 * kPointerSize), edi);
- Object* call_data = optimization.api_call_info()->data();
- Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (masm->isolate()->heap()->InNewSpace(call_data)) {
- __ mov(ecx, api_call_info_handle);
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data());
+ if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+ __ mov(ecx, api_call_info);
__ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
__ mov(Operand(esp, 3 * kPointerSize), ebx);
} else {
- __ mov(Operand(esp, 3 * kPointerSize),
- Immediate(Handle<Object>(call_data)));
+ __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
}
// Prepare arguments.
__ lea(eax, Operand(esp, 3 * kPointerSize));
- Object* callback = optimization.api_call_info()->callback();
- Address api_function_address = v8::ToCData<Address>(callback);
- ApiFunction fun(api_function_address);
-
const int kApiArgc = 1; // API function gets reference to the v8::Arguments.
// Allocate the v8::Arguments structure in the arguments' space since
@@ -520,12 +466,10 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
__ lea(eax, ApiParameterOperand(1));
__ mov(ApiParameterOperand(0), eax);
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- return masm->TryCallApiFunctionAndReturn(&fun,
- argc + kFastApiCallArguments + 1);
+ // Function address is a foreign pointer outside V8's heap.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ __ CallApiFunctionAndReturn(function_address,
+ argc + kFastApiCallArguments + 1);
}
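Note the InNewSpace(*call_data) branch above (the same test reappears in GenerateLoadFunctionFromCell further down): new-space objects move on every scavenge, so their addresses cannot be baked into generated code as immediates; the stub instead embeds the stable wrapper object and loads the movable field at run time. A sketch of that decision with invented helpers; only the branch structure mirrors the diff:

    #include <cstdio>

    struct Object { bool in_new_space; };

    bool InNewSpace(const Object* o) { return o->in_new_space; }

    void EmitLoadCallData(const Object* call_data) {
      if (InNewSpace(call_data)) {
        // Embed the stable holder and load the movable field at run time,
        // e.g. mov ecx, <holder>; mov ebx, [ecx + kDataOffset].
        std::puts("load field through stable wrapper");
      } else {
        // Old-space pointer: may be embedded as an immediate; the GC can
        // still update it through the code object's relocation info.
        std::puts("embed pointer as immediate");
      }
    }

    int main() {
      Object young{true}, old_obj{false};
      EmitLoadCallData(&young);
      EmitLoadCallData(&old_obj);
    }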
@@ -540,16 +484,16 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name_(name),
extra_state_(extra_state) {}
- MaybeObject* Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
+ void Compile(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -557,45 +501,27 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ JumpIfSmi(receiver, miss);
CallOptimization optimization(lookup);
-
if (optimization.is_constant_call()) {
- return CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
+ CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+ holder, lookup, name, optimization, miss);
} else {
- CompileRegular(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- name,
- holder,
- miss);
- return masm->isolate()->heap()->undefined_value(); // Success.
+ CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+ name, holder, miss);
}
}
private:
- MaybeObject* CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
+ void CompileCacheable(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<String> name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
@@ -604,16 +530,14 @@ class CallInterceptorCompiler BASE_EMBEDDED {
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
!lookup->holder()->IsGlobalObject()) {
- depth1 =
- optimization.GetPrototypeDepthOfExpectedType(object,
- interceptor_holder);
+ depth1 = optimization.GetPrototypeDepthOfExpectedType(
+ object, interceptor_holder);
if (depth1 == kInvalidProtoDepth) {
- depth2 =
- optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
- lookup->holder());
+ depth2 = optimization.GetPrototypeDepthOfExpectedType(
+ interceptor_holder, Handle<JSObject>(lookup->holder()));
}
- can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
- (depth2 != kInvalidProtoDepth);
+ can_do_fast_api_call =
+ depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
}
Counters* counters = masm->isolate()->counters();
@@ -629,9 +553,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver,
- interceptor_holder, scratch1,
- scratch2, scratch3, name, depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -644,10 +568,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, scratch3, name, depth2, miss);
+ Handle<JSObject>(lookup->holder()),
+ scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -658,9 +583,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- MaybeObject* result =
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
- if (result->IsFailure()) return result;
+ GenerateFastApiCall(masm, optimization, arguments_.immediate());
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
@@ -681,33 +604,27 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm, scratch1);
}
-
- return masm->isolate()->heap()->undefined_value(); // Success.
}
void CompileRegular(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
- JSObject* interceptor_holder,
+ Handle<String> name,
+ Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name,
- miss_label);
+ scratch1, scratch2, scratch3,
+ name, miss_label);
FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
- PushInterceptorArguments(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
+ PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
@@ -723,7 +640,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
void LoadWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
- JSObject* holder_obj,
+ Handle<JSObject> holder_obj,
Label* interceptor_succeeded) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -762,10 +679,9 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
- Code* code = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- Handle<Code> ic(code);
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> code =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ jmp(code, RelocInfo::CODE_TARGET);
}
@@ -883,33 +799,6 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCell(
- MacroAssembler* masm,
- GlobalObject* global,
- String* name,
- Register scratch,
- Label* miss) {
- Object* probe;
- { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
- ASSERT(cell->value()->IsTheHole());
- if (Serializer::enabled()) {
- __ mov(scratch, Immediate(Handle<Object>(cell)));
- __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- Immediate(masm->isolate()->factory()->the_hole_value()));
- } else {
- __ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
- Immediate(masm->isolate()->factory()->the_hole_value()));
- }
- __ j(not_equal, miss);
- return cell;
-}
-
-
// Calls GenerateCheckPropertyCell for each global object in the prototype chain
// from object to (but not including) holder.
static void GenerateCheckPropertyCells(MacroAssembler* masm,
@@ -931,35 +820,6 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
}
}
-
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCells(
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- Register scratch,
- Label* miss) {
- JSObject* current = object;
- while (current != holder) {
- if (current->IsGlobalObject()) {
- // Returns a cell or a failure.
- MaybeObject* result = TryGenerateCheckPropertyCell(
- masm,
- GlobalObject::cast(current),
- name,
- scratch,
- miss);
- if (result->IsFailure()) return result;
- }
- ASSERT(current->IsJSObject());
- current = JSObject::cast(current->GetPrototype());
- }
- return NULL;
-}
-
-
#undef __
#define __ ACCESS_MASM(masm())
@@ -1076,144 +936,6 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-Register StubCompiler::CheckPrototypes(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- String* name,
- int save_at_depth,
- Label* miss) {
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- JSObject* current = object;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
- }
-
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (current != holder) {
- depth++;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- ASSERT(current->GetPrototype()->IsJSObject());
- JSObject* prototype = JSObject::cast(current->GetPrototype());
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
- Object* lookup_result = NULL; // Initialization to please compiler.
- if (!maybe_lookup_result->ToObject(&lookup_result)) {
- set_failure(Failure::cast(maybe_lookup_result));
- return reg;
- }
- name = String::cast(lookup_result);
- }
- ASSERT(current->property_dictionary()->FindEntry(name) ==
- StringDictionary::kNotFound);
-
- MaybeObject* negative_lookup =
- TryGenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
- if (negative_lookup->IsFailure()) {
- set_failure(Failure::cast(negative_lookup));
- return reg;
- }
-
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // from now the object is in holder_reg
- __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else if (heap()->InNewSpace(prototype)) {
- // Get the map of the current object.
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Immediate(Handle<Map>(current->map())));
- // Branch on the result of the map check.
- __ j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
-
- // Restore scratch register to be the map of the object.
- // We load the prototype from the map in the scratch register.
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- }
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- reg = holder_reg; // from now the object is in holder_reg
- __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // Check the map of the current object.
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(current->map())));
- // Branch on the result of the map check.
- __ j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- __ mov(reg, Handle<JSObject>(prototype));
- }
-
- if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
- ASSERT(current == holder);
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- // Check the holder map.
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(holder->map())));
- __ j(not_equal, miss);
-
- // Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // If we've skipped any global objects, it's not enough to verify
- // that their maps haven't changed. We also need to check that the
- // property cell for the property is still empty.
- MaybeObject* result = TryGenerateCheckPropertyCells(masm(),
- object,
- holder,
- name,
- scratch1,
- miss);
- if (result->IsFailure()) set_failure(Failure::cast(result));
-
- // Return the register containing the holder.
- return reg;
-}
-
-
void StubCompiler::GenerateLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -1236,25 +958,22 @@ void StubCompiler::GenerateLoadField(Handle<JSObject> object,
}
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
-
- Handle<AccessorInfo> callback_handle(callback);
+ Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+ scratch2, scratch3, name, miss);
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch3.is(reg));
@@ -1265,11 +984,11 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
ASSERT(!scratch2.is(reg));
__ push(reg); // holder
// Push data from AccessorInfo.
- if (isolate()->heap()->InNewSpace(callback_handle->data())) {
- __ mov(scratch1, Immediate(callback_handle));
+ if (isolate()->heap()->InNewSpace(callback->data())) {
+ __ mov(scratch1, Immediate(callback));
__ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));
} else {
- __ push(Immediate(Handle<Object>(callback_handle->data())));
+ __ push(Immediate(Handle<Object>(callback->data())));
}
// Save a pointer to where we pushed the arguments pointer.
@@ -1281,10 +1000,6 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
__ push(scratch3); // Restore return address.
- // Do call through the api.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
-
// 3-element array for v8::Arguments::values_, a handler for the name, and a
// pointer to the values (treated as a smi by the GC).
const int kStackSpace = 5;
@@ -1299,7 +1014,8 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
- return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ __ CallApiFunctionAndReturn(getter_address, kStackSpace);
}
@@ -1325,15 +1041,15 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
}
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
+ Handle<String> name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1349,9 +1065,9 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo() &&
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
- compile_followup_inline = true;
+ lookup->GetCallbackObject()->IsAccessorInfo()) {
+ compile_followup_inline =
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
}
}
@@ -1405,10 +1121,10 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into holder_reg.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
- lookup->holder(),
+ Handle<JSObject>(lookup->holder()),
scratch1,
scratch2,
scratch3,
@@ -1427,9 +1143,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
- ASSERT(callback != NULL);
+ Handle<AccessorInfo> callback(
+ AccessorInfo::cast(lookup->GetCallbackObject()));
ASSERT(callback->getter() != NULL);
// Tail call to runtime.
@@ -1438,7 +1153,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
__ pop(scratch2); // return address
__ push(receiver);
__ push(holder_reg);
- __ mov(holder_reg, Immediate(Handle<AccessorInfo>(callback)));
+ __ mov(holder_reg, Immediate(callback));
__ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
__ push(holder_reg);
__ push(name_reg);
@@ -1476,9 +1191,9 @@ void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
@@ -1491,7 +1206,7 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
- if (object != holder) {
+ if (!object.is_identical_to(holder)) {
__ JumpIfSmi(edx, miss);
}
@@ -1500,19 +1215,20 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
}
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Label* miss) {
// Get the value from the cell.
if (Serializer::enabled()) {
- __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(edi, Immediate(cell));
__ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
} else {
- __ mov(edi, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(edi, Operand::Cell(cell));
}
// Check that the cell contains the same function.
- if (isolate()->heap()->InNewSpace(function)) {
+ if (isolate()->heap()->InNewSpace(*function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
@@ -1525,11 +1241,10 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
// Check the shared function info. Make sure it hasn't changed.
__ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
Immediate(Handle<SharedFunctionInfo>(function->shared())));
- __ j(not_equal, miss);
} else {
- __ cmp(edi, Immediate(Handle<JSFunction>(function)));
- __ j(not_equal, miss);
+ __ cmp(edi, Immediate(function));
}
+ __ j(not_equal, miss);
}
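The stub-cache hunks replace raw JSObject*, String*, and JSFunction* parameters with Handle<T> throughout, which changes the comparison idioms visible above: pointer equality becomes handle.is_identical_to(...), raw dereference becomes *handle, and the "no custom stub" sentinel becomes Handle<Code>::null(). A minimal sketch of the handle idea with assumed, simplified semantics:

    // Assumed, simplified semantics: a Handle<T> is one extra indirection
    // through a slot the GC may rewrite, so client code can hold handles
    // while a moving collector relocates the objects underneath.
    template <typename T>
    class Handle {
     public:
      Handle() : location_(nullptr) {}                 // Handle<T>::null()
      explicit Handle(T** location) : location_(location) {}
      T* operator*() const { return *location_; }      // '*handle' in the diff
      T* operator->() const { return *location_; }
      bool is_null() const { return location_ == nullptr; }
      bool is_identical_to(const Handle<T>& other) const {
        return *location_ == *other.location_;  // same underlying object
      }                                         // (null handles omitted here)
     private:
      T** location_;  // owned by a HandleScope in real V8
    };

    struct JSObject { int field = 0; };

    int main() {
      JSObject obj;
      JSObject* slot = &obj;          // stand-in for a HandleScope slot
      Handle<JSObject> handle(&slot);
      Handle<JSObject> none;          // what Handle<Code>::null() returns
      return (!handle.is_null() && none.is_null() &&
              handle.is_identical_to(handle) && handle->field == 0)
                 ? 0 : 1;
    }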
@@ -1542,20 +1257,6 @@ void CallStubCompiler::GenerateMissBranch() {
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* CallStubCompiler::TryGenerateMissBranch() {
- MaybeObject* maybe_obj =
- isolate()->stub_cache()->TryComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- Object* obj;
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- __ jmp(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
- return obj;
-}
-
-
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
int index,
@@ -1612,11 +1313,12 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1626,13 +1328,13 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) {
- return isolate()->heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) {
+ return Handle<Code>::null();
}
Label miss;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -1641,9 +1343,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check that the receiver isn't a smi.
__ JumpIfSmi(edx, &miss);
- CheckPrototypes(JSObject::cast(object), edx,
- holder, ebx,
- eax, edi, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+ name, &miss);
if (argc == 0) {
// Noop, return the length.
@@ -1705,8 +1406,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
FixedArray::kHeaderSize - argc * kPointerSize));
__ mov(Operand(edx, 0), ecx);
- __ RecordWrite(
- ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ RecordWrite(ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
__ ret((argc + 1) * kPointerSize);
@@ -1789,19 +1490,19 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
}
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
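
Most of CompileArrayPushCall's fast path is unchanged and therefore elided from the hunk; the visible store-plus-RecordWrite sequence is the case where a heap object is appended into existing capacity of the elements backing store. A rough conceptual model of that path, with stand-in layouts (this is not V8's object model; the field names here are illustrative):

    // Rough model of the single-element push fast path.
    struct FixedArray { int capacity; void** slots; };
    struct JSArray { int length; FixedArray* elements; };

    static void RecordWrite(void* host, void** slot, void* value) {
      // Stand-in for the incremental-marking / remembered-set barrier
      // the stub calls after storing a heap pointer.
      (void)host; (void)slot; (void)value;
    }

    // Returns false when the stub would fall back to the runtime
    // builtin (no spare capacity, wrong elements kind, and so on).
    bool FastPush(JSArray* a, void* value, bool value_is_smi) {
      if (a->length >= a->elements->capacity) return false;
      void** slot = &a->elements->slots[a->length];
      *slot = value;
      a->length++;
      if (!value_is_smi) {
        // Smis carry no pointer, so only heap-object stores need the
        // barrier; the stub has already filtered out the smi case,
        // which is presumably why it can pass OMIT_SMI_CHECK above.
        RecordWrite(a->elements, slot, value);
      }
      return true;
    }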
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1811,13 +1512,13 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) {
- return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) {
+ return Handle<Code>::null();
}
Label miss, return_undefined, call_builtin;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -1825,9 +1526,8 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// Check that the receiver isn't a smi.
__ JumpIfSmi(edx, &miss);
- CheckPrototypes(JSObject::cast(object), edx,
- holder, ebx,
- eax, edi, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+ name, &miss);
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1872,20 +1572,19 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
1);
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : function name
// -- esp[0] : return address
@@ -1895,8 +1594,8 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) {
- return isolate()->heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) {
+ return Handle<Code>::null();
}
const int argc = arguments().immediate();
@@ -1912,16 +1611,16 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
index_out_of_range_label = &miss;
}
- GenerateNameCheck(Handle<String>(name), &name_miss);
+ GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
eax,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ eax, holder, ebx, edx, edi, name, &miss);
Register receiver = ebx;
Register index = edi;
@@ -1934,19 +1633,19 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ Set(index, Immediate(factory()->undefined_value()));
}
- StringCharCodeAtGenerator char_code_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_code_at_generator.GenerateFast(masm());
+ StringCharCodeAtGenerator generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
StubRuntimeCallHelper call_helper;
- char_code_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -1956,22 +1655,21 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ bind(&miss);
// Restore function name in ecx.
- __ Set(ecx, Immediate(Handle<String>(name)));
+ __ Set(ecx, Immediate(name));
__ bind(&name_miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : function name
// -- esp[0] : return address
@@ -1981,8 +1679,8 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) {
- return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) {
+ return Handle<Code>::null();
}
const int argc = arguments().immediate();
@@ -1998,16 +1696,16 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
index_out_of_range_label = &miss;
}
- GenerateNameCheck(Handle<String>(name), &name_miss);
+ GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
eax,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ eax, holder, ebx, edx, edi, name, &miss);
Register receiver = eax;
Register index = edi;
@@ -2021,20 +1719,20 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ Set(index, Immediate(factory()->undefined_value()));
}
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_at_generator.GenerateFast(masm());
+ StringCharAtGenerator generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -2044,22 +1742,21 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ bind(&miss);
// Restore function name in ecx.
- __ Set(ecx, Immediate(Handle<String>(name)));
+ __ Set(ecx, Immediate(name));
__ bind(&name_miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : function name
// -- esp[0] : return address
@@ -2073,23 +1770,22 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) {
- return isolate()->heap()->undefined_value();
+ return Handle<Code>::null();
}
Label miss;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ mov(edx, Operand(esp, 2 * kPointerSize));
-
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2105,12 +1801,12 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Convert the smi code to uint16.
__ and_(code, Immediate(Smi::FromInt(0xffff)));
- StringCharFromCodeGenerator char_from_code_generator(code, eax);
- char_from_code_generator.GenerateFast(masm());
+ StringCharFromCodeGenerator generator(code, eax);
+ generator.GenerateFast(masm());
__ ret(2 * kPointerSize);
StubRuntimeCallHelper call_helper;
- char_from_code_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
@@ -2123,19 +1819,19 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
__ bind(&miss);
// ecx: function name.
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2145,7 +1841,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// -----------------------------------
if (!CpuFeatures::IsSupported(SSE2)) {
- return isolate()->heap()->undefined_value();
+ return Handle<Code>::null();
}
CpuFeatures::Scope use_sse2(SSE2);
@@ -2155,23 +1851,24 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) {
- return isolate()->heap()->undefined_value();
+ return Handle<Code>::null();
}
Label miss;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(edx, &miss);
- CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2252,19 +1949,19 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ bind(&miss);
// ecx: function name.
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2278,23 +1975,24 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) {
- return isolate()->heap()->undefined_value();
+ return Handle<Code>::null();
}
Label miss;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(edx, &miss);
- CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2356,34 +2054,33 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
__ bind(&miss);
// ecx: function name.
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
ASSERT(optimization.is_simple_api_call());
// Bail out if object is a global object as we don't want to
// repatch it to global receiver.
- if (object->IsGlobalObject()) return heap()->undefined_value();
- if (cell != NULL) return heap()->undefined_value();
- if (!object->IsJSObject()) return heap()->undefined_value();
+ if (object->IsGlobalObject()) return Handle<Code>::null();
+ if (!cell.is_null()) return Handle<Code>::null();
+ if (!object->IsJSObject()) return Handle<Code>::null();
int depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+ Handle<JSObject>::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return Handle<Code>::null();
Label miss, miss_before_stack_reserved;
- GenerateNameCheck(Handle<String>(name), &miss_before_stack_reserved);
+ GenerateNameCheck(name, &miss_before_stack_reserved);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -2401,8 +2098,8 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
__ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, edi, name, depth, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+ name, depth, &miss);
// Move the return address on top of the stack.
__ mov(eax, Operand(esp, 3 * kPointerSize));
@@ -2410,27 +2107,24 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
// esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
// duplicate of return address and will be overwritten.
- MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
+ GenerateFastApiCall(masm(), optimization, argc);
__ bind(&miss);
__ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
__ bind(&miss_before_stack_reserved);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
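
CompileFastApiCall declines (null handle) unless the expected receiver type sits within a small, known depth of the receiver's prototype chain, so the stub can validate the chain with a fixed number of map checks; kInvalidProtoDepth is the "not found or too deep" answer. A hedged sketch of what such a depth query might look like: the real CallOptimization::GetPrototypeDepthOfExpectedType consults the function's template info, and every type and bound below is illustrative.

    #include <cstddef>

    static const int kInvalidProtoDepth = -1;
    static const int kMaxInlinableDepth = 3;  // Assumed small bound.

    struct Obj {
      const void* type_tag;  // Stand-in for the expected API object type.
      Obj* prototype;        // NULL at the end of the chain.
    };

    // Walk from the receiver toward the holder, counting hops; give up
    // once the chain is deeper than the stub is willing to check.
    int GetPrototypeDepth(Obj* receiver, const void* expected_tag) {
      int depth = 0;
      for (Obj* o = receiver; o != NULL; o = o->prototype, ++depth) {
        if (depth > kMaxInlinableDepth) break;
        if (o->type_tag == expected_tag) return depth;
      }
      return kInvalidProtoDepth;
    }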
-MaybeObject* CallStubCompiler::CompileCallConstant(
- Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSFunction> function,
+ Handle<String> name,
+ CheckType check) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2440,17 +2134,15 @@ MaybeObject* CallStubCompiler::CompileCallConstant(
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, NULL, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -2464,15 +2156,13 @@ MaybeObject* CallStubCompiler::CompileCallConstant(
// Make sure that it's okay not to patch the on stack receiver
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
__ IncrementCounter(isolate()->counters()->call_const(), 1);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, edi, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax,
+ edi, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -2483,28 +2173,25 @@ MaybeObject* CallStubCompiler::CompileCallConstant(
break;
case STRING_CHECK:
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
+ if (function->IsBuiltin() || function->shared()->strict_mode()) {
// Check that the object is a string or a symbol.
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
__ j(above_equal, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
- }
- break;
-
- case NUMBER_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ eax, holder, ebx, edx, edi, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case NUMBER_CHECK:
+ if (function->IsBuiltin() || function->shared()->strict_mode()) {
Label fast;
// Check that the object is a smi or a heap number.
__ JumpIfSmi(edx, &fast);
@@ -2514,18 +2201,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
- }
- break;
- }
-
- case BOOLEAN_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ eax, holder, ebx, edx, edi, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case BOOLEAN_CHECK:
+ if (function->IsBuiltin() || function->shared()->strict_mode()) {
Label fast;
// Check that the object is a boolean.
__ cmp(edx, factory()->true_value());
@@ -2536,14 +2223,15 @@ MaybeObject* CallStubCompiler::CompileCallConstant(
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ eax, holder, ebx, edx, edi, name, &miss);
+ } else {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
}
break;
- }
-
- default:
- UNREACHABLE();
}
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
@@ -2554,17 +2242,16 @@ MaybeObject* CallStubCompiler::CompileCallConstant(
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
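
The STRING/NUMBER/BOOLEAN arms above were flipped from "miss unless allowed" to "handle if allowed", but the underlying rule is unchanged: a constant-function call stub may leave a primitive receiver unboxed only when the callee is a builtin or strict-mode code, because a non-strict user function must observe `this` as a wrapper object, and this stub does not allocate wrappers. A sketch of the condition as one might factor it out (a hypothetical helper, not part of the patch; both calls appear verbatim in the arms above):

    // Hypothetical factoring of the check repeated in each arm.
    static bool CanCallWithUnboxedReceiver(Handle<JSFunction> function) {
      // Builtins and strict-mode functions never observe a boxed
      // primitive receiver, so the stub can skip wrapper allocation;
      // anything else misses, and the generic path boxes the receiver.
      return function->IsBuiltin() || function->shared()->strict_mode();
    }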
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2574,7 +2261,7 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// -----------------------------------
Label miss;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -2586,17 +2273,8 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state_);
- MaybeObject* result = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- edx,
- ebx,
- edi,
- eax,
- &miss);
- if (result->IsFailure()) return result;
+ compiler.Compile(masm(), object, holder, name, &lookup, edx, ebx, edi, eax,
+ &miss);
// Restore receiver.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -2623,20 +2301,19 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Handle load cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(INTERCEPTOR, name);
+ return GetCode(INTERCEPTOR, name);
}
-MaybeObject* CallStubCompiler::CompileCallGlobal(
- JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2646,23 +2323,17 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, cell, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
-
GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
GenerateLoadFunctionFromCell(cell, function, &miss);
// Patch the receiver on the stack with the global proxy.
@@ -2691,11 +2362,10 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(counters->call_global_inline_miss(), 1);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(NORMAL, name);
+ return GetCode(NORMAL, name);
}
@@ -3039,10 +2709,11 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
}
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- eax : receiver
// -- ecx : name
@@ -3050,18 +2721,13 @@ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
// -----------------------------------
Label miss;
- MaybeObject* result = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
- edi, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi, callback,
+ name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return TryGetCode(CALLBACKS, name);
+ return GetCode(CALLBACKS, name);
}
@@ -3085,9 +2751,9 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
}
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : receiver
// -- ecx : name
@@ -3100,22 +2766,14 @@ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- eax,
- ecx,
- edx,
- ebx,
- edi,
- name,
- &miss);
+ GenerateLoadInterceptor(receiver, holder, &lookup, eax, ecx, edx, ebx, edi,
+ name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return TryGetCode(INTERCEPTOR, name);
+ return GetCode(INTERCEPTOR, name);
}
@@ -3202,11 +2860,11 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
@@ -3218,23 +2876,18 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
__ IncrementCounter(counters->keyed_load_callback(), 1);
// Check that the name has not changed.
- __ cmp(eax, Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
- MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
- ecx, edi, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
+ GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi, callback,
+ name, &miss);
__ bind(&miss);
-
__ DecrementCounter(counters->keyed_load_callback(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return TryGetCode(CALLBACKS, name);
+ return GetCode(CALLBACKS, name);
}
@@ -3268,9 +2921,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
@@ -3282,27 +2936,19 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
__ IncrementCounter(counters->keyed_load_interceptor(), 1);
// Check that the name has not changed.
- __ cmp(eax, Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- edx,
- eax,
- ecx,
- ebx,
- edi,
- name,
- &miss);
+ GenerateLoadInterceptor(receiver, holder, &lookup, edx, eax, ecx, ebx, edi,
+ name, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_interceptor(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return TryGetCode(INTERCEPTOR, name);
+ return GetCode(INTERCEPTOR, name);
}
@@ -3433,7 +3079,8 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
// Specialized stub for constructing objects from functions which have only
// simple assignments of the form this.x = ...; in their body.
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+ Handle<JSFunction> function) {
// ----------- S t a t e -------------
// -- eax : argc
// -- edi : constructor
@@ -3472,12 +3119,8 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// ebx: initial map
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ shl(ecx, kPointerSizeLog2);
- __ AllocateInNewSpace(ecx,
- edx,
- ecx,
- no_reg,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(ecx, edx, ecx, no_reg,
+ &generic_stub_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
// ebx: initial map
@@ -3508,7 +3151,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// edi: undefined
// Fill the initialized properties with a constant value or a passed argument
// depending on the this.x = ...; assignment in the function.
- SharedFunctionInfo* shared = function->shared();
+ Handle<SharedFunctionInfo> shared(function->shared());
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
// Check if the argument assigned to the property is actually passed.
@@ -3560,9 +3203,8 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
- Handle<Code> generic_construct_stub =
- isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+ Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
+ __ jmp(code, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode();
diff --git a/src/3rdparty/v8/src/incremental-marking-inl.h b/src/3rdparty/v8/src/incremental-marking-inl.h
index 2a7fba7..c8af236 100644
--- a/src/3rdparty/v8/src/incremental-marking-inl.h
+++ b/src/3rdparty/v8/src/incremental-marking-inl.h
@@ -37,62 +37,42 @@ namespace internal {
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
Object** slot,
Object* value) {
- if (IsMarking() && value->IsHeapObject()) {
- MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
- if (Marking::IsWhite(value_bit)) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- }
-
- // Object is either grey or white it will be scanned if survives.
- return false;
+ MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+ if (Marking::IsWhite(value_bit)) {
+ MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ if (Marking::IsBlack(obj_bit)) {
+ BlackToGreyAndUnshift(obj, obj_bit);
+ RestartIfNotMarking();
}
- return true;
+
+ // Object is either grey or white. It will be scanned if it survives.
+ return false;
}
- return false;
+ return true;
}
void IncrementalMarking::RecordWrite(HeapObject* obj,
Object** slot,
Object* value) {
- if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- // Object is not going to be rescanned we need to record the slot.
- heap_->mark_compact_collector()->RecordSlot(
- HeapObject::RawField(obj, 0), slot, value);
- }
+ if (IsMarking() && value->NonFailureIsHeapObject()) {
+ RecordWriteSlow(obj, slot, value);
}
}
+void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
+ Object** slot,
+ Code* value) {
+ if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
+}
+
+
void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj,
RelocInfo* rinfo,
Object* value) {
- if (IsMarking() && value->IsHeapObject()) {
- MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
- if (Marking::IsWhite(value_bit)) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- }
-
- // Object is either grey or white it will be scanned if survives.
- return;
- }
-
- if (is_compacting_) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- // Object is not going to be rescanned we need to record the slot.
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
- Code::cast(value));
- }
- }
+ if (IsMarking() && value->NonFailureIsHeapObject()) {
+ RecordWriteIntoCodeSlow(obj, rinfo, value);
}
}
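
The condition that BaseRecordWrite keeps is the classic tri-color invariant repair for incremental marking: if a black (already scanned) object is written a pointer to a white (not yet seen) object, the marker would otherwise never revisit the black object and could drop the white one, so the black object is pushed back to grey. A standalone sketch of just that decision, with an explicit color enum standing in for the mark bits:

    enum Color { WHITE, GREY, BLACK };  // Unvisited / queued / fully scanned.

    struct Obj { Color color; };

    // Mirrors BaseRecordWrite above: returns false when the write
    // cannot break the invariant (the value is white and will be found
    // via the now-grey host), true when the value is already marked and
    // only compaction slot recording might still be needed.
    bool BaseRecordWrite(Obj* obj, Obj* value) {
      if (value->color == WHITE) {
        if (obj->color == BLACK) {
          obj->color = GREY;  // BlackToGreyAndUnshift: rescan obj later.
        }
        // Host is now grey or white; value gets scanned if it survives.
        return false;
      }
      return true;
    }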
diff --git a/src/3rdparty/v8/src/incremental-marking.cc b/src/3rdparty/v8/src/incremental-marking.cc
index 68b830a..6d2f393 100644
--- a/src/3rdparty/v8/src/incremental-marking.cc
+++ b/src/3rdparty/v8/src/incremental-marking.cc
@@ -60,6 +60,20 @@ void IncrementalMarking::TearDown() {
}
+void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
+ Object** slot,
+ Object* value) {
+ if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
+ MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ if (Marking::IsBlack(obj_bit)) {
+ // Object is not going to be rescanned. We need to record the slot.
+ heap_->mark_compact_collector()->RecordSlot(
+ HeapObject::RawField(obj, 0), slot, value);
+ }
+ }
+}
+
+
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
Object* value,
Isolate* isolate) {
@@ -108,7 +122,7 @@ void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
}
-void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
+void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
Object** slot,
Code* value) {
if (BaseRecordWrite(host, slot, value) && is_compacting_) {
@@ -119,6 +133,30 @@ void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
}
+void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
+ RelocInfo* rinfo,
+ Object* value) {
+ MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+ if (Marking::IsWhite(value_bit)) {
+ MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ if (Marking::IsBlack(obj_bit)) {
+ BlackToGreyAndUnshift(obj, obj_bit);
+ RestartIfNotMarking();
+ }
+ // Object is either grey or white. It will be scanned if it survives.
+ return;
+ }
+
+ if (is_compacting_) {
+ MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ if (Marking::IsBlack(obj_bit)) {
+ // Object is not going to be rescanned. We need to record the slot.
+ heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
+ Code::cast(value));
+ }
+ }
+}
+
class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
public:
diff --git a/src/3rdparty/v8/src/incremental-marking.h b/src/3rdparty/v8/src/incremental-marking.h
index fa7337b..4542fbd 100644
--- a/src/3rdparty/v8/src/incremental-marking.h
+++ b/src/3rdparty/v8/src/incremental-marking.h
@@ -59,7 +59,7 @@ class IncrementalMarking {
inline bool IsStopped() { return state() == STOPPED; }
- inline bool IsMarking() { return state() >= MARKING; }
+ INLINE(bool IsMarking()) { return state() >= MARKING; }
inline bool IsMarkingIncomplete() { return state() == MARKING; }
@@ -120,16 +120,23 @@ class IncrementalMarking {
Object** slot,
Isolate* isolate);
- inline bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value);
+ INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
+ INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
+ INLINE(void RecordWriteIntoCode(HeapObject* obj,
+ RelocInfo* rinfo,
+ Object* value));
+ INLINE(void RecordWriteOfCodeEntry(JSFunction* host,
+ Object** slot,
+ Code* value));
- inline void RecordWrite(HeapObject* obj, Object** slot, Object* value);
- inline void RecordWriteIntoCode(HeapObject* obj,
- RelocInfo* rinfo,
- Object* value);
+ void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
+ void RecordWriteIntoCodeSlow(HeapObject* obj,
+ RelocInfo* rinfo,
+ Object* value);
+ void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
void RecordCodeTargetPatch(Address pc, HeapObject* value);
- void RecordWriteOfCodeEntry(JSFunction* host, Object** slot, Code* value);
inline void RecordWrites(HeapObject* obj);
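
The header restructuring above is a fast-path/slow-path split: the INLINE() entry points reduce to "is the collector marking at all?" (plus a heap-object test on the value) at every write-barrier site, and all the real work moves to the out-of-line ...Slow bodies added in incremental-marking.cc. A generic sketch of the pattern, with illustrative names:

    // The cheap predicate lives in the header so it inlines at every
    // barrier site; the rare, heavy work stays out of line to keep the
    // generated code small when the collector is idle.
    class Marker {
     public:
      Marker() : is_marking_(false) {}

      inline void RecordWrite(void* obj, void** slot, void* value) {
        if (is_marking_) RecordWriteSlow(obj, slot, value);  // Rare.
      }

     private:
      // Defined out of line (in the .cc file) in the real code.
      void RecordWriteSlow(void* obj, void** slot, void* value) {
        (void)obj; (void)slot; (void)value;  // Marking/compaction work.
      }
      bool is_marking_;
    };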
diff --git a/src/3rdparty/v8/src/isolate-inl.h b/src/3rdparty/v8/src/isolate-inl.h
index d6e6131..0a2c174 100644
--- a/src/3rdparty/v8/src/isolate-inl.h
+++ b/src/3rdparty/v8/src/isolate-inl.h
@@ -45,9 +45,7 @@ SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
}
isolate->set_save_context(this);
- // If there is no JS frame under the current C frame, use the value 0.
- JavaScriptFrameIterator it(isolate);
- js_sp_ = it.done() ? 0 : it.frame()->sp();
+ c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
}
diff --git a/src/3rdparty/v8/src/isolate.cc b/src/3rdparty/v8/src/isolate.cc
index 36c1dfd..a073af9 100644
--- a/src/3rdparty/v8/src/isolate.cc
+++ b/src/3rdparty/v8/src/isolate.cc
@@ -96,7 +96,6 @@ void ThreadLocalTop::InitializeInternal() {
thread_id_ = ThreadId::Invalid();
external_caught_exception_ = false;
failed_access_check_callback_ = NULL;
- user_object_comparison_callback_ = NULL;
save_context_ = NULL;
catcher_ = NULL;
top_lookup_result_ = NULL;
@@ -730,12 +729,6 @@ void Isolate::SetFailedAccessCheckCallback(
thread_local_top()->failed_access_check_callback_ = callback;
}
-
-void Isolate::SetUserObjectComparisonCallback(
- v8::UserObjectComparisonCallback callback) {
- thread_local_top()->user_object_comparison_callback_ = callback;
-}
-
void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
if (!thread_local_top()->failed_access_check_callback_) return;
diff --git a/src/3rdparty/v8/src/isolate.h b/src/3rdparty/v8/src/isolate.h
index 9919e83..116b802 100644
--- a/src/3rdparty/v8/src/isolate.h
+++ b/src/3rdparty/v8/src/isolate.h
@@ -258,9 +258,6 @@ class ThreadLocalTop BASE_EMBEDDED {
// Head of the list of live LookupResults.
LookupResult* top_lookup_result_;
- // Call back function for user object comparisons
- v8::UserObjectComparisonCallback user_object_comparison_callback_;
-
// Whether out of memory exceptions should be ignored.
bool ignore_out_of_memory_;
@@ -706,11 +703,6 @@ class Isolate {
void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
- void SetUserObjectComparisonCallback(v8::UserObjectComparisonCallback callback);
- inline v8::UserObjectComparisonCallback UserObjectComparisonCallback() {
- return thread_local_top()->user_object_comparison_callback_;
- }
-
// Exception throwing support. The caller should use the result
// of Throw() as its return value.
Failure* Throw(Object* exception, MessageLocation* location = NULL);
@@ -1254,8 +1246,8 @@ class SaveContext BASE_EMBEDDED {
SaveContext* prev() { return prev_; }
// Returns true if this save context is below a given JavaScript frame.
- bool below(JavaScriptFrame* frame) {
- return (js_sp_ == 0) || (frame->sp() < js_sp_);
+ bool IsBelowFrame(JavaScriptFrame* frame) {
+ return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
}
private:
@@ -1264,7 +1256,7 @@ class SaveContext BASE_EMBEDDED {
Handle<Context> dummy_;
#endif
SaveContext* prev_;
- Address js_sp_; // The top JS frame's sp when saving context.
+ Address c_entry_fp_;
};
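
SaveContext now snapshots the C entry frame pointer instead of walking to the top JS frame's stack pointer, which makes construction cheap, and below() is renamed to IsBelowFrame() with the comparison adjusted to match: on a stack that grows toward lower addresses, a C entry recorded at a higher address is older than (below) the given JS frame, and 0 means no C entry existed at save time. A sketch of just the comparison's intent (addresses are illustrative):

    #include <cstdint>

    // Downward-growing stack: frames pushed later live at lower
    // addresses. c_entry_fp == 0 means "no C entry at save time", so
    // the saved context is below every JavaScript frame.
    bool IsBelowFrame(uintptr_t c_entry_fp, uintptr_t frame_sp) {
      return (c_entry_fp == 0) || (c_entry_fp > frame_sp);
    }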
diff --git a/src/3rdparty/v8/src/macros.py b/src/3rdparty/v8/src/macros.py
index a42e83c..bf7119f 100644
--- a/src/3rdparty/v8/src/macros.py
+++ b/src/3rdparty/v8/src/macros.py
@@ -82,8 +82,6 @@ const kMinYear = -1000000;
const kMaxYear = 1000000;
const kMinMonth = -10000000;
const kMaxMonth = 10000000;
-const kMinDate = -100000000;
-const kMaxDate = 100000000;
# Native cache ids.
const STRING_TO_REGEXP_CACHE_ID = 0;
diff --git a/src/3rdparty/v8/src/mark-compact.cc b/src/3rdparty/v8/src/mark-compact.cc
index bf0aab8..b41b033 100644
--- a/src/3rdparty/v8/src/mark-compact.cc
+++ b/src/3rdparty/v8/src/mark-compact.cc
@@ -1513,9 +1513,8 @@ class SymbolTableCleaner : public ObjectVisitor {
// Since no objects have yet been moved we can safely access the map of
// the object.
- if (o->IsExternalString() ||
- (o->IsHeapObject() && HeapObject::cast(o)->map()->has_external_resource())) {
- heap_->FinalizeExternalString(HeapObject::cast(*p));
+ if (o->IsExternalString()) {
+ heap_->FinalizeExternalString(String::cast(*p));
}
// Set the entry to null_value (as deleted).
*p = heap_->null_value();
@@ -2488,15 +2487,15 @@ static void UpdatePointer(HeapObject** p, HeapObject* object) {
}
-static HeapObject* UpdateReferenceInExternalStringTableEntry(Heap* heap,
- Object** p) {
+static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
+ Object** p) {
MapWord map_word = HeapObject::cast(*p)->map_word();
if (map_word.IsForwardingAddress()) {
- return HeapObject::cast(map_word.ToForwardingAddress());
+ return String::cast(map_word.ToForwardingAddress());
}
- return HeapObject::cast(*p);
+ return String::cast(*p);
}
diff --git a/src/3rdparty/v8/src/messages.cc b/src/3rdparty/v8/src/messages.cc
index c70463d..a0793c2 100644
--- a/src/3rdparty/v8/src/messages.cc
+++ b/src/3rdparty/v8/src/messages.cc
@@ -126,7 +126,7 @@ void MessageHandler::ReportMessage(Isolate* isolate,
v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
v8::MessageCallback callback =
- FUNCTION_CAST<v8::MessageCallback>(callback_obj->address());
+ FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
Handle<Object> callback_data(listener.get(1));
{
// Do not allow exceptions to propagate.
diff --git a/src/3rdparty/v8/src/messages.js b/src/3rdparty/v8/src/messages.js
index e4607ab..b370422 100644
--- a/src/3rdparty/v8/src/messages.js
+++ b/src/3rdparty/v8/src/messages.js
@@ -198,6 +198,7 @@ function FormatMessage(message) {
// RangeError
"invalid_array_length", ["Invalid array length"],
"stack_overflow", ["Maximum call stack size exceeded"],
+ "invalid_time_value", ["Invalid time value"],
// SyntaxError
"unable_to_parse", ["Parse error"],
"invalid_regexp_flags", ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
diff --git a/src/3rdparty/v8/src/mips/builtins-mips.cc b/src/3rdparty/v8/src/mips/builtins-mips.cc
index 17975fe..a46a786 100644
--- a/src/3rdparty/v8/src/mips/builtins-mips.cc
+++ b/src/3rdparty/v8/src/mips/builtins-mips.cc
@@ -1176,24 +1176,93 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
-// These functions are called from C++ but cannot be used in live code.
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+ Deoptimizer::BailoutType type) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass the function and deoptimization type to the runtime system.
+ __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(a0);
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ }
+
+ // Get the full codegen state from the stack and untag it -> t2.
+ __ lw(t2, MemOperand(sp, 0 * kPointerSize));
+ __ SmiUntag(t2);
+ // Switch on the state.
+ Label with_tos_register, unknown_state;
+ __ Branch(&with_tos_register,
+ ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
+ __ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
+ __ Ret();
+
+ __ bind(&with_tos_register);
+ __ lw(v0, MemOperand(sp, 1 * kPointerSize));
+ __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
+
+ __ Addu(sp, sp, Operand(2 * kPointerSize)); // Remove state.
+ __ Ret();
+
+ __ bind(&unknown_state);
+ __ stop("no cases left");
+}
+
+
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- __ Abort("Call to unimplemented function in builtins-mips.cc");
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- __ Abort("Call to unimplemented function in builtins-mips.cc");
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- __ Abort("Call to unimplemented function in builtins-mips.cc");
+ // For now, we are relying on the fact that Runtime::NotifyOSR
+ // doesn't do any garbage collection which allows us to save/restore
+ // the registers without worrying about which of them contain
+ // pointers. This seems a bit fragile.
+ RegList saved_regs =
+ (kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit();
+ __ MultiPush(saved_regs);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kNotifyOSR, 0);
+ }
+ __ MultiPop(saved_regs);
+ __ Ret();
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- __ Abort("Call to unimplemented function in builtins-mips.cc");
+ CpuFeatures::TryForceFeatureScope scope(FPU);
+ if (!CpuFeatures::IsSupported(FPU)) {
+ __ Abort("Unreachable code: Cannot optimize without FPU support.");
+ return;
+ }
+
+ // Lookup the function in the JavaScript frame and push it as an
+ // argument to the on-stack replacement function.
+ __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(a0);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
+
+ // If the result was -1 it means that we couldn't optimize the
+ // function. Just return and continue in the unoptimized version.
+ __ Ret(eq, v0, Operand(Smi::FromInt(-1)));
+
+ // Untag the AST id and push it on the stack.
+ __ SmiUntag(v0);
+ __ push(v0);
+
+ // Generate the code for doing the frame-to-frame translation using
+ // the deoptimizer infrastructure.
+ Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
+ generator.Generate();
}
@@ -1395,8 +1464,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kFunctionOffset = 4 * kPointerSize;
{
- FrameScope scope(masm, StackFrame::INTERNAL);
-
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
__ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
__ push(a0);
__ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
@@ -1530,8 +1598,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ InvokeFunction(a1, actual, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
- scope.GenerateLeaveFrame();
-
+ frame_scope.GenerateLeaveFrame();
__ Ret(USE_DELAY_SLOT);
__ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
diff --git a/src/3rdparty/v8/src/mips/code-stubs-mips.cc b/src/3rdparty/v8/src/mips/code-stubs-mips.cc
index c851158..8ff39c5 100644
--- a/src/3rdparty/v8/src/mips/code-stubs-mips.cc
+++ b/src/3rdparty/v8/src/mips/code-stubs-mips.cc
@@ -173,11 +173,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Copy the qml global object from the surrounding context.
- __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
- __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
-
-
// Initialize the rest of the slots to undefined.
__ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
@@ -1644,46 +1639,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
- {
- // This is optimized for reading the code and not benchmarked for
- // speed or amount of instructions. The code is not ordered for speed
- // or anything like this
- Label miss, user_compare;
-
- // No global compare if both operands are SMIs
- __ And(a2, a1, Operand(a0));
- __ JumpIfSmi(a2, &miss);
-
-
- // We need to check if lhs and rhs are both objects, if not we are
- // jumping out of the function. We will keep the 'map' in t0 (lhs) and
- // t1 (rhs) for later usage.
- __ GetObjectType(a0, t0, a3);
- __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
-
- __ GetObjectType(a1, t1, a3);
- __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
-
- // Check if the UseUserComparison flag is set by using the map of t0 for lhs
- __ lbu(t0, FieldMemOperand(t0, Map::kBitField2Offset));
- __ And(t0, t0, Operand(1 << Map::kUseUserObjectComparison));
- __ Branch(&user_compare, eq, t0, Operand(1 << Map::kUseUserObjectComparison));
-
-
- // Check if the UseUserComparison flag is _not_ set by using the map of t1 for
- // rhs and then jump to the miss label.
- __ lbu(t1, FieldMemOperand(t1, Map::kBitField2Offset));
- __ And(t1, t1, Operand(1 << Map::kUseUserObjectComparison));
- __ Branch(&miss, ne, t1, Operand(1 << Map::kUseUserObjectComparison));
-
- // Invoke the runtime function here
- __ bind(&user_compare);
- __ Push(a0, a1);
- __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
- // We exit here without doing anything
- __ bind(&miss);
- }
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
@@ -5396,7 +5351,8 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
// Index is not a smi.
@@ -5482,7 +5438,8 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
@@ -5508,7 +5465,8 @@ void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
char_code_at_generator_.GenerateSlow(masm, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}
@@ -6899,20 +6857,10 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &miss);
- // Compare lhs, a2 holds the map, a3 holds the type_reg
- __ GetObjectType(a0, a2, a3);
- __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
- __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- __ And(a2, a2, Operand(1 << Map::kUseUserObjectComparison));
- __ Branch(&miss, eq, a2, Operand(1 << Map::kUseUserObjectComparison));
-
-
- // Compare rhs, a2 holds the map, a3 holds the type_reg
- __ GetObjectType(a1, a2, a3);
- __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
- __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- __ And(a2, a2, Operand(1 << Map::kUseUserObjectComparison));
- __ Branch(&miss, eq, a2, Operand(1 << Map::kUseUserObjectComparison));
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
ASSERT(GetCondition() == eq);
__ Subu(v0, a0, Operand(a1));
@@ -7034,84 +6982,6 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Having undefined at this place means the name is not contained.
ASSERT_EQ(kSmiTagSize, 1);
Register tmp = properties;
- __ sll(tmp, index, 1);
- __ Addu(tmp, properties, tmp);
- __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- ASSERT(!tmp.is(entity_name));
- __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
- __ Branch(done, eq, entity_name, Operand(tmp));
-
- if (i != kInlinedProbes - 1) {
- // Stop if found the property.
- __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
-
- // Check if the entry name is not a symbol.
- __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ lbu(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ And(tmp, entity_name, Operand(kIsSymbolMask));
- __ Branch(miss, eq, tmp, Operand(zero_reg));
-
- // Restore the properties.
- __ lw(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- }
- }
-
- const int spill_mask =
- (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
- a2.bit() | a1.bit() | a0.bit() | v0.bit());
-
- __ MultiPush(spill_mask);
- __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ li(a1, Operand(Handle<String>(name)));
- StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
- __ CallStub(&stub);
- __ mov(at, v0);
- __ MultiPop(spill_mask);
-
- __ Branch(done, eq, at, Operand(zero_reg));
- __ Branch(miss, ne, at, Operand(zero_reg));
-}
-
-
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* StringDictionaryLookupStub::TryGenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- String* name,
- Register scratch0) {
-  // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch0;
- // Capacity is smi 2^n.
- __ lw(index, FieldMemOperand(properties, kCapacityOffset));
- __ Subu(index, index, Operand(1));
- __ And(index, index, Operand(
- Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- // index *= 3.
- __ sll(at, index, 1);
- __ Addu(index, index, at);
-
- Register entity_name = scratch0;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- Register tmp = properties;
-
__ sll(scratch0, index, 1);
__ Addu(tmp, properties, scratch0);
__ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
@@ -7145,14 +7015,12 @@ MaybeObject* StringDictionaryLookupStub::TryGenerateNegativeLookup(
__ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ li(a1, Operand(Handle<String>(name)));
StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
- MaybeObject* result = masm->TryCallStub(&stub);
- if (result->IsFailure()) return result;
+ __ CallStub(&stub);
__ mov(at, v0);
__ MultiPop(spill_mask);
__ Branch(done, eq, at, Operand(zero_reg));
__ Branch(miss, ne, at, Operand(zero_reg));
- return result;
}
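
The inline probes above compute a masked slot index and then scale it by the three-word entry size with a shift-and-add (index + index * 2). Below is a compilable sketch of that arithmetic, following the "(hash + i + i * i) & mask" comment in the loop; the exact probe schedule folded into the immediate is an assumption here, the real one lives in StringDictionary::GetProbeOffset:

#include <cstdint>

// Sketch of the inline negative-lookup probe arithmetic. kEntrySize mirrors
// the ASSERT(StringDictionary::kEntrySize == 3) above; capacity is a power
// of two.
const int kEntrySize = 3;

uint32_t ProbeElementIndex(uint32_t hash, uint32_t capacity, int i) {
  uint32_t index = (hash + i + i * i) & (capacity - 1);  // masked slot index
  return index + (index << 1);  // index * 3, as the sll/addu pair computes
}
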
@@ -7167,6 +7035,11 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
Register name,
Register scratch1,
Register scratch2) {
+ ASSERT(!elements.is(scratch1));
+ ASSERT(!elements.is(scratch2));
+ ASSERT(!name.is(scratch1));
+ ASSERT(!name.is(scratch2));
+
// Assert that name contains a string.
if (FLAG_debug_code) __ AbortIfNotString(name);
diff --git a/src/3rdparty/v8/src/mips/code-stubs-mips.h b/src/3rdparty/v8/src/mips/code-stubs-mips.h
index beb20aa..94ef2af 100644
--- a/src/3rdparty/v8/src/mips/code-stubs-mips.h
+++ b/src/3rdparty/v8/src/mips/code-stubs-mips.h
@@ -807,17 +807,6 @@ class StringDictionaryLookupStub: public CodeStub {
Handle<String> name,
Register scratch0);
- // TODO(kmillikin): Eliminate this function when the stub cache is fully
- // handlified.
- MUST_USE_RESULT static MaybeObject* TryGenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- String* name,
- Register scratch0);
-
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
diff --git a/src/3rdparty/v8/src/mips/deoptimizer-mips.cc b/src/3rdparty/v8/src/mips/deoptimizer-mips.cc
index 92d7edd..bc735df 100644
--- a/src/3rdparty/v8/src/mips/deoptimizer-mips.cc
+++ b/src/3rdparty/v8/src/mips/deoptimizer-mips.cc
@@ -32,24 +32,112 @@
#include "full-codegen.h"
#include "safepoint-table.h"
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
-
namespace v8 {
namespace internal {
-const int Deoptimizer::table_entry_size_ = 10;
+const int Deoptimizer::table_entry_size_ = 32;
int Deoptimizer::patch_size() {
- const int kCallInstructionSizeInWords = 3;
+ const int kCallInstructionSizeInWords = 4;
return kCallInstructionSizeInWords * Assembler::kInstrSize;
}
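
The word count grows from 3 to 4 because an absolute call on MIPS needs a lui/ori pair to materialize the target address, the jalr itself, and a delay-slot nop. A worked check of the resulting patch size (kInstrSize is 4 bytes on MIPS32):

#include <cassert>

const int kInstrSize = 4;                   // MIPS32 instruction width
const int kCallInstructionSizeInWords = 4;  // lui, ori, jalr, delay-slot nop

int PatchSizeSketch() {
  int size = kCallInstructionSizeInWords * kInstrSize;
  assert(size == 16);  // the absolute call occupies 16 bytes
  return size;
}
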
+void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
+ // Nothing to do. No new relocation information is written for lazy
+ // deoptimization on MIPS.
+}
+
+
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- UNIMPLEMENTED();
+ HandleScope scope;
+ AssertNoAllocation no_allocation;
+
+ if (!function->IsOptimized()) return;
+
+ // Get the optimized code.
+ Code* code = function->code();
+
+ // Invalidate the relocation information, as the code patching below will
+ // make it stale, and it is not needed any more.
+ code->InvalidateRelocation();
+
+ // For each return after a safepoint insert an absolute call to the
+ // corresponding deoptimization entry.
+ unsigned last_pc_offset = 0;
+ SafepointTable table(function->code());
+ for (unsigned i = 0; i < table.length(); i++) {
+ unsigned pc_offset = table.GetPcOffset(i);
+ SafepointEntry safepoint_entry = table.GetEntry(i);
+ int deoptimization_index = safepoint_entry.deoptimization_index();
+ int gap_code_size = safepoint_entry.gap_code_size();
+ // Check that we did not shoot past the next safepoint.
+ CHECK(pc_offset >= last_pc_offset);
+#ifdef DEBUG
+ // Destroy the code which is not supposed to be run again.
+ int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
+ CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+ instructions);
+ for (int x = 0; x < instructions; x++) {
+ destroyer.masm()->break_(0);
+ }
+#endif
+ last_pc_offset = pc_offset;
+ if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
+ Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
+ deoptimization_index, Deoptimizer::LAZY);
+ last_pc_offset += gap_code_size;
+ int call_size_in_bytes = MacroAssembler::CallSize(deoptimization_entry,
+ RelocInfo::NONE);
+ int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
+ ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
+ ASSERT(call_size_in_bytes <= patch_size());
+ CodePatcher patcher(code->instruction_start() + last_pc_offset,
+ call_size_in_words);
+ patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
+ last_pc_offset += call_size_in_bytes;
+ }
+ }
+
+#ifdef DEBUG
+ // Destroy the code which is not supposed to be run again.
+ int instructions =
+ (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
+ CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+ instructions);
+ for (int x = 0; x < instructions; x++) {
+ destroyer.masm()->break_(0);
+ }
+#endif
+
+ Isolate* isolate = code->GetIsolate();
+
+ // Add the deoptimizing code to the list.
+ DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+ DeoptimizerData* data = isolate->deoptimizer_data();
+ node->set_next(data->deoptimizing_code_list_);
+ data->deoptimizing_code_list_ = node;
+
+ // We might be in the middle of incremental marking with compaction.
+ // Tell collector to treat this code object in a special way and
+ // ignore all slots that might have been recorded on it.
+ isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
+ // Set the code for the function to non-optimized version.
+ function->ReplaceCode(function->shared()->code());
+
+ if (FLAG_trace_deopt) {
+ PrintF("[forced deoptimization: ");
+ function->PrintName();
+ PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+#ifdef DEBUG
+ if (FLAG_print_code) {
+ code->PrintLn();
+ }
+#endif
+ }
}
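
For every safepoint that carries a deoptimization index, the loop above steps past the safepoint's gap code and overwrites the following instructions with an absolute call into the lazy deoptimization table. A small sketch of the offset bookkeeping only; the actual patching needs the real CodePatcher:

#include <cstdint>

// Sketch: where the lazy-deopt call is written for one safepoint entry.
// pc_offset and gap_code_size come from the SafepointTable; the call must
// fit in patch_size() bytes, as the ASSERTs above check.
uint32_t LazyDeoptCallOffset(uint32_t pc_offset, int gap_code_size) {
  return pc_offset + gap_code_size;
}
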
@@ -57,7 +145,42 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* replacement_code) {
- UNIMPLEMENTED();
+ const int kInstrSize = Assembler::kInstrSize;
+ // This structure comes from FullCodeGenerator::EmitStackCheck.
+ // The call of the stack guard check has the following form:
+ // sltu at, sp, t0
+ // beq at, zero_reg, ok
+ // lui t9, <stack guard address> upper
+ // ori t9, <stack guard address> lower
+ // jalr t9
+ // nop
+ // ----- pc_after points here
+
+ ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
+
+ // Replace the sltu instruction with a load-immediate of 1 into at, so the
+ // beq is not taken.
+ CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
+ patcher.masm()->addiu(at, zero_reg, 1);
+
+ // Replace the stack check address in the load-immediate (lui/ori pair)
+ // with the entry address of the replacement code.
+ ASSERT(reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+ reinterpret_cast<uint32_t>(check_code->entry()));
+ Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+ replacement_code->entry());
+
+ // We patched the code to the following form:
+ // addiu at, zero_reg, 1
+ // beq at, zero_reg, ok ;; Not changed
+ // lui t9, <on-stack replacement address> upper
+ // ori t9, <on-stack replacement address> lower
+ // jalr t9 ;; Not changed
+ // nop ;; Not changed
+ // ----- pc_after points here
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
}
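
The patcher reaches backwards from pc_after using fixed instruction offsets: the sltu sits six instructions back, the beq five, and the lui/ori pair that holds the stack-check target four. A sketch of that address arithmetic (kInstrSize assumed to be 4):

#include <cstdint>

// Sketch: fixed offsets into the stack-check sequence, counted back from
// pc_after, which points one instruction past the delay-slot nop.
const int kInstrSize = 4;

uintptr_t SltuAddress(uintptr_t pc_after)   { return pc_after - 6 * kInstrSize; }
uintptr_t BeqAddress(uintptr_t pc_after)    { return pc_after - 5 * kInstrSize; }
uintptr_t LuiOriAddress(uintptr_t pc_after) { return pc_after - 4 * kInstrSize; }
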
@@ -65,34 +188,618 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* replacement_code) {
- UNIMPLEMENTED();
+ // Exact opposite of the function above.
+ const int kInstrSize = Assembler::kInstrSize;
+ ASSERT(Assembler::IsAddImmediate(
+ Assembler::instr_at(pc_after - 6 * kInstrSize)));
+ ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
+
+ // Restore the sltu instruction so the beq can be taken again.
+ CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
+ patcher.masm()->sltu(at, sp, t0);
+
+ // Replace the on-stack replacement address in the load-immediate (lui/ori
+ // pair) with the entry address of the normal stack-check code.
+ ASSERT(reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+ reinterpret_cast<uint32_t>(replacement_code->entry()));
+ Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+ check_code->entry());
+
+ check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_after - 4 * kInstrSize, check_code);
+}
+
+
+static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+ ByteArray* translations = data->TranslationByteArray();
+ int length = data->DeoptCount();
+ for (int i = 0; i < length; i++) {
+ if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+ TranslationIterator it(translations, data->TranslationIndex(i)->value());
+ int value = it.Next();
+ ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
+ // Read the number of frames.
+ value = it.Next();
+ if (value == 1) return i;
+ }
+ }
+ UNREACHABLE();
+ return -1;
}
void Deoptimizer::DoComputeOsrOutputFrame() {
- UNIMPLEMENTED();
+ DeoptimizationInputData* data = DeoptimizationInputData::cast(
+ optimized_code_->deoptimization_data());
+ unsigned ast_id = data->OsrAstId()->value();
+
+ int bailout_id = LookupBailoutId(data, ast_id);
+ unsigned translation_index = data->TranslationIndex(bailout_id)->value();
+ ByteArray* translations = data->TranslationByteArray();
+
+ TranslationIterator iterator(translations, translation_index);
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator.Next());
+ ASSERT(Translation::BEGIN == opcode);
+ USE(opcode);
+ int count = iterator.Next();
+ ASSERT(count == 1);
+ USE(count);
+
+ opcode = static_cast<Translation::Opcode>(iterator.Next());
+ USE(opcode);
+ ASSERT(Translation::FRAME == opcode);
+ unsigned node_id = iterator.Next();
+ USE(node_id);
+ ASSERT(node_id == ast_id);
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
+ USE(function);
+ ASSERT(function == function_);
+ unsigned height = iterator.Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ USE(height_in_bytes);
+
+ unsigned fixed_size = ComputeFixedSize(function_);
+ unsigned input_frame_size = input_->GetFrameSize();
+ ASSERT(fixed_size + height_in_bytes == input_frame_size);
+
+ unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
+ unsigned outgoing_size = outgoing_height * kPointerSize;
+ unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
+ ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
+
+ if (FLAG_trace_osr) {
+ PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
+ reinterpret_cast<intptr_t>(function_));
+ function_->PrintName();
+ PrintF(" => node=%u, frame=%d->%d]\n",
+ ast_id,
+ input_frame_size,
+ output_frame_size);
+ }
+
+ // There's only one output frame in the OSR case.
+ output_count_ = 1;
+ output_ = new FrameDescription*[1];
+ output_[0] = new(output_frame_size) FrameDescription(
+ output_frame_size, function_);
+#ifdef DEBUG
+ output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
+
+ // Clear the incoming parameters in the optimized frame to avoid
+ // confusing the garbage collector.
+ unsigned output_offset = output_frame_size - kPointerSize;
+ int parameter_count = function_->shared()->formal_parameter_count() + 1;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_[0]->SetFrameSlot(output_offset, 0);
+ output_offset -= kPointerSize;
+ }
+
+ // Translate the incoming parameters. This may overwrite some of the
+ // incoming argument slots we've just cleared.
+ int input_offset = input_frame_size - kPointerSize;
+ bool ok = true;
+ int limit = input_offset - (parameter_count * kPointerSize);
+ while (ok && input_offset > limit) {
+ ok = DoOsrTranslateCommand(&iterator, &input_offset);
+ }
+
+ // There are no translation commands for the caller's pc and fp, the
+ // context, and the function. Set them up explicitly.
+ for (int i = StandardFrameConstants::kCallerPCOffset;
+ ok && i >= StandardFrameConstants::kMarkerOffset;
+ i -= kPointerSize) {
+ uint32_t input_value = input_->GetFrameSlot(input_offset);
+ if (FLAG_trace_osr) {
+ const char* name = "UNKNOWN";
+ switch (i) {
+ case StandardFrameConstants::kCallerPCOffset:
+ name = "caller's pc";
+ break;
+ case StandardFrameConstants::kCallerFPOffset:
+ name = "fp";
+ break;
+ case StandardFrameConstants::kContextOffset:
+ name = "context";
+ break;
+ case StandardFrameConstants::kMarkerOffset:
+ name = "function";
+ break;
+ }
+ PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
+ output_offset,
+ input_value,
+ input_offset,
+ name);
+ }
+
+ output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
+ input_offset -= kPointerSize;
+ output_offset -= kPointerSize;
+ }
+
+ // Translate the rest of the frame.
+ while (ok && input_offset >= 0) {
+ ok = DoOsrTranslateCommand(&iterator, &input_offset);
+ }
+
+ // If translation of any command failed, continue using the input frame.
+ if (!ok) {
+ delete output_[0];
+ output_[0] = input_;
+ output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
+ } else {
+ // Set up the frame pointer and the context pointer.
+ output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
+ output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
+
+ unsigned pc_offset = data->OsrPcOffset()->value();
+ uint32_t pc = reinterpret_cast<uint32_t>(
+ optimized_code_->entry() + pc_offset);
+ output_[0]->SetPc(pc);
+ }
+ Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
+ output_[0]->SetContinuation(
+ reinterpret_cast<uint32_t>(continuation->entry()));
+
+ if (FLAG_trace_osr) {
+ PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
+ ok ? "finished" : "aborted",
+ reinterpret_cast<intptr_t>(function));
+ function->PrintName();
+ PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
+ }
}
+// This code is very similar to the ia32/arm code, but it depends on the MIPS
+// register names (fp, sp) and on how the frame is laid out.
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
int frame_index) {
- UNIMPLEMENTED();
-}
+ // Read the ast node id, function, and frame height for this output frame.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ USE(opcode);
+ ASSERT(Translation::FRAME == opcode);
+ int node_id = iterator->Next();
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating ");
+ function->PrintName();
+ PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+ }
+
+ // The 'fixed' part of the frame consists of the incoming parameters and
+ // the part described by JavaScriptFrameConstants.
+ unsigned fixed_frame_size = ComputeFixedSize(function);
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+ output_frame->SetKind(Code::FUNCTION);
+#endif
+
+ bool is_bottommost = (0 == frame_index);
+ bool is_topmost = (output_count_ - 1 == frame_index);
+ ASSERT(frame_index >= 0 && frame_index < output_count_);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address for the bottommost output frame can be computed from
+ // the input frame pointer and the output frame's height. For all
+ // subsequent output frames, it can be computed from the previous one's
+ // top address and the current frame's size.
+ uint32_t top_address;
+ if (is_bottommost) {
+ // 2 = context and function in the frame.
+ top_address =
+ input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
+ } else {
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ }
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = function->shared()->formal_parameter_count() + 1;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+ // There are no translation commands for the caller's pc and fp, the
+ // context, and the function. Synthesize their values and set them up
+ // explicitly.
+ //
+ // The caller's pc for the bottommost output frame is the same as in the
+ // input frame. For all subsequent output frames, it can be read from the
+ // previous one. This frame's pc can be computed from the non-optimized
+ // function code and AST id of the bailout.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t value;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetPc();
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The caller's frame pointer for the bottommost output frame is the same
+ // as in the input frame. For all subsequent output frames, it can be
+ // read from the previous one. Also compute and set this frame's frame
+ // pointer.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetFp();
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
+ output_frame->SetFp(fp_value);
+ if (is_topmost) {
+ output_frame->SetRegister(fp.code(), fp_value);
+ }
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // For the bottommost output frame the context can be taken from the input
+ // frame. For all subsequent output frames it can be taken from the function
+ // as long as we don't inline functions that need local contexts.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = reinterpret_cast<intptr_t>(function->context());
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ if (is_topmost) {
+ output_frame->SetRegister(cp.code(), value);
+ }
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The function was mentioned explicitly in the BEGIN_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<uint32_t>(function);
+ // The function for the bottommost output frame should also agree with the
+ // input frame.
+ ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Translate the rest of the frame.
+ for (unsigned i = 0; i < height; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ ASSERT(0 == output_offset);
+
+ // Compute this frame's PC, state, and continuation.
+ Code* non_optimized_code = function->shared()->code();
+ FixedArray* raw_data = non_optimized_code->deoptimization_data();
+ DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+ Address start = non_optimized_code->instruction_start();
+ unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+ unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+ uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
+ output_frame->SetPc(pc_value);
+
+ FullCodeGenerator::State state =
+ FullCodeGenerator::StateField::decode(pc_and_state);
+ output_frame->SetState(Smi::FromInt(state));
+
+ // Set the continuation for the topmost frame.
+ if (is_topmost && bailout_type_ != DEBUGGER) {
+ Builtins* builtins = isolate_->builtins();
+ Code* continuation = (bailout_type_ == EAGER)
+ ? builtins->builtin(Builtins::kNotifyDeoptimized)
+ : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
+ output_frame->SetContinuation(
+ reinterpret_cast<uint32_t>(continuation->entry()));
+ }
+}
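
Working top-down, the writer lays out the parameters first, then the caller's pc, the caller's fp (whose slot address becomes this frame's fp), the context, the function, and finally the translated locals. The fp derivation can be restated compactly (kPointerSize of 4 assumed; parameter_count includes the receiver):

#include <cstdint>

// Sketch: this frame's fp, as derived above. The caller-fp slot sits below
// the parameter slots and the caller's-pc slot.
const int kPointerSize = 4;

uint32_t OutputFrameFp(uint32_t top_address,
                       unsigned output_frame_size,
                       int parameter_count) {
  return top_address + output_frame_size -
         (parameter_count + 2) * kPointerSize;
}
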
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- UNIMPLEMENTED();
+ // Set the register values. The values are not important as there are no
+ // callee-saved registers in JavaScript frames, so all registers are
+ // spilled. Registers fp and sp are set to the correct values, though.
+
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
+ }
+ input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+ }
}
+#define __ masm()->
+
+
+// This code tries to stay close to the ia32 code so that any changes can be
+// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
- UNIMPLEMENTED();
+ GeneratePrologue();
+
+ Isolate* isolate = masm()->isolate();
+
+ CpuFeatures::Scope scope(FPU);
+ // Unlike on ARM we don't save all the registers, just the useful ones.
+ // For the rest, there are gaps on the stack, so the offsets remain the same.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+ RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+ const int kDoubleRegsSize =
+ kDoubleSize * FPURegister::kNumAllocatableRegisters;
+
+ // Save all FPU registers before messing with them.
+ __ Subu(sp, sp, Operand(kDoubleRegsSize));
+ for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
+ FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ sdc1(fpu_reg, MemOperand(sp, offset));
+ }
+
+ // Push saved_regs (needed to populate FrameDescription::registers_).
+ // Leave gaps for other registers.
+ __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
+ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+ if ((saved_regs & (1 << i)) != 0) {
+ __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ }
+ }
+
+ const int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+ // Get the bailout id from the stack.
+ __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
+
+ // Get the address of the location in the code object (a3), which is the
+ // return address for lazy deoptimization when available, and compute the
+ // fp-to-sp delta in register t0.
+ if (type() == EAGER) {
+ __ mov(a3, zero_reg);
+ // Correct one word for bailout id.
+ __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ } else if (type() == OSR) {
+ __ mov(a3, ra);
+ // Correct one word for bailout id.
+ __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ } else {
+ __ mov(a3, ra);
+ // Correct two words for bailout id and return address.
+ __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
+ }
+
+ __ Subu(t0, fp, t0);
+
+ // Allocate a new deoptimizer object.
+ // Pass four arguments in a0 to a3, and the fifth and sixth arguments on
+ // the stack.
+ __ PrepareCallCFunction(6, t1);
+ __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ li(a1, Operand(type())); // Bailout type.
+ // a2: bailout id already loaded.
+ // a3: code address or 0 already loaded.
+ __ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
+ __ li(t1, Operand(ExternalReference::isolate_address()));
+ __ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ }
+
+ // Preserve "deoptimizer" object in register v0 and get the input
+ // frame descriptor pointer to a1 (deoptimizer->input_);
+ // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
+ __ mov(a0, v0);
+ __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ ASSERT(Register::kNumRegisters == kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((saved_regs & (1 << i)) != 0) {
+ __ lw(a2, MemOperand(sp, i * kPointerSize));
+ __ sw(a2, MemOperand(a1, offset));
+ } else if (FLAG_debug_code) {
+ __ li(a2, kDebugZapValue);
+ __ sw(a2, MemOperand(a1, offset));
+ }
+ }
+
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ ldc1(f0, MemOperand(sp, src_offset));
+ __ sdc1(f0, MemOperand(a1, dst_offset));
+ }
+
+ // Remove the bailout id, the return address (if any), and the saved
+ // registers from the stack.
+ if (type() == EAGER || type() == OSR) {
+ __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ } else {
+ __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
+ }
+
+ // Compute a pointer to the unwinding limit in register a2; that is
+ // the first stack slot not part of the input frame.
+ __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+ __ Addu(a2, a2, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ __ bind(&pop_loop);
+ __ pop(t0);
+ __ sw(t0, MemOperand(a3, 0));
+ __ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
+ __ addiu(a3, a3, sizeof(uint32_t)); // In delay slot.
+
+ // Compute the output frame in the deoptimizer.
+ __ push(a0); // Preserve deoptimizer object across call.
+ // a0: deoptimizer object; a1: scratch.
+ __ PrepareCallCFunction(1, a1);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 1);
+ }
+ __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop;
+ // Outer loop state: a0 = current "FrameDescription** output_",
+ // a1 = one past the last FrameDescription**.
+ __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+ __ lw(a0, MemOperand(a0, Deoptimizer::output_offset())); // a0 is output_.
+ __ sll(a1, a1, kPointerSizeLog2); // Count to offset.
+ __ addu(a1, a0, a1); // a1 = one past the last FrameDescription**.
+ __ bind(&outer_push_loop);
+ // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+ __ lw(a2, MemOperand(a0, 0)); // output_[ix]
+ __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ bind(&inner_push_loop);
+ __ Subu(a3, a3, Operand(sizeof(uint32_t)));
+ __ Addu(t2, a2, Operand(a3));
+ __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
+ __ push(t3);
+ __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+ __ Addu(a0, a0, Operand(kPointerSize));
+ __ Branch(&outer_push_loop, lt, a0, Operand(a1));
+
+ // Push state, pc, and continuation from the last output frame.
+ if (type() != OSR) {
+ __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
+ __ push(t2);
+ }
+
+ __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
+ __ push(t2);
+ __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
+ __ push(t2);
+
+ // Technically, restoring 'at' should work unless zero_reg is also restored,
+ // but it's safer to check for this.
+ ASSERT(!(at.bit() & restored_regs));
+ // Restore the registers from the last output frame.
+ __ mov(at, a2);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ lw(ToRegister(i), MemOperand(at, offset));
+ }
+ }
+
+ // Set up the roots register.
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate);
+ __ li(roots, Operand(roots_array_start));
+
+ __ pop(at); // Get continuation, leave pc on stack.
+ __ pop(ra);
+ __ Jump(at);
+ __ stop("Unreachable.");
}
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- UNIMPLEMENTED();
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+
+ // Create a sequence of deoptimization entries. Note that any
+ // registers may still be live.
+
+ Label done;
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ if (type() != EAGER) {
+ // Emulate an ia32-like call by pushing the return address onto the stack.
+ __ push(ra);
+ }
+ __ li(at, Operand(i));
+ __ push(at);
+ __ Branch(&done);
+
+ // Pad the rest of the code.
+ while (table_entry_size_ > (masm()->pc_offset() - start)) {
+ __ nop();
+ }
+
+ ASSERT_EQ(table_entry_size_, masm()->pc_offset() - start);
+ }
+ __ bind(&done);
}
+#undef __
+
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/mips/full-codegen-mips.cc b/src/3rdparty/v8/src/mips/full-codegen-mips.cc
index b6bd407..5bb9b35 100644
--- a/src/3rdparty/v8/src/mips/full-codegen-mips.cc
+++ b/src/3rdparty/v8/src/mips/full-codegen-mips.cc
@@ -55,11 +55,6 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-static unsigned GetPropertyId(Property* property) {
- return property->id();
-}
-
-
// A patch site is a location in the code which it is possible to patch. This
// class has a number of methods to emit the code which is patchable and the
// method EmitPatchInfo to record a marker back to the patchable code. This
@@ -191,13 +186,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Possibly allocate a local context.
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
+ if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in a1.
__ push(a1);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+ FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -411,7 +405,7 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
void FullCodeGenerator::TestContext::Plug(Variable* var) const {
// For simplicity we always test the accumulator register.
codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -434,7 +428,7 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -469,7 +463,7 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -528,7 +522,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -595,7 +589,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -698,7 +692,7 @@ void FullCodeGenerator::SetVar(Variable* var,
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
bool should_normalize,
Label* if_true,
Label* if_false) {
@@ -709,13 +703,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
Label skip;
if (should_normalize) __ Branch(&skip);
-
- ForwardBailoutStack* current = forward_bailout_stack_;
- while (current != NULL) {
- PrepareForBailout(current->expr(), state);
- current = current->parent();
- }
-
+ PrepareForBailout(expr, TOS_REG);
if (should_normalize) {
__ LoadRoot(t0, Heap::kTrueValueRootIndex);
Split(eq, a0, Operand(t0), if_true, if_false, NULL);
@@ -1200,9 +1188,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ bind(&fast);
}
- __ lw(a0, var->is_qml_global() ? QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF || var->is_qml_global())
+ RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
@@ -1287,10 +1275,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in a2 and the global
// object (receiver) in a0.
- __ lw(a0, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, var->is_qml_global()?RelocInfo::CODE_TARGET:RelocInfo::CODE_TARGET_CONTEXT);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(v0);
break;
}
@@ -1749,7 +1737,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ li(a2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name a0 and a2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1758,7 +1746,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1938,7 +1926,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Global var, const, or let.
__ mov(a0, result_register());
__ li(a2, Operand(var->name()));
- __ lw(a1, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ lw(a1, GlobalObjectOperand());
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
@@ -2226,8 +2214,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
- int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));
@@ -2247,14 +2234,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
__ li(a1, Operand(Smi::FromInt(strict_mode)));
__ push(a1);
-
- // Push the qml mode flag.
- __ li(a1, Operand(Smi::FromInt(is_qml_mode())));
- __ push(a1);
-
- __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
- ? Runtime::kResolvePossiblyDirectEvalNoLookup
- : Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
}
@@ -2288,28 +2268,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForStackValue(args->at(i));
}
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- Label done;
- Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
- Label slow;
- EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
- // Push the function and resolve eval.
- __ push(v0);
- EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
- __ jmp(&done);
- __ bind(&slow);
- }
-
// Push a copy of the function (found below the arguments) and
// resolve eval.
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ push(a1);
- EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
- __ bind(&done);
+ EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in v0 (function) and
// v1 (receiver). Touch up the stack with the right values.
@@ -2326,9 +2289,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
context()->DropAndPlug(1, v0);
} else if (proxy != NULL && proxy->var()->IsUnallocated()) {
// Push global object as receiver for the call IC.
- __ lw(a0, proxy->var()->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ lw(a0, GlobalObjectOperand());
__ push(a0);
- EmitCallWithIC(expr, proxy->name(), proxy->var()->is_qml_global()?RelocInfo::CODE_TARGET:RelocInfo::CODE_TARGET_CONTEXT);
+ EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
// Call to a lookup slot (dynamically introduced variable).
Label slow, done;
@@ -2432,7 +2395,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2444,7 +2408,7 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ And(t0, v0, Operand(kSmiTagMask));
Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
@@ -2452,7 +2416,8 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2464,7 +2429,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ And(at, v0, Operand(kSmiTagMask | 0x80000000));
Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
@@ -2472,7 +2437,8 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2494,7 +2460,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ Branch(if_false, ne, at, Operand(zero_reg));
__ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
__ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
if_true, if_false, fall_through);
@@ -2502,7 +2468,8 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2516,7 +2483,7 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
if_true, if_false, fall_through);
@@ -2524,7 +2491,8 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2540,7 +2508,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
__ And(at, a1, Operand(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2548,8 +2516,8 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
-
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2625,12 +2593,13 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
__ jmp(if_true);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2644,7 +2613,7 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a2);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
__ Branch(if_false);
@@ -2652,7 +2621,8 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2666,7 +2636,7 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, a1, Operand(JS_ARRAY_TYPE),
if_true, if_false, fall_through);
@@ -2674,7 +2644,8 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2688,15 +2659,15 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -2718,7 +2689,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
__ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
if_true, if_false, fall_through);
@@ -2726,7 +2697,8 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
@@ -2741,14 +2713,15 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
&if_true, &if_false, &fall_through);
__ pop(a1);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
// ArgumentsAccessStub expects the key in a1 and the formal
@@ -2762,9 +2735,8 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label exit;
// Get the number of formal parameters.
__ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -2784,7 +2756,8 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
Label done, null, function, non_function_constructor;
@@ -2843,7 +2816,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// Conditionally generate a log call.
// Args:
// 0 (literal string): The type of logging (corresponds to the flags).
@@ -2851,6 +2824,7 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
// 1 (string): Format string. Access the string at argument index 2
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
@@ -2864,9 +2838,8 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
@@ -2917,9 +2890,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2929,9 +2903,10 @@ void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2942,7 +2917,8 @@ void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -2961,8 +2937,9 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2972,7 +2949,8 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0)); // Load the object.
@@ -3000,7 +2978,8 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
// Load the argument on the stack and call the stub.
@@ -3012,7 +2991,8 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3030,7 +3010,8 @@ void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
@@ -3078,7 +3059,8 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
@@ -3128,9 +3110,9 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
-
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3140,7 +3122,8 @@ void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
VisitForStackValue(args->at(0));
@@ -3152,10 +3135,11 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
@@ -3164,10 +3148,11 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
@@ -3176,10 +3161,11 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
@@ -3188,8 +3174,9 @@ void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
// Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3197,7 +3184,8 @@ void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
int arg_count = args->length() - 2; // 2 ~ receiver and function.
@@ -3216,8 +3204,9 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3227,7 +3216,8 @@ void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3324,7 +3314,8 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3377,7 +3368,8 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
Register right = v0;
@@ -3414,7 +3406,8 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
@@ -3427,14 +3420,15 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
__ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset));
__ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3449,12 +3443,12 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop;
-
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(0));
@@ -3749,7 +3743,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// but "delete this" is allowed.
ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
if (var->IsUnallocated()) {
- __ lw(a2, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ lw(a2, GlobalObjectOperand());
__ li(a1, Operand(var->name()));
__ li(a0, Operand(Smi::FromInt(kNonStrictMode)));
__ Push(a2, a1, a0);
@@ -3790,18 +3784,35 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Unary NOT has no side effects so it's only necessary to visit the
// subexpression. Match the optimizing compiler by not branching.
VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
} else {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
-
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ if (context()->IsStackValue()) __ push(v0);
+ __ jmp(&done);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ if (context()->IsStackValue()) __ push(v0);
+ __ bind(&done);
}
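+      // A sketch of the value-context lowering above (illustrative only,
+      // not part of the patch); the control targets are swapped to realize
+      // the negation:
+      //
+      //   evaluate expr: true -> materialize_false, false -> materialize_true
+      //   materialize_true:   result = true;  goto done;   // expr was false
+      //   materialize_false:  result = false;
+      //   done: ...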
break;
}
@@ -4038,7 +4049,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
- __ lw(a0, proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(proxy->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
@@ -4063,11 +4074,12 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
context()->Plug(v0);
} else {
// This expression cannot throw a reference error at the top level.
- VisitInCurrentContext(expr);
+ VisitInDuplicateContext(expr);
}
}
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
Handle<String> check) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -4077,9 +4089,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
&if_true, &if_false, &fall_through);
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(expr);
+ VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (check->Equals(isolate()->heap()->number_symbol())) {
__ JumpIfSmi(v0, if_true);
@@ -4165,7 +4177,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(t0, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
break;
@@ -4174,7 +4186,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
// The stub returns 0 for true.
Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
break;
@@ -4222,7 +4234,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Handle<Code> ic = CompareIC::GetUninitialized(op);
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
}
@@ -4244,7 +4256,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex;
diff --git a/src/3rdparty/v8/src/mips/lithium-codegen-mips.cc b/src/3rdparty/v8/src/mips/lithium-codegen-mips.cc
new file mode 100644
index 0000000..e640b53
--- /dev/null
+++ b/src/3rdparty/v8/src/mips/lithium-codegen-mips.cc
@@ -0,0 +1,4628 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "mips/lithium-codegen-mips.h"
+#include "mips/lithium-gap-resolver-mips.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator : public CallWrapper {
+ public:
+ SafepointGenerator(LCodeGen* codegen,
+ LPointerMap* pointers,
+ int deoptimization_index)
+ : codegen_(codegen),
+ pointers_(pointers),
+ deoptimization_index_(deoptimization_index) { }
+ virtual ~SafepointGenerator() { }
+
+ virtual void BeforeCall(int call_size) const {
+ ASSERT(call_size >= 0);
+ // Ensure that we have enough space after the previous safepoint position
+ // for the generated code there.
+ int call_end = codegen_->masm()->pc_offset() + call_size;
+ int prev_jump_end =
+ codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
+ if (call_end < prev_jump_end) {
+ int padding_size = prev_jump_end - call_end;
+ ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+ while (padding_size > 0) {
+ codegen_->masm()->nop();
+ padding_size -= Assembler::kInstrSize;
+ }
+ }
+ }
+
+ virtual void AfterCall() const {
+ codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+ }
+
+ private:
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ int deoptimization_index_;
+};
+
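+// A sketch (not part of the patch) of the padding invariant BeforeCall
+// maintains: each call must end at least Deoptimizer::patch_size() bytes
+// past the end of the previous safepoint, leaving room for the deoptimizer
+// to patch in a lazy-deopt call there without clobbering this call:
+//
+//   int call_end = pc_offset() + call_size;
+//   int safe_end = last_safepoint_end + Deoptimizer::patch_size();
+//   for (int pad = safe_end - call_end; pad > 0; pad -= kInstrSize) nop();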
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+ HPhase phase("Code generation", chunk());
+ ASSERT(is_unused());
+ status_ = GENERATING;
+ CpuFeatures::Scope scope(FPU);
+
+ CodeStub::GenerateFPStubs();
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // NONE indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::NONE);
+
+ return GeneratePrologue() &&
+ GenerateBody() &&
+ GenerateDeferredCode() &&
+ GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+ ASSERT(is_done());
+ code->set_stack_slots(GetStackSlotCount());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ PopulateDeoptimizationData(code);
+ Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+}
+
+
+void LCodeGen::Abort(const char* format, ...) {
+ if (FLAG_trace_bailout) {
+ SmartArrayPointer<char> name(
+ info()->shared_info()->DebugName()->ToCString());
+ PrintF("Aborting LCodeGen in @\"%s\": ", *name);
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+ PrintF("\n");
+ }
+ status_ = ABORTED;
+}
+
+
+void LCodeGen::Comment(const char* format, ...) {
+ if (!FLAG_code_comments) return;
+ char buffer[4 * KB];
+ StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+ va_list arguments;
+ va_start(arguments, format);
+ builder.AddFormattedList(format, arguments);
+ va_end(arguments);
+
+ // Copy the string before recording it in the assembler to avoid
+ // issues when the stack allocated buffer goes out of scope.
+ size_t length = builder.position();
+ Vector<char> copy = Vector<char>::New(length + 1);
+ memcpy(copy.start(), builder.Finalize(), copy.length());
+ masm()->RecordComment(copy.start());
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+ ASSERT(is_generating());
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop_at");
+ }
+#endif
+
+ // a1: Callee's JS function.
+ // cp: Callee's context.
+ // fp: Caller's frame pointer.
+ // lr: Caller's pc.
+
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). t1 is zero for method calls and non-zero for
+ // function calls.
+ if (info_->is_strict_mode() || info_->is_native()) {
+ Label ok;
+ __ Branch(&ok, eq, t1, Operand(zero_reg));
+
+ int receiver_offset = scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ sw(a2, MemOperand(sp, receiver_offset));
+ __ bind(&ok);
+ }
+
+ __ Push(ra, fp, cp, a1);
+ __ Addu(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
+
+ // Reserve space for the stack slots needed by the code.
+ int slots = GetStackSlotCount();
+ if (slots > 0) {
+ if (FLAG_debug_code) {
+ __ li(a0, Operand(slots));
+ __ li(a2, Operand(kSlotsZapValue));
+ Label loop;
+ __ bind(&loop);
+ __ push(a2);
+ __ Subu(a0, a0, 1);
+ __ Branch(&loop, ne, a0, Operand(zero_reg));
+ } else {
+ __ Subu(sp, sp, Operand(slots * kPointerSize));
+ }
+ }
+
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ // Argument to NewContext is the function, which is in a1.
+ __ push(a1);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+ // Context is returned in both v0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ lw(a0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextOperand(cp, var->index());
+ __ sw(a0, target);
+ // Update the write barrier. This clobbers a3 and a0.
+ __ RecordWriteContextSlot(
+ cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
+ // Trace the call.
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateBody() {
+ ASSERT(is_generating());
+ bool emit_instructions = true;
+ for (current_instruction_ = 0;
+ !is_aborted() && current_instruction_ < instructions_->length();
+ current_instruction_++) {
+ LInstruction* instr = instructions_->at(current_instruction_);
+ if (instr->IsLabel()) {
+ LLabel* label = LLabel::cast(instr);
+ emit_instructions = !label->HasReplacement();
+ }
+
+ if (emit_instructions) {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ instr->CompileToNative(this);
+ }
+ }
+ return !is_aborted();
+}
+
+
+LInstruction* LCodeGen::GetNextInstruction() {
+ if (current_instruction_ < instructions_->length() - 1) {
+ return instructions_->at(current_instruction_ + 1);
+ } else {
+ return NULL;
+ }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+ ASSERT(is_generating());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+ LDeferredCode* code = deferred_[i];
+ __ bind(code->entry());
+ Comment(";;; Deferred code @%d: %s.",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ code->Generate();
+ __ jmp(code->exit());
+ }
+
+ // Pad code to ensure that the last piece of deferred code has
+ // room for lazy bailout.
+ while ((masm()->pc_offset() - LastSafepointEnd())
+ < Deoptimizer::patch_size()) {
+ __ nop();
+ }
+ }
+ // Deferred code is the last part of the instruction sequence. Mark
+ // the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeoptJumpTable() {
+ // TODO(plind): it is not clear that this will be an advantage for MIPS.
+ // Skipping it for now. Raised issue #100 for this.
+ Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
+ return false;
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+ ASSERT(is_done());
+ safepoints_.Emit(masm(), GetStackSlotCount());
+ return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+ return Register::FromAllocationIndex(index);
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
+ return DoubleRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+ ASSERT(op->IsRegister());
+ return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+ if (op->IsRegister()) {
+ return ToRegister(op->index());
+ } else if (op->IsConstantOperand()) {
+ __ li(scratch, ToOperand(op));
+ return scratch;
+ } else if (op->IsStackSlot() || op->IsArgument()) {
+ __ lw(scratch, ToMemOperand(op));
+ return scratch;
+ }
+ UNREACHABLE();
+ return scratch;
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+ ASSERT(op->IsDoubleRegister());
+ return ToDoubleRegister(op->index());
+}
+
+
+DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+ FloatRegister flt_scratch,
+ DoubleRegister dbl_scratch) {
+ if (op->IsDoubleRegister()) {
+ return ToDoubleRegister(op->index());
+ } else if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ __ li(at, Operand(static_cast<int32_t>(literal->Number())));
+ __ mtc1(at, flt_scratch);
+ __ cvt_d_w(dbl_scratch, flt_scratch);
+ return dbl_scratch;
+ } else if (r.IsDouble()) {
+ Abort("unsupported double immediate");
+ } else if (r.IsTagged()) {
+ Abort("unsupported tagged immediate");
+ }
+ } else if (op->IsStackSlot() || op->IsArgument()) {
+ MemOperand mem_op = ToMemOperand(op);
+ __ ldc1(dbl_scratch, mem_op);
+ return dbl_scratch;
+ }
+ UNREACHABLE();
+ return dbl_scratch;
+}
+
+
+int LCodeGen::ToInteger32(LConstantOperand* op) const {
+ Handle<Object> value = chunk_->LookupLiteral(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
+ ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
+ value->Number());
+ return static_cast<int32_t>(value->Number());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ Handle<Object> value = chunk_->LookupLiteral(op);
+ return value->Number();
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+ if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ return Operand(static_cast<int32_t>(literal->Number()));
+ } else if (r.IsDouble()) {
+ Abort("ToOperand Unsupported double immediate.");
+ }
+ ASSERT(r.IsTagged());
+ return Operand(literal);
+ } else if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ } else if (op->IsDoubleRegister()) {
+ Abort("ToOperand IsDoubleRegister unimplemented");
+ return Operand(0);
+ }
+ // Stack slots not implemented, use ToMemOperand instead.
+ UNREACHABLE();
+ return Operand(0);
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+ ASSERT(!op->IsRegister());
+ ASSERT(!op->IsDoubleRegister());
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ int index = op->index();
+ if (index >= 0) {
+ // Local or spill slot. Skip the frame pointer, function, and
+ // context in the fixed part of the frame.
+ return MemOperand(fp, -(index + 3) * kPointerSize);
+ } else {
+ // Incoming parameter. Skip the return address.
+ return MemOperand(fp, -(index - 1) * kPointerSize);
+ }
+}
+
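+// A worked example of ToMemOperand's slot addressing (sketch, not part of
+// the patch), assuming this port's 32-bit kPointerSize of 4:
+//
+//   index  0 (spill slot) -> MemOperand(fp, -12)  // below cp and function
+//   index  1 (spill slot) -> MemOperand(fp, -16)
+//   index -1 (parameter)  -> MemOperand(fp, +8)   // above the saved ra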
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+ ASSERT(op->IsDoubleStackSlot());
+ int index = op->index();
+ if (index >= 0) {
+ // Local or spill slot. Skip the frame pointer, function, context,
+ // and the first word of the double in the fixed part of the frame.
+ return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
+ } else {
+ // Incoming parameter. Skip the return address and the first word of
+ // the double.
+ return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
+ }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->values()->length();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
+ WriteTranslation(environment->outer(), translation);
+ int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ translation->BeginFrame(environment->ast_id(), closure_id, height);
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+ // spilled_registers_ and spilled_double_registers_ are either
+ // both NULL or both set.
+ if (environment->spilled_registers() != NULL && value != NULL) {
+ if (value->IsRegister() &&
+ environment->spilled_registers()[value->index()] != NULL) {
+ translation->MarkDuplicate();
+ AddToTranslation(translation,
+ environment->spilled_registers()[value->index()],
+ environment->HasTaggedValueAt(i));
+ } else if (
+ value->IsDoubleRegister() &&
+ environment->spilled_double_registers()[value->index()] != NULL) {
+ translation->MarkDuplicate();
+ AddToTranslation(
+ translation,
+ environment->spilled_double_registers()[value->index()],
+ false);
+ }
+ }
+
+ AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ }
+}
+
+
+void LCodeGen::AddToTranslation(Translation* translation,
+ LOperand* op,
+ bool is_tagged) {
+ if (op == NULL) {
+ // TODO(twuerthinger): Introduce marker operands to indicate that this value
+ // is not present and must be reconstructed from the deoptimizer. Currently
+ // this is only used for the arguments object.
+ translation->StoreArgumentsObject();
+ } else if (op->IsStackSlot()) {
+ if (is_tagged) {
+ translation->StoreStackSlot(op->index());
+ } else {
+ translation->StoreInt32StackSlot(op->index());
+ }
+ } else if (op->IsDoubleStackSlot()) {
+ translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsArgument()) {
+ ASSERT(is_tagged);
+ int src_index = GetStackSlotCount() + op->index();
+ translation->StoreStackSlot(src_index);
+ } else if (op->IsRegister()) {
+ Register reg = ToRegister(op);
+ if (is_tagged) {
+ translation->StoreRegister(reg);
+ } else {
+ translation->StoreInt32Register(reg);
+ }
+ } else if (op->IsDoubleRegister()) {
+ DoubleRegister reg = ToDoubleRegister(op);
+ translation->StoreDoubleRegister(reg);
+ } else if (op->IsConstantOperand()) {
+ Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(literal);
+ translation->StoreLiteral(src_index);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ ASSERT(instr != NULL);
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+ __ Call(code, mode);
+ RegisterLazyDeoptimization(instr, safepoint_mode);
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr) {
+ ASSERT(instr != NULL);
+ LPointerMap* pointers = instr->pointer_map();
+ ASSERT(pointers != NULL);
+ RecordPosition(pointers->position());
+
+ __ CallRuntime(function, num_arguments);
+ RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr) {
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ // Create the environment to bail out to. If the call has side effects,
+ // execution has to continue after the call; otherwise execution can
+ // resume from a previous bailout point, repeating the call.
+ LEnvironment* deoptimization_environment;
+ if (instr->HasDeoptimizationEnvironment()) {
+ deoptimization_environment = instr->deoptimization_environment();
+ } else {
+ deoptimization_environment = instr->environment();
+ }
+
+ RegisterEnvironmentForDeoptimization(deoptimization_environment);
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(),
+ deoptimization_environment->deoptimization_index());
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(),
+ 0,
+ deoptimization_environment->deoptimization_index());
+ }
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+ if (!environment->HasBeenRegistered()) {
+ // Physical stack frame layout:
+ // -x ............. -4 0 ..................................... y
+ // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+ // Layout of the environment:
+ // 0 ..................................................... size-1
+ // [parameters] [locals] [expression stack including arguments]
+
+ // Layout of the translation:
+ // 0 ........................................................ size - 1 + 4
+ // [expression stack including arguments] [locals] [4 words] [parameters]
+ // |>------------ translation_size ------------<|
+
+ int frame_count = 0;
+ for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+ ++frame_count;
+ }
+ Translation translation(&translations_, frame_count);
+ WriteTranslation(environment, &translation);
+ int deoptimization_index = deoptimizations_.length();
+ environment->Register(deoptimization_index, translation.index());
+ deoptimizations_.Add(environment);
+ }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Register src1,
+ const Operand& src2) {
+ RegisterEnvironmentForDeoptimization(environment);
+ ASSERT(environment->HasBeenRegistered());
+ int id = environment->deoptimization_index();
+ Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(entry != NULL);
+ if (entry == NULL) {
+ Abort("bailout was not prepared");
+ return;
+ }
+
+ ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
+
+ if (FLAG_deopt_every_n_times == 1 &&
+ info_->shared_info()->opt_count() == id) {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ return;
+ }
+
+ if (FLAG_trap_on_deopt) {
+ Label skip;
+ if (cc != al) {
+ __ Branch(&skip, NegateCondition(cc), src1, src2);
+ }
+ __ stop("trap_on_deopt");
+ __ bind(&skip);
+ }
+
+ if (cc == al) {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ // TODO(plind): The ARM port is a little different here, due to its
+ // deopt jump table, which is not used for MIPS yet.
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+ }
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+ int length = deoptimizations_.length();
+ if (length == 0) return;
+ ASSERT(FLAG_deopt);
+ Handle<DeoptimizationInputData> data =
+ factory()->NewDeoptimizationInputData(length, TENURED);
+
+ Handle<ByteArray> translations = translations_.CreateByteArray();
+ data->SetTranslationByteArray(*translations);
+ data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+
+ Handle<FixedArray> literals =
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+ // Populate the deoptimization entries.
+ for (int i = 0; i < length; i++) {
+ LEnvironment* env = deoptimizations_[i];
+ data->SetAstId(i, Smi::FromInt(env->ast_id()));
+ data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+ data->SetArgumentsStackHeight(i,
+ Smi::FromInt(env->arguments_stack_height()));
+ }
+ code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+ int result = deoptimization_literals_.length();
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ }
+ deoptimization_literals_.Add(literal);
+ return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+ ASSERT(deoptimization_literals_.length() == 0);
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures =
+ chunk()->inlined_closures();
+
+ for (int i = 0, length = inlined_closures->length();
+ i < length;
+ i++) {
+ DefineDeoptimizationLiteral(inlined_closures->at(i));
+ }
+
+ inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepoint(
+ LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ int deoptimization_index) {
+ ASSERT(expected_safepoint_kind_ == kind);
+
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+ kind, arguments, deoptimization_index);
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer));
+ }
+ }
+ if (kind & Safepoint::kWithRegisters) {
+ // Register cp always contains a pointer to the context.
+ safepoint.DefinePointerRegister(cp);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ int deoptimization_index) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
+}
+
+
+void LCodeGen::RecordSafepoint(int deoptimization_index) {
+ LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ RecordSafepoint(&empty_pointers, deoptimization_index);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ int deoptimization_index) {
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
+ deoptimization_index);
+}
+
+
+void LCodeGen::RecordSafepointWithRegistersAndDoubles(
+ LPointerMap* pointers,
+ int arguments,
+ int deoptimization_index) {
+ RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
+ deoptimization_index);
+}
+
+
+void LCodeGen::RecordPosition(int position) {
+ if (position == RelocInfo::kNoPosition) return;
+ masm()->positions_recorder()->RecordPosition(position);
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+ if (label->is_loop_header()) {
+ Comment(";;; B%d - LOOP entry", label->block_id());
+ } else {
+ Comment(";;; B%d", label->block_id());
+ }
+ __ bind(label->label());
+ current_block_ = label->block_id();
+ DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+ resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+ for (int i = LGap::FIRST_INNER_POSITION;
+ i <= LGap::LAST_INNER_POSITION;
+ i++) {
+ LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+ LParallelMove* move = gap->GetParallelMove(inner_pos);
+ if (move != NULL) DoParallelMove(move);
+ }
+
+ LInstruction* next = GetNextInstruction();
+ if (next != NULL && next->IsLazyBailout()) {
+ int pc = masm()->pc_offset();
+ safepoints_.SetPcAfterGap(pc);
+ }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+ DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+ // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+ switch (instr->hydrogen()->major_key()) {
+ case CodeStub::RegExpConstructResult: {
+ RegExpConstructResultStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::RegExpExec: {
+ RegExpExecStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::SubString: {
+ SubStringStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::NumberToString: {
+ NumberToStringStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringAdd: {
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringCompare: {
+ StringCompareStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::TranscendentalCache: {
+ __ lw(a0, MemOperand(sp, 0));
+ TranscendentalCacheStub stub(instr->transcendental_type(),
+ TranscendentalCacheStub::TAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+ // Nothing to do.
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ Register scratch = scratch0();
+ const Register left = ToRegister(instr->InputAt(0));
+ const Register result = ToRegister(instr->result());
+
+ // p2constant holds the right side value if it's a power of 2 constant.
+ // In other cases it is 0.
+ int32_t p2constant = 0;
+
+ if (instr->InputAt(1)->IsConstantOperand()) {
+ p2constant = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
+ if (!IsPowerOf2(abs(p2constant))) {
+ p2constant = 0;
+ }
+ // Result always takes the sign of the dividend (left).
+ p2constant = abs(p2constant);
+ }
+
+ // div runs in the background while we check for special cases.
+ Register right = EmitLoadRegister(instr->InputAt(1), scratch);
+ __ div(left, right);
+
+ // Check for x % 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+ }
+
+ Label skip_div, do_div;
+ if (p2constant != 0) {
+ // Fall back to the result of the div instruction if we could have sign
+ // problems.
+ __ Branch(&do_div, lt, left, Operand(zero_reg));
+ // Modulo by masking.
+ __ And(scratch, left, p2constant - 1);
+ __ Branch(&skip_div);
+ }
+
+ __ bind(&do_div);
+ __ mfhi(scratch);
+ __ bind(&skip_div);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Result always takes the sign of the dividend (left).
+ Label done;
+ __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
+ __ mov(result, scratch);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ __ bind(&done);
+ } else {
+ __ Move(result, scratch);
+ }
+}
+
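+// The masking fast path above relies on the identity x % p == x & (p - 1)
+// for x >= 0 and p a power of two. A C sketch of what the generated code
+// computes (illustrative only, not part of the patch):
+//
+//   int32_t FastMod(int32_t left, int32_t p2) {  // p2 = |constant|, 2^k
+//     if (left >= 0) return left & (p2 - 1);     // modulo by masking
+//     return left % p2;                          // sign matters: use div
+//   }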
+
+void LCodeGen::DoDivI(LDivI* instr) {
+ const Register left = ToRegister(instr->InputAt(0));
+ const Register right = ToRegister(instr->InputAt(1));
+ const Register result = ToRegister(instr->result());
+
+ // On MIPS div is asynchronous: it runs in the background while we
+ // check for special cases.
+ __ div(left, right);
+
+ // Check for x / 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label left_not_zero;
+ __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
+ __ bind(&left_not_zero);
+ }
+
+ // Check for (-kMinInt / -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ Label left_not_min_int;
+ __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
+ __ bind(&left_not_min_int);
+ }
+
+ __ mfhi(result);
+ DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
+ __ mflo(result);
+}
+
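+// DoDivI above only supports exact division: after `div`, the remainder is
+// read from hi and any non-zero remainder deoptimizes. The guarded cases
+// are the ones plain int32 division gets wrong for JS (a sketch of the
+// semantics, not part of the patch):
+//
+//   x / 0        -> deopt (JS yields Infinity or NaN, not an int32)
+//   0 / -x       -> deopt when -0 must be preserved
+//   kMinInt / -1 -> deopt (the true result 2^31 overflows int32)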
+
+void LCodeGen::DoMulI(LMulI* instr) {
+ Register scratch = scratch0();
+ Register result = ToRegister(instr->result());
+ // Note that result may alias left.
+ Register left = ToRegister(instr->InputAt(0));
+ LOperand* right_op = instr->InputAt(1);
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (right_op->IsConstantOperand() && !can_overflow) {
+ // Use optimized code for specific constants.
+ int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+ if (bailout_on_minus_zero && (constant < 0)) {
+ // The case of a zero constant is handled separately below.
+ // If the constant is negative and left is zero, the result should be -0.
+ DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+ }
+
+ switch (constant) {
+ case -1:
+ __ Subu(result, zero_reg, left);
+ break;
+ case 0:
+ if (bailout_on_minus_zero) {
+ // If left is strictly negative and the constant is zero, the
+ // result is -0. Deoptimize if required, otherwise return 0.
+ DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
+ }
+ __ mov(result, zero_reg);
+ break;
+ case 1:
+ // Nothing to do.
+ __ Move(result, left);
+ break;
+ default:
+ // Multiplying by powers of two and powers of two plus or minus
+ // one can be done faster with shifted operands.
+ // For other constants we emit standard code.
+ int32_t mask = constant >> 31;
+ uint32_t constant_abs = (constant + mask) ^ mask;
+
+ if (IsPowerOf2(constant_abs) ||
+ IsPowerOf2(constant_abs - 1) ||
+ IsPowerOf2(constant_abs + 1)) {
+ if (IsPowerOf2(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ sll(result, left, shift);
+ } else if (IsPowerOf2(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ sll(result, left, shift);
+ __ Addu(result, result, left);
+ } else if (IsPowerOf2(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ sll(result, left, shift);
+ __ Subu(result, result, left);
+ }
+
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) {
+ __ Subu(result, zero_reg, result);
+ }
+
+ } else {
+ // Generate standard code.
+ __ li(at, constant);
+ __ mul(result, left, at);
+ }
+ }
+
+ } else {
+ Register right = EmitLoadRegister(right_op, scratch);
+ if (bailout_on_minus_zero) {
+ __ Or(ToRegister(instr->TempAt(0)), left, right);
+ }
+
+ if (can_overflow) {
+ // hi:lo = left * right.
+ __ mult(left, right);
+ __ mfhi(scratch);
+ __ mflo(result);
+ __ sra(at, result, 31);
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+ } else {
+ __ mul(result, left, right);
+ }
+
+ if (bailout_on_minus_zero) {
+ // Bail out if the result is supposed to be negative zero.
+ Label done;
+ __ Branch(&done, ne, result, Operand(zero_reg));
+ DeoptimizeIf(lt,
+ instr->environment(),
+ ToRegister(instr->TempAt(0)),
+ Operand(zero_reg));
+ __ bind(&done);
+ }
+ }
+}
+
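+// Strength reduction used above for constant multipliers (sketch, not part
+// of the patch): when |c|, |c| - 1, or |c| + 1 is a power of two, the
+// multiply becomes a shift plus at most one add/sub, with a final negation
+// restoring the sign. For example:
+//
+//   x * 8  ->  x << 3
+//   x * 5  ->  (x << 2) + x          // |c| - 1 == 4 is a power of two
+//   x * -7 ->  0 - ((x << 3) - x)    // |c| + 1 == 8 is a power of two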
+
+void LCodeGen::DoBitI(LBitI* instr) {
+ LOperand* left_op = instr->InputAt(0);
+ LOperand* right_op = instr->InputAt(1);
+ ASSERT(left_op->IsRegister());
+ Register left = ToRegister(left_op);
+ Register result = ToRegister(instr->result());
+ Operand right(no_reg);
+
+ if (right_op->IsStackSlot() || right_op->IsArgument()) {
+ right = Operand(EmitLoadRegister(right_op, at));
+ } else {
+ ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
+ right = ToOperand(right_op);
+ }
+
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ And(result, left, right);
+ break;
+ case Token::BIT_OR:
+ __ Or(result, left, right);
+ break;
+ case Token::BIT_XOR:
+ __ Xor(result, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+ // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+ // result may alias either of them.
+ LOperand* right_op = instr->InputAt(1);
+ Register left = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ if (right_op->IsRegister()) {
+ // No need to mask the right operand on MIPS; masking is built into
+ // the variable shift instructions.
+ switch (instr->op()) {
+ case Token::SAR:
+ __ srav(result, left, ToRegister(right_op));
+ break;
+ case Token::SHR:
+ __ srlv(result, left, ToRegister(right_op));
+ if (instr->can_deopt()) {
+ DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+ }
+ break;
+ case Token::SHL:
+ __ sllv(result, left, ToRegister(right_op));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ // Mask the right_op operand.
+ int value = ToInteger32(LConstantOperand::cast(right_op));
+ uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+ switch (instr->op()) {
+ case Token::SAR:
+ if (shift_count != 0) {
+ __ sra(result, left, shift_count);
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ case Token::SHR:
+ if (shift_count != 0) {
+ __ srl(result, left, shift_count);
+ } else {
+ if (instr->can_deopt()) {
+ __ And(at, left, Operand(0x80000000));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+ __ Move(result, left);
+ }
+ break;
+ case Token::SHL:
+ if (shift_count != 0) {
+ __ sll(result, left, shift_count);
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
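+// Why SHR may deoptimize (sketch, not part of the patch): Crankshaft keeps
+// values as signed int32, but the JS >>> operator is unsigned, so a result
+// with the top bit set is not representable. For example:
+//
+//   0x80000000 >>> 0 == 2147483648   // > kMaxInt, so the code deopts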
+
+void LCodeGen::DoSubI(LSubI* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ LOperand* result = instr->result();
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+ if (!can_overflow) {
+ if (right->IsStackSlot() || right->IsArgument()) {
+ Register right_reg = EmitLoadRegister(right, at);
+ __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
+ } else {
+ ASSERT(right->IsRegister() || right->IsConstantOperand());
+ __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
+ }
+ } else { // can_overflow.
+ Register overflow = scratch0();
+ Register scratch = scratch1();
+ if (right->IsStackSlot() ||
+ right->IsArgument() ||
+ right->IsConstantOperand()) {
+ Register right_reg = EmitLoadRegister(right, scratch);
+ __ SubuAndCheckForOverflow(ToRegister(result),
+ ToRegister(left),
+ right_reg,
+ overflow); // Reg at also used as scratch.
+ } else {
+ ASSERT(right->IsRegister());
+ // Due to overflow check macros not supporting constant operands,
+ // handling the IsConstantOperand case was moved to prev if clause.
+ __ SubuAndCheckForOverflow(ToRegister(result),
+ ToRegister(left),
+ ToRegister(right),
+ overflow); // Reg at also used as scratch.
+ }
+ DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+ }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+ ASSERT(instr->result()->IsRegister());
+ __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+ ASSERT(instr->result()->IsDoubleRegister());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ double v = instr->value();
+ __ Move(result, v);
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ ASSERT(instr->result()->IsRegister());
+ __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
+}
+
+
+void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
+}
+
+
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+
+ // Load map into |result|.
+ __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
+}
+
+
+void LCodeGen::DoValueOf(LValueOf* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->TempAt(0));
+ Label done;
+
+ // If the object is a smi return the object.
+ __ Move(result, input);
+ __ JumpIfSmi(input, &done);
+
+ // If the object is not a value type, return the object.
+ __ GetObjectType(input, map, map);
+ __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
+ __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoBitNotI(LBitNotI* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ __ Nor(result, zero_reg, Operand(input));
+}
+
+
+void LCodeGen::DoThrow(LThrow* instr) {
+ Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
+ __ push(input_reg);
+ CallRuntime(Runtime::kThrow, 1, instr);
+
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ LOperand* result = instr->result();
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+ if (!can_overflow) {
+ if (right->IsStackSlot() || right->IsArgument()) {
+ Register right_reg = EmitLoadRegister(right, at);
+ __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
+ } else {
+ ASSERT(right->IsRegister() || right->IsConstantOperand());
+ __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
+ }
+ } else { // can_overflow.
+ Register overflow = scratch0();
+ Register scratch = scratch1();
+ if (right->IsStackSlot() ||
+ right->IsArgument() ||
+ right->IsConstantOperand()) {
+ Register right_reg = EmitLoadRegister(right, scratch);
+ __ AdduAndCheckForOverflow(ToRegister(result),
+ ToRegister(left),
+ right_reg,
+ overflow); // Reg at also used as scratch.
+ } else {
+ ASSERT(right->IsRegister());
+ // Due to overflow check macros not supporting constant operands,
+ // handling the IsConstantOperand case was moved to prev if clause.
+ __ AdduAndCheckForOverflow(ToRegister(result),
+ ToRegister(left),
+ ToRegister(right),
+ overflow); // Reg at also used as scratch.
+ }
+ DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+ }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ switch (instr->op()) {
+ case Token::ADD:
+ __ add_d(result, left, right);
+ break;
+ case Token::SUB:
+ __ sub_d(result, left, right);
+ break;
+ case Token::MUL:
+ __ mul_d(result, left, right);
+ break;
+ case Token::DIV:
+ __ div_d(result, left, right);
+ break;
+ case Token::MOD: {
+ // Save a0-a3 on the stack.
+ RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+ __ MultiPush(saved_regs);
+
+ __ PrepareCallCFunction(0, 2, scratch0());
+ __ SetCallCDoubleArguments(left, right);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ 0, 2);
+ // Move the result in the double result register.
+ __ GetCFunctionDoubleResult(result);
+
+ // Restore saved register.
+ __ MultiPop(saved_regs);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->InputAt(0)).is(a1));
+ ASSERT(ToRegister(instr->InputAt(1)).is(a0));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ // Other architectures use a nop here, to signal that there is no inlined
+ // patchable code. MIPS does not need the nop, since our marker
+ // instruction (andi zero_reg) will never be used in normal code.
+}
+
+
+int LCodeGen::GetNextEmittedBlock(int block) {
+ for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
+ LLabel* label = chunk_->GetLabel(i);
+ if (!label->HasReplacement()) return i;
+ }
+ return -1;
+}
+
+
+void LCodeGen::EmitBranch(int left_block, int right_block,
+ Condition cc, Register src1, const Operand& src2) {
+ int next_block = GetNextEmittedBlock(current_block_);
+ right_block = chunk_->LookupDestination(right_block);
+ left_block = chunk_->LookupDestination(left_block);
+ if (right_block == left_block) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ __ Branch(chunk_->GetAssemblyLabel(right_block),
+ NegateCondition(cc), src1, src2);
+ } else if (right_block == next_block) {
+ __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
+ } else {
+ __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
+ __ Branch(chunk_->GetAssemblyLabel(right_block));
+ }
+}
+
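+// Branch layout chosen by EmitBranch above (sketch, not part of the
+// patch): when one successor is the next emitted block, only the other
+// edge needs an explicit branch and the hot path falls through:
+//
+//   if (left == next)       branch(right, !cc);      // fall into left
+//   else if (right == next) branch(left, cc);        // fall into right
+//   else                    { branch(left, cc); jump(right); }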
+
+void LCodeGen::EmitBranchF(int left_block, int right_block,
+ Condition cc, FPURegister src1, FPURegister src2) {
+ int next_block = GetNextEmittedBlock(current_block_);
+ right_block = chunk_->LookupDestination(right_block);
+ left_block = chunk_->LookupDestination(left_block);
+ if (right_block == left_block) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
+ NegateCondition(cc), src1, src2);
+ } else if (right_block == next_block) {
+ __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
+ } else {
+ __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
+ __ Branch(chunk_->GetAssemblyLabel(right_block));
+ }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsInteger32()) {
+ Register reg = ToRegister(instr->InputAt(0));
+ EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+ } else if (r.IsDouble()) {
+ DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
+ // Test the double value. Zero and NaN are false.
+ EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
+ } else {
+ ASSERT(r.IsTagged());
+ Register reg = ToRegister(instr->InputAt(0));
+ HType type = instr->hydrogen()->value()->type();
+ if (type.IsBoolean()) {
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ EmitBranch(true_block, false_block, eq, reg, Operand(at));
+ } else if (type.IsSmi()) {
+ EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+ } else {
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ // Avoid deopts in the case where we've never executed this path before.
+ if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+
+ if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ // undefined -> false.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(false_label, eq, reg, Operand(at));
+ }
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ // Boolean -> its value.
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(true_label, eq, reg, Operand(at));
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ __ Branch(false_label, eq, reg, Operand(at));
+ }
+ if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ // 'null' -> false.
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(false_label, eq, reg, Operand(at));
+ }
+
+ if (expected.Contains(ToBooleanStub::SMI)) {
+ // Smis: 0 -> false, all other -> true.
+ __ Branch(false_label, eq, reg, Operand(zero_reg));
+ __ JumpIfSmi(reg, true_label);
+ } else if (expected.NeedsMap()) {
+ // If we need a map later and have a Smi -> deopt.
+ __ And(at, reg, Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ }
+
+ const Register map = scratch0();
+ if (expected.NeedsMap()) {
+ __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+ if (expected.CanBeUndetectable()) {
+ // Undetectable -> false.
+ __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ And(at, at, Operand(1 << Map::kIsUndetectable));
+ __ Branch(false_label, ne, at, Operand(zero_reg));
+ }
+ }
+
+ if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ // spec object -> true.
+ __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+ }
+
+ if (expected.Contains(ToBooleanStub::STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
+ __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
+ __ Branch(true_label, ne, at, Operand(zero_reg));
+ __ Branch(false_label);
+ __ bind(&not_string);
+ }
+
+ if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ // heap number -> false iff +0, -0, or NaN.
+ DoubleRegister dbl_scratch = double_scratch0();
+ Label not_heap_number;
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&not_heap_number, ne, map, Operand(at));
+ __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
+ // Falls through if dbl_scratch == 0.
+ __ Branch(false_label);
+ __ bind(&not_heap_number);
+ }
+
+ // We've seen something for the first time -> deopt.
+ DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ }
+ }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+ block = chunk_->LookupDestination(block);
+ int next_block = GetNextEmittedBlock(current_block_);
+ if (block != next_block) {
+ __ jmp(chunk_->GetAssemblyLabel(block));
+ }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+ EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+ Condition cond = kNoCondition;
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ cond = eq;
+ break;
+ case Token::LT:
+ cond = is_unsigned ? lo : lt;
+ break;
+ case Token::GT:
+ cond = is_unsigned ? hi : gt;
+ break;
+ case Token::LTE:
+ cond = is_unsigned ? ls : le;
+ break;
+ case Token::GTE:
+ cond = is_unsigned ? hs : ge;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+ return cond;
+}
+
+
+void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ Condition cond = TokenToCondition(instr->op(), false);
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block =
+ EvalComparison(instr->op(), left_val, right_val) ? true_block
+ : false_block;
+ EmitGoto(next_block);
+ } else {
+ if (instr->is_double()) {
+ // Compare left and right as doubles; on MIPS the comparison sets an
+ // FPU condition flag rather than integer status flags.
+ FPURegister left_reg = ToDoubleRegister(left);
+ FPURegister right_reg = ToDoubleRegister(right);
+
+ // If a NaN is involved, i.e. the result is unordered,
+ // jump to false block label.
+ __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
+ left_reg, right_reg);
+
+ EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
+ } else {
+ Register cmp_left;
+ Operand cmp_right = Operand(0);
+
+ if (right->IsConstantOperand()) {
+ cmp_left = ToRegister(left);
+ cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
+ } else if (left->IsConstantOperand()) {
+ cmp_left = ToRegister(right);
+ cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
+ // We transposed the operands. Reverse the condition.
+ cond = ReverseCondition(cond);
+ } else {
+ cmp_left = ToRegister(left);
+ cmp_right = Operand(ToRegister(right));
+ }
+
+ EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
+ }
+ }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ EmitBranch(true_block, false_block, eq, left, Operand(right));
+}
+
+
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ EmitBranch(true_block, false_block, eq, left,
+ Operand(instr->hydrogen()->right()));
+}
+
+
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
+ Register scratch = scratch0();
+ Register reg = ToRegister(instr->InputAt(0));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ // If the expression is known to be untagged or a smi, then it's definitely
+ // not null, and it can't be an undetectable object.
+ if (instr->hydrogen()->representation().IsSpecialization() ||
+ instr->hydrogen()->type().IsSmi()) {
+ EmitGoto(false_block);
+ return;
+ }
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ LoadRoot(at, nil_value);
+ if (instr->kind() == kStrictEquality) {
+ EmitBranch(true_block, false_block, eq, reg, Operand(at));
+ } else {
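+ // For non-strict equality, null and undefined compare equal to each
+ // other, and undetectable objects also compare equal to nil.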
+ Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+ Heap::kUndefinedValueRootIndex :
+ Heap::kNullValueRootIndex;
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
+ __ LoadRoot(at, other_nil_value); // In the delay slot.
+ __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
+ __ JumpIfSmi(reg, false_label); // In the delay slot.
+ // Check for undetectable objects by looking in the bit field in
+ // the map. The object has already been smi checked.
+ __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ And(scratch, scratch, 1 << Map::kIsUndetectable);
+ EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
+ }
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input,
+ Register temp1,
+ Label* is_not_object,
+ Label* is_object) {
+ Register temp2 = scratch0();
+ __ JumpIfSmi(input, is_not_object);
+
+ __ LoadRoot(temp2, Heap::kNullValueRootIndex);
+ __ Branch(is_object, eq, input, Operand(temp2));
+
+ // Load map.
+ __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined.
+ __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
+ __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
+ __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
+
+ // Load instance type and check that it is in object type range.
+ __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
+ __ Branch(is_not_object,
+ lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+
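+ // The caller completes the upper-bound check by branching on the returned
+ // 'le' condition against LAST_NONCALLABLE_SPEC_OBJECT_TYPE; the instance
+ // type is left in temp2 (scratch0()).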
+ return le;
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp2 = scratch0();
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond =
+ EmitIsObject(reg, temp1, false_label, true_label);
+
+ EmitBranch(true_block, false_block, true_cond, temp2,
+ Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
+ __ And(at, input_reg, kSmiTagMask);
+ EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+ __ And(at, temp, Operand(1 << Map::kIsUndetectable));
+ EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
+}
+
+
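+// Chooses the single instance type to compare against: a [FIRST_TYPE, to]
+// range tests 'to', a [from, LAST_TYPE] range tests 'from', and an exact
+// type is tested directly. BranchCondition below picks the matching
+// condition (ls, hs, or eq).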
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ ASSERT(from == to || to == LAST_TYPE);
+ return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return eq;
+ if (to == LAST_TYPE) return hs;
+ if (from == FIRST_TYPE) return ls;
+ UNREACHABLE();
+ return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+ Register scratch = scratch0();
+ Register input = ToRegister(instr->InputAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ __ JumpIfSmi(input, false_label);
+
+ __ GetObjectType(input, scratch, scratch);
+ EmitBranch(true_block,
+ false_block,
+ BranchCondition(instr->hydrogen()),
+ scratch,
+ Operand(TestType(instr->hydrogen())));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(input);
+ }
+
+ __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+ LHasCachedArrayIndexAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ lw(scratch,
+ FieldMemOperand(input, String::kHashFieldOffset));
+ __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
+ EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+}
+
+
+// Branches to a label or falls through with this instance's class-name
+// address returned in the temp register, available for comparison by the
+// caller. Trashes the temp registers, but not the input. Only input and
+// temp2 may alias.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+ Label* is_false,
+ Handle<String> class_name,
+ Register input,
+ Register temp,
+ Register temp2) {
+ ASSERT(!input.is(temp));
+ ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
+ __ JumpIfSmi(input, is_false);
+
+ if (class_name->IsEqualTo(CStrVector("Function"))) {
+ // Assuming the following assertions, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+
+ __ GetObjectType(input, temp, temp2);
+ __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+ } else {
+ // Faster code path to avoid two compares: subtract lower bound from the
+ // actual type and do a signed compare with the width of the type range.
+ __ GetObjectType(input, temp, temp2);
+ __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ }
+
+ // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+ // Check if the constructor in the map is a function.
+ __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ GetObjectType(temp, temp2, temp2);
+ if (class_name->IsEqualTo(CStrVector("Object"))) {
+ __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
+ } else {
+ __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
+ }
+
+ // temp now contains the constructor function. Grab the
+ // instance class name from there.
+ __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(temp, FieldMemOperand(temp,
+ SharedFunctionInfo::kInstanceClassNameOffset));
+ // The class name we are testing against is a symbol because it's a literal.
+ // The name in the constructor is a symbol because of the way the context is
+ // booted. This routine isn't expected to work for random API-created
+ // classes, and it doesn't have to, because such classes can't be accessed
+ // with natives syntax. Since both sides are symbols it is sufficient to
+ // use an identity comparison.
+
+ // End with the address of this class_name instance in the temp register.
+ // On MIPS, the caller must do the comparison with Handle<String> class_name.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = scratch0();
+ Register temp2 = ToRegister(instr->TempAt(0));
+ Handle<String> class_name = instr->hydrogen()->class_name();
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
+
+ EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+ int true_block = instr->true_block_id();
+ int false_block = instr->false_block_id();
+
+ __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+ EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ Label true_label, done;
+ ASSERT(ToRegister(instr->InputAt(0)).is(a0)); // Object is in a0.
+ ASSERT(ToRegister(instr->InputAt(1)).is(a1)); // Function is in a1.
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(v0));
+
+ InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
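+ // The stub returns zero when the object is an instance of the function.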
+ __ Branch(&true_label, eq, result, Operand(zero_reg));
+ __ li(result, Operand(factory()->false_value()));
+ __ Branch(&done);
+ __ bind(&true_label);
+ __ li(result, Operand(factory()->true_value()));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ Label* map_check() { return &map_check_; }
+
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ Label map_check_;
+ };
+
+ DeferredInstanceOfKnownGlobal* deferred;
+ deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label done, false_result;
+ Register object = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(object.is(a0));
+ ASSERT(result.is(v0));
+
+ // A Smi is not an instance of anything.
+ __ JumpIfSmi(object, &false_result);
+
+ // This is the inlined call site instanceof cache. The two occurrences of
+ // the hole value will be patched to the last map/result pair generated by
+ // the instanceof stub.
+ Label cache_miss;
+ Register map = temp;
+ __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
+
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ __ bind(deferred->map_check()); // Label for calculating code patching.
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch with
+ // the cached map.
+ __ li(at, Operand(factory()->the_hole_value()), true);
+ __ Branch(&cache_miss, ne, map, Operand(at));
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch
+ // with true or false.
+ __ li(result, Operand(factory()->the_hole_value()), true);
+ __ Branch(&done);
+
+ // The inlined call site cache did not match. Check null and string before
+ // calling the deferred code.
+ __ bind(&cache_miss);
+ // Null is not an instance of anything.
+ __ LoadRoot(temp, Heap::kNullValueRootIndex);
+ __ Branch(&false_result, eq, object, Operand(temp));
+
+ // String values are not instances of anything.
+ Condition cc = __ IsObjectStringType(object, temp, temp);
+ __ Branch(&false_result, cc, temp, Operand(zero_reg));
+
+ // Go to the deferred code.
+ __ Branch(deferred->entry());
+
+ __ bind(&false_result);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+ // Here result holds either true or false. The deferred code also produces
+ // a true or false object.
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(v0));
+
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ InstanceofStub stub(flags);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // Get the temp register reserved by the instruction. This needs to be t0,
+ // as its slot in the pushed safepoint register area is used to communicate
+ // the offset to the location of the map check.
+ Register temp = ToRegister(instr->TempAt(0));
+ ASSERT(temp.is(t0));
+ __ li(InstanceofStub::right(), Operand(instr->function()));
+ static const int kAdditionalDelta = 7;
+ int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
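+ // The delta is written to t0's safepoint slot below so the stub can
+ // locate the patchable map-check site (see the comment above).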
+ Label before_push_delta;
+ __ bind(&before_push_delta);
+ {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ __ li(temp, Operand(delta * kPointerSize), true);
+ __ StoreToSafepointRegisterSlot(temp, temp);
+ }
+ CallCodeGeneric(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ // Put the result value into the result register slot and
+ // restore all registers.
+ __ StoreToSafepointRegisterSlot(result, result);
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // On MIPS there is no need for a "no inlined smi code" marker (nop).
+
+ Condition condition = ComputeCompareCondition(op);
+ // A minor optimization that relies on LoadRoot always emitting one
+ // instruction.
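+ // The load of the true value sits in the branch delay slot and thus always
+ // executes; if the branch is not taken, the false value overwrites it.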
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+ Label done;
+ __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+ ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in v0.
+ __ push(v0);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
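+ // sp_delta includes one extra slot for the receiver.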
+ int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+ __ Addu(sp, sp, Operand(sp_delta));
+ __ Jump(ra);
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+ Register result = ToRegister(instr->result());
+ __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
+ __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
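+ // The cell contains the hole if the global property has been deleted;
+ // deopt in that case.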
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ }
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->global_object()).is(a0));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ __ li(a2, Operand(instr->name()));
+ RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, mode, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+ Register value = ToRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+
+ // Load the cell.
+ __ li(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
+
+ // If the cell we are storing to contains the hole it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ lw(scratch2,
+ FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), scratch2, Operand(at));
+ }
+
+ // Store the value.
+ __ sw(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+
+ // Cells are always in the remembered set.
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteField(scratch,
+ JSGlobalPropertyCell::kValueOffset,
+ value,
+ scratch2,
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->global_object()).is(a1));
+ ASSERT(ToRegister(instr->value()).is(a0));
+
+ __ li(a2, Operand(instr->name()));
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ lw(result, ContextOperand(context, instr->slot_index()));
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ MemOperand target = ContextOperand(context, instr->slot_index());
+ __ sw(value, target);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteContextSlot(context,
+ target.offset(),
+ value,
+ scratch0(),
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ Register object = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ if (instr->hydrogen()->is_in_object()) {
+ __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
+ } else {
+ __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
+ }
+}
+
+
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name) {
+ LookupResult lookup(isolate());
+ type->LookupInDescriptors(NULL, *name, &lookup);
+ ASSERT(lookup.IsProperty() &&
+ (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+ if (lookup.type() == FIELD) {
+ int index = lookup.GetLocalFieldIndexFromMap(*type);
+ int offset = index * kPointerSize;
+ if (index < 0) {
+ // Negative property indices are in-object properties, indexed
+ // from the end of the fixed part of the object.
+ __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
+ } else {
+ // Non-negative property indices are in the properties array.
+ __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
+ }
+ } else {
+ Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+ LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ }
+}
+
+
+void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
+ Register object = ToRegister(instr->object());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ int map_count = instr->hydrogen()->types()->length();
+ Handle<String> name = instr->hydrogen()->name();
+ if (map_count == 0) {
+ ASSERT(instr->hydrogen()->need_generic());
+ __ li(a2, Operand(name));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ Label done;
+ __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ for (int i = 0; i < map_count - 1; ++i) {
+ Handle<Map> map = instr->hydrogen()->types()->at(i);
+ Label next;
+ __ Branch(&next, ne, scratch, Operand(map));
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ Branch(&done);
+ __ bind(&next);
+ }
+ Handle<Map> map = instr->hydrogen()->types()->last();
+ if (instr->hydrogen()->need_generic()) {
+ Label generic;
+ __ Branch(&generic, ne, scratch, Operand(map));
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ Branch(&done);
+ __ bind(&generic);
+ __ li(a2, Operand(name));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ }
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->object()).is(a0));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ // Name is always in a2.
+ __ li(a2, Operand(instr->name()));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Register scratch = scratch0();
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+
+ // Check that the function really is a function. Load map into the
+ // result register.
+ __ GetObjectType(function, result, scratch);
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
+
+ // Get the prototype or initial map from the function.
+ __ lw(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ __ GetObjectType(result, scratch, scratch);
+ __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
+
+ // Get the prototype from the initial map.
+ __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ __ Branch(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ __ bind(&non_instance);
+ __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadElements(LLoadElements* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+
+ __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
+ if (FLAG_debug_code) {
+ Label done, fail;
+ __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
+ __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); // In the delay slot.
+ __ Branch(&done, eq, scratch, Operand(at));
+ // |scratch| still contains the map of the elements object in |result|.
+ __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
+ __ Ext(scratch, scratch, Map::kElementsKindShift,
+ Map::kElementsKindBitCount);
+ __ Branch(&done, eq, scratch,
+ Operand(FAST_ELEMENTS));
+ __ Branch(&fail, lt, scratch,
+ Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ Branch(&done, le, scratch,
+ Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ bind(&fail);
+ __ Abort("Check for fast or external elements failed.");
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoLoadExternalArrayPointer(
+ LLoadExternalArrayPointer* instr) {
+ Register to_reg = ToRegister(instr->result());
+ Register from_reg = ToRegister(instr->InputAt(0));
+ __ lw(to_reg, FieldMemOperand(from_reg,
+ ExternalArray::kExternalPointerOffset));
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+ Register arguments = ToRegister(instr->arguments());
+ Register length = ToRegister(instr->length());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+
+ // Bail out if index is not a valid argument index. The unsigned check
+ // handles negative indices for free.
+
+ // TODO(plind): Should be optimized to do the sub before the DeoptimizeIf(),
+ // as is done on ARM. It would save us an instruction.
+ DeoptimizeIf(ls, instr->environment(), length, Operand(index));
+
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them, add one more.
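+ // Overall: result = *(arguments + (length - index + 1) * kPointerSize).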
+ __ subu(length, length, index);
+ __ Addu(length, length, Operand(1));
+ __ sll(length, length, kPointerSizeLog2);
+ __ Addu(at, arguments, Operand(length));
+ __ lw(result, MemOperand(at, 0));
+}
+
+
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register key = EmitLoadRegister(instr->key(), scratch0());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // Load the result.
+ __ sll(scratch, key, kPointerSizeLog2); // Key indexes words.
+ __ addu(scratch, elements, scratch);
+ __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+ LLoadKeyedFastDoubleElement* instr) {
+ Register elements = ToRegister(instr->elements());
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+
+ if (key_is_constant) {
+ __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ sll(scratch, key, shift_size);
+ __ Addu(elements, elements, Operand(scratch));
+ __ Addu(elements, elements,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ }
+
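+ // The hole is a NaN with a distinguished bit pattern; comparing one word
+ // of the element against kHoleNanUpper32 is enough to detect it.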
+ __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+
+ __ ldc1(result, MemOperand(elements));
+}
+
+
+void LCodeGen::DoLoadKeyedSpecializedArrayElement(
+ LLoadKeyedSpecializedArrayElement* instr) {
+ Register external_pointer = ToRegister(instr->external_pointer());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int shift_size = ElementsKindToShiftSize(elements_kind);
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ FPURegister result = ToDoubleRegister(instr->result());
+ if (key_is_constant) {
+ __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
+ } else {
+ __ sll(scratch0(), key, shift_size);
+ __ Addu(scratch0(), scratch0(), external_pointer);
+ }
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ lwc1(result, MemOperand(scratch0()));
+ __ cvt_d_s(result, result);
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ ldc1(result, MemOperand(scratch0()));
+ }
+ } else {
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ MemOperand mem_operand(zero_reg);
+ if (key_is_constant) {
+ mem_operand = MemOperand(external_pointer,
+ constant_key * (1 << shift_size));
+ } else {
+ __ sll(scratch, key, shift_size);
+ __ Addu(scratch, scratch, external_pointer);
+ mem_operand = MemOperand(scratch);
+ }
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ __ lb(result, mem_operand);
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ lbu(result, mem_operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ __ lh(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ lhu(result, mem_operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ __ lw(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ lw(result, mem_operand);
+ // TODO(danno): we could be more clever here, perhaps having a special
+ // version of the stub that detects if the overflow case actually
+ // happens, and generate code that returns a double rather than int.
+ DeoptimizeIf(Ugreater_equal, instr->environment(),
+ result, Operand(0x80000000));
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->object()).is(a1));
+ ASSERT(ToRegister(instr->key()).is(a0));
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+ Register scratch = scratch0();
+ Register temp = scratch1();
+ Register result = ToRegister(instr->result());
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label done, adapted;
+ __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
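+ // temp is zero iff the calling frame is an arguments adaptor frame.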
+ __ movn(result, fp, temp); // move only if temp is not equal to zero (ne)
+ __ movz(result, scratch, temp); // move only if temp is equal to zero (eq)
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+ Register elem = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ Label done;
+
+ // If there is no arguments adaptor frame, the number of arguments is fixed.
+ __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
+ __ Branch(&done, eq, fp, Operand(elem));
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(result,
+ MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(result);
+
+ // Argument length is in result register.
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ ASSERT(receiver.is(a0)); // Used for parameter count.
+ ASSERT(function.is(a1)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
+ Label global_object, receiver_ok;
+
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ lw(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+
+ // Do not transform the receiver to object for builtins.
+ int32_t strict_mode_function_mask =
+ 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+ int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
+ __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
+ __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));
+
+ // Normal function. Replace undefined or null with global receiver.
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ Branch(&global_object, eq, receiver, Operand(scratch));
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ Branch(&global_object, eq, receiver, Operand(scratch));
+
+ // Deoptimize if the receiver is not a JS object.
+ __ And(scratch, receiver, Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+
+ __ GetObjectType(receiver, scratch, scratch);
+ DeoptimizeIf(lt, instr->environment(),
+ scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&receiver_ok);
+
+ __ bind(&global_object);
+ __ lw(receiver, GlobalObjectOperand());
+ __ lw(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ __ bind(&receiver_ok);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
+
+ // Push the receiver and use the register to keep the original
+ // number of arguments.
+ __ push(receiver);
+ __ Move(receiver, length);
+ // The arguments are at a one pointer size offset from elements.
+ __ Addu(elements, elements, Operand(1 * kPointerSize));
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ Label invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
+ __ sll(scratch, length, 2);
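+ // The shift in the delay slot computes the byte offset of the next
+ // argument to push; arguments are copied from last to first as length
+ // counts down.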
+ __ bind(&loop);
+ __ Addu(scratch, elements, scratch);
+ __ lw(scratch, MemOperand(scratch));
+ __ push(scratch);
+ __ Subu(length, length, Operand(1));
+ __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
+ __ sll(scratch, length, 2);
+
+ __ bind(&invoke);
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ SafepointGenerator safepoint_generator(this,
+ pointers,
+ env->deoptimization_index());
+ // The number of arguments is stored in receiver which is a0, as expected
+ // by InvokeFunction.
+ v8::internal::ParameterCount actual(receiver);
+ __ InvokeFunction(function, actual, CALL_FUNCTION,
+ safepoint_generator, CALL_AS_METHOD);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+ LOperand* argument = instr->InputAt(0);
+ if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+ Abort("DoPushArgument not implemented for double type.");
+ } else {
+ Register argument_reg = EmitLoadRegister(argument, at);
+ __ push(argument_reg);
+ }
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ LoadHeapObject(result, instr->hydrogen()->closure());
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+ Register result = ToRegister(instr->result());
+ __ mov(result, cp);
+}
+
+
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ lw(result,
+ MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+}
+
+
+void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX));
+}
+
+
+void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+ Register global = ToRegister(instr->global());
+ Register result = ToRegister(instr->result());
+ __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int arity,
+ LInstruction* instr,
+ CallKind call_kind) {
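+ // The function to call must already be in a1; both the context and the
+ // code entry are loaded from it below.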
+ // Change context if needed.
+ bool change_context =
+ (info()->closure()->context() != function->context()) ||
+ scope()->contains_with() ||
+ (scope()->num_heap_slots() > 0);
+ if (change_context) {
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ }
+
+ // Set a0 to arguments count if adaptation is not needed. Assumes that a0
+ // is available to write to at this point.
+ if (!function->NeedsArgumentsAdaption()) {
+ __ li(a0, Operand(arity));
+ }
+
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+
+ // Invoke function.
+ __ SetCallKind(t1, call_kind);
+ __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Call(at);
+
+ // Set up deoptimization.
+ RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+
+ // Restore context.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+ __ mov(a0, v0);
+ __ li(a1, Operand(instr->function()));
+ CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // Deoptimize if not a heap number.
+ __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+
+ Label done;
+ Register exponent = scratch0();
+ scratch = no_reg;
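+ // 'exponent' now aliases scratch0(), so clear 'scratch' to prevent misuse.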
+ __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ // Check the sign of the argument. If the argument is positive, just
+ // return it.
+ __ Move(result, input);
+ __ And(at, exponent, Operand(HeapNumber::kSignMask));
+ __ Branch(&done, eq, at, Operand(zero_reg));
+
+ // Input is negative. Reverse its sign.
+ // Preserve the value of all registers.
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // Registers were saved at the safepoint, so we can use
+ // many scratch registers.
+ Register tmp1 = input.is(a1) ? a0 : a1;
+ Register tmp2 = input.is(a2) ? a0 : a2;
+ Register tmp3 = input.is(a3) ? a0 : a3;
+ Register tmp4 = input.is(t0) ? a0 : t0;
+
+ // exponent: floating point exponent value.
+
+ Label allocated, slow;
+ __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+ __ Branch(&allocated);
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // Set the pointer to the new heap number in tmp1.
+ if (!tmp1.is(v0)) __ mov(tmp1, v0);
+ // Restore input_reg after call to runtime.
+ __ LoadFromSafepointRegisterSlot(input, input);
+ __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+ __ bind(&allocated);
+ // exponent: floating point exponent value.
+ // tmp1: allocated heap number.
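+ // Clear the sign bit in the exponent word and store both words of the
+ // absolute value into the new heap number.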
+ __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
+ __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+ __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+ __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+ __ StoreToSafepointRegisterSlot(tmp1, result);
+ }
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Label done;
+ __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
+ __ mov(result, input);
+ ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
+ __ subu(result, zero_reg, input);
+ // Overflow if result is still negative, i.e. 0x80000000.
+ DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+ // Class for deferred case.
+ class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ public:
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+ LUnaryMathOperation* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LUnaryMathOperation* instr_;
+ };
+
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsDouble()) {
+ FPURegister input = ToDoubleRegister(instr->InputAt(0));
+ FPURegister result = ToDoubleRegister(instr->result());
+ __ abs_d(result, input);
+ } else if (r.IsInteger32()) {
+ EmitIntegerMathAbs(instr);
+ } else {
+ // Representation is tagged.
+ DeferredMathAbsTaggedHeapNumber* deferred =
+ new DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input = ToRegister(instr->InputAt(0));
+ // Smi check.
+ __ JumpIfNotSmi(input, deferred->entry());
+ // If smi, handle it directly.
+ EmitIntegerMathAbs(instr);
+ __ bind(deferred->exit());
+ }
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ FPURegister single_scratch = double_scratch0().low();
+ Register scratch1 = scratch0();
+ Register except_flag = ToRegister(instr->TempAt(0));
+
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ single_scratch,
+ input,
+ scratch1,
+ except_flag);
+
+ // Deopt if the operation did not succeed.
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ // Load the result.
+ __ mfc1(result, single_scratch);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Test for -0.
+ Label done;
+ __ Branch(&done, ne, result, Operand(zero_reg));
+ __ mfc1(scratch1, input.high());
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ Label done, check_sign_on_zero;
+
+ // Extract exponent bits.
+ __ mfc1(result, input.high());
+ __ Ext(scratch,
+ result,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // If the number is in ]-0.5, +0.5[, the result is +/- 0.
+ Label skip1;
+ __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
+ __ mov(result, zero_reg);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Branch(&check_sign_on_zero);
+ } else {
+ __ Branch(&done);
+ }
+ __ bind(&skip1);
+
+ // The following conversion will not work with numbers
+ // outside of ]-2^32, 2^32[.
+ DeoptimizeIf(ge, instr->environment(), scratch,
+ Operand(HeapNumber::kExponentBias + 32));
+
+ // Save the original sign for later comparison.
+ __ And(scratch, result, Operand(HeapNumber::kSignMask));
+
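+ // Round to nearest by adding 0.5 and then truncating toward minus
+ // infinity (via EmitFPUTruncate with kRoundToMinusInf below).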
+ __ Move(double_scratch0(), 0.5);
+ __ add_d(input, input, double_scratch0());
+
+ // Check the sign of the result: if the sign changed, the input value was
+ // negative and rounded up to +0, so the result should be -0.
+ __ mfc1(result, input.high());
+ __ Xor(result, result, Operand(scratch));
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // ARM uses 'mi' here, which is 'lt'
+ DeoptimizeIf(lt, instr->environment(), result,
+ Operand(zero_reg));
+ } else {
+ Label skip2;
+ // ARM uses 'mi' here, which is 'lt'
+ // Negating it results in 'ge'
+ __ Branch(&skip2, ge, result, Operand(zero_reg));
+ __ mov(result, zero_reg);
+ __ Branch(&done);
+ __ bind(&skip2);
+ }
+
+ Register except_flag = scratch;
+
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ double_scratch0().low(),
+ input,
+ result,
+ except_flag);
+
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ __ mfc1(result, double_scratch0().low());
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Test for -0.
+ __ Branch(&done, ne, result, Operand(zero_reg));
+ __ bind(&check_sign_on_zero);
+ __ mfc1(scratch, input.high());
+ __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ }
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ sqrt_d(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister double_scratch = double_scratch0();
+
+ // Add +0 to convert -0 to +0.
+ __ mtc1(zero_reg, double_scratch.low());
+ __ mtc1(zero_reg, double_scratch.high());
+ __ add_d(result, input, double_scratch);
+ __ sqrt_d(result, result);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ Register scratch = scratch0();
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ if (exponent_type.IsDouble()) {
+ // Prepare arguments and call C function.
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(ToDoubleRegister(left),
+ ToDoubleRegister(right));
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
+ } else if (exponent_type.IsInteger32()) {
+ ASSERT(ToRegister(right).is(a0));
+ // Prepare arguments and call C function.
+ __ PrepareCallCFunction(1, 1, scratch);
+ __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(isolate()), 1, 1);
+ } else {
+ ASSERT(exponent_type.IsTagged());
+ ASSERT(instr->hydrogen()->left()->representation().IsDouble());
+
+ Register right_reg = ToRegister(right);
+
+ // Check for smi on the right hand side.
+ Label non_smi, call;
+ __ JumpIfNotSmi(right_reg, &non_smi);
+
+ // Untag smi and convert it to a double.
+ __ SmiUntag(right_reg);
+ FPURegister single_scratch = double_scratch0();
+ __ mtc1(right_reg, single_scratch);
+ __ cvt_d_w(result_reg, single_scratch);
+ __ Branch(&call);
+
+ // Heap number map check.
+ __ bind(&non_smi);
+ __ lw(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+ __ ldc1(result_reg, FieldMemOperand(right_reg, HeapNumber::kValueOffset));
+
+ // Prepare arguments and call C function.
+ __ bind(&call);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
+ }
+ // Store the result in the result register.
+ __ GetCFunctionDoubleResult(result_reg);
+}
+
+
+void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
+ switch (instr->op()) {
+ case kMathAbs:
+ DoMathAbs(instr);
+ break;
+ case kMathFloor:
+ DoMathFloor(instr);
+ break;
+ case kMathRound:
+ DoMathRound(instr);
+ break;
+ case kMathSqrt:
+ DoMathSqrt(instr);
+ break;
+ case kMathPowHalf:
+ DoMathPowHalf(instr);
+ break;
+ case kMathCos:
+ DoMathCos(instr);
+ break;
+ case kMathSin:
+ DoMathSin(instr);
+ break;
+ case kMathLog:
+ DoMathLog(instr);
+ break;
+ default:
+ Abort("Unimplemented type of LUnaryMathOperation.");
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(a1));
+ ASSERT(instr->HasPointerMap());
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ SafepointGenerator generator(this, pointers, env->deoptimization_index());
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ int arity = instr->arity();
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallNamed(LCallNamed* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ int arity = instr->arity();
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
+ __ li(a2, Operand(instr->name()));
+ CallCode(ic, mode, instr);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ int arity = instr->arity();
+ CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ Drop(1);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ int arity = instr->arity();
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
+ __ li(a2, Operand(instr->name()));
+ CallCode(ic, mode, instr);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+ __ li(a1, Operand(instr->target()));
+ CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->InputAt(0)).is(a1));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
+ __ li(a0, Operand(instr->arity()));
+ CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Register object = ToRegister(instr->object());
+ Register value = ToRegister(instr->value());
+ Register scratch = scratch0();
+ int offset = instr->offset();
+
+ ASSERT(!object.is(value));
+
+ if (!instr->transition().is_null()) {
+ __ li(scratch, Operand(instr->transition()));
+ __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ }
+
+ // Do the store.
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
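+  // Smis are never heap-allocated, so when the value is statically known
+  // to be a heap object the write barrier can skip its dynamic smi check.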
+ if (instr->is_in_object()) {
+ __ sw(value, FieldMemOperand(object, offset));
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ // Update the write barrier for the object for in-object properties.
+ __ RecordWriteField(object,
+ offset,
+ value,
+ scratch,
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+ } else {
+ __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ sw(value, FieldMemOperand(scratch, offset));
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ // Update the write barrier for the properties array.
+ // object is used as a scratch register.
+ __ RecordWriteField(scratch,
+ offset,
+ value,
+ object,
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->object()).is(a1));
+ ASSERT(ToRegister(instr->value()).is(a0));
+
+ // Name is always in a2.
+ __ li(a2, Operand(instr->name()));
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
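+  // The unsigned 'higher or same' compare also catches negative indices,
+  // which reinterpret as unsigned values larger than any valid length.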
+ DeoptimizeIf(hs,
+ instr->environment(),
+ ToRegister(instr->index()),
+ Operand(ToRegister(instr->length())));
+}
+
+
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->object());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+ Register scratch = scratch0();
+
+ // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+ // conversion, so it deopts in that case.
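+  // With kSmiTag == 0 and kSmiTagMask == 1, the And below yields zero
+  // exactly for smis; any heap object pointer has the low tag bit set.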
+ if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+ __ And(at, value, Operand(kSmiTagMask));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ int offset =
+ ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+ __ sw(value, FieldMemOperand(elements, offset));
+ } else {
+ __ sll(scratch, key, kPointerSizeLog2);
+ __ addu(scratch, elements, scratch);
+ __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ }
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ RecordWrite(elements,
+ key,
+ value,
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+ LStoreKeyedFastDoubleElement* instr) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch = scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ Label not_nan;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
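+  // For FAST_DOUBLE_ELEMENTS each slot is kDoubleSize (8) bytes, so
+  // shift_size is 3 and a key is scaled by 1 << 3 below.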
+ if (key_is_constant) {
+ __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ sll(scratch, key, shift_size);
+ __ Addu(scratch, elements, Operand(scratch));
+ __ Addu(scratch, scratch,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ }
+
+ Label is_nan;
+ // Check for NaN. All NaNs must be canonicalized.
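+  // A NaN is unordered with itself, so the eq comparison below takes its
+  // NaN branch only for NaN inputs. Canonicalizing keeps stored NaNs
+  // distinct from the hole NaN that FixedDoubleArray uses for gaps.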
+ __ BranchF(NULL, &is_nan, eq, value, value);
+ __ Branch(&not_nan);
+
+  // Only load the canonical NaN if the comparison above detected a NaN.
+ __ bind(&is_nan);
+ __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+
+ __ bind(&not_nan);
+ __ sdc1(value, MemOperand(scratch));
+}
+
+
+void LCodeGen::DoStoreKeyedSpecializedArrayElement(
+ LStoreKeyedSpecializedArrayElement* instr) {
+ Register external_pointer = ToRegister(instr->external_pointer());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int shift_size = ElementsKindToShiftSize(elements_kind);
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ FPURegister value(ToDoubleRegister(instr->value()));
+ if (key_is_constant) {
+ __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
+ } else {
+ __ sll(scratch0(), key, shift_size);
+ __ Addu(scratch0(), scratch0(), external_pointer);
+ }
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ cvt_s_d(double_scratch0(), value);
+ __ swc1(double_scratch0(), MemOperand(scratch0()));
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ sdc1(value, MemOperand(scratch0()));
+ }
+ } else {
+ Register value(ToRegister(instr->value()));
+ MemOperand mem_operand(zero_reg);
+ Register scratch = scratch0();
+ if (key_is_constant) {
+ mem_operand = MemOperand(external_pointer,
+ constant_key * (1 << shift_size));
+ } else {
+ __ sll(scratch, key, shift_size);
+ __ Addu(scratch, scratch, external_pointer);
+ mem_operand = MemOperand(scratch);
+ }
+ switch (elements_kind) {
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ sb(value, mem_operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ sh(value, mem_operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ sw(value, mem_operand);
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->object()).is(a2));
+ ASSERT(ToRegister(instr->key()).is(a1));
+ ASSERT(ToRegister(instr->value()).is(a0));
+
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object_reg = ToRegister(instr->object());
+ Register new_map_reg = ToRegister(instr->new_map_reg());
+ Register scratch = scratch0();
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = from_map->elements_kind();
+ ElementsKind to_kind = to_map->elements_kind();
+
+ __ mov(ToRegister(instr->result()), object_reg);
+
+ Label not_applicable;
+ __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ __ Branch(&not_applicable, ne, scratch, Operand(from_map));
+
+ __ li(new_map_reg, Operand(to_map));
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ // Write barrier.
+ __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+ scratch, kRAHasBeenSaved, kDontSaveFPRegs);
+ } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(a2));
+ ASSERT(new_map_reg.is(a3));
+ __ mov(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+ RelocInfo::CODE_TARGET, instr);
+ } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(a2));
+ ASSERT(new_map_reg.is(a3));
+ __ mov(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+ RelocInfo::CODE_TARGET, instr);
+ } else {
+ UNREACHABLE();
+ }
+ __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ __ push(ToRegister(instr->left()));
+ __ push(ToRegister(instr->right()));
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt: public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ Register temp = scratch1();
+ Register string = ToRegister(instr->string());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(this, instr);
+
+ // Fetch the instance type of the receiver into result register.
+ __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
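+  // A sliced string is a (parent, offset) pair and a cons string is a
+  // pair of halves; both are unwrapped below so the sequential fast path
+  // can index directly into the character data.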
+ Label check_sequential;
+ __ And(temp, result, kIsIndirectStringMask);
+ __ Branch(&check_sequential, eq, temp, Operand(zero_reg));
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ And(temp, result, kSlicedNotConsMask);
+ __ Branch(&cons_string, eq, temp, Operand(zero_reg));
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ sra(temp, result, kSmiTagSize);
+ __ addu(index, index, temp);
+ __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ jmp(&indirect_string_loaded);
+
+ // Handle conses.
+  // Check whether the right-hand side is the empty string (i.e. whether
+  // this cons string is really an already-flattened string). If not, go
+  // to the runtime system now to flatten the string.
+ __ bind(&cons_string);
+ __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ LoadRoot(temp, Heap::kEmptyStringRootIndex);
+ __ Branch(deferred->entry(), ne, result, Operand(temp));
+ // Get the first of the two strings and load its instance type.
+ __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+ __ bind(&indirect_string_loaded);
+ __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // Check whether the string is sequential. The only non-sequential
+ // shapes we support have just been unwrapped above.
+ __ bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(temp, result, Operand(kStringRepresentationMask));
+ __ Branch(deferred->entry(), ne, temp, Operand(zero_reg));
+
+ // Dispatch on the encoding: ASCII or two-byte.
+ Label ascii_string;
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ And(temp, result, Operand(kStringEncodingMask));
+ __ Branch(&ascii_string, ne, temp, Operand(zero_reg));
+
+ // Two-byte string.
+ // Load the two-byte character code into the result register.
+ Label done;
+ __ Addu(result,
+ string,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ sll(temp, index, 1);
+ __ Addu(result, result, temp);
+ __ lhu(result, MemOperand(result, 0));
+ __ Branch(&done);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ __ Addu(result,
+ string,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(result, result, index);
+ __ lbu(result, MemOperand(result, 0));
+
+ __ bind(&done);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, zero_reg);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
+ __ push(scratch);
+ } else {
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ push(index);
+ }
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(v0);
+ }
+ __ SmiUntag(v0);
+ __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode: public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new DeferredStringCharFromCode(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ ASSERT(!char_code.is(result));
+
+ __ Branch(deferred->entry(), hi,
+ char_code, Operand(String::kMaxAsciiCharCode));
+ __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+ __ sll(scratch, char_code, kPointerSizeLog2);
+ __ Addu(result, result, scratch);
+ __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ Branch(deferred->entry(), eq, result, Operand(scratch));
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, zero_reg);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ SmiTag(char_code);
+ __ push(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoStringLength(LStringLength* instr) {
+ Register string = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ __ lw(result, FieldMemOperand(string, String::kLengthOffset));
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() || input->IsStackSlot());
+ LOperand* output = instr->result();
+ ASSERT(output->IsDoubleRegister());
+ FPURegister single_scratch = double_scratch0().low();
+ if (input->IsStackSlot()) {
+ Register scratch = scratch0();
+ __ lw(scratch, ToMemOperand(input));
+ __ mtc1(scratch, single_scratch);
+ } else {
+ __ mtc1(ToRegister(input), single_scratch);
+ }
+ __ cvt_d_w(ToDoubleRegister(output), single_scratch);
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+ class DeferredNumberTagI: public LDeferredCode {
+ public:
+ DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagI* instr_;
+ };
+
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
+ Register overflow = scratch0();
+
+ DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+ __ SmiTagCheckOverflow(reg, overflow);
+ __ BranchOnOverflow(deferred->entry(), overflow);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+ Label slow;
+ Register reg = ToRegister(instr->InputAt(0));
+ FPURegister dbl_scratch = double_scratch0();
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
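+  // Example: 0x40000000 tags to 0x80000000; the arithmetic-shift untag
+  // below yields 0xC0000000, and the Xor with 0x80000000 restores the
+  // original 0x40000000 before it is boxed.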
+ Label done;
+ __ SmiUntag(reg);
+ __ Xor(reg, reg, Operand(0x80000000));
+ __ mtc1(reg, dbl_scratch);
+ __ cvt_d_w(dbl_scratch, dbl_scratch);
+ if (FLAG_inline_new) {
+ __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
+ if (!reg.is(t1)) __ mov(reg, t1);
+ __ Branch(&done);
+ }
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ // TODO(3095996): Put a valid pointer value in the stack slot where the result
+ // register is stored, as this register is in the pointer map, but contains an
+ // integer value.
+ __ StoreToSafepointRegisterSlot(zero_reg, reg);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ if (!reg.is(v0)) __ mov(reg, v0);
+
+  // Done. Store the double in dbl_scratch into the value field of the
+  // allocated heap number.
+ __ bind(&done);
+ __ sdc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ StoreToSafepointRegisterSlot(reg, reg);
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+ class DeferredNumberTagD: public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagD* instr_;
+ };
+
+ DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+ Register reg = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->TempAt(1));
+
+ DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+ if (FLAG_inline_new) {
+ __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+ } else {
+ __ Branch(deferred->entry());
+ }
+ __ bind(deferred->exit());
+ __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ mov(reg, zero_reg);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ __ StoreToSafepointRegisterSlot(v0, reg);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
+ __ SmiTag(ToRegister(input));
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+ Register scratch = scratch0();
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ if (instr->needs_check()) {
+ STATIC_ASSERT(kHeapObjectTag == 1);
+  // If the input is a HeapObject, the value of scratch won't be zero.
+ __ And(scratch, ToRegister(input), Operand(kHeapObjectTag));
+ __ SmiUntag(ToRegister(input));
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ SmiUntag(ToRegister(input));
+ }
+}
+
+
+void LCodeGen::EmitNumberUntagD(Register input_reg,
+ DoubleRegister result_reg,
+ bool deoptimize_on_undefined,
+ LEnvironment* env) {
+ Register scratch = scratch0();
+
+  Label load_smi, done;
+
+ // Smi check.
+ __ JumpIfSmi(input_reg, &load_smi);
+
+ // Heap number map check.
+ __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ if (deoptimize_on_undefined) {
+ DeoptimizeIf(ne, env, scratch, Operand(at));
+ } else {
+ Label heap_number;
+ __ Branch(&heap_number, eq, scratch, Operand(at));
+
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(ne, env, input_reg, Operand(at));
+
+ // Convert undefined to NaN.
+ __ LoadRoot(at, Heap::kNanValueRootIndex);
+ __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+ __ Branch(&done);
+
+ __ bind(&heap_number);
+ }
+ // Heap number to double register conversion.
+ __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ Branch(&done);
+
+  // Smi to double register conversion.
+ __ bind(&load_smi);
+ __ SmiUntag(input_reg); // Untag smi before converting to float.
+ __ mtc1(input_reg, result_reg);
+ __ cvt_d_w(result_reg, result_reg);
+ __ SmiTag(input_reg); // Retag smi.
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+ Register input_reg = ToRegister(instr->InputAt(0));
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+ DoubleRegister double_scratch = double_scratch0();
+ FPURegister single_scratch = double_scratch.low();
+
+ ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+ ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+ Label done;
+
+ // The input is a tagged HeapObject.
+ // Heap number map check.
+ __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  // The root value in 'at' and the map value in scratch1 are used for
+  // tests in both clauses of the if below.
+
+ if (instr->truncating()) {
+ Register scratch3 = ToRegister(instr->TempAt(1));
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
+ ASSERT(!scratch3.is(input_reg) &&
+ !scratch3.is(scratch1) &&
+ !scratch3.is(scratch2));
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations.
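+    // ECMA ToInt32 wraps modulo 2^32: e.g. 2^31 + 5 becomes -2^31 + 5,
+    // while NaN and the infinities truncate to zero.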
+ Label heap_number;
+ __ Branch(&heap_number, eq, scratch1, Operand(at)); // HeapNumber map?
+ // Check for undefined. Undefined is converted to zero for truncating
+ // conversions.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
+ ASSERT(ToRegister(instr->result()).is(input_reg));
+ __ mov(input_reg, zero_reg);
+ __ Branch(&done);
+
+ __ bind(&heap_number);
+ __ ldc1(double_scratch2,
+ FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ EmitECMATruncate(input_reg,
+ double_scratch2,
+ single_scratch,
+ scratch1,
+ scratch2,
+ scratch3);
+ } else {
+ // Deoptimize if we don't have a heap number.
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
+
+ // Load the double value.
+ __ ldc1(double_scratch,
+ FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ __ EmitFPUTruncate(kRoundToZero,
+ single_scratch,
+ double_scratch,
+ scratch1,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Deopt if the operation did not succeed.
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ // Load the result.
+ __ mfc1(input_reg, single_scratch);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Branch(&done, ne, input_reg, Operand(zero_reg));
+
+ __ mfc1(scratch1, double_scratch.high());
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ }
+ }
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ ASSERT(input->Equals(instr->result()));
+
+ Register input_reg = ToRegister(input);
+
+ DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+
+ // Let the deferred code handle the HeapObject case.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+
+ // Smi to int32 conversion.
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsDoubleRegister());
+
+ Register input_reg = ToRegister(input);
+ DoubleRegister result_reg = ToDoubleRegister(result);
+
+ EmitNumberUntagD(input_reg, result_reg,
+ instr->hydrogen()->deoptimize_on_undefined(),
+ instr->environment());
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+ DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister double_scratch = double_scratch0();
+ FPURegister single_scratch = double_scratch0().low();
+
+ if (instr->truncating()) {
+ Register scratch3 = ToRegister(instr->TempAt(1));
+ __ EmitECMATruncate(result_reg,
+ double_input,
+ single_scratch,
+ scratch1,
+ scratch2,
+ scratch3);
+ } else {
+ Register except_flag = scratch2;
+
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ single_scratch,
+ double_input,
+ scratch1,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Deopt if the operation did not succeed (except_flag != 0).
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ // Load the result.
+ __ mfc1(result_reg, single_scratch);
+ }
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+ LOperand* input = instr->InputAt(0);
+ __ And(at, ToRegister(input), Operand(kSmiTagMask));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ LOperand* input = instr->InputAt(0);
+ __ And(at, ToRegister(input), Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+
+ __ GetObjectType(input, scratch, scratch);
+
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first;
+ InstanceType last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    // If there is only one type in the interval, check for equality.
+ if (first == last) {
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
+ } else {
+ DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
+ // Omit check for the last type.
+ if (last != LAST_TYPE) {
+ DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
+ }
+ }
+ } else {
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
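+    // When the mask is a single bit, the And below leaves either 0 or the
+    // mask itself, so one compare against zero distinguishes the cases.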
+ if (IsPowerOf2(mask)) {
+ ASSERT(tag == 0 || IsPowerOf2(tag));
+ __ And(at, scratch, mask);
+ DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
+ at, Operand(zero_reg));
+ } else {
+ __ And(scratch, scratch, Operand(mask));
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
+ }
+ }
+}
+
+
+void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+ ASSERT(instr->InputAt(0)->IsRegister());
+ Register reg = ToRegister(instr->InputAt(0));
+ DeoptimizeIf(ne, instr->environment(), reg,
+ Operand(instr->hydrogen()->target()));
+}
+
+
+void LCodeGen::DoCheckMap(LCheckMap* instr) {
+ Register scratch = scratch0();
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ Register reg = ToRegister(input);
+ __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+ DeoptimizeIf(ne,
+ instr->environment(),
+ scratch,
+ Operand(instr->hydrogen()->map()));
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+ __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ Register unclamped_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ Register scratch = scratch0();
+ Register input_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+ Label is_smi, done, heap_number;
+
+ // Both smi and heap number cases are handled.
+ __ JumpIfSmi(input_reg, &is_smi);
+
+  // Check for heap number.
+ __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
+
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ DeoptimizeIf(ne, instr->environment(), input_reg,
+ Operand(factory()->undefined_value()));
+ __ mov(result_reg, zero_reg);
+ __ jmp(&done);
+
+  // Heap number.
+ __ bind(&heap_number);
+ __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
+ HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+ __ jmp(&done);
+
+  // Smi.
+ __ bind(&is_smi);
+ __ SmiUntag(scratch, input_reg);
+ __ ClampUint8(result_reg, scratch);
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ if (heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ factory()->NewJSGlobalPropertyCell(object);
+ __ li(result, Operand(cell));
+ __ lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+ } else {
+ __ li(result, Operand(object));
+ }
+}
+
+
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+ Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->TempAt(1));
+
+ Handle<JSObject> holder = instr->holder();
+ Handle<JSObject> current_prototype = instr->prototype();
+
+ // Load prototype object.
+ LoadHeapObject(temp1, current_prototype);
+
+ // Check prototype maps up to the holder.
+ while (!current_prototype.is_identical_to(holder)) {
+ __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
+ DeoptimizeIf(ne,
+ instr->environment(),
+ temp2,
+ Operand(Handle<Map>(current_prototype->map())));
+ current_prototype =
+ Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
+ // Load next prototype object.
+ LoadHeapObject(temp1, current_prototype);
+ }
+
+ // Check the holder map.
+ __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
+ DeoptimizeIf(ne,
+ instr->environment(),
+ temp2,
+ Operand(Handle<Map>(current_prototype->map())));
+}
+
+
+void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+ Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+ __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ li(a1, Operand(constant_elements));
+ __ Push(a3, a2, a1);
+
+ // Pick the right runtime function or stub to call.
+ int length = instr->hydrogen()->length();
+ if (instr->hydrogen()->IsCopyOnWrite()) {
+ ASSERT(instr->hydrogen()->depth() == 1);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else if (instr->hydrogen()->depth() > 1) {
+ CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+ } else {
+ FastCloneShallowArrayStub::Mode mode =
+ constant_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
+}
+
+
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+ __ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
+ __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ li(a2, Operand(instr->hydrogen()->constant_properties()));
+ __ li(a1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
+ __ Push(t0, a3, a2, a1);
+
+ // Pick the right runtime function to call.
+ if (instr->hydrogen()->depth() > 1) {
+ CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+ } else {
+ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ }
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ ASSERT(ToRegister(instr->InputAt(0)).is(a0));
+ ASSERT(ToRegister(instr->result()).is(v0));
+ __ push(a0);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ Label materialized;
+ // Registers will be used as follows:
+ // a3 = JS function.
+ // t3 = literals array.
+ // a1 = regexp literal.
+ // a0 = regexp literal clone.
+ // a2 and t0-t2 are used as temporaries.
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+ int literal_offset = FixedArray::kHeaderSize +
+ instr->hydrogen()->literal_index() * kPointerSize;
+ __ lw(a1, FieldMemOperand(t3, literal_offset));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&materialized, ne, a1, Operand(at));
+
+  // Create regexp literal using runtime function.
+ // Result will be in v0.
+ __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ li(t1, Operand(instr->hydrogen()->pattern()));
+ __ li(t0, Operand(instr->hydrogen()->flags()));
+ __ Push(t3, t2, t1, t0);
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ __ mov(a1, v0);
+
+ __ bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+
+ __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ li(a0, Operand(Smi::FromInt(size)));
+ __ Push(a1, a0);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ __ pop(a1);
+
+ __ bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ // (Unroll copy loop once for better throughput).
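+  // Each iteration copies two words; an odd trailing word, if any, is
+  // handled after the loop.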
+ for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+ __ lw(a3, FieldMemOperand(a1, i));
+ __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
+ __ sw(a3, FieldMemOperand(v0, i));
+ __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
+ }
+ if ((size % (2 * kPointerSize)) != 0) {
+ __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
+ __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
+ }
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  // Use the fast-case closure allocation code that allocates in new
+  // space for nested functions that don't need literal cloning.
+ Handle<SharedFunctionInfo> shared_info = instr->shared_info();
+ bool pretenure = instr->hydrogen()->pretenure();
+ if (!pretenure && shared_info->num_literals() == 0) {
+ FastNewClosureStub stub(shared_info->strict_mode_flag());
+ __ li(a1, Operand(shared_info));
+ __ push(a1);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ li(a2, Operand(shared_info));
+ __ li(a1, Operand(pretenure
+ ? factory()->true_value()
+ : factory()->false_value()));
+ __ Push(cp, a2, a1);
+ CallRuntime(Runtime::kNewClosure, 3, instr);
+ }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+ Register input = ToRegister(instr->InputAt(0));
+ __ push(input);
+ CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Register cmp1 = no_reg;
+ Operand cmp2 = Operand(no_reg);
+
+ Condition final_branch_condition = EmitTypeofIs(true_label,
+ false_label,
+ input,
+ instr->type_literal(),
+ cmp1,
+ cmp2);
+
+ ASSERT(cmp1.is_valid());
+ ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
+
+ if (final_branch_condition != kNoCondition) {
+ EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
+ }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+ Label* false_label,
+ Register input,
+ Handle<String> type_name,
+ Register& cmp1,
+ Operand& cmp2) {
+  // This function makes heavy use of branch delay slots, filling them with
+  // loads of values that are usable regardless of the type of the input
+  // register.
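+  // On MIPS the instruction in a branch delay slot executes whether or
+  // not the branch is taken, so only loads that are safe on both paths
+  // may be placed there.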
+ Condition final_branch_condition = kNoCondition;
+ Register scratch = scratch0();
+ if (type_name->Equals(heap()->number_symbol())) {
+ __ JumpIfSmi(input, true_label);
+ __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ cmp1 = input;
+ cmp2 = Operand(at);
+ final_branch_condition = eq;
+
+ } else if (type_name->Equals(heap()->string_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ GetObjectType(input, input, scratch);
+ __ Branch(USE_DELAY_SLOT, false_label,
+ ge, scratch, Operand(FIRST_NONSTRING_TYPE));
+    // input is an object, so we can load the bit field even if we take the
+    // other branch.
+ __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+ __ And(at, at, 1 << Map::kIsUndetectable);
+ cmp1 = at;
+ cmp2 = Operand(zero_reg);
+ final_branch_condition = eq;
+
+ } else if (type_name->Equals(heap()->boolean_symbol())) {
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ cmp1 = at;
+ cmp2 = Operand(input);
+ final_branch_condition = eq;
+
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ cmp1 = at;
+ cmp2 = Operand(input);
+ final_branch_condition = eq;
+
+ } else if (type_name->Equals(heap()->undefined_symbol())) {
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    // The first instruction of JumpIfSmi is an And; it is safe in the delay
+    // slot.
+ __ JumpIfSmi(input, false_label);
+ // Check for undetectable objects => true.
+ __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+ __ And(at, at, 1 << Map::kIsUndetectable);
+ cmp1 = at;
+ cmp2 = Operand(zero_reg);
+ final_branch_condition = ne;
+
+ } else if (type_name->Equals(heap()->function_symbol())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ JumpIfSmi(input, false_label);
+ __ GetObjectType(input, scratch, input);
+ __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
+ cmp1 = input;
+ cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
+ final_branch_condition = eq;
+
+ } else if (type_name->Equals(heap()->object_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ if (!FLAG_harmony_typeof) {
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+ }
+  // input is an object, so it is safe to use GetObjectType in the delay slot.
+ __ GetObjectType(input, input, scratch);
+ __ Branch(USE_DELAY_SLOT, false_label,
+ lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ // Still an object, so the InstanceType can be loaded.
+ __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
+ __ Branch(USE_DELAY_SLOT, false_label,
+ gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ // Still an object, so the BitField can be loaded.
+ // Check for undetectable objects => false.
+ __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+ __ And(at, at, 1 << Map::kIsUndetectable);
+ cmp1 = at;
+ cmp2 = Operand(zero_reg);
+ final_branch_condition = eq;
+
+ } else {
+ cmp1 = at;
+ cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
+ __ Branch(false_label);
+ }
+
+ return final_branch_condition;
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp1 = ToRegister(instr->TempAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ EmitIsConstructCall(temp1, scratch0());
+
+ EmitBranch(true_block, false_block, eq, temp1,
+ Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
+ ASSERT(!temp1.is(temp2));
+ // Get the frame pointer for the calling frame.
+ __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+ __ Branch(&check_frame_marker, ne, temp2,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+ // No code for lazy bailout instruction. Used to capture environment after a
+ // call for populating the safepoint data with deoptimization data.
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+ DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
+ Register object = ToRegister(instr->object());
+ Register key = ToRegister(instr->key());
+ Register strict = scratch0();
+ __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
+ __ Push(object, key, strict);
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ SafepointGenerator safepoint_generator(this,
+ pointers,
+ env->deoptimization_index());
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoIn(LIn* instr) {
+ Register obj = ToRegister(instr->object());
+ Register key = ToRegister(instr->key());
+ __ Push(key, obj);
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ SafepointGenerator safepoint_generator(this,
+ pointers,
+ env->deoptimization_index());
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RegisterLazyDeoptimization(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ }
+
+ // The gap code includes the restoring of the safepoint registers.
+ int pc = masm()->pc_offset();
+ safepoints_.SetPcAfterGap(pc);
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStackCheck* instr_;
+ };
+
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&done, hs, sp, Operand(at));
+ StackCheckStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new DeferredStackCheck(this, instr);
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+ environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
+ instr->SpilledDoubleRegisterArray());
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ ASSERT(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment);
+ ASSERT(osr_pc_offset_ == -1);
+ osr_pc_offset_ = masm()->pc_offset();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/mips/lithium-codegen-mips.h b/src/3rdparty/v8/src/mips/lithium-codegen-mips.h
index 2aec684..f9c013c 100644
--- a/src/3rdparty/v8/src/mips/lithium-codegen-mips.h
+++ b/src/3rdparty/v8/src/mips/lithium-codegen-mips.h
@@ -29,35 +29,398 @@
#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
#include "mips/lithium-mips.h"
-
+#include "mips/lithium-gap-resolver-mips.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
-
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
+class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : chunk_(chunk),
+ masm_(assembler),
+ info_(info),
+ current_block_(-1),
+ current_instruction_(-1),
+ instructions_(chunk->instructions()),
+ deoptimizations_(4),
+ deopt_jump_table_(4),
+ deoptimization_literals_(8),
+ inlined_function_count_(0),
+ scope_(info->scope()),
+ status_(UNUSED),
+ deferred_(8),
+ osr_pc_offset_(-1),
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
+ PopulateDeoptimizationLiteralsWithInlinedFunctions();
+ }
+
+
+ // Simple accessors.
+ MacroAssembler* masm() const { return masm_; }
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info_->isolate(); }
+ Factory* factory() const { return isolate()->factory(); }
+ Heap* heap() const { return isolate()->heap(); }
+
+ // Support for converting LOperands to assembler types.
+ // LOperand must be a register.
+ Register ToRegister(LOperand* op) const;
+
+ // LOperand is loaded into scratch, unless already a register.
+ Register EmitLoadRegister(LOperand* op, Register scratch);
+
+ // LOperand must be a double register.
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+ // LOperand is loaded into dbl_scratch, unless already a double register.
+ DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+ FloatRegister flt_scratch,
+ DoubleRegister dbl_scratch);
+ int ToInteger32(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
+ Operand ToOperand(LOperand* op);
+ MemOperand ToMemOperand(LOperand* op) const;
+ // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+ MemOperand ToHighMemOperand(LOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
- bool GenerateCode() {
- UNIMPLEMENTED();
- return false;
- }
+ bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
- void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
+ void FinishCode(Handle<Code> code);
+
+ // Deferred code support.
+ template<int T>
+ void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
+ Token::Value op);
+ void DoDeferredNumberTagD(LNumberTagD* instr);
+ void DoDeferredNumberTagI(LNumberTagI* instr);
+ void DoDeferredTaggedToI(LTaggedToI* instr);
+ void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
+
+ // Parallel move support.
+ void DoParallelMove(LParallelMove* move);
+ void DoGap(LGap* instr);
+
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+ enum Status {
+ UNUSED,
+ GENERATING,
+ DONE,
+ ABORTED
+ };
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_generating() const { return status_ == GENERATING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+
+ StrictModeFlag strict_mode_flag() const {
+ return info()->strict_mode_flag();
+ }
+
+ LChunk* chunk() const { return chunk_; }
+ Scope* scope() const { return scope_; }
+ HGraph* graph() const { return chunk_->graph(); }
+
+ Register scratch0() { return lithiumScratchReg; }
+ Register scratch1() { return lithiumScratchReg2; }
+ DoubleRegister double_scratch0() { return lithiumScratchDouble; }
+
+ int GetNextEmittedBlock(int block);
+ LInstruction* GetNextInstruction();
+
+ void EmitClassOfTest(Label* if_true,
+ Label* if_false,
+ Handle<String> class_name,
+ Register input,
+ Register temporary,
+ Register temporary2);
+
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ int GetParameterCount() const { return scope()->num_parameters(); }
+
+ void Abort(const char* format, ...);
+ void Comment(const char* format, ...);
+
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+
+ // Code generation passes. Returns true if code generation should
+ // continue.
+ bool GeneratePrologue();
+ bool GenerateBody();
+ bool GenerateDeferredCode();
+ bool GenerateDeoptJumpTable();
+ bool GenerateSafepointTable();
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, num_arguments, instr);
+ }
+
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr);
+
+ // Generate a direct call to a known function. Expects the function
+ // to be in a1.
+ void CallKnownFunction(Handle<JSFunction> function,
+ int arity,
+ LInstruction* instr,
+ CallKind call_kind);
+
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
+
+ void RegisterLazyDeoptimization(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+ void DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Register src1,
+ const Operand& src2);
+
+ void AddToTranslation(Translation* translation,
+ LOperand* op,
+ bool is_tagged);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+ Register ToRegister(int index) const;
+ DoubleRegister ToDoubleRegister(int index) const;
+
+ // Specific math operations - used from DoUnaryMathOperation.
+ void EmitIntegerMathAbs(LUnaryMathOperation* instr);
+ void DoMathAbs(LUnaryMathOperation* instr);
+ void DoMathFloor(LUnaryMathOperation* instr);
+ void DoMathRound(LUnaryMathOperation* instr);
+ void DoMathSqrt(LUnaryMathOperation* instr);
+ void DoMathPowHalf(LUnaryMathOperation* instr);
+ void DoMathLog(LUnaryMathOperation* instr);
+ void DoMathCos(LUnaryMathOperation* instr);
+ void DoMathSin(LUnaryMathOperation* instr);
+
+ // Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ int deoptimization_index);
+ void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
+ void RecordSafepoint(int deoptimization_index);
+ void RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ int deoptimization_index);
+ void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
+ int arguments,
+ int deoptimization_index);
+ void RecordPosition(int position);
+ int LastSafepointEnd() {
+ return static_cast<int>(safepoints_.GetPcAfterGap());
+ }
+
+ static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+ void EmitGoto(int block);
+ void EmitBranch(int left_block,
+ int right_block,
+ Condition cc,
+ Register src1,
+ const Operand& src2);
+ void EmitBranchF(int left_block,
+ int right_block,
+ Condition cc,
+ FPURegister src1,
+ FPURegister src2);
+ void EmitCmpI(LOperand* left, LOperand* right);
+ void EmitNumberUntagD(Register input,
+ DoubleRegister result,
+ bool deoptimize_on_undefined,
+ LEnvironment* env);
+
+ // Emits optimized code for typeof x == "y". Modifies input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ // Returns two registers in cmp1 and cmp2 that can be used in the
+ // Branch instruction after EmitTypeofIs.
+ Condition EmitTypeofIs(Label* true_label,
+ Label* false_label,
+ Register input,
+ Handle<String> type_name,
+ Register& cmp1,
+ Operand& cmp2);
+
+ // Emits optimized code for %_IsObject(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsObject(Register input,
+ Register temp1,
+ Label* is_not_object,
+ Label* is_object);
+
+ // Emits optimized code for %_IsConstructCall().
+ // Caller should branch on equal condition.
+ void EmitIsConstructCall(Register temp1, Register temp2);
+
+ void EmitLoadFieldOrConstantFunction(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name);
+
+ struct JumpTableEntry {
+ explicit inline JumpTableEntry(Address entry)
+ : label(),
+ address(entry) { }
+ Label label;
+ Address address;
+ };
+
+ LChunk* const chunk_;
+ MacroAssembler* const masm_;
+ CompilationInfo* const info_;
+
+ int current_block_;
+ int current_instruction_;
+ const ZoneList<LInstruction*>* instructions_;
+ ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<JumpTableEntry> deopt_jump_table_;
+ ZoneList<Handle<Object> > deoptimization_literals_;
+ int inlined_function_count_;
+ Scope* const scope_;
+ Status status_;
+ TranslationBuffer translations_;
+ ZoneList<LDeferredCode*> deferred_;
+ int osr_pc_offset_;
+
+ // Builder that keeps track of safepoints in the code. The table
+ // itself is emitted at the end of the generated code.
+ SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+ LGapResolver resolver_;
+
+ Safepoint::Kind expected_safepoint_kind_;
+
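+  // Helper scope that pushes the safepoint registers described by |kind|
+  // on entry and pops them on exit, updating expected_safepoint_kind_ so
+  // that safepoints recorded inside the scope have the matching kind.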
+ class PushSafepointRegistersScope BASE_EMBEDDED {
+ public:
+ PushSafepointRegistersScope(LCodeGen* codegen,
+ Safepoint::Kind kind)
+ : codegen_(codegen) {
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = kind;
+
+ switch (codegen_->expected_safepoint_kind_) {
+ case Safepoint::kWithRegisters:
+ codegen_->masm_->PushSafepointRegisters();
+ break;
+ case Safepoint::kWithRegistersAndDoubles:
+ codegen_->masm_->PushSafepointRegistersAndDoubles();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ ~PushSafepointRegistersScope() {
+ Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+ ASSERT((kind & Safepoint::kWithRegisters) != 0);
+ switch (kind) {
+ case Safepoint::kWithRegisters:
+ codegen_->masm_->PopSafepointRegisters();
+ break;
+ case Safepoint::kWithRegistersAndDoubles:
+ codegen_->masm_->PopSafepointRegistersAndDoubles();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
+ friend class LDeferredCode;
+ friend class LEnvironment;
+ friend class SafepointGenerator;
+ DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
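+// Deferred code is emitted out of line, after the main body of a compiled
+// function (see LCodeGen::GenerateDeferredCode). Subclasses override
+// Generate() with the slow-case code for one instruction and register
+// themselves with the code generator on construction.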
+class LDeferredCode: public ZoneObject {
+ public:
+ explicit LDeferredCode(LCodeGen* codegen)
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
+ codegen->AddDeferredCode(this);
+ }
+
+ virtual ~LDeferredCode() { }
+ virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
+
+ void SetExit(Label *exit) { external_exit_ = exit; }
+ Label* entry() { return &entry_; }
+ Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
+
+ protected:
+ LCodeGen* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+ LCodeGen* codegen_;
+ Label entry_;
+ Label exit_;
+ Label* external_exit_;
+ int instruction_index_;
};
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc b/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc
new file mode 100644
index 0000000..8f7f89c
--- /dev/null
+++ b/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -0,0 +1,309 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "mips/lithium-gap-resolver-mips.h"
+#include "mips/lithium-codegen-mips.h"
+
+namespace v8 {
+namespace internal {
+
+static const Register kSavedValueRegister = lithiumScratchReg;
+static const DoubleRegister kSavedDoubleValueRegister = lithiumScratchDouble;
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner),
+ moves_(32),
+ root_index_(0),
+ in_cycle_(false),
+ saved_destination_(NULL) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(moves_.is_empty());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found by reaching this move again.
+ PerformMove(i);
+ if (in_cycle_) {
+ RestoreValue();
+ }
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ ASSERT(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ moves_.Rewind(0);
+}
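+
+// Illustrative walk-through (comment only, not part of the upstream
+// change): for the parallel move {v0 -> v1, v1 -> v0}, PerformMove(0)
+// recurses into PerformMove(1), which finds the pending root move
+// blocking its destination and calls BreakCycle(1). That saves v1 in
+// kSavedValueRegister and eliminates move 1; move 0 is then emitted
+// (v1 = v0), and RestoreValue() finally writes the saved value to v0,
+// completing the swap.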
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move);
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph.
+
+ // We can only find a cycle, when doing a depth-first traversal of moves,
+  // by encountering the starting move again. So by spilling the source of
+ // the starting move, we break the cycle. All moves are then unblocked,
+ // and the starting move is completed by writing the spilled value to
+ // its destination. All other moves from the spilled source have been
+ // completed prior to breaking the cycle.
+ // An additional complication is that moves to MemOperands with large
+  // offsets (more than 1K or 4K) require us to spill the saved value to
+ // the stack, to free up the register.
+ ASSERT(!moves_[index].IsPending());
+ ASSERT(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack allocated local. Multiple moves can
+ // be pending because this function is recursive.
+ ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ PerformMove(i);
+ // If there is a blocking, pending move it must be moves_[root_index_]
+ // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // The move may be blocked on a pending move, which must be the starting move.
+ // In this case, we have a cycle, and we save the source of this move to
+ // a scratch register to break it.
+ LMoveOperands other_move = moves_[root_index_];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ BreakCycle(index);
+ return;
+ }
+
+ // This move is no longer blocked.
+ EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+ // We save in a register the value that should end up in the source of
+ // moves_[root_index]. After performing all moves in the tree rooted
+ // in that move, we save the value to that source.
+ ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ ASSERT(!in_cycle_);
+ in_cycle_ = true;
+ LOperand* source = moves_[index].source();
+ saved_destination_ = moves_[index].destination();
+ if (source->IsRegister()) {
+ __ mov(kSavedValueRegister, cgen_->ToRegister(source));
+ } else if (source->IsStackSlot()) {
+ __ lw(kSavedValueRegister, cgen_->ToMemOperand(source));
+ } else if (source->IsDoubleRegister()) {
+ __ mov_d(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
+ } else if (source->IsDoubleStackSlot()) {
+ __ ldc1(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
+ } else {
+ UNREACHABLE();
+ }
+ // This move will be done by restoring the saved value to the destination.
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+ ASSERT(in_cycle_);
+ ASSERT(saved_destination_ != NULL);
+
+ // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
+ if (saved_destination_->IsRegister()) {
+ __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+ } else if (saved_destination_->IsStackSlot()) {
+ __ sw(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+ } else if (saved_destination_->IsDoubleRegister()) {
+ __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
+ kSavedDoubleValueRegister);
+ } else if (saved_destination_->IsDoubleStackSlot()) {
+ __ sdc1(kSavedDoubleValueRegister,
+ cgen_->ToMemOperand(saved_destination_));
+ } else {
+ UNREACHABLE();
+ }
+
+ in_cycle_ = false;
+ saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+
+ if (source->IsRegister()) {
+ Register source_register = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mov(cgen_->ToRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ __ sw(source_register, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ lw(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ if (!destination_operand.OffsetIsInt16Encodable()) {
+ // 'at' is overwritten while saving the value to the destination.
+ // Therefore we can't use 'at'. It is OK if the read from the source
+ // destroys 'at', since that happens before the value is read.
+ // This uses only a single reg of the double reg-pair.
+ __ lwc1(kSavedDoubleValueRegister, source_operand);
+ __ swc1(kSavedDoubleValueRegister, destination_operand);
+ } else {
+ __ lw(at, source_operand);
+ __ sw(at, destination_operand);
+ }
+ } else {
+ __ lw(kSavedValueRegister, source_operand);
+ __ sw(kSavedValueRegister, destination_operand);
+ }
+ }
+
+ } else if (source->IsConstantOperand()) {
+ Operand source_operand = cgen_->ToOperand(source);
+ if (destination->IsRegister()) {
+ __ li(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ __ li(kSavedValueRegister, source_operand);
+      __ sw(kSavedValueRegister, destination_operand);
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ __ sdc1(source_register, destination_operand);
+ }
+
+ } else if (source->IsDoubleStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ // kSavedDoubleValueRegister was used to break the cycle,
+ // but kSavedValueRegister is free.
+ MemOperand source_high_operand =
+ cgen_->ToHighMemOperand(source);
+ MemOperand destination_high_operand =
+ cgen_->ToHighMemOperand(destination);
+ __ lw(kSavedValueRegister, source_operand);
+ __ sw(kSavedValueRegister, destination_operand);
+ __ lw(kSavedValueRegister, source_high_operand);
+ __ sw(kSavedValueRegister, destination_high_operand);
+ } else {
+ __ ldc1(kSavedDoubleValueRegister, source_operand);
+ __ sdc1(kSavedDoubleValueRegister, destination_operand);
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ moves_[index].Eliminate();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.h b/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.h
new file mode 100644
index 0000000..2506e38
--- /dev/null
+++ b/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.h
@@ -0,0 +1,83 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
diff --git a/src/3rdparty/v8/src/mips/lithium-mips.cc b/src/3rdparty/v8/src/mips/lithium-mips.cc
new file mode 100644
index 0000000..a9a302c
--- /dev/null
+++ b/src/3rdparty/v8/src/mips/lithium-mips.cc
@@ -0,0 +1,2203 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "lithium-allocator-inl.h"
+#include "mips/lithium-mips.h"
+#include "mips/lithium-codegen-mips.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type) \
+ void L##type::CompileToNative(LCodeGen* generator) { \
+ generator->Do##type(this); \
+ }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+LOsrEntry::LOsrEntry() {
+ for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ register_spills_[i] = NULL;
+ }
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ double_register_spills_[i] = NULL;
+ }
+}
+
+
+void LOsrEntry::MarkSpilledRegister(int allocation_index,
+ LOperand* spill_operand) {
+ ASSERT(spill_operand->IsStackSlot());
+ ASSERT(register_spills_[allocation_index] == NULL);
+ register_spills_[allocation_index] = spill_operand;
+}
+
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register or use-at-start policy or
+ // a non-register policy.
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
+ }
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+ }
+}
+#endif
+
+
+void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
+ LOperand* spill_operand) {
+ ASSERT(spill_operand->IsDoubleStackSlot());
+ ASSERT(double_register_spills_[allocation_index] == NULL);
+ double_register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+ stream->Add("%s ", this->Mnemonic());
+
+ PrintOutputOperandTo(stream);
+
+ PrintDataTo(stream);
+
+ if (HasEnvironment()) {
+ stream->Add(" ");
+ environment()->PrintTo(stream);
+ }
+
+ if (HasPointerMap()) {
+ stream->Add(" ");
+ pointer_map()->PrintTo(stream);
+ }
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ for (int i = 0; i < inputs_.length(); i++) {
+ if (i > 0) stream->Add(" ");
+ inputs_[i]->PrintTo(stream);
+ }
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+ for (int i = 0; i < results_.length(); i++) {
+ if (i > 0) stream->Add(" ");
+ results_[i]->PrintTo(stream);
+ }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
+ }
+}
+
+
+bool LGap::IsRedundant() const {
+ for (int i = 0; i < 4; i++) {
+ if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < 4; i++) {
+ stream->Add("(");
+ if (parallel_moves_[i] != NULL) {
+ parallel_moves_[i]->PrintDataTo(stream);
+ }
+ stream->Add(") ");
+ }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-d";
+ case Token::SUB: return "sub-d";
+ case Token::MUL: return "mul-d";
+ case Token::DIV: return "div-d";
+ case Token::MOD: return "mod-d";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-t";
+ case Token::SUB: return "sub-t";
+ case Token::MUL: return "mul-t";
+ case Token::MOD: return "mod-t";
+ case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::SHL: return "sll-t";
+ case Token::SAR: return "sra-t";
+ case Token::SHR: return "srl-t";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+ InputAt(0)->PrintTo(stream);
+}
+
+
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" %s ", Token::String(op()));
+ InputAt(1)->PrintTo(stream);
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(kind() == kStrictEquality ? " === " : " == ");
+ stream->Add(nil() == kNullValue ? "null" : "undefined");
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_object(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_smi(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_instance_type(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_cached_array_index(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if class_of_test(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(", \"%o\") then B%d else B%d",
+ *hydrogen()->class_name(),
+ true_block_id(),
+ false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if typeof ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" == \"%s\" then B%d else B%d",
+ *hydrogen()->type_literal()->ToCString(),
+ true_block_id(), false_block_id());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("#%d / ", arity());
+}
+
+
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
+ stream->Add("/%s ", hydrogen()->OpName());
+ InputAt(0)->PrintTo(stream);
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ InputAt(1)->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) {
+ stream->Add("[a2] #%d / ", arity());
+}
+
+
+void LCallNamed::PrintDataTo(StringStream* stream) {
+ SmartArrayPointer<char> name_string = name()->ToCString();
+ stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallGlobal::PrintDataTo(StringStream* stream) {
+ SmartArrayPointer<char> name_string = name()->ToCString();
+ stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+ arguments()->PrintTo(stream);
+
+ stream->Add(" length ");
+ length()->PrintTo(stream);
+
+ stream->Add(" index ");
+ index()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+LChunk::LChunk(CompilationInfo* info, HGraph* graph)
+ : spill_slot_count_(0),
+ info_(info),
+ graph_(graph),
+ instructions_(32),
+ pointer_maps_(8),
+ inlined_closures_(1) {
+}
+
+
+int LChunk::GetNextSpillIndex(bool is_double) {
+  // Skip an extra slot when allocating a double-width slot.
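+  // (E.g. with spill_slot_count_ == 3, a double-width request advances the
+  // counter twice, reserving words 3 and 4 and returning index 4.)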
+ if (is_double) spill_slot_count_++;
+ return spill_slot_count_++;
+}
+
+
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+ int index = GetNextSpillIndex(is_double);
+ if (is_double) {
+ return LDoubleStackSlot::Create(index);
+ } else {
+ return LStackSlot::Create(index);
+ }
+}
+
+
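+// A block is "empty" when it contains only its label, redundant gaps and
+// a trailing goto; its label can then be redirected to the goto's target
+// so the block is skipped entirely.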
+void LChunk::MarkEmptyBlocks() {
+ HPhase phase("Mark empty blocks", this);
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ int first = block->first_instruction_index();
+ int last = block->last_instruction_index();
+ LInstruction* first_instr = instructions()->at(first);
+ LInstruction* last_instr = instructions()->at(last);
+
+ LLabel* label = LLabel::cast(first_instr);
+ if (last_instr->IsGoto()) {
+ LGoto* goto_instr = LGoto::cast(last_instr);
+ if (label->IsRedundant() &&
+ !label->is_loop_header()) {
+ bool can_eliminate = true;
+ for (int i = first + 1; i < last && can_eliminate; ++i) {
+ LInstruction* cur = instructions()->at(i);
+ if (cur->IsGap()) {
+ LGap* gap = LGap::cast(cur);
+ if (!gap->IsRedundant()) {
+ can_eliminate = false;
+ }
+ } else {
+ can_eliminate = false;
+ }
+ }
+
+ if (can_eliminate) {
+ label->set_replacement(GetLabel(goto_instr->block_id()));
+ }
+ }
+ }
+ }
+}
+
+
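+// Pair every instruction with a gap for the register allocator's parallel
+// moves: the gap goes before a control instruction (which must remain
+// last in its block) and after any other instruction.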
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+ LInstructionGap* gap = new LInstructionGap(block);
+ int index = -1;
+ if (instr->IsControl()) {
+ instructions_.Add(gap);
+ index = instructions_.length();
+ instructions_.Add(instr);
+ } else {
+ index = instructions_.length();
+ instructions_.Add(instr);
+ instructions_.Add(gap);
+ }
+ if (instr->HasPointerMap()) {
+ pointer_maps_.Add(instr->pointer_map());
+ instr->pointer_map()->set_lithium_position(index);
+ }
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+ return LConstantOperand::Create(constant->id());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+ // The receiver is at index 0, the first parameter at index 1, so we
+ // shift all parameter indexes down by the number of parameters, and
+ // make sure they end up negative so they are distinguishable from
+ // spill slots.
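+  // For example, with two parameters the receiver (index 0) maps to
+  // slot -3 and the first parameter (index 1) to slot -2.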
+ int result = index - info()->scope()->num_parameters() - 1;
+ ASSERT(result < 0);
+ return result;
+}
+
+// A parameter relative to fp in the arguments stub.
+int LChunk::ParameterAt(int index) {
+ ASSERT(-1 <= index); // -1 is the receiver.
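+  // E.g. with two parameters the receiver (index -1) lives at
+  // (1 + 2 + 1) * kPointerSize above fp.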
+ return (1 + info()->scope()->num_parameters() - index) *
+ kPointerSize;
+}
+
+
+LGap* LChunk::GetGapAt(int index) const {
+ return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+ return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+ while (!IsGapAt(index)) index--;
+ return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+ GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+}
+
+
+Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
+ return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+ LConstantOperand* operand) const {
+ return graph_->LookupValue(operand->index())->representation();
+}
+
+
+LChunk* LChunkBuilder::Build() {
+ ASSERT(is_unused());
+ chunk_ = new LChunk(info(), graph());
+ HPhase phase("Building chunk", chunk_);
+ status_ = BUILDING;
+ const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ for (int i = 0; i < blocks->length(); i++) {
+ HBasicBlock* next = NULL;
+ if (i < blocks->length() - 1) next = blocks->at(i + 1);
+ DoBasicBlock(blocks->at(i), next);
+ if (is_aborted()) return NULL;
+ }
+ status_ = DONE;
+ return chunk_;
+}
+
+
+void LChunkBuilder::Abort(const char* format, ...) {
+ if (FLAG_trace_bailout) {
+ SmartArrayPointer<char> name(
+ info()->shared_info()->DebugName()->ToCString());
+ PrintF("Aborting LChunk building in @\"%s\": ", *name);
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+ PrintF("\n");
+ }
+ status_ = ABORTED;
+}
+
+
+LRegister* LChunkBuilder::ToOperand(Register reg) {
+ return LRegister::Create(Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+ return new LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+ return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+ return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+ return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+ return Use(value,
+ new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+ return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+ return Use(value, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+ return Use(value, new LUnallocated(LUnallocated::NONE,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value, new LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+ if (value->EmitAtUses()) {
+ HInstruction* instr = HInstruction::cast(value);
+ VisitInstruction(instr);
+ }
+ allocator_->RecordUse(value, operand);
+ return operand;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+ LUnallocated* result) {
+ allocator_->RecordDefinition(current_instruction_, result);
+ instr->set_result(result);
+ return instr;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
+ return Define(instr, new LUnallocated(LUnallocated::NONE));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateInstruction<1, I, T>* instr) {
+ return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateInstruction<1, I, T>* instr, int index) {
+ return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateInstruction<1, I, T>* instr) {
+ return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixed(
+ LTemplateInstruction<1, I, T>* instr, Register reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+ HEnvironment* hydrogen_env = current_block_->last_environment();
+ int argument_index_accumulator = 0;
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator));
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
+ LInstruction* instr, int ast_id) {
+ ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+ ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ instruction_pending_deoptimization_environment_ = instr;
+ pending_deoptimization_ast_id_ = ast_id;
+ return instr;
+}
+
+
+void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
+ instruction_pending_deoptimization_environment_ = NULL;
+ pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize) {
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
+ instr = AssignPointerMap(instr);
+
+ if (hinstr->HasObservableSideEffects()) {
+ ASSERT(hinstr->next()->IsSimulate());
+ HSimulate* sim = HSimulate::cast(hinstr->next());
+ instr = SetInstructionPendingDeoptimizationEnvironment(
+ instr, sim->ast_id());
+ }
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+ bool needs_environment =
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
+ if (needs_environment && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
+ instr->MarkAsSaveDoubles();
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ ASSERT(!instr->HasPointerMap());
+ instr->set_pointer_map(new LPointerMap(position_));
+ return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+ LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ allocator_->RecordTemporary(operand);
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ allocator_->RecordTemporary(operand);
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ allocator_->RecordTemporary(operand);
+ return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+ return new LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+ return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+ return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsTagged()) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), a1);
+ LOperand* right = UseFixed(instr->right(), a0);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+ }
+
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
+
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ } else {
+ right = UseRegisterAtStart(right_value);
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
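+  // (E.g. shifting -1 right logically by 0 yields 4294967295, which does
+  // not fit in an int32.)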
+ bool may_deopt = (op == Token::SHR && constant_value == 0);
+ bool does_deopt = false;
+ if (may_deopt) {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+ does_deopt = true;
+ break;
+ }
+ }
+ }
+
+ LInstruction* result =
+ DefineAsRegister(new LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ ASSERT(op != Token::MOD);
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ ASSERT(op == Token::ADD ||
+ op == Token::DIV ||
+ op == Token::MOD ||
+ op == Token::MUL ||
+ op == Token::SUB);
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+ ASSERT(left->representation().IsTagged());
+ ASSERT(right->representation().IsTagged());
+ LOperand* left_operand = UseFixed(left, a1);
+ LOperand* right_operand = UseFixed(right, a0);
+ LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+ ASSERT(is_building());
+ current_block_ = block;
+ next_block_ = next_block;
+ if (block->IsStartBlock()) {
+ block->UpdateEnvironment(graph_->start_environment());
+ argument_count_ = 0;
+ } else if (block->predecessors()->length() == 1) {
+ // We have a single predecessor => copy environment and outgoing
+ // argument count from the predecessor.
+ ASSERT(block->phis()->length() == 0);
+ HBasicBlock* pred = block->predecessors()->at(0);
+ HEnvironment* last_environment = pred->last_environment();
+ ASSERT(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+ if (pred->end()->SecondSuccessor() == NULL) {
+ ASSERT(pred->end()->FirstSuccessor() == block);
+ } else {
+ if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+ pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+ last_environment = last_environment->Copy();
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ ASSERT(pred->argument_count() >= 0);
+ argument_count_ = pred->argument_count();
+ } else {
+ // We are at a state join => process phis.
+ HBasicBlock* pred = block->predecessors()->at(0);
+ // No need to copy the environment, it cannot be used later.
+ HEnvironment* last_environment = pred->last_environment();
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
+ for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
+ block->UpdateEnvironment(last_environment);
+ // Pick up the outgoing argument count of one of the predecessors.
+ argument_count_ = pred->argument_count();
+ }
+ HInstruction* current = block->first();
+ int start = chunk_->instructions()->length();
+ while (current != NULL && !is_aborted()) {
+ // Code for constants in registers is generated lazily.
+ if (!current->EmitAtUses()) {
+ VisitInstruction(current);
+ }
+ current = current->next();
+ }
+ int end = chunk_->instructions()->length() - 1;
+ if (end >= start) {
+ block->set_first_instruction_index(start);
+ block->set_last_instruction_index(end);
+ }
+ block->set_argument_count(argument_count_);
+ next_block_ = NULL;
+ current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+ HInstruction* old_current = current_instruction_;
+ current_instruction_ = current;
+ if (current->has_position()) position_ = current->position();
+ LInstruction* instr = current->CompileToLithium(this);
+
+ if (instr != NULL) {
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ instr->set_hydrogen_value(current);
+ chunk_->AddInstruction(instr, current_block_);
+ }
+ current_instruction_ = old_current;
+}
+
+
+LEnvironment* LChunkBuilder::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator) {
+ if (hydrogen_env == NULL) return NULL;
+
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+ int ast_id = hydrogen_env->ast_id();
+ ASSERT(ast_id != AstNode::kNoNumber);
+ int value_count = hydrogen_env->length();
+ LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+ ast_id,
+ hydrogen_env->parameter_count(),
+ argument_count_,
+ value_count,
+ outer);
+ for (int i = 0; i < value_count; ++i) {
+ if (hydrogen_env->is_special_index(i)) continue;
+
+ HValue* value = hydrogen_env->values()->at(i);
+ LOperand* op = NULL;
+ if (value->IsArgumentsObject()) {
+ op = NULL;
+ } else if (value->IsPushArgument()) {
+ op = new LArgument((*argument_index_accumulator)++);
+ } else {
+ op = UseAny(value);
+ }
+ result->AddValue(op, value->representation());
+ }
+
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+ return new LGoto(instr->FirstSuccessor()->block_id());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+ HValue* v = instr->value();
+ if (v->EmitAtUses()) {
+ HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+ ? instr->FirstSuccessor()
+ : instr->SecondSuccessor();
+ return new LGoto(successor->block_id());
+ }
+ return AssignEnvironment(new LBranch(UseRegister(v)));
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+ return DefineAsRegister(new LArgumentsLength(UseRegister(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ return DefineAsRegister(new LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LInstanceOf* result =
+ new LInstanceOf(UseFixed(instr->left(), a0),
+ UseFixed(instr->right(), a1));
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+ HInstanceOfKnownGlobal* instr) {
+ LInstanceOfKnownGlobal* result =
+ new LInstanceOfKnownGlobal(UseFixed(instr->left(), a0), FixedTemp(t0));
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+ LOperand* function = UseFixed(instr->function(), a1);
+ LOperand* receiver = UseFixed(instr->receiver(), a0);
+ LOperand* length = UseFixed(instr->length(), a2);
+ LOperand* elements = UseFixed(instr->elements(), a3);
+ LApplyArguments* result = new LApplyArguments(function,
+ receiver,
+ length,
+ elements);
+ return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+ ++argument_count_;
+ LOperand* argument = Use(instr->argument());
+ return new LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LOuterContext(context));
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalObject(context));
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
+ LOperand* global_object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalReceiver(global_object));
+}
+
+
+LInstruction* LChunkBuilder::DoCallConstantFunction(
+ HCallConstantFunction* instr) {
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallConstantFunction, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), a1);
+ argument_count_ -= instr->argument_count();
+ LInvokeFunction* result = new LInvokeFunction(function);
+ return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+ BuiltinFunctionId op = instr->op();
+ if (op == kMathLog || op == kMathSin || op == kMathCos) {
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
+ return MarkAsCall(DefineFixedDouble(result, f4), instr);
+ } else {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
+ LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
+ switch (op) {
+ case kMathAbs:
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ case kMathFloor:
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ case kMathSqrt:
+ return DefineAsRegister(result);
+ case kMathRound:
+ return AssignEnvironment(DefineAsRegister(result));
+ case kMathPowHalf:
+ return DefineAsRegister(result);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
+ ASSERT(instr->key()->representation().IsTagged());
+ argument_count_ -= instr->argument_count();
+ LOperand* key = UseFixed(instr->key(), a2);
+ return MarkAsCall(DefineFixed(new LCallKeyed(key), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallNamed, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallGlobal, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallKnownGlobal, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* constructor = UseFixed(instr->constructor(), a1);
+ argument_count_ -= instr->argument_count();
+ LCallNew* result = new LCallNew(constructor);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallFunction, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallRuntime, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+ return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+ return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+ return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineAsRegister(new LBitI(left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), a1);
+ LOperand* right = UseFixed(instr->right(), a0);
+ LArithmeticT* result = new LArithmeticT(instr->op(), left, right);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+ ASSERT(instr->value()->representation().IsInteger32());
+ ASSERT(instr->representation().IsInteger32());
+ return DefineAsRegister(new LBitNotI(UseRegisterAtStart(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+ if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
+ } else if (instr->representation().IsInteger32()) {
+    // TODO(1042): The fixed register allocation is needed because we call
+    // TypeRecordingBinaryOpStub from the generated code, which requires
+    // registers a0 and a1 to be used. We should remove that when we
+    // provide a native implementation.
+ LOperand* dividend = UseFixed(instr->left(), a0);
+ LOperand* divisor = UseFixed(instr->right(), a1);
+ return AssignEnvironment(AssignPointerMap(
+ DefineFixed(new LDivI(dividend, divisor), v0)));
+ } else {
+ return DoArithmeticT(Token::DIV, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LModI* mod;
+ if (instr->HasPowerOf2Divisor()) {
+ ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
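+      // A constant power-of-2 divisor (e.g. 8) can be reduced to a
+      // bitmask instead of a full division.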
+ LOperand* value = UseRegisterAtStart(instr->left());
+ mod = new LModI(value, UseOrConstant(instr->right()));
+ } else {
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ mod = new LModI(dividend,
+ divisor,
+ TempRegister(),
+ FixedTemp(f20),
+ FixedTemp(f22));
+ }
+
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanBeDivByZero)) {
+ return AssignEnvironment(DefineAsRegister(mod));
+ } else {
+ return DefineAsRegister(mod);
+ }
+ } else if (instr->representation().IsTagged()) {
+ return DoArithmeticT(Token::MOD, instr);
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double modulo. It can't trigger a GC.
+    // We need to use a fixed result register for the call.
+ // TODO(fschneider): Allow any register as input registers.
+ LOperand* left = UseFixedDouble(instr->left(), f2);
+ LOperand* right = UseFixedDouble(instr->right(), f4);
+ LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+ return MarkAsCall(DefineFixedDouble(result, f2), instr);
+ }
+}
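+
+// Note: the power-of-2 path above exists because a modulus by a constant
+// 2^k can typically be reduced to a mask, e.g. for non-negative x:
+//
+//   x % 8  ==>  x & 7
+//
+// Only the general case needs the extra temp and the two fixed double
+// registers.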
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left;
+ LOperand* right = UseOrConstant(instr->MostConstantOperand());
+ LOperand* temp = NULL;
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+ (instr->CheckFlag(HValue::kCanOverflow) ||
+ !right->IsConstantOperand())) {
+ left = UseRegister(instr->LeastConstantOperand());
+ temp = TempRegister();
+ } else {
+ left = UseRegisterAtStart(instr->LeastConstantOperand());
+ }
+ return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
+
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MUL, instr);
+
+ } else {
+ return DoArithmeticT(Token::MUL, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LSubI* sub = new LSubI(left, right);
+ LInstruction* result = DefineAsRegister(sub);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ return DoArithmeticT(Token::SUB, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LAddI* add = new LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::ADD, instr);
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ return DoArithmeticT(Token::ADD, instr);
+ }
+}
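+
+// Note: for the integer add/sub lowerings above, a deoptimization
+// environment is attached only when the hydrogen value is flagged
+// kCanOverflow, so a 32-bit overflow bails out to unoptimized code
+// instead of silently wrapping.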
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ ASSERT(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), f2);
+ LOperand* right = exponent_type.IsDouble() ?
+ UseFixedDouble(instr->right(), f4) :
+ UseFixed(instr->right(), a0);
+ LPower* result = new LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, f6),
+ instr,
+ CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* left = UseFixed(instr->left(), a1);
+ LOperand* right = UseFixed(instr->right(), a0);
+ LCmpT* result = new LCmpT(left, right);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+ HCompareIDAndBranch* instr) {
+ Representation r = instr->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return new LCmpIDAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new LCmpIDAndBranch(left, right);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+ HCompareConstantEqAndBranch* instr) {
+ return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new LIsNilAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* temp = TempRegister();
+ return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+ TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LJSArrayLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
+ HFixedArrayBaseLength* instr) {
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LFixedArrayBaseLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+ LOperand* object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LElementsKind(object));
+}
+
+
+LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
+ LOperand* object = UseRegister(instr->value());
+ LValueOf* result = new LValueOf(object, TempRegister());
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+ return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
+ UseRegister(instr->length())));
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+ LOperand* value = UseFixed(instr->value(), a0);
+ return MarkAsCall(new LThrow(value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+ Representation from = instr->from();
+ Representation to = instr->to();
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(instr->value());
+ LNumberUntagD* res = new LNumberUntagD(value);
+ return AssignEnvironment(DefineAsRegister(res));
+ } else {
+ ASSERT(to.IsInteger32());
+ LOperand* value = UseRegister(instr->value());
+ bool needs_check = !instr->value()->type().IsSmi();
+ LInstruction* res = NULL;
+ if (!needs_check) {
+ res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
+ } else {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
+ : NULL;
+ LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(f22)
+ : NULL;
+ res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3));
+ res = AssignEnvironment(res);
+ }
+ return res;
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+
+ // Make sure that the temp and result_temp registers are
+ // different.
+ LUnallocated* result_temp = TempRegister();
+ LNumberTagD* result = new LNumberTagD(value, temp1, temp2);
+ Define(result, result_temp);
+ return AssignPointerMap(result);
+ } else {
+ ASSERT(to.IsInteger32());
+ LOperand* value = UseRegister(instr->value());
+ LDoubleToI* res =
+ new LDoubleToI(value,
+ TempRegister(),
+ instr->CanTruncateToInt32() ? TempRegister() : NULL);
+ return AssignEnvironment(DefineAsRegister(res));
+ }
+ } else if (from.IsInteger32()) {
+ if (to.IsTagged()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return DefineSameAsFirst(new LSmiTag(value));
+ } else {
+ LNumberTagI* result = new LNumberTagI(value);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ }
+ } else {
+ ASSERT(to.IsDouble());
+ LOperand* value = Use(instr->value());
+ return DefineAsRegister(new LInteger32ToDouble(value));
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+}
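+
+// A rough summary of the representation changes handled above:
+//   tagged -> double   LNumberUntagD, with a deopt environment
+//   tagged -> int32    LSmiUntag, or LTaggedToI with a deopt environment
+//   double -> tagged   LNumberTagD, which may allocate (pointer map)
+//   double -> int32    LDoubleToI, with a deopt environment
+//   int32  -> tagged   LSmiTag, or LNumberTagI outside the smi range
+//   int32  -> double   LInteger32ToDouble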
+
+
+LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new LCheckNonSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = new LCheckInstanceType(value);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LInstruction* result = new LCheckPrototypeMaps(temp1, temp2);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new LCheckFunction(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = new LCheckMap(value);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+    // Revisit this decision; it applies here and to the tagged case below.
+ return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(f22)));
+ } else if (input_rep.IsInteger32()) {
+ return DefineAsRegister(new LClampIToUint8(reg));
+ } else {
+ ASSERT(input_rep.IsTagged());
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve f22 explicitly.
+ LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(f22));
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LDoubleToI* res = new LDoubleToI(reg, temp1, temp2);
+ return AssignEnvironment(DefineAsRegister(res));
+ } else if (input_rep.IsInteger32()) {
+    // Canonicalization should already have removed the hydrogen instruction
+    // in this case, since it is a no-op.
+ UNREACHABLE();
+ return NULL;
+ } else {
+ ASSERT(input_rep.IsTagged());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = FixedTemp(f22);
+ LTaggedToI* res = new LTaggedToI(reg, temp1, temp2, temp3);
+ return AssignEnvironment(DefineSameAsFirst(res));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ return new LReturn(UseFixed(instr->value(), v0));
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+ Representation r = instr->representation();
+ if (r.IsInteger32()) {
+ return DefineAsRegister(new LConstantI);
+ } else if (r.IsDouble()) {
+ return DefineAsRegister(new LConstantD);
+ } else if (r.IsTagged()) {
+ return DefineAsRegister(new LConstantT);
+ } else {
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new LLoadGlobalCell;
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* global_object = UseFixed(instr->global_object(), a0);
+ LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+ LOperand* temp = TempRegister();
+ LOperand* value = UseTempRegister(instr->value());
+ LInstruction* result = new LStoreGlobalCell(value, temp);
+ if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* global_object = UseFixed(instr->global_object(), a1);
+ LOperand* value = UseFixed(instr->value(), a0);
+ LStoreGlobalGeneric* result =
+ new LStoreGlobalGeneric(global_object, value);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadContextSlot(context));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* context;
+ LOperand* value;
+ if (instr->NeedsWriteBarrier()) {
+ context = UseTempRegister(instr->context());
+ value = UseTempRegister(instr->value());
+ } else {
+ context = UseRegister(instr->context());
+ value = UseRegister(instr->value());
+ }
+ return new LStoreContextSlot(context, value);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ return DefineAsRegister(
+ new LLoadNamedField(UseRegisterAtStart(instr->object())));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
+ HLoadNamedFieldPolymorphic* instr) {
+ ASSERT(instr->representation().IsTagged());
+ if (instr->need_generic()) {
+ LOperand* obj = UseFixed(instr->object(), a0);
+ LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+ } else {
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* object = UseFixed(instr->object(), a0);
+ LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), v0);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+ HLoadFunctionPrototype* instr) {
+ return AssignEnvironment(DefineAsRegister(
+ new LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadElements(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
+ HLoadExternalArrayPointer* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadExternalArrayPointer(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+ HLoadKeyedFastElement* instr) {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ LOperand* key = UseRegisterAtStart(instr->key());
+ LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+ HLoadKeyedFastDoubleElement* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* elements = UseTempRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastDoubleElement* result =
+ new LLoadKeyedFastDoubleElement(elements, key);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
+ HLoadKeyedSpecializedArrayElement* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ Representation representation(instr->representation());
+ ASSERT(
+ (representation.IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LLoadKeyedSpecializedArrayElement* result =
+ new LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ LInstruction* load_instr = DefineAsRegister(result);
+  // An unsigned int array load might overflow and cause a deopt; make sure
+  // it has an environment.
+ return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
+ AssignEnvironment(load_instr) : load_instr;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* object = UseFixed(instr->object(), a1);
+ LOperand* key = UseFixed(instr->key(), a0);
+
+ LInstruction* result =
+ DefineFixed(new LLoadKeyedGeneric(object, key), v0);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+ HStoreKeyedFastElement* instr) {
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* obj = UseTempRegister(instr->object());
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ LOperand* key = needs_write_barrier
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+
+ return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+}
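+
+// Note: with a write barrier, the value and key above are taken as
+// UseTempRegister rather than plain uses, since the barrier sequence may
+// clobber them after the store; without a barrier, register-or-constant
+// uses suffice.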
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+ HStoreKeyedFastDoubleElement* instr) {
+ ASSERT(instr->value()->representation().IsDouble());
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* val = UseTempRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ return new LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
+ HStoreKeyedSpecializedArrayElement* instr) {
+ Representation representation(instr->value()->representation());
+ ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (representation.IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->external_pointer()->representation().IsExternal());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ bool val_is_temp_register =
+ elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT_ELEMENTS;
+ LOperand* val = val_is_temp_register
+ ? UseTempRegister(instr->value())
+ : UseRegister(instr->value());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+
+ return new LStoreKeyedSpecializedArrayElement(external_pointer,
+ key,
+ val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* obj = UseFixed(instr->object(), a2);
+ LOperand* key = UseFixed(instr->key(), a1);
+ LOperand* val = UseFixed(instr->value(), a0);
+
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsTagged());
+
+ return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* new_map_reg = TempRegister();
+ LTransitionElementsKind* result =
+ new LTransitionElementsKind(object, new_map_reg, NULL);
+ return DefineSameAsFirst(result);
+ } else {
+ LOperand* object = UseFixed(instr->object(), a0);
+ LOperand* fixed_object_reg = FixedTemp(a2);
+ LOperand* new_map_reg = FixedTemp(a3);
+ LTransitionElementsKind* result =
+ new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+ }
+}
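+
+// Note: the smi-only -> fast transition above only has to write the new
+// map into the object, so it stays a register-allocated instruction; any
+// other transition may need to copy and rewrite the backing store and is
+// therefore lowered as a call with fixed registers.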
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+ LOperand* obj = needs_write_barrier
+ ? UseTempRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
+
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegister(instr->value());
+
+ return new LStoreNamedField(obj, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* obj = UseFixed(instr->object(), a1);
+ LOperand* val = UseFixed(instr->value(), a0);
+
+ LInstruction* result = new LStoreNamedGeneric(obj, val);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return MarkAsCall(DefineFixed(new LStringAdd(left, right), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseTempRegister(instr->string());
+ LOperand* index = UseTempRegister(instr->index());
+ LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LStringCharFromCode* result = new LStringCharFromCode(char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
+ LOperand* string = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LStringLength(string));
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LArrayLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LRegExpLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LFunctionLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
+ LOperand* object = UseFixed(instr->object(), a0);
+ LOperand* key = UseFixed(instr->key(), a1);
+ LDeleteProperty* result = new LDeleteProperty(object, key);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ allocator_->MarkAsOsrEntry();
+ current_block_->last_environment()->set_ast_id(instr->ast_id());
+ return AssignEnvironment(new LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(new LParameter, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+ int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
+ if (spill_index > LUnallocated::kMaxFixedIndex) {
+ Abort("Too many spill slots needed for OSR");
+ spill_index = 0;
+ }
+ return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallStub, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ LOperand* arguments = UseRegister(instr->arguments());
+ LOperand* length = UseTempRegister(instr->length());
+ LOperand* index = UseRegister(instr->index());
+ LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+ LOperand* object = UseFixed(instr->value(), a0);
+ LToFastProperties* result = new LToFastProperties(object);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+ LTypeof* result = new LTypeof(UseFixed(instr->value(), a0));
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new LIsConstructCallAndBranch(TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+ HEnvironment* env = current_block_->last_environment();
+ ASSERT(env != NULL);
+
+ env->set_ast_id(instr->ast_id());
+
+ env->Drop(instr->pop_count());
+ for (int i = 0; i < instr->values()->length(); ++i) {
+ HValue* value = instr->values()->at(i);
+ if (instr->HasAssignedIndexAt(i)) {
+ env->Bind(instr->GetAssignedIndexAt(i), value);
+ } else {
+ env->Push(value);
+ }
+ }
+
+  // If there is an instruction with a pending deoptimization environment,
+  // create a lazy bailout instruction to capture the environment.
+ if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+ LInstruction* result = new LLazyBailout;
+ result = AssignEnvironment(result);
+ instruction_pending_deoptimization_environment_->
+ set_deoptimization_environment(result->environment());
+ ClearInstructionPendingDeoptimizationEnvironment();
+ return result;
+ }
+
+ return NULL;
+}
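+
+// Note: HSimulate mirrors the state of the unoptimized frame (expression
+// stack and locals) at a given AST id; the loop above replays its pops,
+// binds and pushes into the block's last HEnvironment so that a later
+// deoptimization can reconstruct that frame.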
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ if (instr->is_function_entry()) {
+ return MarkAsCall(new LStackCheck, instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ return AssignEnvironment(AssignPointerMap(new LStackCheck));
+ }
+}
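+
+// Note: function-entry stack checks are lowered as explicit calls, while
+// back-edge checks only carry an environment and pointer map, so the
+// interrupt and deopt machinery can be entered lazily from the back edge.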
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+ HEnvironment* outer = current_block_->last_environment();
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->function(),
+ undefined,
+ instr->call_kind());
+ current_block_->UpdateEnvironment(inner);
+ chunk_->AddInlinedClosure(instr->closure());
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ HEnvironment* outer = current_block_->last_environment()->outer();
+ current_block_->UpdateEnvironment(outer);
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+ LOperand* key = UseRegisterAtStart(instr->key());
+ LOperand* object = UseRegisterAtStart(instr->object());
+ LIn* result = new LIn(key, object);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/mips/lithium-mips.h b/src/3rdparty/v8/src/mips/lithium-mips.h
index ebc1e43..71f0bb2 100644
--- a/src/3rdparty/v8/src/mips/lithium-mips.h
+++ b/src/3rdparty/v8/src/mips/lithium-mips.h
@@ -32,275 +32,2188 @@
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
-
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
+#include "utils.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LCodeGen;
-class LEnvironment;
-class Translation;
+
+#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
+ V(ControlInstruction) \
+ V(Call) \
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
+
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddI) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(ArrayLiteral) \
+ V(BitI) \
+ V(BitNotI) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallConstantFunction) \
+ V(CallFunction) \
+ V(CallGlobal) \
+ V(CallKeyed) \
+ V(CallKnownGlobal) \
+ V(CallNamed) \
+ V(CallNew) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CheckFunction) \
+ V(CheckInstanceType) \
+ V(CheckMap) \
+ V(CheckNonSmi) \
+ V(CheckPrototypeMaps) \
+ V(CheckSmi) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
+ V(ClassOfTestAndBranch) \
+ V(CmpConstantEqAndBranch) \
+ V(CmpIDAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpMapAndBranch) \
+ V(CmpT) \
+ V(ConstantD) \
+ V(ConstantI) \
+ V(ConstantT) \
+ V(Context) \
+ V(DeleteProperty) \
+ V(Deoptimize) \
+ V(DivI) \
+ V(DoubleToI) \
+ V(ElementsKind) \
+ V(FixedArrayBaseLength) \
+ V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
+ V(GlobalObject) \
+ V(GlobalReceiver) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(In) \
+ V(InstanceOf) \
+ V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCallAndBranch) \
+ V(IsNilAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(JSArrayLength) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadElements) \
+ V(LoadExternalArrayPointer) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyedFastDoubleElement) \
+ V(LoadKeyedFastElement) \
+ V(LoadKeyedGeneric) \
+ V(LoadKeyedSpecializedArrayElement) \
+ V(LoadNamedField) \
+ V(LoadNamedFieldPolymorphic) \
+ V(LoadNamedGeneric) \
+ V(ModI) \
+ V(MulI) \
+ V(NumberTagD) \
+ V(NumberTagI) \
+ V(NumberUntagD) \
+ V(ObjectLiteral) \
+ V(OsrEntry) \
+ V(OuterContext) \
+ V(Parameter) \
+ V(Power) \
+ V(PushArgument) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(ShiftI) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreContextSlot) \
+ V(StoreGlobalCell) \
+ V(StoreGlobalGeneric) \
+ V(StoreKeyedFastDoubleElement) \
+ V(StoreKeyedFastElement) \
+ V(StoreKeyedGeneric) \
+ V(StoreKeyedSpecializedArrayElement) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringLength) \
+ V(SubI) \
+ V(TaggedToI) \
+ V(ThisFunction) \
+ V(Throw) \
+ V(ToFastProperties) \
+ V(TransitionElementsKind) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(UnaryMathOperation) \
+ V(UnknownOSRValue) \
+ V(ValueOf)
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const { return LInstruction::k##type; } \
+ virtual void CompileToNative(LCodeGen* generator); \
+ virtual const char* Mnemonic() const { return mnemonic; } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
+ }
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+ H##type* hydrogen() const { \
+ return H##type::cast(hydrogen_value()); \
+ }
+
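+// The macros above are the X-macro backbone of this file: every V(type)
+// expansion of LITHIUM_CONCRETE_INSTRUCTION_LIST produces one opcode enum
+// value and one Is##type() predicate in LInstruction below, and
+// DECLARE_CONCRETE_INSTRUCTION stamps the matching opcode(), Mnemonic()
+// and checked cast into each concrete class. Adding an instruction is
+// therefore one line in the list plus one class definition.
+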
class LInstruction: public ZoneObject {
public:
- LInstruction() { }
+ LInstruction()
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ is_call_(false),
+ is_save_doubles_(false) { }
virtual ~LInstruction() { }
- // Predicates should be generated by macro as in lithium-ia32.h.
- virtual bool IsLabel() const {
- UNIMPLEMENTED();
- return false;
+ virtual void CompileToNative(LCodeGen* generator) = 0;
+ virtual const char* Mnemonic() const = 0;
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) = 0;
+ virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+
+ enum Opcode {
+ // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+ // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
+
+ virtual bool IsControl() const { return false; }
+
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
+
+ void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+ LPointerMap* pointer_map() const { return pointer_map_.get(); }
+ bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+ void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+ HValue* hydrogen_value() const { return hydrogen_value_; }
+
+ void set_deoptimization_environment(LEnvironment* env) {
+ deoptimization_environment_.set(env);
}
- virtual bool IsOsrEntry() const {
- UNIMPLEMENTED();
- return false;
+ LEnvironment* deoptimization_environment() const {
+ return deoptimization_environment_.get();
}
+ bool HasDeoptimizationEnvironment() const {
+ return deoptimization_environment_.is_set();
+ }
+
+ void MarkAsCall() { is_call_ = true; }
+ void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() = 0;
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
- LPointerMap* pointer_map() const {
- UNIMPLEMENTED();
- return NULL;
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
+ private:
+ LEnvironment* environment_;
+ SetOncePointer<LPointerMap> pointer_map_;
+ HValue* hydrogen_value_;
+ SetOncePointer<LEnvironment> deoptimization_environment_;
+ bool is_call_;
+ bool is_save_doubles_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction: public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const { return R != 0; }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() { return results_[0]; }
+
+ int InputCount() { return I; }
+ LOperand* InputAt(int i) { return inputs_[i]; }
+
+ int TempCount() { return T; }
+ LOperand* TempAt(int i) { return temps_[i]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ protected:
+ EmbeddedContainer<LOperand*, R> results_;
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
+};
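+
+// For example, LAddI below is an LTemplateInstruction<1, 2, 0>: one
+// result, two inputs, no temps. The EmbeddedContainer members keep the
+// operand arrays inline in the instruction object, so no separate
+// allocation is needed for the operand lists.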
+
+
+class LGap: public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGap(HBasicBlock* block)
+ : block_(block) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
}
- bool HasPointerMap() const {
- UNIMPLEMENTED();
- return false;
+  // Can't use the DECLARE macro here because of subclasses.
+ virtual bool IsGap() const { return true; }
+ virtual void PrintDataTo(StringStream* stream);
+ static LGap* cast(LInstruction* instr) {
+ ASSERT(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
}
- void set_environment(LEnvironment* env) { UNIMPLEMENTED(); }
+ bool IsRedundant() const;
+
+ HBasicBlock* block() const { return block_; }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
- LEnvironment* environment() const {
- UNIMPLEMENTED();
- return NULL;
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+ if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+ return parallel_moves_[pos];
}
- bool HasEnvironment() const {
- UNIMPLEMENTED();
- return false;
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ return parallel_moves_[pos];
}
- virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
+ private:
+ LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+ HBasicBlock* block_;
+};
+
+
+class LInstructionGap: public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto: public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGoto(int block_id) : block_id_(block_id) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+ virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsControl() const { return true; }
+
+ int block_id() const { return block_id_; }
+
+ private:
+ int block_id_;
+};
+
+
+class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+ public:
+ LLazyBailout() : gap_instructions_size_(0) { }
- virtual bool IsControl() const {
- UNIMPLEMENTED();
- return false;
+ DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+ void set_gap_instructions_size(int gap_instructions_size) {
+ gap_instructions_size_ = gap_instructions_size;
}
+ int gap_instructions_size() { return gap_instructions_size_; }
- void MarkAsCall() { UNIMPLEMENTED(); }
- void MarkAsSaveDoubles() { UNIMPLEMENTED(); }
+ private:
+ int gap_instructions_size_;
+};
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const {
- UNIMPLEMENTED();
- return false;
+
+class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+};
+
+
+class LLabel: public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block)
+ : LGap(block), replacement_(NULL) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int block_id() const { return block()->block_id(); }
+ bool is_loop_header() const { return block()->IsLoopHeader(); }
+ Label* label() { return &label_; }
+ LLabel* replacement() const { return replacement_; }
+ void set_replacement(LLabel* label) { replacement_ = label; }
+ bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+ Label label_;
+ LLabel* replacement_;
+};
+
+
+class LParameter: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+ DECLARE_HYDROGEN_ACCESSOR(CallStub)
+
+ TranscendentalCache::Type transcendental_type() {
+ return hydrogen()->transcendental_type();
+ }
+};
+
+
+class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction: public LTemplateInstruction<0, I, T> {
+ public:
+ virtual bool IsControl() const { return true; }
+
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+ int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+ int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+ private:
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
+};
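+
+// Note: control instructions define no result (the 0 in the result slot
+// above); instead they expose their hydrogen successors, and the code
+// generator branches to true_block_id()/false_block_id() rather than
+// producing a value.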
+
+
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+ public:
+ LApplyArguments(LOperand* function,
+ LOperand* receiver,
+ LOperand* length,
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
}
- bool IsMarkedAsSaveDoubles() const {
- UNIMPLEMENTED();
- return false;
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
}
- virtual bool HasResult() const {
- UNIMPLEMENTED();
- return false;
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LArgumentsLength(LOperand* elements) {
+ inputs_[0] = elements;
}
- virtual LOperand* result() {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+ public:
+ LArgumentsElements() { }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+};
+
+
+class LModI: public LTemplateInstruction<1, 2, 3> {
+ public:
+  // Used when the right-hand side is a constant power of 2.
+ LModI(LOperand* left,
+ LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = NULL;
+ temps_[1] = NULL;
+ temps_[2] = NULL;
}
- virtual int InputCount() {
- UNIMPLEMENTED();
- return 0;
+ // Used for the standard case.
+ LModI(LOperand* left,
+ LOperand* right,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
}
- virtual LOperand* InputAt(int i) {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivI: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LDivI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
}
- virtual int TempCount() {
- UNIMPLEMENTED();
- return 0;
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+};
+
+
+class LMulI: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
}
- virtual LOperand* TempAt(int i) {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+ public:
+ LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
}
- LOperand* FirstInput() {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->GetInputRepresentation().IsDouble();
}
- LOperand* Output() {
- UNIMPLEMENTED();
- return NULL;
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LUnaryMathOperation(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
}
-#ifdef DEBUG
- void VerifyCall() { UNIMPLEMENTED(); }
-#endif
+ DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+
+ virtual void PrintDataTo(StringStream* stream);
+ BuiltinFunctionId op() const { return hydrogen()->op(); }
};
-class LGap: public LInstruction {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- explicit LGap(HBasicBlock* block) { }
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
- HBasicBlock* block() const {
- UNIMPLEMENTED();
- return NULL;
+
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpConstantEqAndBranch(LOperand* left) {
+ inputs_[0] = left;
}
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+ "cmp-constant-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
+};
+
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- UNIMPLEMENTED();
- return NULL;
+class LIsNilAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LIsNilAndBranch(LOperand* value) {
+ inputs_[0] = value;
}
- LParallelMove* GetParallelMove(InnerPosition pos) {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+
+ EqualityKind kind() const { return hydrogen()->kind(); }
+ NilValue nil() const { return hydrogen()->nil(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+ public:
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
}
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
};
-class LLabel: public LGap {
+class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
- explicit LLabel(HBasicBlock* block) : LGap(block) { }
+ explicit LIsSmiAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
};
-class LOsrEntry: public LInstruction {
+class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
- // Function could be generated by a macro as in lithium-ia32.h.
- static LOsrEntry* cast(LInstruction* instr) {
- UNIMPLEMENTED();
- return NULL;
+ explicit LHasInstanceTypeAndBranch(LOperand* value) {
+ inputs_[0] = value;
}
- LOperand** SpilledRegisterArray() {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+ "has-instance-type-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
}
- LOperand** SpilledDoubleRegisterArray() {
- UNIMPLEMENTED();
- return NULL;
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+ inputs_[0] = value;
}
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
- UNIMPLEMENTED();
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+ "has-cached-array-index-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+ public:
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
}
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- UNIMPLEMENTED();
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+ "class-of-test-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LCmpT: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCmpT(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
}
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+ Token::Value op() const { return hydrogen()->token(); }
};
-class LChunk: public ZoneObject {
+class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LChunk(HGraph* graph) { }
+ LInstanceOf(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
- HGraph* graph() const {
- UNIMPLEMENTED();
- return NULL;
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
}
- const ZoneList<LPointerMap*>* pointer_maps() const {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+ "instance-of-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+ Handle<JSFunction> function() const { return hydrogen()->function(); }
+};
+
+
+class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
}
- LOperand* GetNextSpillSlot(bool double_slot) {
- UNIMPLEMENTED();
- return NULL;
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+};
+
+
+class LBitI: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
}
- LConstantOperand* DefineConstantOperand(HConstant* constant) {
- UNIMPLEMENTED();
- return NULL;
+ Token::Value op() const { return hydrogen()->op(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LShiftI: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
}
- LLabel* GetLabel(int block_id) const {
- UNIMPLEMENTED();
- return NULL;
+ Token::Value op() const { return op_; }
+
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LSubI: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
}
- const ZoneList<LInstruction*>* instructions() const {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantD: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantT: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Handle<Object> value() const { return hydrogen()->handle(); }
+};
+
+
+class LBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LBranch(LOperand* value) {
+ inputs_[0] = value;
}
- int GetParameterStackSlot(int index) const {
- UNIMPLEMENTED();
- return 0;
+ DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
+ public:
+ LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
}
- void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
+ DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+ virtual bool IsControl() const { return true; }
- LGap* GetGapAt(int index) const {
- UNIMPLEMENTED();
- return NULL;
+ Handle<Map> map() const { return hydrogen()->map(); }
+ int true_block_id() const {
+ return hydrogen()->FirstSuccessor()->block_id();
}
+ int false_block_id() const {
+ return hydrogen()->SecondSuccessor()->block_id();
+ }
+};
- bool IsGapAt(int index) const {
- UNIMPLEMENTED();
- return false;
+
+class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LJSArrayLength(LOperand* value) {
+ inputs_[0] = value;
}
- int NearestGapPos(int index) const {
- UNIMPLEMENTED();
- return 0;
+ DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
+};
+
+
+class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LFixedArrayBaseLength(LOperand* value) {
+ inputs_[0] = value;
}
- void MarkEmptyBlocks() { UNIMPLEMENTED(); }
+ DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
+ "fixed-array-base-length")
+ DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
+};
+
- CompilationInfo* info() const {
- UNIMPLEMENTED();
- return NULL;
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LElementsKind(LOperand* value) {
+ inputs_[0] = value;
}
-#ifdef DEBUG
- void Verify() { UNIMPLEMENTED(); }
-#endif
+ DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
+};
+
+
+class LValueOf: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LValueOf(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
+ DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+};
+
+
+class LThrow: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LThrow(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LBitNotI(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
+class LAddI: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LPower: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+
+ virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
+ virtual void CompileToNative(LCodeGen* generator);
+ virtual const char* Mnemonic() const;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
+ virtual void CompileToNative(LCodeGen* generator);
+ virtual const char* Mnemonic() const;
+
+ Token::Value op() const { return op_; }
+
+ private:
+ Token::Value op_;
+};
+
+
+class LReturn: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LReturn(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedField(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedFieldPolymorphic(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
+ "load-named-field-polymorphic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
+
+ LOperand* object() { return inputs_[0]; }
+};
+
+
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedGeneric(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+ LOperand* object() { return inputs_[0]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadFunctionPrototype(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+ DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+
+ LOperand* function() { return inputs_[0]; }
+};
+
+
+class LLoadElements: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadElements(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+};
+
+
+class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadExternalArrayPointer(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
+ "load-external-array-pointer")
+};
+
+
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+ "load-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
+ "load-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
+};
+
+
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadGlobalGeneric(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ LOperand* global_object() { return inputs_[0]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreGlobalCell(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+ explicit LStoreGlobalGeneric(LOperand* global_object,
+ LOperand* value) {
+ inputs_[0] = global_object;
+ inputs_[1] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+ LOperand* global_object() { return InputAt(0); }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ LOperand* value() { return InputAt(1); }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ LOperand* context() { return InputAt(0); }
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ LOperand* context() { return InputAt(0); }
+ LOperand* value() { return InputAt(1); }
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LPushArgument(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+};
+
+
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LOuterContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+ LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGlobalObject(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+
+ LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGlobalReceiver(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+ LOperand* global() { return InputAt(0); }
+};
+
+
+class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Handle<JSFunction> function() { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInvokeFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ LOperand* function() { return inputs_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallKeyed(LOperand* key) {
+ inputs_[0] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
+ DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Handle<String> name() const { return hydrogen()->name(); }
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+ int arity() const { return hydrogen()->argument_count() - 2; }
+};
+
+
+class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
+ DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Handle<String> name() const { return hydrogen()->name(); }
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Handle<JSFunction> target() const { return hydrogen()->target(); }
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallNew(LOperand* constructor) {
+ inputs_[0] = constructor;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+ DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+ DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+ const Runtime::Function* function() const { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count(); }
+};
+
+
+class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LNumberTagI(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
+ public:
+ LTaggedToI(LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LSmiTag(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+};
+
+
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LNumberUntagD(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+ public:
+ LSmiUntag(LOperand* value, bool needs_check)
+ : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ bool needs_check() const { return needs_check_; }
+
+ private:
+ bool needs_check_;
+};
+
+
+class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreNamedField(LOperand* obj, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool is_in_object() { return hydrogen()->is_in_object(); }
+ int offset() { return hydrogen()->offset(); }
+ Handle<Map> transition() const { return hydrogen()->transition(); }
+};
+
+
+class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreNamedGeneric(LOperand* obj, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ bool strict_mode() { return strict_mode_flag() == kStrictMode; }
+};
+
+
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+};
+
+
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+ "store-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+};
+
+
+class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+ "store-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
+};
+
+
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* new_map_temp,
+ LOperand* temp_reg) {
+ inputs_[0] = object;
+ temps_[0] = new_map_temp;
+ temps_[1] = temp_reg;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_reg() { return temps_[0]; }
+ LOperand* temp_reg() { return temps_[1]; }
+ Handle<Map> original_map() { return hydrogen()->original_map(); }
+ Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+};
+
+
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringAdd(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+};
+
+
+class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringCharCodeAt(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+};
+
+
+class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LStringCharFromCode(LOperand* char_code) {
+ inputs_[0] = char_code;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+
+ LOperand* char_code() { return inputs_[0]; }
+};
+
+
+class LStringLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LStringLength(LOperand* string) {
+ inputs_[0] = string;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
+ DECLARE_HYDROGEN_ACCESSOR(StringLength)
+
+ LOperand* string() { return inputs_[0]; }
+};
+
+
+class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckFunction(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
+ DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+};
+
+
+class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckInstanceType(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+ DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckMap(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+};
+
+
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
+ public:
+ LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) {
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
+
+ Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
+ Handle<JSObject> holder() const { return hydrogen()->holder(); }
+};
+
+
+class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+};
+
+
+class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClampDToUint8(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClampTToUint8(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+ DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+};
+
+
+class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+};
+
+
+class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+ DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+
+ Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
+};
+
+
+class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LToFastProperties(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+ DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LTypeof(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LTypeofIsAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+ Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+ public:
+ explicit LIsConstructCallAndBranch(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+ "is-construct-call-and-branch")
+};
+
+
+class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LDeleteProperty(LOperand* obj, LOperand* key) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
+class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+ public:
+ LOsrEntry();
+
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+
+ LOperand** SpilledRegisterArray() { return register_spills_; }
+ LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
+
+ void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
+ void MarkSpilledDoubleRegister(int allocation_index,
+ LOperand* spill_operand);
+
+ private:
+ // Arrays of spill slot operands for registers with an assigned spill
+ // slot, i.e., that must also be restored to the spill slot on OSR entry.
+ // NULL if the register has no assigned spill slot. Indexed by allocation
+ // index.
+ LOperand* register_spills_[Register::kNumAllocatableRegisters];
+ LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+};
+
+
+class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
+};
+
+
+class LIn: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LIn(LOperand* key, LOperand* object) {
+ inputs_[0] = key;
+ inputs_[1] = object;
+ }
+
+ LOperand* key() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(In, "in")
+};
+
+
+class LChunkBuilder;
+class LChunk: public ZoneObject {
+ public:
+ explicit LChunk(CompilationInfo* info, HGraph* graph);
+
+ void AddInstruction(LInstruction* instruction, HBasicBlock* block);
+ LConstantOperand* DefineConstantOperand(HConstant* constant);
+ Handle<Object> LookupLiteral(LConstantOperand* operand) const;
+ Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+
+ int GetNextSpillIndex(bool is_double);
+ LOperand* GetNextSpillSlot(bool is_double);
+
+ int ParameterAt(int index);
+ int GetParameterStackSlot(int index) const;
+ int spill_slot_count() const { return spill_slot_count_; }
+ CompilationInfo* info() const { return info_; }
+ HGraph* graph() const { return graph_; }
+ const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+ void AddGapMove(int index, LOperand* from, LOperand* to);
+ LGap* GetGapAt(int index) const;
+ bool IsGapAt(int index) const;
+ int NearestGapPos(int index) const;
+ void MarkEmptyBlocks();
+ const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
+ LLabel* GetLabel(int block_id) const {
+ HBasicBlock* block = graph_->blocks()->at(block_id);
+ int first_instruction = block->first_instruction_index();
+ return LLabel::cast(instructions_[first_instruction]);
+ }
+ int LookupDestination(int block_id) const {
+ LLabel* cur = GetLabel(block_id);
+ while (cur->replacement() != NULL) {
+ cur = cur->replacement();
+ }
+ return cur->block_id();
+ }
+ Label* GetAssemblyLabel(int block_id) const {
+ LLabel* label = GetLabel(block_id);
+ ASSERT(!label->HasReplacement());
+ return label->label();
+ }
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures() const {
+ return &inlined_closures_;
+ }
+
+ void AddInlinedClosure(Handle<JSFunction> closure) {
+ inlined_closures_.Add(closure);
+ }
+
+ private:
+ int spill_slot_count_;
+ CompilationInfo* info_;
+ HGraph* const graph_;
+ ZoneList<LInstruction*> instructions_;
+ ZoneList<LPointerMap*> pointer_maps_;
+ ZoneList<Handle<JSFunction> > inlined_closures_;
};
class LChunkBuilder BASE_EMBEDDED {
public:
- LChunkBuilder(CompilationInfo*&, HGraph* graph, LAllocator* allocator) { }
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+ : chunk_(NULL),
+ info_(info),
+ graph_(graph),
+ status_(UNUSED),
+ current_instruction_(NULL),
+ current_block_(NULL),
+ next_block_(NULL),
+ argument_count_(0),
+ allocator_(allocator),
+ position_(RelocInfo::kNoPosition),
+ instruction_pending_deoptimization_environment_(NULL),
+ pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
// Build the sequence for the graph.
- LChunk* Build() {
- UNIMPLEMENTED();
- return NULL;
- };
+ LChunk* Build();
// Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
- UNIMPLEMENTED(); \
- return NULL; \
- }
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
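+ // For instance, DECLARE_DO(Add) above declares
+ //   LInstruction* DoAdd(HAdd* node);
+ // with one such method generated per concrete hydrogen instruction.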
+ private:
+ enum Status {
+ UNUSED,
+ BUILDING,
+ DONE,
+ ABORTED
+ };
+
+ LChunk* chunk() const { return chunk_; }
+ CompilationInfo* info() const { return info_; }
+ HGraph* graph() const { return graph_; }
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_building() const { return status_ == BUILDING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+
+ void Abort(const char* format, ...);
+
+ // Methods for getting operands for Use / Define / Temp.
+ LRegister* ToOperand(Register reg);
+ LUnallocated* ToUnallocated(Register reg);
+ LUnallocated* ToUnallocated(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+ MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register);
+
+ // A value that is guaranteed to be allocated to a register.
+ // An operand created by UseRegister is guaranteed to be live until the
+ // end of the instruction, so the register allocator will not reuse its
+ // register for any other operand inside the instruction.
+ // An operand created by UseRegisterAtStart is guaranteed to be live only
+ // at the start of the instruction; the register allocator is free to
+ // assign the same register to some other operand used inside the
+ // instruction (i.e. a temporary or the output).
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
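+ //
+ // A sketch of how these policies combine in the builder methods (the
+ // real implementations live in lithium-mips.cc):
+ //   LOperand* left = UseRegisterAtStart(instr->left());
+ //   LOperand* right = UseOrConstantAtStart(instr->right());
+ //   return DefineAsRegister(new LSubI(left, right));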
+
+ // An input operand in a register that may be trashed.
+ MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+ // An input operand in a register or stack slot.
+ MUST_USE_RESULT LOperand* Use(HValue* value);
+ MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+ // An input operand in a register, stack slot or a constant operand.
+ MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+ // An input operand in a register or a constant operand.
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+ // An input operand in register, stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+ MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ // Return the same instruction that they are passed.
+ template<int I, int T>
+ LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
+ LUnallocated* result);
+ template<int I, int T>
+ LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
+ int index);
+ template<int I, int T>
+ LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
+ Register reg);
+ template<int I, int T>
+ LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
+ DoubleRegister reg);
+ LInstruction* AssignEnvironment(LInstruction* instr);
+ LInstruction* AssignPointerMap(LInstruction* instr);
+
+ enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+ // By default we assume that instruction sequences generated for calls
+ // cannot deoptimize eagerly and we do not attach an environment to such
+ // instructions.
+ LInstruction* MarkAsCall(
+ LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
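+ // A typical builder call site, sketched:
+ //   return MarkAsCall(DefineFixed(new LCallNamed, v0), instr);
+ // (call results land in v0 on MIPS).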
+ LInstruction* MarkAsSaveDoubles(LInstruction* instr);
+
+ LInstruction* SetInstructionPendingDeoptimizationEnvironment(
+ LInstruction* instr, int ast_id);
+ void ClearInstructionPendingDeoptimizationEnvironment();
+
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator);
+
+ void VisitInstruction(HInstruction* current);
+
+ void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+ LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+ LInstruction* DoArithmeticT(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+
+ LChunk* chunk_;
+ CompilationInfo* info_;
+ HGraph* const graph_;
+ Status status_;
+ HInstruction* current_instruction_;
+ HBasicBlock* current_block_;
+ HBasicBlock* next_block_;
+ int argument_count_;
+ LAllocator* allocator_;
+ int position_;
+ LInstruction* instruction_pending_deoptimization_environment_;
+ int pending_deoptimization_ast_id_;
+
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/mips/macro-assembler-mips.cc b/src/3rdparty/v8/src/mips/macro-assembler-mips.cc
index 32dce66..36c4f45 100644
--- a/src/3rdparty/v8/src/mips/macro-assembler-mips.cc
+++ b/src/3rdparty/v8/src/mips/macro-assembler-mips.cc
@@ -3610,7 +3610,7 @@ void MacroAssembler::InvokeFunction(Register function,
}
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind) {
@@ -3618,7 +3618,7 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- li(a1, Operand(Handle<JSFunction>(function)));
+ li(a1, Operand(function));
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -3739,45 +3739,19 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
}
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
- Register r1, const Operand& r2) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
- kNoASTId, cond, r1, r2);
- return result;
-}
-
-
void MacroAssembler::TailCallStub(CodeStub* stub) {
ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
- Condition cond,
- Register r1,
- const Operand& r2) {
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
- return result;
-}
-
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
- ExternalReference function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
+ int stack_space) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
const int kNextOffset = 0;
@@ -3848,11 +3822,10 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
Ret();
bind(&promote_scheduled_exception);
- MaybeObject* result = TryTailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
- if (result->IsFailure()) {
- return result;
- }
+ TailCallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0,
+ 1);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
@@ -3865,8 +3838,6 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
1);
mov(v0, s0);
jmp(&leave_exit_frame);
-
- return result;
}
@@ -4089,17 +4060,6 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
}
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- li(a0, num_arguments);
- return TryJumpToExternalReference(ext);
-}
-
-
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
@@ -4116,14 +4076,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
- const ExternalReference& builtin) {
- li(a1, Operand(builtin));
- CEntryStub stub(1);
- return TryTailCallStub(&stub);
-}
-
-
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
diff --git a/src/3rdparty/v8/src/mips/macro-assembler-mips.h b/src/3rdparty/v8/src/mips/macro-assembler-mips.h
index 5224db9..b9bd2f3 100644
--- a/src/3rdparty/v8/src/mips/macro-assembler-mips.h
+++ b/src/3rdparty/v8/src/mips/macro-assembler-mips.h
@@ -112,11 +112,6 @@ static inline MemOperand GlobalObjectOperand() {
}
-static inline MemOperand QmlGlobalObjectOperand() {
- return ContextOperand(cp, Context::QML_GLOBAL_INDEX);
-}
-
-
// Generate a MemOperand for loading a field from an object.
static inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
@@ -817,7 +812,7 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
- void InvokeFunction(JSFunction* function,
+ void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind);
@@ -1047,27 +1042,9 @@ class MacroAssembler: public Assembler {
void CallStub(CodeStub* stub, Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- // Call a code stub and return the code object called. Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry
- // after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub,
- Condition cond = cc_always,
- Register r1 = zero_reg,
- const Operand& r2 =
- Operand(zero_reg));
-
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
- // Tail call a code stub (jump) and return the code object called. Try to
- // generate the code if necessary. Do not perform a GC but instead return
- // a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
- Condition cond = cc_always,
- Register r1 = zero_reg,
- const Operand& r2 =
- Operand(zero_reg));
-
void CallJSExitStub(CodeStub* stub);
// Call a runtime routine.
@@ -1088,12 +1065,6 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
- // Tail call of a runtime routine (jump). Try to generate the code if
- // necessary. Do not perform a GC but instead return a retry after GC
- // failure.
- MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size);
-
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
@@ -1144,16 +1115,15 @@ class MacroAssembler: public Assembler {
void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Restores context.
- MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
- int stack_space);
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions. Restores context. stack_space
+ // - space to be unwound on exit (includes the call js arguments space and
+ // the additional space allocated for the fast call).
+ void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
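+ // (Callers pass e.g. argc + kFastApiCallArguments + 1 for the fast API
+ // call path; see GenerateFastApiDirectCall in stub-cache-mips.cc.)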
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin);
- MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
diff --git a/src/3rdparty/v8/src/mips/stub-cache-mips.cc b/src/3rdparty/v8/src/mips/stub-cache-mips.cc
index 296f186..cb1f092 100644
--- a/src/3rdparty/v8/src/mips/stub-cache-mips.cc
+++ b/src/3rdparty/v8/src/mips/stub-cache-mips.cc
@@ -151,66 +151,6 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateDictionaryNegativeLookup(
- MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register scratch0,
- Register scratch1) {
- ASSERT(name->IsSymbol());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
- __ Branch(miss_label, ne, at, Operand(zero_reg));
-
-
- // Check that receiver is a JSObject.
- __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Load properties array.
- Register properties = scratch0;
- __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- // Check that the properties array is a dictionary.
- __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- Register tmp = properties;
- __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
- __ Branch(miss_label, ne, map, Operand(tmp));
-
- // Restore the temporarily used register.
- __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- MaybeObject* result = StringDictionaryLookupStub::TryGenerateNegativeLookup(
- masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
- if (result->IsFailure()) return result;
-
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- return result;
-}
-
-
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
@@ -294,7 +234,10 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
__ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -302,8 +245,8 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
__ li(at, isolate->global());
__ Branch(miss, ne, prototype, Operand(at));
// Get the global function with the given index.
- JSFunction* function =
- JSFunction::cast(isolate->global_context()->get(index));
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->global_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ li(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -565,23 +508,24 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
- JSObject* holder_obj) {
+ Handle<JSObject> holder_obj) {
__ push(name);
- InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
Register scratch = name;
- __ li(scratch, Operand(Handle<Object>(interceptor)));
+ __ li(scratch, Operand(interceptor));
__ Push(scratch, receiver, holder);
__ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
__ push(scratch);
}
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
ExternalReference ref =
@@ -617,10 +561,9 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
}
-static MaybeObject* GenerateFastApiDirectCall(
- MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
+static void GenerateFastApiDirectCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
// ----------- S t a t e -------------
// -- sp[0] : holder (set by CheckPrototypes)
// -- sp[4] : callee js function
@@ -631,18 +574,18 @@ static MaybeObject* GenerateFastApiDirectCall(
// -- sp[(argc + 4) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
- JSFunction* function = optimization.constant_function();
- __ li(t1, Operand(Handle<JSFunction>(function)));
+ Handle<JSFunction> function = optimization.constant_function();
+ __ li(t1, Operand(function));
__ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
- Object* call_data = optimization.api_call_info()->data();
- Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (masm->isolate()->heap()->InNewSpace(call_data)) {
- __ li(a0, api_call_info_handle);
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data());
+ if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+ __ li(a0, api_call_info);
__ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
} else {
- __ li(t2, Operand(Handle<Object>(call_data)));
+ __ li(t2, call_data);
}
// Store js function and call data.
@@ -653,10 +596,6 @@ static MaybeObject* GenerateFastApiDirectCall(
// (refer to layout above).
__ Addu(a2, sp, Operand(2 * kPointerSize));
- Object* callback = optimization.api_call_info()->callback();
- Address api_function_address = v8::ToCData<Address>(callback);
- ApiFunction fun(api_function_address);
-
const int kApiStackSpace = 4;
FrameScope frame_scope(masm, StackFrame::MANUAL);
@@ -682,17 +621,15 @@ static MaybeObject* GenerateFastApiDirectCall(
// v8::Arguments::is_construct_call = 0
__ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
ExternalReference ref =
ExternalReference(&fun,
ExternalReference::DIRECT_API_CALL,
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
- return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+ __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
class CallInterceptorCompiler BASE_EMBEDDED {
@@ -706,86 +643,63 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name_(name),
extra_ic_state_(extra_ic_state) {}
- MaybeObject* Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
+ void Compile(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
-
CallOptimization optimization(lookup);
-
if (optimization.is_constant_call()) {
- return CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
+ CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+ holder, lookup, name, optimization, miss);
} else {
- CompileRegular(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- name,
- holder,
- miss);
- return masm->isolate()->heap()->undefined_value();
+ CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+ name, holder, miss);
}
}
private:
- MaybeObject* CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
+ void CompileCacheable(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<String> name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
-
Counters* counters = masm->isolate()->counters();
-
int depth1 = kInvalidProtoDepth;
int depth2 = kInvalidProtoDepth;
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 =
- optimization.GetPrototypeDepthOfExpectedType(object,
- interceptor_holder);
+ !lookup->holder()->IsGlobalObject()) {
+ depth1 = optimization.GetPrototypeDepthOfExpectedType(
+ object, interceptor_holder);
if (depth1 == kInvalidProtoDepth) {
- depth2 =
- optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
- lookup->holder());
+ depth2 = optimization.GetPrototypeDepthOfExpectedType(
+ interceptor_holder, Handle<JSObject>(lookup->holder()));
}
- can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
- (depth2 != kInvalidProtoDepth);
+ can_do_fast_api_call =
+ depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
}
__ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
+ scratch1, scratch2);
if (can_do_fast_api_call) {
__ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
@@ -798,9 +712,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver,
- interceptor_holder, scratch1,
- scratch2, scratch3, name, depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -813,10 +727,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, scratch3, name, depth2, miss);
+ Handle<JSObject>(lookup->holder()),
+ scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -827,10 +742,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- MaybeObject* result = GenerateFastApiDirectCall(masm,
- optimization,
- arguments_.immediate());
- if (result->IsFailure()) return result;
+ GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -851,68 +763,57 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm);
}
-
- return masm->isolate()->heap()->undefined_value();
}
void CompileRegular(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
- JSObject* interceptor_holder,
+ Handle<String> name,
+ Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name,
- miss_label);
+ scratch1, scratch2, scratch3,
+ name, miss_label);
// Call a runtime function to load the interceptor property.
FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
- PushInterceptorArguments(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
+ PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
__ CallExternalReference(
ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
5);
-
// Restore the name_ register.
__ pop(name_);
-
// Leave the internal frame.
}
void LoadWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
- JSObject* holder_obj,
+ Handle<JSObject> holder_obj,
Register scratch,
Label* interceptor_succeeded) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(holder, name_);
-
CompileCallLoadPropertyWithInterceptor(masm,
receiver,
holder,
name_,
holder_obj);
-
__ pop(name_); // Restore the name.
__ pop(receiver); // Restore the holder.
}
-
// If the interceptor returns the no-result sentinel, call the constant
// function.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
__ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
@@ -945,29 +846,6 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCell(
- MacroAssembler* masm,
- GlobalObject* global,
- String* name,
- Register scratch,
- Label* miss) {
- Object* probe;
- { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
- ASSERT(cell->value()->IsTheHole());
- __ li(scratch, Operand(Handle<Object>(cell)));
- __ lw(scratch,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(miss, ne, scratch, Operand(at));
- return cell;
-}
-
-
// Calls GenerateCheckPropertyCell for each global object in the prototype chain
// from object to (but not including) holder.
static void GenerateCheckPropertyCells(MacroAssembler* masm,
@@ -990,34 +868,6 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCells(
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- Register scratch,
- Label* miss) {
- JSObject* current = object;
- while (current != holder) {
- if (current->IsGlobalObject()) {
- // Returns a cell or a failure.
- MaybeObject* result = TryGenerateCheckPropertyCell(
- masm,
- GlobalObject::cast(current),
- name,
- scratch,
- miss);
- if (result->IsFailure()) return result;
- }
- ASSERT(current->IsJSObject());
- current = JSObject::cast(current->GetPrototype());
- }
- return NULL;
-}
-
-
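For reference, the retained handlified helper that replaces the removed Try* twin has roughly this shape; a sketch reconstructed from the raw-pointer version above, not a hunk of this patch:

    static void GenerateCheckPropertyCells(MacroAssembler* masm,
                                           Handle<JSObject> object,
                                           Handle<JSObject> holder,
                                           Handle<String> name,
                                           Register scratch,
                                           Label* miss) {
      Handle<JSObject> current = object;
      while (!current.is_identical_to(holder)) {
        if (current->IsGlobalObject()) {
          // Emits the hole check against the global's property cell.
          GenerateCheckPropertyCell(masm,
                                    Handle<GlobalObject>::cast(current),
                                    name, scratch, miss);
        }
        current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
      }
    }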
// Convert the int passed in register ival to an IEEE 754 single-precision
// floating-point value and store it at memory location (dst + 4 * wordoffset).
// If the FPU is available, use it for the conversion.
@@ -1240,146 +1090,6 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-Register StubCompiler::CheckPrototypes(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- String* name,
- int save_at_depth,
- Label* miss) {
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp));
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- JSObject* current = object;
- while (current != holder) {
- depth++;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- ASSERT(current->GetPrototype()->IsJSObject());
- JSObject* prototype = JSObject::cast(current->GetPrototype());
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
- Object* lookup_result = NULL; // Initialization to please compiler.
- if (!maybe_lookup_result->ToObject(&lookup_result)) {
- set_failure(Failure::cast(maybe_lookup_result));
- return reg;
- }
- name = String::cast(lookup_result);
- }
- ASSERT(current->property_dictionary()->FindEntry(name) ==
- StringDictionary::kNotFound);
-
- MaybeObject* negative_lookup =
- TryGenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
-
- if (negative_lookup->IsFailure()) {
- set_failure(Failure::cast(negative_lookup));
- return reg;
- }
-
- __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now the object is in holder_reg.
- __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else if (heap()->InNewSpace(prototype)) {
- // Get the map of the current object.
- __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-
- // Branch on the result of the map check.
- __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
-
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- // Restore scratch register to be the map of the object. In the
- // new space case below, we load the prototype from the map in
- // the scratch register.
- __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
-
- reg = holder_reg; // From now the object is in holder_reg.
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // Check the map of the current object.
- __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- // Branch on the result of the map check.
- __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
- // The prototype is in old space; load it directly.
- reg = holder_reg; // From now the object is in holder_reg.
- __ li(reg, Operand(Handle<JSObject>(prototype)));
- }
-
- if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp));
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
-
- // Check the holder map.
- __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
-
- // Log the check depth.
- LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
- // Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // If we've skipped any global objects, it's not enough to verify
- // that their maps haven't changed. We also need to check that the
- // property cell for the property is still empty.
-
- MaybeObject* result = TryGenerateCheckPropertyCells(masm(),
- object,
- holder,
- name,
- scratch1,
- miss);
- if (result->IsFailure()) set_failure(Failure::cast(result));
-
- // Return the register containing the holder.
- return reg;
-}
-
-
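The surviving handlified CheckPrototypes keeps the same contract as the removed overload: walk the prototype chain from object to holder, emit map checks along the way, and return the register that ends up holding the holder. A hedged summary of the interface, with parameters named as at the call sites in this file:

    // Returns the register containing the holder object; jumps to |miss|
    // if any map on the chain has changed or a security check fails.
    Register CheckPrototypes(Handle<JSObject> object,
                             Register object_reg,
                             Handle<JSObject> holder,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Handle<String> name,
                             int save_at_depth,
                             Label* miss);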
void StubCompiler::GenerateLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -1424,49 +1134,43 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
}
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss, scratch1);
// Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
- name, miss);
+ Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+ scratch2, scratch3, name, miss);
// Build the AccessorInfo::args_ list on the stack and push the property name
// below the exit frame to make the GC aware of them, then store pointers to
// them.
__ push(receiver);
__ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
- Handle<AccessorInfo> callback_handle(callback);
- if (heap()->InNewSpace(callback_handle->data())) {
- __ li(scratch3, callback_handle);
+ if (heap()->InNewSpace(callback->data())) {
+ __ li(scratch3, callback);
__ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
} else {
- __ li(scratch3, Handle<Object>(callback_handle->data()));
+ __ li(scratch3, Handle<Object>(callback->data()));
}
__ Push(reg, scratch3, name_reg);
__ mov(a2, scratch2); // Saved in case scratch2 == a1.
__ mov(a1, sp); // a1 (first argument - see note below) = Handle<String>
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
-
// NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
// struct from the function (which is currently the case). This means we pass
// the arguments in a1-a2 instead of a0-a1. CallApiFunctionAndReturn
// will handle setting up a0.
const int kApiStackSpace = 1;
-
FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
@@ -1476,28 +1180,26 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
// a2 (second argument - see note above) = AccessorInfo&
__ Addu(a2, sp, kPointerSize);
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
+ const int kStackUnwindSpace = 4;
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
ExternalReference ref =
ExternalReference(&fun,
ExternalReference::DIRECT_GETTER_CALL,
masm()->isolate());
- // 4 args - will be freed later by LeaveExitFrame.
- return masm()->TryCallApiFunctionAndReturn(ref, 4);
+ __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
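The a1/a2 shuffling above follows from the O32 calling convention: a function returning a struct by value receives a hidden pointer to the return slot in a0, which shifts the visible arguments up one register. Illustratively, for a hypothetical getter declaration (not code from this file):

    // v8::Handle<v8::Value> getter(v8::Local<v8::String> name,
    //                              const v8::AccessorInfo& info);
    // Under O32 with struct return:
    //   a0 = hidden pointer to the returned struct
    //   a1 = name   (first visible argument)
    //   a2 = &info  (second visible argument)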
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
+ Handle<String> name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1513,9 +1215,9 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo() &&
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
- compile_followup_inline = true;
+ lookup->GetCallbackObject()->IsAccessorInfo()) {
+ compile_followup_inline =
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
}
}
@@ -1532,14 +1234,12 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Requires a frame to make GC aware of pushed pointers.
{
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
// CALLBACKS case needs a receiver to be passed into C++ callback.
__ Push(receiver, holder_reg, name_reg);
} else {
__ Push(holder_reg, name_reg);
}
-
// Invoke an interceptor. Note: map checks from the receiver to the
// interceptor's holder have been compiled before (see a caller
// of this method).
@@ -1548,7 +1248,6 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
holder_reg,
name_reg,
interceptor_holder);
-
// Check if the interceptor provided a value for the property. If so,
// return immediately.
Label interceptor_failed;
@@ -1563,16 +1262,14 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
__ pop(receiver);
}
-
// Leave the internal frame.
}
-
// Check that the maps from the interceptor's holder to the lookup's holder
// haven't changed, and load the lookup's holder into the |holder| register.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
- lookup->holder(),
+ Handle<JSObject>(lookup->holder()),
scratch1,
scratch2,
scratch3,
@@ -1591,15 +1288,14 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
- ASSERT(callback != NULL);
+ Handle<AccessorInfo> callback(
+ AccessorInfo::cast(lookup->GetCallbackObject()));
ASSERT(callback->getter() != NULL);
// Tail call to runtime.
// Important invariant in CALLBACKS case: the code above must be
// structured to never clobber |receiver| register.
- __ li(scratch2, Handle<AccessorInfo>(callback));
+ __ li(scratch2, callback);
// holder_reg is either receiver or scratch1.
if (!receiver.is(holder_reg)) {
ASSERT(scratch1.is(holder_reg));
@@ -1642,9 +1338,9 @@ void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
@@ -1657,7 +1353,7 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
// If the object is the holder then we know that it's a global
// object, which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
- if (object != holder) {
+ if (!object.is_identical_to(holder)) {
__ JumpIfSmi(a0, miss);
}
@@ -1666,15 +1362,16 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
}
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Label* miss) {
// Get the value from the cell.
- __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ li(a3, Operand(cell));
__ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (heap()->InNewSpace(function)) {
+ if (heap()->InNewSpace(*function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
@@ -1689,7 +1386,7 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Branch(miss, ne, t0, Operand(a3));
} else {
- __ Branch(miss, ne, a1, Operand(Handle<JSFunction>(function)));
+ __ Branch(miss, ne, a1, Operand(function));
}
}
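Why the branch on heap()->InNewSpace(*function): generated code may only embed pointers to objects that cannot move, and new-space objects move on every scavenge. A hypothetical predicate capturing the choice made above (names are illustrative, not from this file):

    // True if the cell check must go through the shared function info
    // instead of comparing against the function pointer directly.
    static bool MustCheckViaSharedInfo(Heap* heap,
                                       Handle<JSFunction> function) {
      // New-space objects are moved by the scavenger, so their addresses
      // cannot be baked into generated code.
      return heap->InNewSpace(*function);
    }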
@@ -1703,20 +1400,6 @@ void CallStubCompiler::GenerateMissBranch() {
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* CallStubCompiler::TryGenerateMissBranch() {
- MaybeObject* maybe_obj =
- isolate()->stub_cache()->TryComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- Object* obj;
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
- return obj;
-}
-
-
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
int index,
@@ -1751,11 +1434,12 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -1765,11 +1449,11 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
Register receiver = a1;
@@ -1781,8 +1465,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), receiver,
- holder, a3, v0, t0, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, v0, t0,
+ name, &miss);
if (argc == 0) {
// Nothing to do, just return the length.
@@ -1791,10 +1475,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ Ret();
} else {
Label call_builtin;
-
Register elements = a3;
Register end_elements = t1;
-
// Get the elements array of the object.
__ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1935,19 +1617,19 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
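Note the bailout convention used by all the custom-call compilers in this patch: returning an empty Handle<Code> replaces the old heap()->undefined_value() sentinel. Callers test it the same way, sketched here with the names that appear further down in this diff:

    Handle<Code> code = CompileCustomCall(object, holder, cell,
                                          function, name);
    if (!code.is_null()) return code;  // specialized stub compiled
    // otherwise fall through to the regular call stub path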
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -1957,25 +1639,22 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss, return_undefined, call_builtin;
-
Register receiver = a1;
Register elements = a3;
-
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ lw(receiver, MemOperand(sp, argc * kPointerSize));
-
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object),
- receiver, holder, elements, t0, v0, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
+ t0, v0, name, &miss);
// Get the elements array of the object.
__ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -2024,20 +1703,19 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2047,10 +1725,9 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
-
Label miss;
Label name_miss;
Label index_out_of_range;
@@ -2063,16 +1740,16 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
index_out_of_range_label = &miss;
}
- GenerateNameCheck(Handle<String>(name), &name_miss);
+ GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
v0,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
- a1, a3, t0, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ v0, holder, a1, a3, t0, name, &miss);
Register receiver = a1;
Register index = t1;
@@ -2085,20 +1762,20 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharCodeAtGenerator char_code_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_code_at_generator.GenerateFast(masm());
+ StringCharCodeAtGenerator generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_code_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -2109,22 +1786,21 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ bind(&miss);
// Restore function name in a2.
- __ li(a2, Handle<String>(name));
+ __ li(a2, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2134,31 +1810,28 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
-
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
-
if (kind_ == Code::CALL_IC &&
(CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
-
- GenerateNameCheck(Handle<String>(name), &name_miss);
+ GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
v0,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
- a1, a3, t0, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ v0, holder, a1, a3, t0, name, &miss);
Register receiver = v0;
Register index = t1;
@@ -2172,21 +1845,21 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_at_generator.GenerateFast(masm());
+ StringCharAtGenerator generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -2197,22 +1870,21 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ bind(&miss);
// Restore function name in a2.
- __ li(a2, Handle<String>(name));
+ __ li(a2, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2225,22 +1897,23 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ lw(a1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(a1, &miss);
- CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2256,13 +1929,13 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Convert the smi code to uint16.
__ And(code, code, Operand(Smi::FromInt(0xffff)));
- StringCharFromCodeGenerator char_from_code_generator(code, v0);
- char_from_code_generator.GenerateFast(masm());
+ StringCharFromCodeGenerator generator(code, v0);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_from_code_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
@@ -2271,19 +1944,19 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
__ bind(&miss);
// a2: function name.
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2292,30 +1965,29 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// -- sp[argc * 4] : receiver
// -----------------------------------
- if (!CpuFeatures::IsSupported(FPU))
- return heap()->undefined_value();
- CpuFeatures::Scope scope_fpu(FPU);
+ if (!CpuFeatures::IsSupported(FPU)) {
+ return Handle<Code>::null();
+ }
+ CpuFeatures::Scope scope_fpu(FPU);
const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss, slow;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(a1, &miss);
-
- CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2405,19 +2077,19 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ bind(&miss);
// a2: function name.
- MaybeObject* obj = TryGenerateMissBranch();
- if (obj->IsFailure()) return obj;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2427,25 +2099,23 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// -----------------------------------
const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
- GenerateNameCheck(Handle<String>(name), &miss);
- if (cell == NULL) {
+ GenerateNameCheck(name, &miss);
+ if (cell.is_null()) {
__ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(a1, &miss);
-
- CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2507,37 +2177,36 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
__ bind(&miss);
// a2: function name.
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
Counters* counters = isolate()->counters();
ASSERT(optimization.is_simple_api_call());
// Bail out if object is a global object as we don't want to
// repatch it to global receiver.
- if (object->IsGlobalObject()) return heap()->undefined_value();
- if (cell != NULL) return heap()->undefined_value();
- if (!object->IsJSObject()) return heap()->undefined_value();
+ if (object->IsGlobalObject()) return Handle<Code>::null();
+ if (!cell.is_null()) return Handle<Code>::null();
+ if (!object->IsJSObject()) return Handle<Code>::null();
int depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+ Handle<JSObject>::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return Handle<Code>::null();
Label miss, miss_before_stack_reserved;
- GenerateNameCheck(Handle<String>(name), &miss_before_stack_reserved);
+ GenerateNameCheck(name, &miss_before_stack_reserved);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -2552,45 +2221,42 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
ReserveSpaceForFastApiCall(masm(), a0);
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+ CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name,
depth, &miss);
- MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
+ GenerateFastApiDirectCall(masm(), optimization, argc);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
__ bind(&miss_before_stack_reserved);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
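Collecting the early exits above, the fast API path is taken only when all of the following hold; a hedged restatement as a single predicate:

    static bool CanUseFastApiCall(const CallOptimization& optimization,
                                  Handle<Object> object,
                                  Handle<JSObject> holder,
                                  Handle<JSGlobalPropertyCell> cell) {
      if (object->IsGlobalObject()) return false;  // would need receiver repatch
      if (!cell.is_null()) return false;           // global-cell calls excluded
      if (!object->IsJSObject()) return false;
      int depth = optimization.GetPrototypeDepthOfExpectedType(
          Handle<JSObject>::cast(object), holder);
      return depth != kInvalidProtoDepth;
    }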
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSFunction> function,
+ Handle<String> name,
CheckType check) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, NULL, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // Undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -2605,16 +2271,14 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Make sure that it's okay not to patch the on-stack receiver
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
__ IncrementCounter(masm()->isolate()->counters()->call_const(),
1, a0, a3);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
+ name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -2625,28 +2289,25 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
+ if (function->IsBuiltin() || function->shared()->strict_mode()) {
// Check that the object is a two-byte string or a symbol.
__ GetObjectType(a1, a3, a3);
__ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
- a1, t0, name, &miss);
- }
- break;
-
- case NUMBER_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ a0, holder, a3, a1, t0, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case NUMBER_CHECK:
+ if (function->IsBuiltin() || function->shared()->strict_mode()) {
Label fast;
// Check that the object is a smi or a heap number.
__ And(t1, a1, Operand(kSmiTagMask));
@@ -2657,18 +2318,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
- a1, t0, name, &miss);
- }
- break;
- }
-
- case BOOLEAN_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ a0, holder, a3, a1, t0, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case BOOLEAN_CHECK:
+ if (function->IsBuiltin() || function->shared()->strict_mode()) {
Label fast;
// Check that the object is a boolean.
__ LoadRoot(t0, Heap::kTrueValueRootIndex);
@@ -2679,16 +2340,17 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
- a1, t0, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ a0, holder, a3, a1, t0, name, &miss);
+ } else {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
}
break;
}
- default:
- UNREACHABLE();
- }
-
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
@@ -2697,17 +2359,16 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
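The STRING/NUMBER/BOOLEAN cases were restructured rather than changed: the old code jumped to |miss| when the callee was a non-strict non-builtin, while the new code performs the type check when the callee is a builtin or a strict-mode function. The two guards are De Morgan duals:

    // old: if (!function->IsBuiltin() && !shared->strict_mode()) goto miss;
    //      else { /* type check against the value's map */ }
    // new: if (function->IsBuiltin() || function->shared()->strict_mode())
    //        { /* type check against the value's map */ }
    //      else goto miss;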
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -2715,11 +2376,10 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
Label miss;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
-
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
@@ -2727,64 +2387,47 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
__ lw(a1, MemOperand(sp, argc * kPointerSize));
CallInterceptorCompiler compiler(this, arguments(), a2, extra_state_);
- MaybeObject* result = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- a1,
- a3,
- t0,
- a0,
- &miss);
- if (result->IsFailure()) {
- return result;
- }
+ compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
+ &miss);
// Move returned value, the function to call, to a1.
__ mov(a1, v0);
// Restore receiver.
__ lw(a0, MemOperand(sp, argc * kPointerSize));
- GenerateCallFunction(masm(), Handle<Object>(object), arguments(), &miss,
- extra_state_);
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(INTERCEPTOR, name);
+ return GetCode(INTERCEPTOR, name);
}
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, cell, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // Undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
-
GenerateGlobalReceiverCheck(object, holder, name, &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
@@ -2801,7 +2444,6 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Jump to the cached code (tail call).
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
- Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
@@ -2816,11 +2458,10 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(NORMAL, name);
+ return GetCode(NORMAL, name);
}
@@ -3053,29 +2694,24 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
}
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- a0 : receiver
// -- a2 : name
// -- ra : return address
// -----------------------------------
Label miss;
-
- MaybeObject* result = GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0,
- callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, callback, name,
+ &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return TryGetCode(CALLBACKS, name);
+ return GetCode(CALLBACKS, name);
}
@@ -3099,9 +2735,9 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
}
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : receiver
// -- a2 : name
@@ -3112,21 +2748,13 @@ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(object,
- holder,
- &lookup,
- a0,
- a2,
- a3,
- a1,
- t0,
- name,
+ GenerateLoadInterceptor(object, holder, &lookup, a0, a2, a3, a1, t0, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return TryGetCode(INTERCEPTOR, name);
+ return GetCode(INTERCEPTOR, name);
}
@@ -3200,11 +2828,11 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
@@ -3213,19 +2841,14 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
Label miss;
// Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
-
- MaybeObject* result = GenerateLoadCallback(receiver, holder, a1, a0, a2, a3,
- t0, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
+ __ Branch(&miss, ne, a0, Operand(name));
+ GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, callback, name,
+ &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return TryGetCode(CALLBACKS, name);
+ return GetCode(CALLBACKS, name);
}
@@ -3253,9 +2876,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
@@ -3264,24 +2888,16 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
Label miss;
// Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+ __ Branch(&miss, ne, a0, Operand(name));
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- a1,
- a0,
- a2,
- a3,
- t0,
- name,
+ GenerateLoadInterceptor(receiver, holder, &lookup, a1, a0, a2, a3, t0, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return TryGetCode(INTERCEPTOR, name);
+ return GetCode(INTERCEPTOR, name);
}
@@ -3496,7 +3112,8 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
}
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+ Handle<JSFunction> function) {
// a0 : argc
// a1 : constructor
// ra : return address
@@ -3541,12 +3158,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// a2: initial map
// t7: undefined
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(a3,
- t4,
- t5,
- t6,
- &generic_stub_call,
- SIZE_IN_WORDS);
+ __ AllocateInNewSpace(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);
// Allocated the JSObject; now initialize the fields. The map is set to the
// initial map, and properties and elements are set to the empty fixed array.
@@ -3581,7 +3193,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// t7: undefined
// Fill the initialized properties with a constant value or a passed argument
// depending on the this.x = ...; assignment in the function.
- SharedFunctionInfo* shared = function->shared();
+ Handle<SharedFunctionInfo> shared(function->shared());
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
Label not_passed, next;
@@ -4053,9 +3665,9 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
__ bind(&miss_force_generic);
- Code* stub = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+ Handle<Code> stub =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(stub, RelocInfo::CODE_TARGET);
}
@@ -4483,9 +4095,9 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
__ Ret();
__ bind(&miss_force_generic);
- Code* stub = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+ Handle<Code> stub =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(stub, RelocInfo::CODE_TARGET);
}
diff --git a/src/3rdparty/v8/src/objects-inl.h b/src/3rdparty/v8/src/objects-inl.h
index 13d7591..51d92ba 100644
--- a/src/3rdparty/v8/src/objects-inl.h
+++ b/src/3rdparty/v8/src/objects-inl.h
@@ -1343,7 +1343,7 @@ int JSObject::GetInternalFieldCount() {
// Make sure to adjust for the number of in-object properties. These
// properties do contribute to the size, but are not internal fields.
return ((Size() - GetHeaderSize()) >> kPointerSizeLog2) -
- map()->inobject_properties() - (map()->has_external_resource()?1:0);
+ map()->inobject_properties();
}
@@ -1373,23 +1373,6 @@ void JSObject::SetInternalField(int index, Object* value) {
}
-void JSObject::SetExternalResourceObject(Object *value) {
- ASSERT(map()->has_external_resource());
- int offset = GetHeaderSize() + kPointerSize * GetInternalFieldCount();
- WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
-}
-
-
-Object *JSObject::GetExternalResourceObject() {
- if (map()->has_external_resource()) {
- return READ_FIELD(this, GetHeaderSize() + kPointerSize * GetInternalFieldCount());
- } else {
- return GetHeap()->undefined_value();
- }
-}
-
-
// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
@@ -1872,7 +1855,7 @@ Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
ASSERT(GetType(descriptor_number) == CALLBACKS);
Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
- return reinterpret_cast<AccessorDescriptor*>(p->address());
+ return reinterpret_cast<AccessorDescriptor*>(p->foreign_address());
}
@@ -2099,7 +2082,6 @@ SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
SMI_ACCESSORS(String, length, kLengthOffset)
-SMI_ACCESSORS(SeqString, symbol_id, kSymbolIdOffset)
uint32_t String::hash_field() {
@@ -2749,14 +2731,14 @@ bool Map::is_extensible() {
void Map::set_attached_to_shared_function_info(bool value) {
if (value) {
- set_bit_field3(bit_field3() | (1 << kAttachedToSharedFunctionInfo));
+ set_bit_field2(bit_field2() | (1 << kAttachedToSharedFunctionInfo));
} else {
- set_bit_field3(bit_field3() & ~(1 << kAttachedToSharedFunctionInfo));
+ set_bit_field2(bit_field2() & ~(1 << kAttachedToSharedFunctionInfo));
}
}
bool Map::attached_to_shared_function_info() {
- return ((1 << kAttachedToSharedFunctionInfo) & bit_field3()) != 0;
+ return ((1 << kAttachedToSharedFunctionInfo) & bit_field2()) != 0;
}
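The flag simply moved from bit_field3 to bit_field2; the accessor shape is the standard one-bit Map flag pattern used throughout this file. An illustrative template (names hypothetical):

    void Map::set_some_flag(bool value) {
      if (value) {
        set_bit_field2(bit_field2() | (1 << kSomeFlagBit));
      } else {
        set_bit_field2(bit_field2() & ~(1 << kSomeFlagBit));
      }
    }

    bool Map::some_flag() {
      return ((1 << kSomeFlagBit) & bit_field2()) != 0;
    }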
@@ -2771,47 +2753,6 @@ void Map::set_is_shared(bool value) {
bool Map::is_shared() {
return ((1 << kIsShared) & bit_field3()) != 0;
}
-
-void Map::set_has_external_resource(bool value) {
- if (value) {
- set_bit_field(bit_field() | (1 << kHasExternalResource));
- } else {
- set_bit_field(bit_field() & ~(1 << kHasExternalResource));
- }
-}
-
-bool Map::has_external_resource()
-{
- return ((1 << kHasExternalResource) & bit_field()) != 0;
-}
-
-
-void Map::set_use_user_object_comparison(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kUseUserObjectComparison));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kUseUserObjectComparison));
- }
-}
-
-bool Map::use_user_object_comparison() {
- return ((1 << kUseUserObjectComparison) & bit_field2()) != 0;
-}
-
-
-void Map::set_named_interceptor_is_fallback(bool value)
-{
- if (value) {
- set_bit_field3(bit_field3() | (1 << kNamedInterceptorIsFallback));
- } else {
- set_bit_field3(bit_field3() & ~(1 << kNamedInterceptorIsFallback));
- }
-}
-
-bool Map::named_interceptor_is_fallback()
-{
- return ((1 << kNamedInterceptorIsFallback) & bit_field3()) != 0;
-}
JSFunction* Map::unchecked_constructor() {
@@ -3313,7 +3254,6 @@ ACCESSORS(InterceptorInfo, query, Object, kQueryOffset)
ACCESSORS(InterceptorInfo, deleter, Object, kDeleterOffset)
ACCESSORS(InterceptorInfo, enumerator, Object, kEnumeratorOffset)
ACCESSORS(InterceptorInfo, data, Object, kDataOffset)
-ACCESSORS(InterceptorInfo, is_fallback, Smi, kFallbackOffset)
ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
@@ -3345,10 +3285,6 @@ ACCESSORS(FunctionTemplateInfo, flag, Smi, kFlagOffset)
ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
kInternalFieldCountOffset)
-ACCESSORS(ObjectTemplateInfo, has_external_resource, Object,
- kHasExternalResourceOffset)
-ACCESSORS(ObjectTemplateInfo, use_user_object_comparison, Object,
- kUseUserObjectComparisonOffset)
ACCESSORS(SignatureInfo, receiver, Object, kReceiverOffset)
ACCESSORS(SignatureInfo, args, Object, kArgsOffset)
@@ -3550,8 +3486,6 @@ void SharedFunctionInfo::set_strict_mode_flag(StrictModeFlag strict_mode_flag) {
BOOL_GETTER(SharedFunctionInfo, compiler_hints, strict_mode,
kStrictModeFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, qml_mode,
- kQmlModeFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
name_should_print_as_anonymous,
@@ -3913,13 +3847,13 @@ ObjectHashTable* JSWeakMap::unchecked_table() {
}
-Address Foreign::address() {
- return AddressFrom<Address>(READ_INTPTR_FIELD(this, kAddressOffset));
+Address Foreign::foreign_address() {
+ return AddressFrom<Address>(READ_INTPTR_FIELD(this, kForeignAddressOffset));
}
-void Foreign::set_address(Address value) {
- WRITE_INTPTR_FIELD(this, kAddressOffset, OffsetFrom(value));
+void Foreign::set_foreign_address(Address value) {
+ WRITE_INTPTR_FIELD(this, kForeignAddressOffset, OffsetFrom(value));
}
@@ -4602,14 +4536,14 @@ int JSObject::BodyDescriptor::SizeOf(Map* map, HeapObject* object) {
void Foreign::ForeignIterateBody(ObjectVisitor* v) {
v->VisitExternalReference(
- reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
+ reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
}
template<typename StaticVisitor>
void Foreign::ForeignIterateBody() {
StaticVisitor::VisitExternalReference(
- reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
+ reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
}
diff --git a/src/3rdparty/v8/src/objects-printer.cc b/src/3rdparty/v8/src/objects-printer.cc
index b788504..1ca97de 100644
--- a/src/3rdparty/v8/src/objects-printer.cc
+++ b/src/3rdparty/v8/src/objects-printer.cc
@@ -759,7 +759,7 @@ void Code::CodePrint(FILE* out) {
void Foreign::ForeignPrint(FILE* out) {
- PrintF(out, "foreign address : %p", address());
+ PrintF(out, "foreign address : %p", foreign_address());
}
diff --git a/src/3rdparty/v8/src/objects.cc b/src/3rdparty/v8/src/objects.cc
index 6a4eff9..b33e424 100644
--- a/src/3rdparty/v8/src/objects.cc
+++ b/src/3rdparty/v8/src/objects.cc
@@ -220,7 +220,7 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
if (structure->IsForeign()) {
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
- Foreign::cast(structure)->address());
+ Foreign::cast(structure)->foreign_address());
MaybeObject* value = (callback->getter)(receiver, callback->data);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return value;
@@ -1980,10 +1980,9 @@ MaybeObject* JSObject::SetPropertyWithInterceptor(
MaybeObject* JSReceiver::SetProperty(String* name,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool skip_fallback_interceptor) {
+ StrictModeFlag strict_mode) {
LookupResult result(GetIsolate());
- LocalLookup(name, &result, skip_fallback_interceptor);
+ LocalLookup(name, &result);
return SetProperty(&result, name, value, attributes, strict_mode);
}
@@ -2007,7 +2006,7 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
if (structure->IsForeign()) {
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
- Foreign::cast(structure)->address());
+ Foreign::cast(structure)->foreign_address());
MaybeObject* obj = (callback->setter)(this, value, callback->data);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (obj->IsFailure()) return obj;
@@ -2149,47 +2148,35 @@ MaybeObject* JSObject::SetPropertyWithCallbackSetterInPrototypes(
bool* found,
StrictModeFlag strict_mode) {
Heap* heap = GetHeap();
- LookupResult result(heap->isolate());
- LookupCallbackSetterInPrototypes(name, &result);
- if (result.IsFound()) {
+ // We could not find a local property so let's check whether there is an
+ // accessor that wants to handle the property.
+ LookupResult accessor_result(heap->isolate());
+ LookupCallbackSetterInPrototypes(name, &accessor_result);
+ if (accessor_result.IsFound()) {
*found = true;
- if (result.type() == CALLBACKS) {
- return SetPropertyWithCallback(result.GetCallbackObject(),
+ if (accessor_result.type() == CALLBACKS) {
+ return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
name,
value,
- result.holder(),
+ accessor_result.holder(),
strict_mode);
- } else if (result.type() == HANDLER) {
- // We could not find a local property so let's check whether there is an
- // accessor that wants to handle the property.
- LookupResult accessor_result(heap->isolate());
- LookupCallbackSetterInPrototypes(name, &accessor_result);
- if (accessor_result.IsFound()) {
- if (accessor_result.type() == CALLBACKS) {
- return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
- name,
- value,
- accessor_result.holder(),
- strict_mode);
- } else if (accessor_result.type() == HANDLER) {
- // There is a proxy in the prototype chain. Invoke its
- // getPropertyDescriptor trap.
- bool found = false;
- // SetPropertyWithHandlerIfDefiningSetter can cause GC,
- // make sure to use the handlified references after calling
- // the function.
- Handle<JSObject> self(this);
- Handle<String> hname(name);
- Handle<Object> hvalue(value);
- MaybeObject* result =
- accessor_result.proxy()->SetPropertyWithHandlerIfDefiningSetter(
- name, value, attributes, strict_mode, &found);
- if (found) return result;
- // The proxy does not define the property as an accessor.
- // Consequently, it has no effect on setting the receiver.
- return self->AddProperty(*hname, *hvalue, attributes, strict_mode);
- }
- }
+ } else if (accessor_result.type() == HANDLER) {
+ // There is a proxy in the prototype chain. Invoke its
+ // getPropertyDescriptor trap.
+ bool found = false;
+ // SetPropertyWithHandlerIfDefiningSetter can cause GC,
+ // make sure to use the handlified references after calling
+ // the function.
+ Handle<JSObject> self(this);
+ Handle<String> hname(name);
+ Handle<Object> hvalue(value);
+ MaybeObject* result =
+ accessor_result.proxy()->SetPropertyWithHandlerIfDefiningSetter(
+ name, value, attributes, strict_mode, &found);
+ if (found) return result;
+ // The proxy does not define the property as an accessor.
+ // Consequently, it has no effect on setting the receiver.
+ return self->AddProperty(*hname, *hvalue, attributes, strict_mode);
}
}
*found = false;
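
A note on the handlification above: SetPropertyWithHandlerIfDefiningSetter can allocate and therefore trigger a GC, so the raw this, name, and value pointers are wrapped in handles first, and every access after the call goes through them. A minimal standalone model of why the extra indirection survives a moving collector (stand-in types; not V8's real handle machinery):

    #include <cassert>

    struct Obj { int payload; };

    // A handle is one extra indirection through a slot that the collector
    // rewrites when it moves the object, so the holder stays valid.
    struct HandleModel {
      Obj** slot;
      Obj* operator->() const { return *slot; }
    };

    int main() {
      Obj a = {7};
      Obj* location = &a;           // the slot a handle scope would own
      HandleModel h = {&location};

      Obj b = a;                    // the collector "moves" the object...
      location = &b;                // ...and updates the slot it owns

      assert(h->payload == 7);      // the handle still dereferences correctly
      return 0;
    }
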
@@ -4214,8 +4201,7 @@ AccessorDescriptor* Map::FindAccessor(String* name) {
}
-void JSReceiver::LocalLookup(String* name, LookupResult* result,
- bool skip_fallback_interceptor) {
+void JSReceiver::LocalLookup(String* name, LookupResult* result) {
ASSERT(name->IsString());
Heap* heap = GetHeap();
@@ -4247,31 +4233,23 @@ void JSReceiver::LocalLookup(String* name, LookupResult* result,
}
// Check for lookup interceptor except when bootstrapping.
- bool wouldIntercept = js_object->HasNamedInterceptor() &&
- !heap->isolate()->bootstrapper()->IsActive();
- if (wouldIntercept && !map()->named_interceptor_is_fallback()) {
+ if (js_object->HasNamedInterceptor() &&
+ !heap->isolate()->bootstrapper()->IsActive()) {
result->InterceptorResult(js_object);
return;
}
js_object->LocalLookupRealNamedProperty(name, result);
-
- if (wouldIntercept && !skip_fallback_interceptor && !result->IsProperty() &&
- map()->named_interceptor_is_fallback()) {
- result->InterceptorResult(js_object);
- return;
- }
}
-void JSReceiver::Lookup(String* name, LookupResult* result,
- bool skip_fallback_interceptor) {
+void JSReceiver::Lookup(String* name, LookupResult* result) {
// Ecma-262 3rd 8.6.2.4
Heap* heap = GetHeap();
for (Object* current = this;
current != heap->null_value();
current = JSObject::cast(current)->GetPrototype()) {
- JSReceiver::cast(current)->LocalLookup(name, result, skip_fallback_interceptor);
+ JSReceiver::cast(current)->LocalLookup(name, result);
if (result->IsProperty()) return;
}
result->NotFound();
@@ -6726,71 +6704,6 @@ static inline bool CompareStringContentsPartial(Isolate* isolate,
}
-bool String::SlowEqualsExternal(uc16 *string, int length) {
- int len = this->length();
- if (len != length) return false;
- if (len == 0) return true;
-
- // We know the strings are both non-empty. Compare the first chars
- // before we try to flatten the strings.
- if (this->Get(0) != string[0]) return false;
-
- String* lhs = this->TryFlattenGetString();
-
- if (lhs->IsFlat()) {
- String::FlatContent lhs_content = lhs->GetFlatContent();
- if (lhs->IsAsciiRepresentation()) {
- Vector<const char> vec1 = lhs_content.ToAsciiVector();
- VectorIterator<char> buf1(vec1);
- VectorIterator<uc16> ib(string, length);
- return CompareStringContents(&buf1, &ib);
- } else {
- Vector<const uc16> vec1 = lhs_content.ToUC16Vector();
- Vector<const uc16> vec2(string, length);
- return CompareRawStringContents(vec1, vec2);
- }
- } else {
- Isolate* isolate = GetIsolate();
- isolate->objects_string_compare_buffer_a()->Reset(0, lhs);
- VectorIterator<uc16> ib(string, length);
- return CompareStringContents(isolate->objects_string_compare_buffer_a(), &ib);
- }
-}
-
-
-bool String::SlowEqualsExternal(char *string, int length)
-{
- int len = this->length();
- if (len != length) return false;
- if (len == 0) return true;
-
- // We know the strings are both non-empty. Compare the first chars
- // before we try to flatten the strings.
- if (this->Get(0) != string[0]) return false;
-
- String* lhs = this->TryFlattenGetString();
-
- if (StringShape(lhs).IsSequentialAscii()) {
- const char* str1 = SeqAsciiString::cast(lhs)->GetChars();
- return CompareRawStringContents(Vector<const char>(str1, len),
- Vector<const char>(string, len));
- }
-
- if (lhs->IsFlat()) {
- String::FlatContent lhs_content = lhs->GetFlatContent();
- Vector<const uc16> vec1 = lhs_content.ToUC16Vector();
- VectorIterator<const uc16> buf1(vec1);
- VectorIterator<char> buf2(string, length);
- return CompareStringContents(&buf1, &buf2);
- } else {
- Isolate* isolate = GetIsolate();
- isolate->objects_string_compare_buffer_a()->Reset(0, lhs);
- VectorIterator<char> ib(string, length);
- return CompareStringContents(isolate->objects_string_compare_buffer_a(), &ib);
- }
-}
-
-
bool String::SlowEquals(String* other) {
// Fast check: negative check with lengths.
int len = length();
@@ -7686,8 +7599,8 @@ void SharedFunctionInfo::DetachInitialMap() {
Map* map = reinterpret_cast<Map*>(initial_map());
// Make the map remember to restore the link if it survives the GC.
- map->set_bit_field3(
- map->bit_field3() | (1 << Map::kAttachedToSharedFunctionInfo));
+ map->set_bit_field2(
+ map->bit_field2() | (1 << Map::kAttachedToSharedFunctionInfo));
// Undo state changes made by StartInobjectTracking (except the
// construction_count). This way if the initial map does not survive the GC
@@ -7707,8 +7620,8 @@ void SharedFunctionInfo::DetachInitialMap() {
// Called from GC, hence reinterpret_cast and unchecked accessors.
void SharedFunctionInfo::AttachInitialMap(Map* map) {
- map->set_bit_field3(
- map->bit_field3() & ~(1 << Map::kAttachedToSharedFunctionInfo));
+ map->set_bit_field2(
+ map->bit_field2() & ~(1 << Map::kAttachedToSharedFunctionInfo));
// Resume inobject slack tracking.
set_initial_map(map);
@@ -10599,8 +10512,10 @@ class StringSharedKey : public HashTableKey {
FixedArray* pair = FixedArray::cast(other);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
if (shared != shared_) return false;
- StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
- Smi::cast(pair->get(2))->value());
+ int strict_unchecked = Smi::cast(pair->get(2))->value();
+ ASSERT(strict_unchecked == kStrictMode ||
+ strict_unchecked == kNonStrictMode);
+ StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
if (strict_mode != strict_mode_) return false;
String* source = String::cast(pair->get(1));
return source->Equals(source_);
@@ -10632,8 +10547,10 @@ class StringSharedKey : public HashTableKey {
FixedArray* pair = FixedArray::cast(obj);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
String* source = String::cast(pair->get(1));
- StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
- Smi::cast(pair->get(2))->value());
+ int strict_unchecked = Smi::cast(pair->get(2))->value();
+ ASSERT(strict_unchecked == kStrictMode ||
+ strict_unchecked == kNonStrictMode);
+ StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
return StringSharedHashHelper(source, shared, strict_mode);
}
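
Both StringSharedKey methods now validate the raw Smi payload before casting it to StrictModeFlag instead of casting blindly. The pattern in isolation, as a self-contained sketch (the stand-in enum values are assumptions; the real flags live in globals.h):

    #include <cassert>

    enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };  // stand-ins

    // Check the untrusted integer against the two legal values before the
    // cast, exactly as the ASSERTs in the hunks above do.
    StrictModeFlag ToStrictModeFlag(int strict_unchecked) {
      assert(strict_unchecked == kStrictMode ||
             strict_unchecked == kNonStrictMode);
      return static_cast<StrictModeFlag>(strict_unchecked);
    }

    int main() {
      assert(ToStrictModeFlag(0) == kNonStrictMode);
      assert(ToStrictModeFlag(1) == kStrictMode);
      return 0;
    }
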
@@ -10791,24 +10708,9 @@ class AsciiSymbolKey : public SequentialSymbolKey<char> {
MaybeObject* AsObject() {
if (hash_field_ == 0) Hash();
- MaybeObject *result = HEAP->AllocateAsciiSymbol(string_, hash_field_);
- if (!result->IsFailure() && result->ToObjectUnchecked()->IsSeqString()) {
- while (true) {
- Atomic32 my_symbol_id = next_symbol_id;
- if (my_symbol_id > Smi::kMaxValue)
- break;
- if (my_symbol_id == NoBarrier_CompareAndSwap(&next_symbol_id, my_symbol_id, my_symbol_id + 1)) {
- SeqString::cast(result->ToObjectUnchecked())->set_symbol_id(my_symbol_id);
- break;
- }
- }
- }
- return result;
+ return HEAP->AllocateAsciiSymbol(string_, hash_field_);
}
-
- static Atomic32 next_symbol_id;
};
-Atomic32 AsciiSymbolKey::next_symbol_id = 1;
class SubStringAsciiSymbolKey : public HashTableKey {
diff --git a/src/3rdparty/v8/src/objects.h b/src/3rdparty/v8/src/objects.h
index 3137437..3187467 100644
--- a/src/3rdparty/v8/src/objects.h
+++ b/src/3rdparty/v8/src/objects.h
@@ -1362,8 +1362,7 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT MaybeObject* SetProperty(String* key,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool skip_fallback_interceptor = false);
+ StrictModeFlag strict_mode);
MUST_USE_RESULT MaybeObject* SetProperty(LookupResult* result,
String* key,
Object* value,
@@ -1415,8 +1414,8 @@ class JSReceiver: public HeapObject {
// Lookup a property. If found, the result is valid and has
// detailed information.
- void LocalLookup(String* name, LookupResult* result, bool skip_fallback_interceptor = false);
- void Lookup(String* name, LookupResult* result, bool skip_fallback_interceptor = false);
+ void LocalLookup(String* name, LookupResult* result);
+ void Lookup(String* name, LookupResult* result);
protected:
Smi* GenerateIdentityHash();
@@ -1760,9 +1759,6 @@ class JSObject: public JSReceiver {
inline Object* GetInternalField(int index);
inline void SetInternalField(int index, Object* value);
- inline void SetExternalResourceObject(Object *);
- inline Object *GetExternalResourceObject();
-
// The following lookup functions skip interceptors.
void LocalLookupRealNamedProperty(String* name, LookupResult* result);
void LookupRealNamedProperty(String* name, LookupResult* result);
@@ -3120,9 +3116,6 @@ class SerializedScopeInfo : public FixedArray {
// Is this scope a strict mode scope?
bool IsStrictMode();
- // Is this scope a qml mode scope?
- bool IsQmlMode();
-
// Return the number of stack slots for code.
int NumberOfStackSlots();
@@ -4177,11 +4170,11 @@ class Map: public HeapObject {
// Tells whether the instance has a call-as-function handler.
inline void set_has_instance_call_handler() {
- set_bit_field3(bit_field3() | (1 << kHasInstanceCallHandler));
+ set_bit_field(bit_field() | (1 << kHasInstanceCallHandler));
}
inline bool has_instance_call_handler() {
- return ((1 << kHasInstanceCallHandler) & bit_field3()) != 0;
+ return ((1 << kHasInstanceCallHandler) & bit_field()) != 0;
}
inline void set_is_extensible(bool value);
@@ -4249,20 +4242,6 @@ class Map: public HeapObject {
inline void set_is_access_check_needed(bool access_check_needed);
inline bool is_access_check_needed();
- // Whether the named interceptor is a fallback interceptor or not
- inline void set_named_interceptor_is_fallback(bool value);
- inline bool named_interceptor_is_fallback();
-
- // Tells whether the instance has the space for an external resource
- // object
- inline void set_has_external_resource(bool value);
- inline bool has_external_resource();
-
- // Tells whether the user object comparison callback should be used for
- // comparisons involving this object
- inline void set_use_user_object_comparison(bool value);
- inline bool use_user_object_comparison();
-
// [prototype]: implicit prototype object.
DECL_ACCESSORS(prototype, Object)
@@ -4503,14 +4482,14 @@ class Map: public HeapObject {
static const int kHasNamedInterceptor = 3;
static const int kHasIndexedInterceptor = 4;
static const int kIsUndetectable = 5;
- static const int kHasExternalResource = 6;
+ static const int kHasInstanceCallHandler = 6;
static const int kIsAccessCheckNeeded = 7;
// Bit positions for bit field 2
static const int kIsExtensible = 0;
static const int kFunctionWithPrototype = 1;
static const int kStringWrapperSafeForDefaultValueOf = 2;
- static const int kUseUserObjectComparison = 3;
+ static const int kAttachedToSharedFunctionInfo = 3;
// No bits can be used after kElementsKindFirstBit, they are all reserved for
// storing ElementKind.
static const int kElementsKindShift = 4;
@@ -4527,9 +4506,6 @@ class Map: public HeapObject {
// Bit positions for bit field 3
static const int kIsShared = 0;
- static const int kNamedInterceptorIsFallback = 1;
- static const int kHasInstanceCallHandler = 2;
- static const int kAttachedToSharedFunctionInfo = 3;
// Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2;
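
With the QML bits gone, kHasInstanceCallHandler moves into bit field 1 and kAttachedToSharedFunctionInfo into bit field 2, which is why the accessors earlier in this patch switch from bit_field3() to bit_field() and bit_field2() respectively. The accessor pattern itself, as a self-contained sketch (a plain C++ model, not the real Map):

    #include <cassert>
    #include <cstdint>

    struct MapModel {
      uint8_t bit_field2;
      static const int kAttachedToSharedFunctionInfo = 3;  // new position

      void set_attached_to_shared_function_info(bool value) {
        if (value) {
          bit_field2 = bit_field2 | (1 << kAttachedToSharedFunctionInfo);
        } else {
          bit_field2 = bit_field2 & ~(1 << kAttachedToSharedFunctionInfo);
        }
      }
      bool attached_to_shared_function_info() const {
        return (bit_field2 & (1 << kAttachedToSharedFunctionInfo)) != 0;
      }
    };

    int main() {
      MapModel m = {0};
      m.set_attached_to_shared_function_info(true);
      assert(m.attached_to_shared_function_info());
      m.set_attached_to_shared_function_info(false);
      assert(!m.attached_to_shared_function_info());
      return 0;
    }
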
@@ -4938,9 +4914,6 @@ class SharedFunctionInfo: public HeapObject {
inline StrictModeFlag strict_mode_flag();
inline void set_strict_mode_flag(StrictModeFlag strict_mode_flag);
- // Indicates whether the function is a qml mode function.
- DECL_BOOLEAN_ACCESSORS(qml_mode)
-
// False if the function definitely does not allocate an arguments object.
DECL_BOOLEAN_ACCESSORS(uses_arguments)
@@ -5162,7 +5135,6 @@ class SharedFunctionInfo: public HeapObject {
kCodeAgeShift,
kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
kStrictModeFunction,
- kQmlModeFunction,
kUsesArguments,
kHasDuplicateParameters,
kNative,
@@ -6229,9 +6201,6 @@ class String: public HeapObject {
bool IsAsciiEqualTo(Vector<const char> str);
bool IsTwoByteEqualTo(Vector<const uc16> str);
- bool SlowEqualsExternal(uc16 *string, int length);
- bool SlowEqualsExternal(char *string, int length);
-
// Return a UTF8 representation of the string. The string is null
// terminated but may optionally contain nulls. Length is returned
// in length_output if length_output is not a null pointer The string
@@ -6488,13 +6457,8 @@ class SeqString: public String {
// Casting.
static inline SeqString* cast(Object* obj);
- // Get and set the symbol id of the string
- inline int symbol_id();
- inline void set_symbol_id(int value);
-
// Layout description.
- static const int kSymbolIdOffset = String::kSize;
- static const int kHeaderSize = kSymbolIdOffset + kPointerSize;
+ static const int kHeaderSize = String::kSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
@@ -7210,8 +7174,8 @@ class JSWeakMap: public JSObject {
class Foreign: public HeapObject {
public:
// [address]: field containing the address.
- inline Address address();
- inline void set_address(Address value);
+ inline Address foreign_address();
+ inline void set_foreign_address(Address value);
// Casting.
static inline Foreign* cast(Object* obj);
@@ -7234,10 +7198,10 @@ class Foreign: public HeapObject {
// Layout description.
- static const int kAddressOffset = HeapObject::kHeaderSize;
- static const int kSize = kAddressOffset + kPointerSize;
+ static const int kForeignAddressOffset = HeapObject::kHeaderSize;
+ static const int kSize = kForeignAddressOffset + kPointerSize;
- STATIC_CHECK(kAddressOffset == Internals::kForeignAddressOffset);
+ STATIC_CHECK(kForeignAddressOffset == Internals::kForeignAddressOffset);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
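
The address/set_address pair becomes foreign_address/set_foreign_address (and kAddressOffset becomes kForeignAddressOffset), presumably to stop hiding the unrelated HeapObject::address(). The stored word is simply a raw pointer round-tripped through an intptr_t, as in this standalone model (stand-in types; the real callers in the objects.cc hunks above cast the result to AccessorDescriptor*):

    #include <cassert>
    #include <cstdint>

    struct ForeignModel {
      intptr_t word;  // models the field at kForeignAddressOffset

      void set_foreign_address(void* value) {
        word = reinterpret_cast<intptr_t>(value);
      }
      void* foreign_address() const {
        return reinterpret_cast<void*>(word);
      }
    };

    struct DescriptorModel { int data; };  // stand-in for AccessorDescriptor

    int main() {
      DescriptorModel desc = {42};
      ForeignModel f;
      f.set_foreign_address(&desc);
      DescriptorModel* cb =
          static_cast<DescriptorModel*>(f.foreign_address());
      assert(cb->data == 42);
      return 0;
    }
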
@@ -7418,7 +7382,6 @@ class InterceptorInfo: public Struct {
DECL_ACCESSORS(deleter, Object)
DECL_ACCESSORS(enumerator, Object)
DECL_ACCESSORS(data, Object)
- DECL_ACCESSORS(is_fallback, Smi)
static inline InterceptorInfo* cast(Object* obj);
@@ -7438,8 +7401,7 @@ class InterceptorInfo: public Struct {
static const int kDeleterOffset = kQueryOffset + kPointerSize;
static const int kEnumeratorOffset = kDeleterOffset + kPointerSize;
static const int kDataOffset = kEnumeratorOffset + kPointerSize;
- static const int kFallbackOffset = kDataOffset + kPointerSize;
- static const int kSize = kFallbackOffset + kPointerSize;
+ static const int kSize = kDataOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
@@ -7561,8 +7523,6 @@ class ObjectTemplateInfo: public TemplateInfo {
public:
DECL_ACCESSORS(constructor, Object)
DECL_ACCESSORS(internal_field_count, Object)
- DECL_ACCESSORS(has_external_resource, Object)
- DECL_ACCESSORS(use_user_object_comparison, Object)
static inline ObjectTemplateInfo* cast(Object* obj);
@@ -7579,9 +7539,7 @@ class ObjectTemplateInfo: public TemplateInfo {
static const int kConstructorOffset = TemplateInfo::kHeaderSize;
static const int kInternalFieldCountOffset =
kConstructorOffset + kPointerSize;
- static const int kHasExternalResourceOffset = kInternalFieldCountOffset + kPointerSize;
- static const int kUseUserObjectComparisonOffset = kHasExternalResourceOffset + kPointerSize;
- static const int kSize = kUseUserObjectComparisonOffset + kPointerSize;
+ static const int kSize = kInternalFieldCountOffset + kPointerSize;
};
diff --git a/src/3rdparty/v8/src/parser.cc b/src/3rdparty/v8/src/parser.cc
index 357d340..0a635fc 100644
--- a/src/3rdparty/v8/src/parser.cc
+++ b/src/3rdparty/v8/src/parser.cc
@@ -607,8 +607,7 @@ Parser::Parser(Handle<Script> script,
FunctionLiteral* Parser::ParseProgram(Handle<String> source,
bool in_global_context,
- StrictModeFlag strict_mode,
- bool qml_mode) {
+ StrictModeFlag strict_mode) {
ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(isolate()->counters()->parse());
@@ -624,11 +623,11 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
ExternalTwoByteStringUC16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
scanner_.Initialize(&stream);
- return DoParseProgram(source, in_global_context, strict_mode, qml_mode, &zone_scope);
+ return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
} else {
GenericStringUC16CharacterStream stream(source, 0, source->length());
scanner_.Initialize(&stream);
- return DoParseProgram(source, in_global_context, strict_mode, qml_mode, &zone_scope);
+ return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
}
}
@@ -636,7 +635,6 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
bool in_global_context,
StrictModeFlag strict_mode,
- bool qml_mode,
ZoneScope* zone_scope) {
ASSERT(top_scope_ == NULL);
ASSERT(target_stack_ == NULL);
@@ -656,9 +654,6 @@ FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
LexicalScope lexical_scope(this, scope, isolate());
ASSERT(top_scope_->strict_mode_flag() == kNonStrictMode);
top_scope_->SetStrictModeFlag(strict_mode);
- if (qml_mode) {
- scope->EnableQmlMode();
- }
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16);
bool ok = true;
int beg_loc = scanner().location().beg_pos;
@@ -752,10 +747,6 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
scope->strict_mode_flag() == info->strict_mode_flag());
ASSERT(info->strict_mode_flag() == shared_info->strict_mode_flag());
scope->SetStrictModeFlag(shared_info->strict_mode_flag());
- if (shared_info->qml_mode()) {
- top_scope_->EnableQmlMode();
- }
-
FunctionLiteral::Type type = shared_info->is_expression()
? (shared_info->is_anonymous()
? FunctionLiteral::ANONYMOUS_EXPRESSION
@@ -1865,11 +1856,6 @@ Block* Parser::ParseVariableDeclarations(
arguments->Add(value);
value = NULL; // zap the value to avoid the unnecessary assignment
- int qml_mode = 0;
- if (top_scope_->is_qml_mode() && !Isolate::Current()->global()->HasProperty(*name))
- qml_mode = 1;
- arguments->Add(NewNumberLiteral(qml_mode));
-
// Construct the call to Runtime_InitializeConstGlobal
// and add it to the initialization statement block.
// Note that the function does different things depending on
@@ -1886,11 +1872,6 @@ Block* Parser::ParseVariableDeclarations(
StrictModeFlag flag = initialization_scope->strict_mode_flag();
arguments->Add(NewNumberLiteral(flag));
- int qml_mode = 0;
- if (top_scope_->is_qml_mode() && !Isolate::Current()->global()->HasProperty(*name))
- qml_mode = 1;
- arguments->Add(NewNumberLiteral(qml_mode));
-
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
// necessarily be stored in the global object in that case,
@@ -2479,7 +2460,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// implementing stack allocated block scoped variables.
Variable* temp = top_scope_->DeclarationScope()->NewTemporary(name);
VariableProxy* temp_proxy = new(zone()) VariableProxy(isolate(), temp);
- VariableProxy* each = top_scope_->NewUnresolved(name, inside_with());
+ VariableProxy* each = top_scope_->NewUnresolved(name);
ForInStatement* loop = new(zone()) ForInStatement(isolate(), labels);
Target target(&this->target_stack_, loop);
@@ -2951,23 +2932,14 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
// Keep track of eval() calls since they disable all local variable
// optimizations.
// The calls that need special treatment are the
- // direct (i.e. not aliased) eval calls. These calls are all of the
- // form eval(...) with no explicit receiver object where eval is not
- // declared in the current scope chain.
+ // direct eval calls. These calls are all of the form eval(...), with
+ // no explicit receiver.
// These calls are marked as potentially direct eval calls. Whether
// they are actually direct calls to eval is determined at run time.
- // TODO(994): In ES5, it doesn't matter if the "eval" var is declared
- // in the local scope chain. It only matters that it's called "eval",
- // is called without a receiver and it refers to the original eval
- // function.
VariableProxy* callee = result->AsVariableProxy();
if (callee != NULL &&
callee->IsVariable(isolate()->factory()->eval_symbol())) {
- Handle<String> name = callee->name();
- Variable* var = top_scope_->Lookup(name);
- if (var == NULL) {
- top_scope_->DeclarationScope()->RecordEvalCall();
- }
+ top_scope_->DeclarationScope()->RecordEvalCall();
}
result = NewCall(result, args, pos);
break;
@@ -5330,7 +5302,7 @@ static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
int flags,
ParserRecorder* recorder) {
Isolate* isolate = Isolate::Current();
- JavaScriptScanner scanner(isolate->unicode_cache());
+ Scanner scanner(isolate->unicode_cache());
scanner.SetHarmonyScoping((flags & kHarmonyScoping) != 0);
scanner.Initialize(source);
intptr_t stack_limit = isolate->stack_guard()->real_climit();
@@ -5437,8 +5409,7 @@ bool ParserApi::Parse(CompilationInfo* info) {
Handle<String> source = Handle<String>(String::cast(script->source()));
result = parser.ParseProgram(source,
info->is_global(),
- info->strict_mode_flag(),
- info->is_qml_mode());
+ info->strict_mode_flag());
}
}
info->SetFunction(result);
diff --git a/src/3rdparty/v8/src/parser.h b/src/3rdparty/v8/src/parser.h
index a60951d..9624301 100644
--- a/src/3rdparty/v8/src/parser.h
+++ b/src/3rdparty/v8/src/parser.h
@@ -77,6 +77,8 @@ class FunctionEntry BASE_EMBEDDED {
int literal_count() { return backing_[kLiteralCountOffset]; }
int property_count() { return backing_[kPropertyCountOffset]; }
StrictModeFlag strict_mode_flag() {
+ ASSERT(backing_[kStrictModeOffset] == kStrictMode ||
+ backing_[kStrictModeOffset] == kNonStrictMode);
return static_cast<StrictModeFlag>(backing_[kStrictModeOffset]);
}
@@ -430,8 +432,7 @@ class Parser {
// Returns NULL if parsing failed.
FunctionLiteral* ParseProgram(Handle<String> source,
bool in_global_context,
- StrictModeFlag strict_mode,
- bool qml_mode = false);
+ StrictModeFlag strict_mode);
FunctionLiteral* ParseLazy(CompilationInfo* info);
@@ -477,7 +478,6 @@ class Parser {
FunctionLiteral* DoParseProgram(Handle<String> source,
bool in_global_context,
StrictModeFlag strict_mode,
- bool qml_mode,
ZoneScope* zone_scope);
// Report syntax error
@@ -486,7 +486,7 @@ class Parser {
void ReportMessage(const char* message, Vector<const char*> args);
bool inside_with() const { return top_scope_->inside_with(); }
- JavaScriptScanner& scanner() { return scanner_; }
+ Scanner& scanner() { return scanner_; }
Mode mode() const { return mode_; }
ScriptDataImpl* pre_data() const { return pre_data_; }
@@ -726,7 +726,7 @@ class Parser {
ZoneList<Handle<String> > symbol_cache_;
Handle<Script> script_;
- JavaScriptScanner scanner_;
+ Scanner scanner_;
Scope* top_scope_;
diff --git a/src/3rdparty/v8/src/platform-linux.cc b/src/3rdparty/v8/src/platform-linux.cc
index 451f6fc..90f45dd 100644
--- a/src/3rdparty/v8/src/platform-linux.cc
+++ b/src/3rdparty/v8/src/platform-linux.cc
@@ -46,7 +46,7 @@
#include <sys/stat.h> // open
#include <fcntl.h> // open
#include <unistd.h> // sysconf
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
+#ifdef __GLIBC__
#include <execinfo.h> // backtrace, backtrace_symbols
#endif // def __GLIBC__
#include <strings.h> // index
@@ -553,7 +553,7 @@ void OS::SignalCodeMovingGC() {
int OS::StackWalk(Vector<OS::StackFrame> frames) {
// backtrace is a glibc extension.
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
+#ifdef __GLIBC__
int frames_size = frames.length();
ScopedVector<void*> addresses(frames_size);
diff --git a/src/3rdparty/v8/src/preparser-api.cc b/src/3rdparty/v8/src/preparser-api.cc
index 25c7a82..1bca9a3 100644
--- a/src/3rdparty/v8/src/preparser-api.cc
+++ b/src/3rdparty/v8/src/preparser-api.cc
@@ -182,7 +182,7 @@ PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
internal::InputStreamUTF16Buffer buffer(input);
uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
internal::UnicodeCache unicode_cache;
- internal::JavaScriptScanner scanner(&unicode_cache);
+ internal::Scanner scanner(&unicode_cache);
scanner.Initialize(&buffer);
internal::CompleteParserRecorder recorder;
preparser::PreParser::PreParseResult result =
diff --git a/src/3rdparty/v8/src/preparser.cc b/src/3rdparty/v8/src/preparser.cc
index b1628eb..291dfd3 100644
--- a/src/3rdparty/v8/src/preparser.cc
+++ b/src/3rdparty/v8/src/preparser.cc
@@ -72,7 +72,7 @@ void PreParser::ReportUnexpectedToken(i::Token::Value token) {
if (token == i::Token::ILLEGAL && stack_overflow_) {
return;
}
- i::JavaScriptScanner::Location source_location = scanner_->location();
+ i::Scanner::Location source_location = scanner_->location();
// Four of the tokens are treated specially
switch (token) {
@@ -647,7 +647,7 @@ PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
Expect(i::Token::THROW, CHECK_OK);
if (scanner_->HasAnyLineTerminatorBeforeNext()) {
- i::JavaScriptScanner::Location pos = scanner_->location();
+ i::Scanner::Location pos = scanner_->location();
ReportMessageAt(pos, "newline_after_throw", NULL);
*ok = false;
return Statement::Default();
diff --git a/src/3rdparty/v8/src/preparser.h b/src/3rdparty/v8/src/preparser.h
index 45e81e9..647a142 100644
--- a/src/3rdparty/v8/src/preparser.h
+++ b/src/3rdparty/v8/src/preparser.h
@@ -116,7 +116,7 @@ class PreParser {
// success (even if parsing failed, the pre-parse data successfully
// captured the syntax error), and false if a stack-overflow happened
// during parsing.
- static PreParseResult PreParseProgram(i::JavaScriptScanner* scanner,
+ static PreParseResult PreParseProgram(i::Scanner* scanner,
i::ParserRecorder* log,
int flags,
uintptr_t stack_limit) {
@@ -449,7 +449,7 @@ class PreParser {
};
// Private constructor only used in PreParseProgram.
- PreParser(i::JavaScriptScanner* scanner,
+ PreParser(i::Scanner* scanner,
i::ParserRecorder* log,
uintptr_t stack_limit,
bool allow_lazy,
@@ -619,7 +619,7 @@ class PreParser {
Identifier identifier,
bool* ok);
- i::JavaScriptScanner* scanner_;
+ i::Scanner* scanner_;
i::ParserRecorder* log_;
Scope* scope_;
uintptr_t stack_limit_;
diff --git a/src/3rdparty/v8/src/prettyprinter.cc b/src/3rdparty/v8/src/prettyprinter.cc
index 73812fd..37c76ce 100644
--- a/src/3rdparty/v8/src/prettyprinter.cc
+++ b/src/3rdparty/v8/src/prettyprinter.cc
@@ -618,9 +618,6 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
EmbeddedVector<char, 256> buf;
int pos = OS::SNPrintF(buf, "%s (mode = %s", info,
Variable::Mode2String(var->mode()));
- if (var->is_qml_global()) {
- pos += OS::SNPrintF(buf + pos, ":QML");
- }
OS::SNPrintF(buf + pos, ")");
PrintLiteralIndented(buf.start(), value, true);
}
diff --git a/src/3rdparty/v8/src/runtime.cc b/src/3rdparty/v8/src/runtime.cc
index 939bc89..f2e23f4 100644
--- a/src/3rdparty/v8/src/runtime.cc
+++ b/src/3rdparty/v8/src/runtime.cc
@@ -106,6 +106,16 @@ namespace internal {
type name = NumberTo##Type(obj);
+// Assert that the given argument has a valid value for a StrictModeFlag
+// and store it in a StrictModeFlag variable with the given name.
+#define CONVERT_STRICT_MODE_ARG(name, index) \
+ ASSERT(args[index]->IsSmi()); \
+ ASSERT(args.smi_at(index) == kStrictMode || \
+ args.smi_at(index) == kNonStrictMode); \
+ StrictModeFlag name = \
+ static_cast<StrictModeFlag>(args.smi_at(index));
+
+
MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
JSObject* boilerplate) {
StackLimitCheck check(isolate);
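
The new CONVERT_STRICT_MODE_ARG macro centralizes an assert-then-cast sequence that several runtime functions below previously spelled out by hand. For example, the CONVERT_STRICT_MODE_ARG(strict_mode, 3) call in the Runtime_StoreContextSlot hunk expands to:

    ASSERT(args[3]->IsSmi());
    ASSERT(args.smi_at(3) == kStrictMode ||
           args.smi_at(3) == kNonStrictMode);
    StrictModeFlag strict_mode =
        static_cast<StrictModeFlag>(args.smi_at(3));
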
@@ -1307,23 +1317,19 @@ static Failure* ThrowRedeclarationError(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
ASSERT(args.length() == 3);
HandleScope scope(isolate);
+ Handle<GlobalObject> global = Handle<GlobalObject>(
+ isolate->context()->global());
Handle<Context> context = args.at<Context>(0);
CONVERT_ARG_CHECKED(FixedArray, pairs, 1);
CONVERT_SMI_ARG_CHECKED(flags, 2);
- Handle<JSObject> js_global = Handle<JSObject>(isolate->context()->global());
- Handle<JSObject> qml_global = Handle<JSObject>(isolate->context()->qml_global());
-
// Traverse the name/value pairs and set the properties.
int length = pairs->length();
- for (int i = 0; i < length; i += 3) {
+ for (int i = 0; i < length; i += 2) {
HandleScope scope(isolate);
Handle<String> name(String::cast(pairs->get(i)));
Handle<Object> value(pairs->get(i + 1), isolate);
- Handle<Smi> is_qml_global(Smi::cast(pairs->get(i + 2)));
-
- Handle<JSObject> global = is_qml_global->value()?qml_global:js_global;
// We have to declare a global const property. To capture we only
// assign to it when evaluating the assignment for "const x =
@@ -1334,7 +1340,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
// Lookup the property in the global object, and don't set the
// value of the variable if the property is already there.
LookupResult lookup(isolate);
- global->Lookup(*name, &lookup, true);
+ global->Lookup(*name, &lookup);
if (lookup.IsProperty()) {
// We found an existing property. Unless it was an interceptor
// that claims the property is absent, skip this declaration.
@@ -1361,7 +1367,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
}
LookupResult lookup(isolate);
- global->LocalLookup(*name, &lookup, true);
+ global->LocalLookup(*name, &lookup);
// Compute the property attributes. According to ECMA-262, section
// 13, page 71, the property must be read-only and
@@ -1402,8 +1408,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
name,
value,
static_cast<PropertyAttributes>(attr),
- strict_mode,
- true));
+ strict_mode));
}
}
@@ -1510,23 +1515,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
NoHandleAllocation nha;
// args[0] == name
// args[1] == strict_mode
- // args[2] == qml_mode
- // args[3] == value (optional)
+ // args[2] == value (optional)
// Determine if we need to assign to the variable if it already
// exists (based on the number of arguments).
- RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
- bool assign = args.length() == 4;
+ RUNTIME_ASSERT(args.length() == 2 || args.length() == 3);
+ bool assign = args.length() == 3;
CONVERT_ARG_CHECKED(String, name, 0);
+ GlobalObject* global = isolate->context()->global();
RUNTIME_ASSERT(args[1]->IsSmi());
- StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(1));
- ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
-
- RUNTIME_ASSERT(args[2]->IsSmi());
- int qml_mode = Smi::cast(args[2])->value();
-
- JSObject* global = qml_mode?isolate->context()->qml_global():isolate->context()->global();
+ CONVERT_STRICT_MODE_ARG(strict_mode, 1);
// According to ECMA-262, section 12.2, page 62, the property must
// not be deletable.
@@ -1544,7 +1543,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
while (object->IsJSObject() &&
JSObject::cast(object)->map()->is_hidden_prototype()) {
JSObject* raw_holder = JSObject::cast(object);
- raw_holder->LocalLookup(*name, &lookup, true);
+ raw_holder->LocalLookup(*name, &lookup);
if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
HandleScope handle_scope(isolate);
Handle<JSObject> holder(raw_holder);
@@ -1555,7 +1554,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
// Found an interceptor that's not read only.
if (assign) {
return raw_holder->SetProperty(
- &lookup, *name, args[3], attributes, strict_mode);
+ &lookup, *name, args[2], attributes, strict_mode);
} else {
return isolate->heap()->undefined_value();
}
@@ -1565,9 +1564,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
}
// Reload global in case the loop above performed a GC.
- global = qml_mode?isolate->context()->qml_global():isolate->context()->global();
+ global = isolate->context()->global();
if (assign) {
- return global->SetProperty(*name, args[3], attributes, strict_mode, true);
+ return global->SetProperty(*name, args[2], attributes, strict_mode);
}
return isolate->heap()->undefined_value();
}
@@ -1577,15 +1576,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// All constants are declared with an initial value. The name
// of the constant is the first argument and the initial value
// is the second.
- RUNTIME_ASSERT(args.length() == 3);
+ RUNTIME_ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, name, 0);
Handle<Object> value = args.at<Object>(1);
- RUNTIME_ASSERT(args[2]->IsSmi());
- int qml_mode = Smi::cast(args[2])->value();
-
// Get the current global object from top.
- JSObject* global = qml_mode?isolate->context()->qml_global():isolate->context()->global();
+ GlobalObject* global = isolate->context()->global();
// According to ECMA-262, section 12.2, page 62, the property must
// not be deletable. Since it's a const, it must be READ_ONLY too.
@@ -1609,7 +1605,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// Restore global object from context (in case of GC) and continue
// with setting the value.
HandleScope handle_scope(isolate);
- Handle<JSObject> global(qml_mode?isolate->context()->qml_global():isolate->context()->global());
+ Handle<GlobalObject> global(isolate->context()->global());
// BUG 1213575: Handle the case where we have to set a read-only
// property through an interceptor and only do it if it's
@@ -4184,6 +4180,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
}
+MaybeObject* TransitionElements(Handle<Object> object,
+ ElementsKind to_kind,
+ Isolate* isolate) {
+ HandleScope scope(isolate);
+ if (!object->IsJSObject()) return isolate->ThrowIllegalOperation();
+ ElementsKind from_kind =
+ Handle<JSObject>::cast(object)->map()->elements_kind();
+ if (Map::IsValidElementsTransition(from_kind, to_kind)) {
+ Handle<Object> result =
+ TransitionElementsKind(Handle<JSObject>::cast(object), to_kind);
+ if (result.is_null()) return isolate->ThrowIllegalOperation();
+ return *result;
+ }
+ return isolate->ThrowIllegalOperation();
+}
+
+
// KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
NoHandleAllocation ha;
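
TransitionElements is hoisted here from its original spot further down in the file (the old copy is deleted in a later hunk) so that Runtime_KeyedGetProperty can reach it. Call sites follow the shape below, as in the out-of-bounds handling added later in this hunk (illustrative excerpt, not new code):

    // Attempt the kind change; an invalid transition or a failed allocation
    // surfaces as an illegal-operation failure the caller must propagate.
    MaybeObject* maybe_object =
        TransitionElements(js_object, FAST_ELEMENTS, isolate);
    if (maybe_object->IsFailure()) return maybe_object;
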
@@ -4200,40 +4213,63 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
//
// Additionally, we need to make sure that we do not cache results
// for objects that require access checks.
- if (args[0]->IsJSObject() &&
- !args[0]->IsJSGlobalProxy() &&
- !args[0]->IsAccessCheckNeeded() &&
- args[1]->IsString()) {
- JSObject* receiver = JSObject::cast(args[0]);
- String* key = String::cast(args[1]);
- if (receiver->HasFastProperties()) {
- // Attempt to use lookup cache.
- Map* receiver_map = receiver->map();
- KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
- int offset = keyed_lookup_cache->Lookup(receiver_map, key);
- if (offset != -1) {
- Object* value = receiver->FastPropertyAt(offset);
- return value->IsTheHole() ? isolate->heap()->undefined_value() : value;
- }
- // Lookup cache miss. Perform lookup and update the cache if appropriate.
- LookupResult result(isolate);
- receiver->LocalLookup(key, &result);
- if (result.IsProperty() && result.type() == FIELD) {
- int offset = result.GetFieldIndex();
- keyed_lookup_cache->Update(receiver_map, key, offset);
- return receiver->FastPropertyAt(offset);
+ if (args[0]->IsJSObject()) {
+ if (!args[0]->IsJSGlobalProxy() &&
+ !args[0]->IsAccessCheckNeeded() &&
+ args[1]->IsString()) {
+ JSObject* receiver = JSObject::cast(args[0]);
+ String* key = String::cast(args[1]);
+ if (receiver->HasFastProperties()) {
+ // Attempt to use lookup cache.
+ Map* receiver_map = receiver->map();
+ KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
+ int offset = keyed_lookup_cache->Lookup(receiver_map, key);
+ if (offset != -1) {
+ Object* value = receiver->FastPropertyAt(offset);
+ return value->IsTheHole()
+ ? isolate->heap()->undefined_value()
+ : value;
+ }
+ // Lookup cache miss. Perform lookup and update the cache if
+ // appropriate.
+ LookupResult result(isolate);
+ receiver->LocalLookup(key, &result);
+ if (result.IsProperty() && result.type() == FIELD) {
+ int offset = result.GetFieldIndex();
+ keyed_lookup_cache->Update(receiver_map, key, offset);
+ return receiver->FastPropertyAt(offset);
+ }
+ } else {
+ // Attempt dictionary lookup.
+ StringDictionary* dictionary = receiver->property_dictionary();
+ int entry = dictionary->FindEntry(key);
+ if ((entry != StringDictionary::kNotFound) &&
+ (dictionary->DetailsAt(entry).type() == NORMAL)) {
+ Object* value = dictionary->ValueAt(entry);
+ if (!receiver->IsGlobalObject()) return value;
+ value = JSGlobalPropertyCell::cast(value)->value();
+ if (!value->IsTheHole()) return value;
+ // If value is the hole do the general lookup.
+ }
}
- } else {
- // Attempt dictionary lookup.
- StringDictionary* dictionary = receiver->property_dictionary();
- int entry = dictionary->FindEntry(key);
- if ((entry != StringDictionary::kNotFound) &&
- (dictionary->DetailsAt(entry).type() == NORMAL)) {
- Object* value = dictionary->ValueAt(entry);
- if (!receiver->IsGlobalObject()) return value;
- value = JSGlobalPropertyCell::cast(value)->value();
- if (!value->IsTheHole()) return value;
- // If value is the hole do the general lookup.
+ } else if (FLAG_smi_only_arrays && args.at<Object>(1)->IsSmi()) {
+ // JSObject without a string key. If the key is a Smi, check for a
+ // definite out-of-bounds access to elements, which is a strong indicator
+ // that subsequent accesses will also call the runtime. Proactively
+ // transition elements to FAST_ELEMENTS to avoid excessive boxing of
+ // doubles for those future calls in the case that the elements would
+ // become FAST_DOUBLE_ELEMENTS.
+ Handle<JSObject> js_object(args.at<JSObject>(0));
+ ElementsKind elements_kind = js_object->GetElementsKind();
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ elements_kind == FAST_DOUBLE_ELEMENTS) {
+ FixedArrayBase* elements = js_object->elements();
+ if (args.at<Smi>(1)->value() >= elements->length()) {
+ MaybeObject* maybe_object = TransitionElements(js_object,
+ FAST_ELEMENTS,
+ isolate);
+ if (maybe_object->IsFailure()) return maybe_object;
+ }
}
}
} else if (args[0]->IsString() && args[1]->IsSmi()) {
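
The new else-branch above treats a definitely out-of-bounds Smi-keyed load on a FAST_SMI_ONLY_ELEMENTS or FAST_DOUBLE_ELEMENTS array as a signal that the site will keep landing in the runtime, and transitions the array to FAST_ELEMENTS up front rather than boxing a fresh HeapNumber on every future miss. The trigger condition, distilled into a standalone predicate (stand-in enum, not V8's ElementsKind):

    #include <cassert>

    enum KindModel { SMI_ONLY, DOUBLES, GENERIC };

    // Only Smi-only and double-backed arrays benefit, and only a definitely
    // out-of-bounds index predicts repeated runtime calls.
    bool ShouldProactivelyTransition(KindModel kind, int index, int length) {
      return (kind == SMI_ONLY || kind == DOUBLES) && index >= length;
    }

    int main() {
      assert(ShouldProactivelyTransition(DOUBLES, 10, 4));   // definite OOB
      assert(!ShouldProactivelyTransition(DOUBLES, 3, 4));   // in bounds
      assert(!ShouldProactivelyTransition(GENERIC, 10, 4));  // already generic
      return 0;
    }
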
@@ -4607,10 +4643,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
StrictModeFlag strict_mode = kNonStrictMode;
if (args.length() == 5) {
- CONVERT_SMI_ARG_CHECKED(strict_unchecked, 4);
- RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
- strict_unchecked == kNonStrictMode);
- strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
+ CONVERT_STRICT_MODE_ARG(strict_mode_flag, 4);
+ strict_mode = strict_mode_flag;
}
return Runtime::SetObjectProperty(isolate,
@@ -4622,23 +4656,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
}
-MaybeObject* TransitionElements(Handle<Object> object,
- ElementsKind to_kind,
- Isolate* isolate) {
- HandleScope scope(isolate);
- if (!object->IsJSObject()) return isolate->ThrowIllegalOperation();
- ElementsKind from_kind =
- Handle<JSObject>::cast(object)->map()->elements_kind();
- if (Map::IsValidElementsTransition(from_kind, to_kind)) {
- Handle<Object> result =
- TransitionElementsKind(Handle<JSObject>::cast(object), to_kind);
- if (result.is_null()) return isolate->ThrowIllegalOperation();
- return *result;
- }
- return isolate->ThrowIllegalOperation();
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
NoHandleAllocation ha;
RUNTIME_ASSERT(args.length() == 1);
@@ -4672,6 +4689,44 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
+ RUNTIME_ASSERT(args.length() == 5);
+ CONVERT_ARG_CHECKED(JSObject, object, 0);
+ CONVERT_SMI_ARG_CHECKED(store_index, 1);
+ Handle<Object> value = args.at<Object>(2);
+ CONVERT_ARG_CHECKED(FixedArray, literals, 3);
+ CONVERT_SMI_ARG_CHECKED(literal_index, 4);
+ HandleScope scope;
+
+ Object* raw_boilerplate_object = literals->get(literal_index);
+ Handle<JSArray> boilerplate_object(JSArray::cast(raw_boilerplate_object));
+#if DEBUG
+ ElementsKind elements_kind = object->GetElementsKind();
+#endif
+ ASSERT(elements_kind <= FAST_DOUBLE_ELEMENTS);
+ // Smis should never trigger transitions.
+ ASSERT(!value->IsSmi());
+
+ if (value->IsNumber()) {
+ ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
+ TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
+ ASSERT(object->GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+ FixedDoubleArray* double_array =
+ FixedDoubleArray::cast(object->elements());
+ HeapNumber* number = HeapNumber::cast(*value);
+ double_array->set(store_index, number->Number());
+ } else {
+ ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ elements_kind == FAST_DOUBLE_ELEMENTS);
+ TransitionElementsKind(object, FAST_ELEMENTS);
+ FixedArray* object_array =
+ FixedArray::cast(object->elements());
+ object_array->set(store_index, *value);
+ }
+ return *object;
+}
+
+
// Set a local property, even if it is READ_ONLY. If the property does not
// exist, it will be added with attributes NONE.
RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
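
Runtime_StoreArrayLiteralElement, added above, is the runtime half of array-literal elements-kind transitions: judging by its ASSERTs, Smi stores never reach it, a heap number widens a Smi-only boilerplate to a double backing store, and any other value forces the generic kind. The decision as a standalone sketch (stand-in kinds, not V8's ElementsKind):

    #include <cassert>

    enum KindModel { SMI_ONLY, DOUBLES, GENERIC };

    // A heap number widens a Smi-only literal to doubles; any other non-Smi
    // value (object, string) forces the generic kind.
    KindModel KindAfterStore(bool value_is_heap_number) {
      return value_is_heap_number ? DOUBLES : GENERIC;
    }

    int main() {
      assert(KindAfterStore(true) == DOUBLES);   // e.g. storing 1.5
      assert(KindAfterStore(false) == GENERIC);  // e.g. storing "x"
      return 0;
    }
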
@@ -7095,29 +7150,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_UserObjectEquals) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(JSObject, lhs, args[1]);
- CONVERT_CHECKED(JSObject, rhs, args[0]);
-
- bool result;
-
- v8::UserObjectComparisonCallback callback = isolate->UserObjectComparisonCallback();
- if (callback) {
- HandleScope scope(isolate);
- Handle<JSObject> lhs_handle(lhs);
- Handle<JSObject> rhs_handle(rhs);
- result = callback(v8::Utils::ToLocal(lhs_handle), v8::Utils::ToLocal(rhs_handle));
- } else {
- result = (lhs == rhs);
- }
-
- return Smi::FromInt(result?0:1);
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
@@ -7526,7 +7558,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
}
-static int MakeDay(int year, int month, int day) {
+static int MakeDay(int year, int month) {
static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
181, 212, 243, 273, 304, 334};
static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
@@ -7563,23 +7595,22 @@ static int MakeDay(int year, int month, int day) {
year1 / 400 -
base_day;
- if (year % 4 || (year % 100 == 0 && year % 400 != 0)) {
- return day_from_year + day_from_month[month] + day - 1;
+ if ((year % 4 != 0) || (year % 100 == 0 && year % 400 != 0)) {
+ return day_from_year + day_from_month[month];
}
- return day_from_year + day_from_month_leap[month] + day - 1;
+ return day_from_year + day_from_month_leap[month];
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
NoHandleAllocation ha;
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(year, 0);
CONVERT_SMI_ARG_CHECKED(month, 1);
- CONVERT_SMI_ARG_CHECKED(date, 2);
- return Smi::FromInt(MakeDay(year, month, date));
+ return Smi::FromInt(MakeDay(year, month));
}
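
MakeDay loses its day argument and now returns the epoch-relative day number of the first of the given month; callers fold the day of month back in as MakeDay(year, month) + day - 1, as the updated ASSERTs below show. A quick sanity check of the new contract (assuming the 1970-01-01 epoch this date code is written against; 1970 is not a leap year):

    #include <cassert>

    // Assumes the MakeDay above is in scope.
    void CheckMakeDayContract() {
      assert(MakeDay(1970, 0) == 0);            // Jan 1, 1970: day zero
      assert(MakeDay(1970, 2) == 31 + 28);      // Mar 1, 1970
      assert(MakeDay(1970, 2) + 15 - 1 == 73);  // Mar 15, 1970, caller-side
    }
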
@@ -7808,7 +7839,7 @@ static inline void DateYMDFromTimeAfter1970(int date,
month = kMonthInYear[date];
day = kDayInYear[date];
- ASSERT(MakeDay(year, month, day) == save_date);
+ ASSERT(MakeDay(year, month) + day - 1 == save_date);
}
@@ -7822,7 +7853,7 @@ static inline void DateYMDFromTimeSlow(int date,
year = 400 * (date / kDaysIn400Years) - kYearsOffset;
date %= kDaysIn400Years;
- ASSERT(MakeDay(year, 0, 1) + date == save_date);
+ ASSERT(MakeDay(year, 0) + date == save_date);
date--;
int yd1 = date / kDaysIn100Years;
@@ -7845,8 +7876,8 @@ static inline void DateYMDFromTimeSlow(int date,
ASSERT(is_leap || (date >= 0));
ASSERT((date < 365) || (is_leap && (date < 366)));
ASSERT(is_leap == ((year % 4 == 0) && (year % 100 || (year % 400 == 0))));
- ASSERT(is_leap || ((MakeDay(year, 0, 1) + date) == save_date));
- ASSERT(!is_leap || ((MakeDay(year, 0, 1) + date + 1) == save_date));
+ ASSERT(is_leap || ((MakeDay(year, 0) + date) == save_date));
+ ASSERT(!is_leap || ((MakeDay(year, 0) + date + 1) == save_date));
if (is_leap) {
day = kDayInYear[2*365 + 1 + date];
@@ -7856,7 +7887,7 @@ static inline void DateYMDFromTimeSlow(int date,
month = kMonthInYear[date];
}
- ASSERT(MakeDay(year, month, day) == save_date);
+ ASSERT(MakeDay(year, month) + day - 1 == save_date);
}
@@ -7882,19 +7913,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateYMDFromTime) {
FixedArrayBase* elms_base = FixedArrayBase::cast(res_array->elements());
RUNTIME_ASSERT(elms_base->length() == 3);
- RUNTIME_ASSERT(res_array->GetElementsKind() <= FAST_DOUBLE_ELEMENTS);
+ RUNTIME_ASSERT(res_array->HasFastTypeElements());
- if (res_array->HasFastDoubleElements()) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(res_array->elements());
- elms->set(0, year);
- elms->set(1, month);
- elms->set(2, day);
- } else {
- FixedArray* elms = FixedArray::cast(res_array->elements());
- elms->set(0, Smi::FromInt(year));
- elms->set(1, Smi::FromInt(month));
- elms->set(2, Smi::FromInt(day));
- }
+ MaybeObject* maybe = res_array->EnsureWritableFastElements();
+ if (maybe->IsFailure()) return maybe;
+ FixedArray* elms = FixedArray::cast(res_array->elements());
+ elms->set(0, Smi::FromInt(year));
+ elms->set(1, Smi::FromInt(month));
+ elms->set(2, Smi::FromInt(day));
return isolate->heap()->undefined_value();
}
@@ -8221,13 +8247,9 @@ static void TrySettingInlineConstructStub(Isolate* isolate,
prototype = Handle<Object>(function->instance_prototype(), isolate);
}
if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
- HandleScope scope(isolate);
ConstructStubCompiler compiler(isolate);
- MaybeObject* code = compiler.CompileConstructStub(*function);
- if (!code->IsFailure()) {
- function->shared()->set_construct_stub(
- Code::cast(code->ToObjectUnchecked()));
- }
+ Handle<Code> code = compiler.CompileConstructStub(function);
+ function->shared()->set_construct_stub(*code);
}
}
@@ -9048,10 +9070,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
Handle<Object> value(args[0], isolate);
CONVERT_ARG_CHECKED(Context, context, 1);
CONVERT_ARG_CHECKED(String, name, 2);
- CONVERT_SMI_ARG_CHECKED(strict_unchecked, 3);
- RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
- strict_unchecked == kNonStrictMode);
- StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
+ CONVERT_STRICT_MODE_ARG(strict_mode, 3);
int index;
PropertyAttributes attributes;
@@ -9421,8 +9440,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
context,
true,
- kNonStrictMode,
- false);
+ kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
@@ -9435,8 +9453,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
static ObjectPair CompileGlobalEval(Isolate* isolate,
Handle<String> source,
Handle<Object> receiver,
- StrictModeFlag strict_mode,
- bool qml_mode) {
+ StrictModeFlag strict_mode) {
Handle<Context> context = Handle<Context>(isolate->context());
Handle<Context> global_context = Handle<Context>(context->global_context());
@@ -9454,8 +9471,7 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
source,
Handle<Context>(isolate->context()),
context->IsGlobalContext(),
- strict_mode,
- qml_mode);
+ strict_mode);
if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
Handle<JSFunction> compiled =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -9465,98 +9481,26 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
- ASSERT(args.length() == 5);
-
- HandleScope scope(isolate);
- Handle<Object> callee = args.at<Object>(0);
- Handle<Object> receiver; // Will be overwritten.
-
- // Compute the calling context.
- Handle<Context> context = Handle<Context>(isolate->context(), isolate);
-#ifdef DEBUG
- // Make sure Isolate::context() agrees with the old code that traversed
- // the stack frames to compute the context.
- StackFrameLocator locator;
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- ASSERT(Context::cast(frame->context()) == *context);
-#endif
-
- // Find where the 'eval' symbol is bound. It is unaliased only if
- // it is bound in the global context.
- int index = -1;
- PropertyAttributes attributes = ABSENT;
- BindingFlags binding_flags;
- while (true) {
- // Don't follow context chains in Context::Lookup and implement the loop
- // up the context chain here, so that we can know the context where eval
- // was found.
- receiver = context->Lookup(isolate->factory()->eval_symbol(),
- FOLLOW_PROTOTYPE_CHAIN,
- &index,
- &attributes,
- &binding_flags);
- // Stop search when eval is found or when the global context is
- // reached.
- if (attributes != ABSENT || context->IsGlobalContext()) break;
- context = Handle<Context>(context->previous(), isolate);
- }
-
- // If eval could not be resolved, it has been deleted and we need to
- // throw a reference error.
- if (attributes == ABSENT) {
- Handle<Object> name = isolate->factory()->eval_symbol();
- Handle<Object> reference_error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- return MakePair(isolate->Throw(*reference_error), NULL);
- }
-
- if (!context->IsGlobalContext()) {
- // 'eval' is not bound in the global context. Just call the function
- // with the given arguments. This is not necessarily the global eval.
- if (receiver->IsContext() || receiver->IsJSContextExtensionObject()) {
- receiver = isolate->factory()->the_hole_value();
- }
- return MakePair(*callee, *receiver);
- }
-
- // 'eval' is bound in the global context, but it may have been overwritten.
- // Compare it to the builtin 'GlobalEval' function to make sure.
- if (*callee != isolate->global_context()->global_eval_fun() ||
- !args[1]->IsString()) {
- return MakePair(*callee, isolate->heap()->the_hole_value());
- }
-
- ASSERT(args[3]->IsSmi());
- ASSERT(args[4]->IsSmi());
- return CompileGlobalEval(isolate,
- args.at<String>(1),
- args.at<Object>(2),
- static_cast<StrictModeFlag>(args.smi_at(3)),
- Smi::cast(args[4])->value());
-}
-
-
-RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEvalNoLookup) {
- ASSERT(args.length() == 5);
+ ASSERT(args.length() == 4);
HandleScope scope(isolate);
Handle<Object> callee = args.at<Object>(0);
- // 'eval' is bound in the global context, but it may have been overwritten.
- // Compare it to the builtin 'GlobalEval' function to make sure.
+ // If "eval" didn't refer to the original GlobalEval, it's not a
+ // direct call to eval.
+ // (And even if it is, but the first argument isn't a string, just let
+ // execution default to an indirect call to eval, which will also return
+ // the first argument without doing anything).
if (*callee != isolate->global_context()->global_eval_fun() ||
!args[1]->IsString()) {
return MakePair(*callee, isolate->heap()->the_hole_value());
}
- ASSERT(args[3]->IsSmi());
- ASSERT(args[4]->IsSmi());
+ CONVERT_STRICT_MODE_ARG(strict_mode, 3);
return CompileGlobalEval(isolate,
args.at<String>(1),
args.at<Object>(2),
- static_cast<StrictModeFlag>(args.smi_at(3)),
- Smi::cast(args[4])->value());
+ strict_mode);
}
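
With the parser now marking every receiver-less eval(...) call as potentially direct (see the parser.cc hunk earlier in this patch), the scope-chain walk that located the 'eval' binding and the separate NoLookup variant both disappear. The single remaining runtime check distills to the predicate below (extracted from the hunk for clarity; not a new function in the patch):

    // Anything that is not the unmodified builtin GlobalEval called with a
    // string first argument falls through to an ordinary indirect call.
    static bool IsDirectCallToGlobalEval(Object* callee, Object* first_arg,
                                         Isolate* isolate) {
      return callee == isolate->global_context()->global_eval_fun() &&
             first_arg->IsString();
    }
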
@@ -10742,6 +10686,18 @@ static const int kFrameDetailsAtReturnIndex = 7;
static const int kFrameDetailsFlagsIndex = 8;
static const int kFrameDetailsFirstDynamicIndex = 9;
+
+static SaveContext* FindSavedContextForFrame(Isolate* isolate,
+ JavaScriptFrame* frame) {
+ SaveContext* save = isolate->save_context();
+ while (save != NULL && !save->IsBelowFrame(frame)) {
+ save = save->prev();
+ }
+ ASSERT(save != NULL);
+ return save;
+}
+
+
// Return an array with frame details
// args[0]: number: break id
// args[1]: number: frame index
@@ -10797,11 +10753,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Traverse the saved contexts chain to find the active context for the
// selected frame.
- SaveContext* save = isolate->save_context();
- while (save != NULL && !save->below(it.frame())) {
- save = save->prev();
- }
- ASSERT(save != NULL);
+ SaveContext* save = FindSavedContextForFrame(isolate, it.frame());
// Get the frame id.
Handle<Object> frame_id(WrapFrameId(it.frame()->id()), isolate);
@@ -11266,22 +11218,14 @@ class ScopeIterator {
// Global code
CompilationInfo info(script);
info.MarkAsGlobal();
- if (shared_info->qml_mode())
- info.MarkAsQmlMode();
- bool result = ParserApi::Parse(&info);
- ASSERT(result);
- result = Scope::Analyze(&info);
- ASSERT(result);
+ CHECK(ParserApi::Parse(&info));
+ CHECK(Scope::Analyze(&info));
scope = info.function()->scope();
} else {
// Function code
CompilationInfo info(shared_info);
- if (shared_info->qml_mode())
- info.MarkAsQmlMode();
- bool result = ParserApi::Parse(&info);
- ASSERT(result);
- result = Scope::Analyze(&info);
- ASSERT(result);
+ CHECK(ParserApi::Parse(&info));
+ CHECK(Scope::Analyze(&info));
scope = info.function()->scope();
}
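
The switch from "bool result = ...; ASSERT(result);" to CHECK(...) is not
just tightening: ASSERT compiles away in release builds, so there the parse
and analyze results were ignored (and the locals drew unused-variable
warnings), while CHECK always evaluates its argument and aborts on failure.
A sketch of the distinction under typical definitions -- V8's real macros
live in checks.h and differ in detail:

    #include <cstdio>
    #include <cstdlib>

    #define CHECK(cond)                                      \
      do {                                                   \
        if (!(cond)) {                                       \
          std::fprintf(stderr, "Check failed: %s\n", #cond); \
          std::abort();                                      \
        }                                                    \
      } while (0)

    #ifdef DEBUG
    #define ASSERT(cond) CHECK(cond)
    #else
    #define ASSERT(cond) ((void)0)  // condition never evaluated in release
    #endif

    static bool Parse() { return true; }  // stand-in for ParserApi::Parse

    int main() {
      CHECK(Parse());   // runs Parse() in every build mode
      ASSERT(Parse());  // in a release build, Parse() is never called here
    }
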
@@ -11299,6 +11243,7 @@ class ScopeIterator {
ScopeType scope_type = Type();
if (scope_type == ScopeTypeGlobal) {
// The global scope is always the last in the chain.
+ ASSERT(context_->IsGlobalContext());
context_ = Handle<Context>();
return;
}
@@ -11322,7 +11267,7 @@ class ScopeIterator {
!scope_info->HasContext());
return ScopeTypeLocal;
case GLOBAL_SCOPE:
- ASSERT(context_->IsGlobalContext() || scope_info->IsQmlMode());
+ ASSERT(context_->IsGlobalContext());
return ScopeTypeGlobal;
case WITH_SCOPE:
ASSERT(context_->IsWithContext());
@@ -11360,12 +11305,10 @@ class ScopeIterator {
switch (Type()) {
case ScopeIterator::ScopeTypeGlobal:
return Handle<JSObject>(CurrentContext()->global());
- case ScopeIterator::ScopeTypeLocal: {
- Handle<SerializedScopeInfo> scope_info = nested_scope_chain_.last();
- ASSERT(nested_scope_chain_.length() == 1);
+ case ScopeIterator::ScopeTypeLocal:
// Materialize the content of the local scope into a JSObject.
+ ASSERT(nested_scope_chain_.length() == 1);
return MaterializeLocalScope(isolate_, frame_, inlined_frame_index_);
- }
case ScopeIterator::ScopeTypeWith:
// Return the with object.
return Handle<JSObject>(JSObject::cast(CurrentContext()->extension()));
@@ -12081,15 +12024,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Handle<JSFunction> function(JSFunction::cast(frame->function()));
Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
ScopeInfo<> sinfo(*scope_info);
- bool qml_mode = function->shared()->qml_mode();
// Traverse the saved contexts chain to find the active context for the
// selected frame.
- SaveContext* save = isolate->save_context();
- while (save != NULL && !save->below(frame)) {
- save = save->prev();
- }
- ASSERT(save != NULL);
+ SaveContext* save = FindSavedContextForFrame(isolate, frame);
+
SaveContext savex(isolate);
isolate->set_context(*(save->context()));
@@ -12155,8 +12094,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Compiler::CompileEval(function_source,
context,
context->IsGlobalContext(),
- kNonStrictMode,
- qml_mode);
+ kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context);
@@ -12166,8 +12104,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Handle<Object> receiver(frame->receiver(), isolate);
Handle<Object> evaluation_function =
Execution::Call(compiled_function, receiver, 0, NULL,
- &has_pending_exception, false,
- Handle<Object>(function->context()->qml_global()));
+ &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
Handle<Object> arguments = GetArgumentsObject(isolate,
@@ -12248,7 +12185,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
// Currently, the eval code will be executed in non-strict mode,
// even in the strict code context.
Handle<SharedFunctionInfo> shared =
- Compiler::CompileEval(source, context, is_global, kNonStrictMode, false);
+ Compiler::CompileEval(source, context, is_global, kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
Handle<JSFunction>(
diff --git a/src/3rdparty/v8/src/runtime.h b/src/3rdparty/v8/src/runtime.h
index cd36da9..c411b30 100644
--- a/src/3rdparty/v8/src/runtime.h
+++ b/src/3rdparty/v8/src/runtime.h
@@ -98,6 +98,7 @@ namespace internal {
F(SetNewFunctionAttributes, 1, 1) \
F(AllocateInNewSpace, 1, 1) \
F(SetNativeFlag, 1, 1) \
+ F(StoreArrayLiteralElement, 5, 1) \
\
/* Array join support */ \
F(PushIfAbsent, 2, 1) \
@@ -157,7 +158,6 @@ namespace internal {
/* Comparisons */ \
F(NumberEquals, 2, 1) \
F(StringEquals, 2, 1) \
- F(UserObjectEquals, 2, 1) \
\
F(NumberCompare, 3, 1) \
F(SmiLexicographicCompare, 2, 1) \
@@ -247,7 +247,7 @@ namespace internal {
F(DateLocalTimezone, 1, 1) \
F(DateLocalTimeOffset, 0, 1) \
F(DateDaylightSavingsOffset, 1, 1) \
- F(DateMakeDay, 3, 1) \
+ F(DateMakeDay, 2, 1) \
F(DateYMDFromTime, 2, 1) \
\
/* Numbers */ \
@@ -258,8 +258,7 @@ namespace internal {
\
/* Eval */ \
F(GlobalReceiver, 1, 1) \
- F(ResolvePossiblyDirectEval, 5, 2) \
- F(ResolvePossiblyDirectEvalNoLookup, 5, 2) \
+ F(ResolvePossiblyDirectEval, 4, 2) \
\
F(SetProperty, -1 /* 4 or 5 */, 1) \
F(DefineOrRedefineDataProperty, 4, 1) \
@@ -337,8 +336,8 @@ namespace internal {
/* Declarations and initialization */ \
F(DeclareGlobals, 3, 1) \
F(DeclareContextSlot, 4, 1) \
- F(InitializeVarGlobal, -1 /* 3 or 4 */, 1) \
- F(InitializeConstGlobal, 3, 1) \
+ F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \
+ F(InitializeConstGlobal, 2, 1) \
F(InitializeConstContextSlot, 3, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
\
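
Reading the table above: each F(Name, nargs, nresults) row declares one
runtime function; nargs of -1 marks variable arity (hence the /* 2 or 3 */
comments), and nresults of 2 marks functions such as ResolvePossiblyDirectEval
that return an ObjectPair (two values) rather than a single object. The list
is an X-macro, expanded repeatedly to generate ids, declarations and the
dispatch table. A minimal sketch of the technique with made-up entries:

    #include <cstdio>

    // Toy version of runtime.h's F(...) list.
    #define RUNTIME_LIST(F) \
      F(NumberEquals, 2, 1) \
      F(StringEquals, 2, 1) \
      F(SetProperty, -1 /* 4 or 5 */, 1)

    // Expansion 1: an enum of function ids.
    enum RuntimeId {
    #define DECLARE_ID(name, nargs, nresults) k##name,
      RUNTIME_LIST(DECLARE_ID)
    #undef DECLARE_ID
      kNumFunctions
    };

    // Expansion 2: a metadata table, kept in sync automatically.
    struct Entry { const char* name; int nargs; int nresults; };
    static const Entry kEntries[] = {
    #define DECLARE_ENTRY(name, nargs, nresults) { #name, nargs, nresults },
      RUNTIME_LIST(DECLARE_ENTRY)
    #undef DECLARE_ENTRY
    };

    int main() {
      for (const Entry& e : kEntries)
        std::printf("%s args=%d results=%d\n", e.name, e.nargs, e.nresults);
    }
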
diff --git a/src/3rdparty/v8/src/scanner.cc b/src/3rdparty/v8/src/scanner.cc
index 95748f2..01fe81c 100644..100755
--- a/src/3rdparty/v8/src/scanner.cc
+++ b/src/3rdparty/v8/src/scanner.cc
@@ -36,30 +36,26 @@ namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
-// Scanner::LiteralScope
-
-Scanner::LiteralScope::LiteralScope(Scanner* self)
- : scanner_(self), complete_(false) {
- self->StartLiteral();
-}
-
+// Scanner
-Scanner::LiteralScope::~LiteralScope() {
- if (!complete_) scanner_->DropLiteral();
-}
+Scanner::Scanner(UnicodeCache* unicode_cache)
+ : unicode_cache_(unicode_cache),
+ octal_pos_(Location::invalid()),
+ harmony_scoping_(false) { }
-void Scanner::LiteralScope::Complete() {
- scanner_->TerminateLiteral();
- complete_ = true;
+void Scanner::Initialize(UC16CharacterStream* source) {
+ source_ = source;
+ // Need to capture identifiers in order to recognize "get" and "set"
+ // in object literals.
+ Init();
+ // Skip initial whitespace, allowing HTML comment ends just as after
+ // a newline, and scan the first token.
+ has_line_terminator_before_next_ = true;
+ SkipWhiteSpace();
+ Scan();
}
-// ----------------------------------------------------------------------------
-// Scanner
-
-Scanner::Scanner(UnicodeCache* unicode_cache)
- : unicode_cache_(unicode_cache) { }
-
uc32 Scanner::ScanHexNumber(int expected_length) {
ASSERT(expected_length <= 4); // prevent overflow
@@ -88,29 +84,6 @@ uc32 Scanner::ScanHexNumber(int expected_length) {
}
-
-// ----------------------------------------------------------------------------
-// JavaScriptScanner
-
-JavaScriptScanner::JavaScriptScanner(UnicodeCache* scanner_contants)
- : Scanner(scanner_contants),
- octal_pos_(Location::invalid()),
- harmony_scoping_(false) { }
-
-
-void JavaScriptScanner::Initialize(UC16CharacterStream* source) {
- source_ = source;
- // Need to capture identifiers in order to recognize "get" and "set"
- // in object literals.
- Init();
- // Skip initial whitespace allowing HTML comment ends just like
- // after a newline and scan first token.
- has_line_terminator_before_next_ = true;
- SkipWhiteSpace();
- Scan();
-}
-
-
// Ensure that tokens can be stored in a byte.
STATIC_ASSERT(Token::NUM_TOKENS <= 0x100);
@@ -247,7 +220,7 @@ static const byte one_char_tokens[] = {
};
-Token::Value JavaScriptScanner::Next() {
+Token::Value Scanner::Next() {
current_ = next_;
has_line_terminator_before_next_ = false;
has_multiline_comment_before_next_ = false;
@@ -279,7 +252,7 @@ static inline bool IsByteOrderMark(uc32 c) {
}
-bool JavaScriptScanner::SkipWhiteSpace() {
+bool Scanner::SkipWhiteSpace() {
int start_position = source_pos();
while (true) {
@@ -319,7 +292,7 @@ bool JavaScriptScanner::SkipWhiteSpace() {
}
-Token::Value JavaScriptScanner::SkipSingleLineComment() {
+Token::Value Scanner::SkipSingleLineComment() {
Advance();
// The line terminator at the end of the line is not considered
@@ -335,7 +308,7 @@ Token::Value JavaScriptScanner::SkipSingleLineComment() {
}
-Token::Value JavaScriptScanner::SkipMultiLineComment() {
+Token::Value Scanner::SkipMultiLineComment() {
ASSERT(c0_ == '*');
Advance();
@@ -361,7 +334,7 @@ Token::Value JavaScriptScanner::SkipMultiLineComment() {
}
-Token::Value JavaScriptScanner::ScanHtmlComment() {
+Token::Value Scanner::ScanHtmlComment() {
// Check for <!-- comments.
ASSERT(c0_ == '!');
Advance();
@@ -376,7 +349,7 @@ Token::Value JavaScriptScanner::ScanHtmlComment() {
}
-void JavaScriptScanner::Scan() {
+void Scanner::Scan() {
next_.literal_chars = NULL;
Token::Value token;
do {
@@ -616,7 +589,7 @@ void JavaScriptScanner::Scan() {
}
-void JavaScriptScanner::SeekForward(int pos) {
+void Scanner::SeekForward(int pos) {
// After this call, we will have the token at the given position as
// the "next" token. The "current" token will be invalid.
if (pos == next_.location.beg_pos) return;
@@ -637,7 +610,7 @@ void JavaScriptScanner::SeekForward(int pos) {
}
-void JavaScriptScanner::ScanEscape() {
+void Scanner::ScanEscape() {
uc32 c = c0_;
Advance();
@@ -689,7 +662,7 @@ void JavaScriptScanner::ScanEscape() {
// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
// ECMA-262. Other JS VMs support them.
-uc32 JavaScriptScanner::ScanOctalEscape(uc32 c, int length) {
+uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
uc32 x = c - '0';
int i = 0;
for (; i < length; i++) {
@@ -712,7 +685,7 @@ uc32 JavaScriptScanner::ScanOctalEscape(uc32 c, int length) {
}
-Token::Value JavaScriptScanner::ScanString() {
+Token::Value Scanner::ScanString() {
uc32 quote = c0_;
Advance(); // consume quote
@@ -736,13 +709,13 @@ Token::Value JavaScriptScanner::ScanString() {
}
-void JavaScriptScanner::ScanDecimalDigits() {
+void Scanner::ScanDecimalDigits() {
while (IsDecimalDigit(c0_))
AddLiteralCharAdvance();
}
-Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
+Token::Value Scanner::ScanNumber(bool seen_period) {
ASSERT(IsDecimalDigit(c0_)); // the first digit of the number or the fraction
enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
@@ -827,7 +800,7 @@ Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
}
-uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() {
+uc32 Scanner::ScanIdentifierUnicodeEscape() {
Advance();
if (c0_ != 'u') return -1;
Advance();
@@ -944,7 +917,7 @@ static Token::Value KeywordOrIdentifierToken(const char* input,
}
-Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
+Token::Value Scanner::ScanIdentifierOrKeyword() {
ASSERT(unicode_cache_->IsIdentifierStart(c0_));
LiteralScope literal(this);
// Scan identifier start character.
@@ -989,7 +962,7 @@ Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
}
-Token::Value JavaScriptScanner::ScanIdentifierSuffix(LiteralScope* literal) {
+Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal) {
// Scan the rest of the identifier characters.
while (unicode_cache_->IsIdentifierPart(c0_)) {
if (c0_ == '\\') {
@@ -1012,7 +985,7 @@ Token::Value JavaScriptScanner::ScanIdentifierSuffix(LiteralScope* literal) {
}
-bool JavaScriptScanner::ScanRegExpPattern(bool seen_equal) {
+bool Scanner::ScanRegExpPattern(bool seen_equal) {
// Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
bool in_character_class = false;
@@ -1059,7 +1032,7 @@ bool JavaScriptScanner::ScanRegExpPattern(bool seen_equal) {
}
-bool JavaScriptScanner::ScanLiteralUnicodeEscape() {
+bool Scanner::ScanLiteralUnicodeEscape() {
ASSERT(c0_ == '\\');
uc32 chars_read[6] = {'\\', 'u', 0, 0, 0, 0};
Advance();
@@ -1089,7 +1062,7 @@ bool JavaScriptScanner::ScanLiteralUnicodeEscape() {
}
-bool JavaScriptScanner::ScanRegExpFlags() {
+bool Scanner::ScanRegExpFlags() {
// Scan regular expression flags.
LiteralScope literal(this);
while (unicode_cache_->IsIdentifierPart(c0_)) {
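
The renames above fold JavaScriptScanner into Scanner without changing the
scanning model: the scanner holds two token descriptors, current_ and next_;
Next() promotes next_ to current_ and scans one more token, which is what
gives the parser its one-token lookahead via peek(), and Initialize() primes
the pipeline by scanning the first token into next_. A reduced sketch of the
two-slot scheme -- TwoSlotScanner is an illustration only, with character
"tokens" instead of real token scanning:

    #include <cctype>
    #include <cstddef>
    #include <cstdio>
    #include <string>

    class TwoSlotScanner {
     public:
      explicit TwoSlotScanner(std::string source) : source_(std::move(source)) {
        Scan();  // prime next_ so peek() works before the first Next()
      }
      char Next() {  // promote the lookahead, then refill it
        current_ = next_;
        Scan();
        return current_;
      }
      char peek() const { return next_; }

     private:
      void Scan() {  // "scan one token": here, the next non-space character
        while (pos_ < source_.size() &&
               std::isspace(static_cast<unsigned char>(source_[pos_]))) {
          ++pos_;
        }
        next_ = pos_ < source_.size() ? source_[pos_++] : '\0';
      }
      std::string source_;
      std::size_t pos_ = 0;
      char current_ = '\0';
      char next_ = '\0';
    };

    int main() {
      TwoSlotScanner s("a b c");
      char p1 = s.peek();  // 'a' -- already scanned by the constructor
      char c1 = s.Next();  // 'a' -- promoted from next_
      char p2 = s.peek();  // 'b' -- the freshly scanned lookahead
      std::printf("%c %c %c\n", p1, c1, p2);
    }
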
diff --git a/src/3rdparty/v8/src/scanner.h b/src/3rdparty/v8/src/scanner.h
index a2e64a9..88e3bce 100644
--- a/src/3rdparty/v8/src/scanner.h
+++ b/src/3rdparty/v8/src/scanner.h
@@ -169,7 +169,7 @@ class LiteralBuffer {
}
}
- inline void AddChar(uc16 character) {
+ INLINE(void AddChar(uc16 character)) {
if (position_ >= backing_store_.length()) ExpandBuffer();
if (is_ascii_) {
if (character < kMaxAsciiCharCodeU) {
@@ -260,35 +260,32 @@ class LiteralBuffer {
// ----------------------------------------------------------------------------
-// Scanner base-class.
+// JavaScript Scanner.
-// Generic functionality used by both JSON and JavaScript scanners.
class Scanner {
public:
- // -1 is outside of the range of any real source code.
- static const int kNoOctalLocation = -1;
-
- typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
-
+ // Scoped helper for literal recording. Automatically drops the literal
+ // if scanning is aborted before the literal is complete.
class LiteralScope {
public:
- explicit LiteralScope(Scanner* self);
- ~LiteralScope();
- void Complete();
+ explicit LiteralScope(Scanner* self)
+ : scanner_(self), complete_(false) {
+ scanner_->StartLiteral();
+ }
+ ~LiteralScope() {
+ if (!complete_) scanner_->DropLiteral();
+ }
+ void Complete() {
+ scanner_->TerminateLiteral();
+ complete_ = true;
+ }
private:
Scanner* scanner_;
bool complete_;
};
- explicit Scanner(UnicodeCache* scanner_contants);
-
- // Returns the current token again.
- Token::Value current_token() { return current_.token; }
-
- // One token look-ahead (past the token returned by Next()).
- Token::Value peek() const { return next_.token; }
-
+ // Representation of an interval of source positions.
struct Location {
Location(int b, int e) : beg_pos(b), end_pos(e) { }
Location() : beg_pos(0), end_pos(0) { }
@@ -303,21 +300,28 @@ class Scanner {
int end_pos;
};
+ // -1 is outside of the range of any real source code.
+ static const int kNoOctalLocation = -1;
+
+ typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
+
+ explicit Scanner(UnicodeCache* scanner_constants);
+
+ void Initialize(UC16CharacterStream* source);
+
+ // Returns the next token and advances input.
+ Token::Value Next();
+ // Returns the current token again.
+ Token::Value current_token() { return current_.token; }
// Returns the location information for the current token
- // (the token returned by Next()).
+ // (the token last returned by Next()).
Location location() const { return current_.location; }
- Location peek_location() const { return next_.location; }
-
// Returns the literal string, if any, for the current token (the
- // token returned by Next()). The string is 0-terminated and in
- // UTF-8 format; they may contain 0-characters. Literal strings are
- // collected for identifiers, strings, and numbers.
+ // token last returned by Next()). The string is 0-terminated.
+ // Literal strings are collected for identifiers, strings, and
+ // numbers.
// These functions only give the correct result if the literal
// was scanned between calls to StartLiteral() and TerminateLiteral().
- bool is_literal_ascii() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->is_ascii();
- }
Vector<const char> literal_ascii_string() {
ASSERT_NOT_NULL(current_.literal_chars);
return current_.literal_chars->ascii_literal();
@@ -326,6 +330,10 @@ class Scanner {
ASSERT_NOT_NULL(current_.literal_chars);
return current_.literal_chars->uc16_literal();
}
+ bool is_literal_ascii() {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->is_ascii();
+ }
int literal_length() const {
ASSERT_NOT_NULL(current_.literal_chars);
return current_.literal_chars->length();
@@ -341,12 +349,15 @@ class Scanner {
return current_.literal_chars->length() != source_length;
}
+ // Similar functions for the upcoming token.
+
+ // One token look-ahead (past the token returned by Next()).
+ Token::Value peek() const { return next_.token; }
+
+ Location peek_location() const { return next_.location; }
+
// Returns the literal string for the next token (the token that
// would be returned if Next() were called).
- bool is_next_literal_ascii() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->is_ascii();
- }
Vector<const char> next_literal_ascii_string() {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->ascii_literal();
@@ -355,6 +366,10 @@ class Scanner {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->uc16_literal();
}
+ bool is_next_literal_ascii() {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->is_ascii();
+ }
int next_literal_length() const {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->length();
@@ -364,7 +379,46 @@ class Scanner {
static const int kCharacterLookaheadBufferSize = 1;
- protected:
+ // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
+ uc32 ScanOctalEscape(uc32 c, int length);
+
+ // Returns the location of the last seen octal literal.
+ Location octal_position() const { return octal_pos_; }
+ void clear_octal_position() { octal_pos_ = Location::invalid(); }
+
+ // Seek forward to the given position. This operation does not
+ // work in general, for instance when there are pushed back
+ // characters, but works for seeking forward until simple delimiter
+ // tokens, which is what it is used for.
+ void SeekForward(int pos);
+
+ bool HarmonyScoping() const {
+ return harmony_scoping_;
+ }
+ void SetHarmonyScoping(bool block_scoping) {
+ harmony_scoping_ = block_scoping;
+ }
+
+
+ // Returns true if there was a line terminator before the peeked token,
+ // possibly inside a multi-line comment.
+ bool HasAnyLineTerminatorBeforeNext() const {
+ return has_line_terminator_before_next_ ||
+ has_multiline_comment_before_next_;
+ }
+
+ // Scans the input as a regular expression pattern; the previous
+ // character(s) must be /(=). Returns true if a pattern is scanned.
+ bool ScanRegExpPattern(bool seen_equal);
+ // Returns true if regexp flags are scanned (always since flags can
+ // be empty).
+ bool ScanRegExpFlags();
+
+ // Tells whether the buffer contains an identifier (no escapes).
+ // Used for checking if a property name is an identifier.
+ static bool IsIdentifier(unibrow::CharacterStream* buffer);
+
+ private:
// The current and look-ahead token.
struct TokenDesc {
Token::Value token;
@@ -389,7 +443,7 @@ class Scanner {
next_.literal_chars = free_buffer;
}
- inline void AddLiteralChar(uc32 c) {
+ INLINE(void AddLiteralChar(uc32 c)) {
ASSERT_NOT_NULL(next_.literal_chars);
next_.literal_chars->AddChar(c);
}
@@ -434,107 +488,14 @@ class Scanner {
uc32 ScanHexNumber(int expected_length);
- // Return the current source position.
- int source_pos() {
- return source_->pos() - kCharacterLookaheadBufferSize;
- }
-
- UnicodeCache* unicode_cache_;
-
- // Buffers collecting literal strings, numbers, etc.
- LiteralBuffer literal_buffer1_;
- LiteralBuffer literal_buffer2_;
-
- TokenDesc current_; // desc for current token (as returned by Next())
- TokenDesc next_; // desc for next token (one token look-ahead)
-
- // Input stream. Must be initialized to an UC16CharacterStream.
- UC16CharacterStream* source_;
-
- // One Unicode character look-ahead; c0_ < 0 at the end of the input.
- uc32 c0_;
-};
-
-// ----------------------------------------------------------------------------
-// JavaScriptScanner - base logic for JavaScript scanning.
-
-class JavaScriptScanner : public Scanner {
- public:
- // A LiteralScope that disables recording of some types of JavaScript
- // literals. If the scanner is configured to not record the specific
- // type of literal, the scope will not call StartLiteral.
- class LiteralScope {
- public:
- explicit LiteralScope(JavaScriptScanner* self)
- : scanner_(self), complete_(false) {
- scanner_->StartLiteral();
- }
- ~LiteralScope() {
- if (!complete_) scanner_->DropLiteral();
- }
- void Complete() {
- scanner_->TerminateLiteral();
- complete_ = true;
- }
-
- private:
- JavaScriptScanner* scanner_;
- bool complete_;
- };
-
- explicit JavaScriptScanner(UnicodeCache* scanner_contants);
-
- void Initialize(UC16CharacterStream* source);
-
- // Returns the next token.
- Token::Value Next();
-
- // Returns true if there was a line terminator before the peek'ed token,
- // possibly inside a multi-line comment.
- bool HasAnyLineTerminatorBeforeNext() const {
- return has_line_terminator_before_next_ ||
- has_multiline_comment_before_next_;
- }
-
- // Scans the input as a regular expression pattern, previous
- // character(s) must be /(=). Returns true if a pattern is scanned.
- bool ScanRegExpPattern(bool seen_equal);
- // Returns true if regexp flags are scanned (always since flags can
- // be empty).
- bool ScanRegExpFlags();
-
- // Tells whether the buffer contains an identifier (no escapes).
- // Used for checking if a property name is an identifier.
- static bool IsIdentifier(unibrow::CharacterStream* buffer);
-
- // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
- uc32 ScanOctalEscape(uc32 c, int length);
-
- // Returns the location of the last seen octal literal
- Location octal_position() const { return octal_pos_; }
- void clear_octal_position() { octal_pos_ = Location::invalid(); }
-
- // Seek forward to the given position. This operation does not
- // work in general, for instance when there are pushed back
- // characters, but works for seeking forward until simple delimiter
- // tokens, which is what it is used for.
- void SeekForward(int pos);
-
- bool HarmonyScoping() const {
- return harmony_scoping_;
- }
- void SetHarmonyScoping(bool block_scoping) {
- harmony_scoping_ = block_scoping;
- }
-
+ // Scans a single JavaScript token.
+ void Scan();
- protected:
bool SkipWhiteSpace();
Token::Value SkipSingleLineComment();
Token::Value SkipMultiLineComment();
-
- // Scans a single JavaScript token.
- void Scan();
+ // Scans a possible HTML comment -- begins with '<!'.
+ Token::Value ScanHtmlComment();
void ScanDecimalDigits();
Token::Value ScanNumber(bool seen_period);
@@ -544,9 +505,6 @@ class JavaScriptScanner : public Scanner {
void ScanEscape();
Token::Value ScanString();
- // Scans a possible HTML comment -- begins with '<!'.
- Token::Value ScanHtmlComment();
-
// Decodes a unicode escape-sequence which is part of an identifier.
// If the escape sequence cannot be decoded the result is kBadChar.
uc32 ScanIdentifierUnicodeEscape();
@@ -555,9 +513,30 @@ class JavaScriptScanner : public Scanner {
// flags.
bool ScanLiteralUnicodeEscape();
+ // Return the current source position.
+ int source_pos() {
+ return source_->pos() - kCharacterLookaheadBufferSize;
+ }
+
+ UnicodeCache* unicode_cache_;
+
+ // Buffers collecting literal strings, numbers, etc.
+ LiteralBuffer literal_buffer1_;
+ LiteralBuffer literal_buffer2_;
+
+ TokenDesc current_; // desc for current token (as returned by Next())
+ TokenDesc next_; // desc for next token (one token look-ahead)
+
+ // Input stream. Must be initialized to an UC16CharacterStream.
+ UC16CharacterStream* source_;
+
+
// Start position of the octal literal last scanned.
Location octal_pos_;
+ // One Unicode character look-ahead; c0_ < 0 at the end of the input.
+ uc32 c0_;
+
// Whether there is a line terminator whitespace character after
// the current token, and before the next. Does not count newlines
// inside multiline comments.
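
With the LiteralScope bodies moved inline above, the commit/rollback idiom is
visible at a glance: the constructor starts literal recording, Complete()
commits, and the destructor drops a half-recorded literal whenever a scan
routine bails out early. The same RAII pattern in isolation -- Recorder is a
stand-in for the scanner's LiteralBuffer machinery, not V8 code:

    #include <cstdio>
    #include <string>

    class Recorder {
     public:
      void StartLiteral() { buf_.clear(); }
      void AddChar(char c) { buf_ += c; }
      void TerminateLiteral() { committed_ = buf_; }
      void DropLiteral() { buf_.clear(); }
      const std::string& committed() const { return committed_; }

      // Mirrors Scanner::LiteralScope: commit explicitly, roll back by default.
      class LiteralScope {
       public:
        explicit LiteralScope(Recorder* r) : recorder_(r) { r->StartLiteral(); }
        ~LiteralScope() {
          if (!complete_) recorder_->DropLiteral();  // early exit: discard
        }
        void Complete() {
          recorder_->TerminateLiteral();
          complete_ = true;
        }
       private:
        Recorder* recorder_;
        bool complete_ = false;
      };

     private:
      std::string buf_;
      std::string committed_;
    };

    int main() {
      Recorder r;
      {
        Recorder::LiteralScope scope(&r);
        r.AddChar('i');
        r.AddChar('f');
        scope.Complete();  // the scan finished normally: keep the literal
      }
      std::printf("committed: %s\n", r.committed().c_str());
    }
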
diff --git a/src/3rdparty/v8/src/scopeinfo.cc b/src/3rdparty/v8/src/scopeinfo.cc
index a61b787..8ea5f1e 100644
--- a/src/3rdparty/v8/src/scopeinfo.cc
+++ b/src/3rdparty/v8/src/scopeinfo.cc
@@ -51,7 +51,6 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
: function_name_(FACTORY->empty_symbol()),
calls_eval_(scope->calls_eval()),
is_strict_mode_(scope->is_strict_mode()),
- is_qml_mode_(scope->is_qml_mode()),
type_(scope->type()),
parameters_(scope->num_parameters()),
stack_slots_(scope->num_stack_slots()),
@@ -154,8 +153,6 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
//
// - is strict mode scope
//
-// - is qml mode scope
-//
// - scope type
//
// - number of variables in the context object (smi) (= function context
@@ -255,7 +252,6 @@ ScopeInfo<Allocator>::ScopeInfo(SerializedScopeInfo* data)
p = ReadObject(p, &function_name_);
p = ReadBool(p, &calls_eval_);
p = ReadBool(p, &is_strict_mode_);
- p = ReadBool(p, &is_qml_mode_);
p = ReadInt(p, &type_);
p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
p = ReadList<Allocator>(p, &parameters_);
@@ -311,9 +307,9 @@ static Object** WriteList(Object** p,
template<class Allocator>
Handle<SerializedScopeInfo> ScopeInfo<Allocator>::Serialize() {
- // function name, calls eval, is_strict_mode, is_qml_mode, scope type,
+ // function name, calls eval, is_strict_mode, scope type,
// length for 3 tables:
- const int extra_slots = 1 + 1 + 1 + 1 + 1 + 3;
+ const int extra_slots = 1 + 1 + 1 + 1 + 3;
int length = extra_slots +
context_slots_.length() * 2 +
parameters_.length() +
@@ -328,7 +324,6 @@ Handle<SerializedScopeInfo> ScopeInfo<Allocator>::Serialize() {
p = WriteObject(p, function_name_);
p = WriteBool(p, calls_eval_);
p = WriteBool(p, is_strict_mode_);
- p = WriteBool(p, is_qml_mode_);
p = WriteInt(p, type_);
p = WriteList(p, &context_slots_, &context_modes_);
p = WriteList(p, &parameters_);
@@ -377,8 +372,8 @@ SerializedScopeInfo* SerializedScopeInfo::Empty() {
Object** SerializedScopeInfo::ContextEntriesAddr() {
ASSERT(length() > 0);
- // +5 for function name, calls eval, strict mode, qml mode, scope type.
- return data_start() + 5;
+ // +4 for function name, calls eval, strict mode, scope type.
+ return data_start() + 4;
}
@@ -422,21 +417,10 @@ bool SerializedScopeInfo::IsStrictMode() {
}
-bool SerializedScopeInfo::IsQmlMode() {
- if (length() > 0) {
- Object** p = data_start() + 3; // +3 for function name, calls eval, strict mode.
- bool qml_mode;
- p = ReadBool(p, &qml_mode);
- return qml_mode;
- }
- return false;
-}
-
-
ScopeType SerializedScopeInfo::Type() {
ASSERT(length() > 0);
- // +4 for function name, calls eval, strict mode, qml mode.
- Object** p = data_start() + 4;
+ // +3 for function name, calls eval, strict mode.
+ Object** p = data_start() + 3;
ScopeType type;
p = ReadInt(p, &type);
return type;
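
With the qml-mode flag gone, the serialized scope info is back to a four-slot
fixed header (function name, calls-eval flag, strict-mode flag, scope type)
followed by the variable-length tables, which is why every hard-coded offset
above shrinks by one (data_start() + 5 becomes + 4, and so on). A toy sketch
of reading such a header-then-lists layout -- plain ints stand in for V8's
tagged Object* slots, and only one table is modeled:

    #include <cstdio>
    #include <vector>

    // Layout: [calls_eval][strict_mode][type][n][n items ...]
    struct ScopeBlob {
      std::vector<int> data;

      // Fixed header offsets. Removing a header field, as this patch does
      // with qml mode, shifts every offset behind it by one.
      static constexpr int kCallsEval = 0;
      static constexpr int kStrictMode = 1;
      static constexpr int kType = 2;
      static constexpr int kListLength = 3;
      static constexpr int kListStart = 4;

      bool calls_eval() const { return data[kCallsEval] != 0; }
      bool is_strict() const { return data[kStrictMode] != 0; }
      int type() const { return data[kType]; }
      int list_length() const { return data[kListLength]; }
      int list_at(int i) const { return data[kListStart + i]; }
    };

    int main() {
      ScopeBlob blob{{1, 0, 3, 2, 42, 43}};
      std::printf("eval=%d strict=%d type=%d len=%d first=%d\n",
                  blob.calls_eval(), blob.is_strict(), blob.type(),
                  blob.list_length(), blob.list_at(0));
    }
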
diff --git a/src/3rdparty/v8/src/scopeinfo.h b/src/3rdparty/v8/src/scopeinfo.h
index 2ca4503..eeb3047 100644
--- a/src/3rdparty/v8/src/scopeinfo.h
+++ b/src/3rdparty/v8/src/scopeinfo.h
@@ -88,7 +88,6 @@ class ScopeInfo BASE_EMBEDDED {
Handle<String> function_name_;
bool calls_eval_;
bool is_strict_mode_;
- bool is_qml_mode_;
ScopeType type_;
List<Handle<String>, Allocator > parameters_;
List<Handle<String>, Allocator > stack_slots_;
diff --git a/src/3rdparty/v8/src/scopes.cc b/src/3rdparty/v8/src/scopes.cc
index 6503d07..a199da6 100644
--- a/src/3rdparty/v8/src/scopes.cc
+++ b/src/3rdparty/v8/src/scopes.cc
@@ -35,8 +35,6 @@
#include "allocation-inl.h"
-#include "debug.h"
-
namespace v8 {
namespace internal {
@@ -78,9 +76,6 @@ static bool Match(void* key1, void* key2) {
}
-// Dummy constructor
-VariableMap::VariableMap(bool gotta_love_static_overloading) : HashMap() {}
-
VariableMap::VariableMap() : HashMap(Match, &LocalsMapAllocator, 8) {}
VariableMap::~VariableMap() {}
@@ -114,21 +109,6 @@ Variable* VariableMap::Lookup(Handle<String> name) {
// ----------------------------------------------------------------------------
// Implementation of Scope
-
-// Dummy constructor
-Scope::Scope(ScopeType type)
- : isolate_(Isolate::Current()),
- inner_scopes_(0),
- variables_(false),
- temps_(0),
- params_(0),
- unresolved_(0),
- decls_(0),
- already_resolved_(false) {
- SetDefaults(type, NULL, Handle<SerializedScopeInfo>::null());
-}
-
-
Scope::Scope(Scope* outer_scope, ScopeType type)
: isolate_(Isolate::Current()),
inner_scopes_(4),
@@ -204,7 +184,6 @@ void Scope::SetDefaults(ScopeType type,
// Inherit the strict mode from the parent scope.
strict_mode_flag_ = (outer_scope != NULL)
? outer_scope->strict_mode_flag_ : kNonStrictMode;
- qml_mode_ = (outer_scope != NULL) && outer_scope->qml_mode_;
outer_scope_calls_non_strict_eval_ = false;
inner_scope_calls_eval_ = false;
force_eager_compilation_ = false;
@@ -910,26 +889,6 @@ void Scope::ResolveVariable(Scope* global_scope,
// by 'eval' introduced variable bindings.
if (var->is_global()) {
var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
-
- if (qml_mode_) {
- Handle<GlobalObject> global = isolate_->global();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
- //Get the context before the debugger was entered.
- SaveContext *save = isolate_->save_context();
- while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
- save = save->prev();
-
- global = Handle<GlobalObject>(save->context()->global());
- }
-#endif
-
- if (qml_mode_ && !global->HasProperty(*(proxy->name()))) {
- var->set_is_qml_global(true);
- }
- }
-
} else {
Variable* invalidated = var;
var = NonLocal(proxy->name(), DYNAMIC_LOCAL);
@@ -941,52 +900,12 @@ void Scope::ResolveVariable(Scope* global_scope,
// No binding has been found. Declare a variable in global scope.
ASSERT(global_scope != NULL);
var = global_scope->DeclareGlobal(proxy->name());
-
- if (qml_mode_) {
- Handle<GlobalObject> global = isolate_->global();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
- //Get the context before the debugger was entered.
- SaveContext *save = isolate_->save_context();
- while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
- save = save->prev();
-
- global = Handle<GlobalObject>(save->context()->global());
- }
-#endif
-
- if (!global->HasProperty(*(proxy->name()))) {
- var->set_is_qml_global(true);
- }
- }
-
break;
case UNBOUND_EVAL_SHADOWED:
// No binding has been found. But some scope makes a
// non-strict 'eval' call.
var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
-
- if (qml_mode_) {
- Handle<GlobalObject> global = isolate_->global();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
- //Get the context before the debugger was entered.
- SaveContext *save = isolate_->save_context();
- while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
- save = save->prev();
-
- global = Handle<GlobalObject>(save->context()->global());
- }
-#endif
-
- if (qml_mode_ && !global->HasProperty(*(proxy->name()))) {
- var->set_is_qml_global(true);
- }
- }
-
break;
case DYNAMIC_LOOKUP:
diff --git a/src/3rdparty/v8/src/scopes.h b/src/3rdparty/v8/src/scopes.h
index 41e5f5c..0cd3be5 100644
--- a/src/3rdparty/v8/src/scopes.h
+++ b/src/3rdparty/v8/src/scopes.h
@@ -42,10 +42,6 @@ class VariableMap: public HashMap {
public:
VariableMap();
- // Dummy constructor. This constructor doesn't set up the map
- // properly so don't use it unless you have a good reason.
- explicit VariableMap(bool gotta_love_static_overloading);
-
virtual ~VariableMap();
Variable* Declare(Scope* scope,
@@ -228,11 +224,6 @@ class Scope: public ZoneObject {
end_position_ = statement_pos;
}
- // Enable qml mode for this scope
- void EnableQmlMode() {
- qml_mode_ = true;
- }
-
// ---------------------------------------------------------------------------
// Predicates.
@@ -247,7 +238,6 @@ class Scope: public ZoneObject {
return is_eval_scope() || is_function_scope() || is_global_scope();
}
bool is_strict_mode() const { return strict_mode_flag() == kStrictMode; }
- bool is_qml_mode() const { return qml_mode_; }
bool is_strict_mode_eval_scope() const {
return is_eval_scope() && is_strict_mode();
}
@@ -379,8 +369,6 @@ class Scope: public ZoneObject {
protected:
friend class ParserFactory;
- explicit Scope(ScopeType type);
-
Isolate* const isolate_;
// Scope tree.
@@ -433,8 +421,6 @@ class Scope: public ZoneObject {
// Source positions.
int start_position_;
int end_position_;
- // This scope is a qml mode scope.
- bool qml_mode_;
// Computed via PropagateScopeInfo.
bool outer_scope_calls_non_strict_eval_;
diff --git a/src/3rdparty/v8/src/stub-cache.cc b/src/3rdparty/v8/src/stub-cache.cc
index 139bc2d..8b6e28f 100644
--- a/src/3rdparty/v8/src/stub-cache.cc
+++ b/src/3rdparty/v8/src/stub-cache.cc
@@ -161,18 +161,6 @@ Handle<Code> StubCache::ComputeLoadField(Handle<String> name,
}
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- CALL_HEAP_FUNCTION(isolate(),
- (set_failure(NULL),
- CompileLoadCallback(*name, *object, *holder, *callback)),
- Code);
-}
-
-
Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
@@ -213,16 +201,6 @@ Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
}
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- CALL_HEAP_FUNCTION(isolate(),
- (set_failure(NULL),
- CompileLoadInterceptor(*object, *holder, *name)),
- Code);
-}
-
-
Handle<Code> StubCache::ComputeLoadInterceptor(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder) {
@@ -305,17 +283,6 @@ Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
}
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- CALL_HEAP_FUNCTION(isolate(),
- (set_failure(NULL),
- CompileLoadInterceptor(*object, *holder, *name)),
- Code);
-}
-
-
Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder) {
@@ -334,18 +301,6 @@ Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name,
}
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- CALL_HEAP_FUNCTION(isolate(),
- (set_failure(NULL),
- CompileLoadCallback(*name, *object, *holder, *callback)),
- Code);
-}
-
-
Handle<Code> StubCache::ComputeKeyedLoadCallback(
Handle<String> name,
Handle<JSObject> receiver,
@@ -577,19 +532,6 @@ Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name,
#define CALL_LOGGER_TAG(kind, type) \
(kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function,
- Handle<String> name,
- CheckType check) {
- CALL_HEAP_FUNCTION(
- isolate(),
- (set_failure(NULL),
- CompileCallConstant(*object, *holder, *function, *name, check)),
- Code);
-}
-
-
Handle<Code> StubCache::ComputeCallConstant(int argc,
Code::Kind kind,
Code::ExtraICState extra_state,
@@ -669,16 +611,6 @@ Handle<Code> StubCache::ComputeCallField(int argc,
}
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- CALL_HEAP_FUNCTION(
- isolate(),
- (set_failure(NULL), CompileCallInterceptor(*object, *holder, *name)),
- Code);
-}
-
-
Handle<Code> StubCache::ComputeCallInterceptor(int argc,
Code::Kind kind,
Code::ExtraICState extra_state,
@@ -716,20 +648,6 @@ Handle<Code> StubCache::ComputeCallInterceptor(int argc,
}
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- CALL_HEAP_FUNCTION(
- isolate(),
- (set_failure(NULL),
- CompileCallGlobal(*object, *holder, *cell, *function, *name)),
- Code);
-}
-
-
Handle<Code> StubCache::ComputeCallGlobal(int argc,
Code::Kind kind,
Code::ExtraICState extra_state,
@@ -906,33 +824,6 @@ Handle<Code> StubCache::ComputeCallMiss(int argc,
}
-// The CallStubCompiler needs a version of ComputeCallMiss that does not
-// perform GC. This function is temporary, because the stub cache but not
-// yet the stub compiler uses handles.
-MaybeObject* StubCache::TryComputeCallMiss(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- Code::Flags flags =
- Code::ComputeFlags(kind, MONOMORPHIC_PROTOTYPE_FAILURE, extra_state,
- NORMAL, argc, OWN_MAP);
- NumberDictionary* cache = isolate_->heap()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return cache->ValueAt(entry);
-
- StubCompiler compiler(isolate_);
- Code* code = NULL;
- MaybeObject* maybe_code = compiler.TryCompileCallMiss(flags);
- if (!maybe_code->To(&code)) return maybe_code;
-
- NumberDictionary* new_cache = NULL;
- MaybeObject* maybe_new_cache = cache->AtNumberPut(flags, code);
- if (!maybe_new_cache->To(&new_cache)) return maybe_new_cache;
- isolate_->heap()->public_set_non_monomorphic_cache(new_cache);
-
- return code;
-}
-
-
#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<Code> StubCache::ComputeCallDebugBreak(int argc,
Code::Kind kind) {
@@ -1213,8 +1104,8 @@ RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
JSObject* recv = JSObject::cast(args[0]);
String* name = String::cast(args[1]);
Object* value = args[2];
+ ASSERT(args.smi_at(3) == kStrictMode || args.smi_at(3) == kNonStrictMode);
StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
- ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
ASSERT(recv->HasNamedInterceptor());
PropertyAttributes attr = NONE;
MaybeObject* result = recv->SetPropertyWithInterceptor(
@@ -1343,33 +1234,6 @@ Handle<Code> StubCompiler::CompileCallMiss(Code::Flags flags) {
}
-// TODO(kmillikin): This annoying raw pointer implementation should be
-// eliminated when the stub compiler no longer needs it.
-MaybeObject* StubCompiler::TryCompileCallMiss(Code::Flags flags) {
- HandleScope scope(isolate());
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateMiss(masm(), argc, extra_state);
- } else {
- KeyedCallIC::GenerateMiss(masm(), argc);
- }
- Object* result;
- { MaybeObject* maybe_result = TryGetCodeWithFlags(flags, "CompileCallMiss");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- isolate()->counters()->call_megamorphic_stubs()->Increment();
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
- code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MISS, Code::cast(code)));
- return result;
-}
-
-
#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<Code> StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
Debug::GenerateCallICDebugBreak(masm());
@@ -1427,44 +1291,16 @@ Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
}
-MaybeObject* StubCompiler::TryGetCodeWithFlags(Code::Flags flags,
- const char* name) {
- // Check for allocation failures during stub compilation.
- if (failure_->IsFailure()) return failure_;
-
- // Create code object in the heap.
- CodeDesc desc;
- masm_.GetCode(&desc);
- MaybeObject* result = heap()->CreateCode(desc, flags, masm_.CodeObject());
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code_stubs && !result->IsFailure()) {
- Code::cast(result->ToObjectUnchecked())->Disassemble(name);
- }
-#endif
- return result;
-}
-
-
-MaybeObject* StubCompiler::TryGetCodeWithFlags(Code::Flags flags,
- String* name) {
- if (FLAG_print_code_stubs && name != NULL) {
- return TryGetCodeWithFlags(flags, *name->ToCString());
- }
- return TryGetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
-}
+void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
+ Handle<String> name,
+ LookupResult* lookup) {
+ holder->LocalLookupRealNamedProperty(*name, lookup);
+ if (lookup->IsProperty()) return;
+ lookup->NotFound();
+ if (holder->GetPrototype()->IsNull()) return;
-void StubCompiler::LookupPostInterceptor(JSObject* holder,
- String* name,
- LookupResult* lookup) {
- holder->LocalLookupRealNamedProperty(name, lookup);
- if (!lookup->IsProperty()) {
- lookup->NotFound();
- Object* proto = holder->GetPrototype();
- if (!proto->IsNull()) {
- proto->Lookup(name, lookup);
- }
- }
+ holder->GetPrototype()->Lookup(*name, lookup);
}
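
Besides taking handles, LookupPostInterceptor is flattened into early
returns; the behaviour is unchanged: look for a real (non-interceptor)
property on the holder itself, and only on a miss continue the lookup from
the holder's prototype, unless that prototype is null. A standalone sketch
of the two-step lookup -- Obj and Lookup are simplified stand-ins for
JSObject and LookupResult:

    #include <cstdio>
    #include <map>
    #include <string>

    struct Obj {
      std::map<std::string, int> props;
      const Obj* prototype = nullptr;  // null terminates the chain
    };

    struct Lookup {
      bool found = false;
      int value = 0;
    };

    static void LookupPostInterceptor(const Obj* holder, const std::string& name,
                                      Lookup* lookup) {
      auto it = holder->props.find(name);
      if (it != holder->props.end()) {  // real own property: done
        lookup->found = true;
        lookup->value = it->second;
        return;
      }
      lookup->found = false;
      if (holder->prototype == nullptr) return;
      // Continue from the prototype; V8 delegates to Lookup(), which walks
      // the rest of the chain itself.
      LookupPostInterceptor(holder->prototype, name, lookup);
    }

    int main() {
      Obj proto;
      proto.props["x"] = 7;
      Obj holder;
      holder.prototype = &proto;
      Lookup l;
      LookupPostInterceptor(&holder, "x", &l);
      std::printf("found=%d value=%d\n", l.found, l.value);
    }
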
@@ -1477,24 +1313,6 @@ Handle<Code> LoadStubCompiler::GetCode(PropertyType type, Handle<String> name) {
}
-// TODO(ulan): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* LoadStubCompiler::TryGetCode(PropertyType type, String* name) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
- MaybeObject* result = TryGetCodeWithFlags(flags, name);
- if (!result->IsFailure()) {
- PROFILE(isolate(),
- CodeCreateEvent(Logger::LOAD_IC_TAG,
- Code::cast(result->ToObjectUnchecked()),
- name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
- name,
- Code::cast(result->ToObjectUnchecked())));
- }
- return result;
-}
-
-
Handle<Code> KeyedLoadStubCompiler::GetCode(PropertyType type,
Handle<String> name,
InlineCacheState state) {
@@ -1506,26 +1324,6 @@ Handle<Code> KeyedLoadStubCompiler::GetCode(PropertyType type,
return code;
}
-// TODO(ulan): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* KeyedLoadStubCompiler::TryGetCode(PropertyType type,
- String* name,
- InlineCacheState state) {
- Code::Flags flags = Code::ComputeFlags(
- Code::KEYED_LOAD_IC, state, Code::kNoExtraICState, type);
- MaybeObject* result = TryGetCodeWithFlags(flags, name);
- if (!result->IsFailure()) {
- PROFILE(isolate(),
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
- Code::cast(result->ToObjectUnchecked()),
- name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
- name,
- Code::cast(result->ToObjectUnchecked())));
- }
- return result;
-}
-
Handle<Code> StoreStubCompiler::GetCode(PropertyType type,
Handle<String> name) {
@@ -1569,39 +1367,36 @@ CallStubCompiler::CallStubCompiler(Isolate* isolate,
}
-bool CallStubCompiler::HasCustomCallGenerator(JSFunction* function) {
- SharedFunctionInfo* info = function->shared();
- if (info->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = info->builtin_function_id();
+bool CallStubCompiler::HasCustomCallGenerator(Handle<JSFunction> function) {
+ if (function->shared()->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = function->shared()->builtin_function_id();
#define CALL_GENERATOR_CASE(name) if (id == k##name) return true;
CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
#undef CALL_GENERATOR_CASE
}
+
CallOptimization optimization(function);
- if (optimization.is_simple_api_call()) {
- return true;
- }
- return false;
+ return optimization.is_simple_api_call();
}
-MaybeObject* CallStubCompiler::CompileCustomCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* fname) {
+Handle<Code> CallStubCompiler::CompileCustomCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> fname) {
ASSERT(HasCustomCallGenerator(function));
- SharedFunctionInfo* info = function->shared();
- if (info->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = info->builtin_function_id();
-#define CALL_GENERATOR_CASE(name) \
- if (id == k##name) { \
- return CallStubCompiler::Compile##name##Call(object, \
- holder, \
- cell, \
- function, \
- fname); \
+ if (function->shared()->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = function->shared()->builtin_function_id();
+#define CALL_GENERATOR_CASE(name) \
+ if (id == k##name) { \
+ return CallStubCompiler::Compile##name##Call(object, \
+ holder, \
+ cell, \
+ function, \
+ fname); \
}
CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
#undef CALL_GENERATOR_CASE
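
Note the changed failure convention here: the raw-pointer CompileCustomCall
signalled "no custom stub" (or allocation failure) through MaybeObject*,
while the handlified version returns an empty Handle<Code> that callers test
with is_null(). A minimal sketch of that convention -- this Handle is a toy
single-pointer wrapper, and MaybeCompile is hypothetical, not the stub
compiler's real entry point:

    #include <cstdio>

    // Toy handle: empty (null) means "nothing was produced". Real V8
    // handles hold an indirection slot rather than the pointer itself.
    template <typename T>
    class Handle {
     public:
      Handle() = default;  // the empty handle
      explicit Handle(T* object) : object_(object) {}
      bool is_null() const { return object_ == nullptr; }
      T* operator->() const { return object_; }
     private:
      T* object_ = nullptr;
    };

    struct Code { const char* name; };

    static Handle<Code> MaybeCompile(bool has_custom_generator) {
      static Code stub = {"custom stub"};
      if (!has_custom_generator) return Handle<Code>();  // no custom code
      return Handle<Code>(&stub);
    }

    int main() {
      Handle<Code> code = MaybeCompile(false);
      if (code.is_null()) std::printf("fall back to the generic stub\n");
    }
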
@@ -1637,104 +1432,79 @@ Handle<Code> CallStubCompiler::GetCode(Handle<JSFunction> function) {
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* CallStubCompiler::TryGetCode(PropertyType type, String* name) {
- int argc = arguments_.immediate();
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
- type,
- extra_state_,
- cache_holder_,
- argc);
- return TryGetCodeWithFlags(flags, name);
-}
-
-
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* CallStubCompiler::TryGetCode(JSFunction* function) {
- String* function_name = NULL;
- if (function->shared()->name()->IsString()) {
- function_name = String::cast(function->shared()->name());
- }
- return TryGetCode(CONSTANT_FUNCTION, function_name);
-}
-
-
-MaybeObject* ConstructStubCompiler::GetCode() {
+Handle<Code> ConstructStubCompiler::GetCode() {
Code::Flags flags = Code::ComputeFlags(Code::STUB);
- Object* result;
- { MaybeObject* maybe_result = TryGetCodeWithFlags(flags, "ConstructStub");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
- GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", Code::cast(code)));
- return result;
+ Handle<Code> code = GetCodeWithFlags(flags, "ConstructStub");
+ PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, "ConstructStub"));
+ GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", *code));
+ return code;
}
CallOptimization::CallOptimization(LookupResult* lookup) {
- if (!lookup->IsProperty() || !lookup->IsCacheable() ||
+ if (!lookup->IsProperty() ||
+ !lookup->IsCacheable() ||
lookup->type() != CONSTANT_FUNCTION) {
- Initialize(NULL);
+ Initialize(Handle<JSFunction>::null());
} else {
// We only optimize constant function calls.
- Initialize(lookup->GetConstantFunction());
+ Initialize(Handle<JSFunction>(lookup->GetConstantFunction()));
}
}
-CallOptimization::CallOptimization(JSFunction* function) {
+CallOptimization::CallOptimization(Handle<JSFunction> function) {
Initialize(function);
}
-int CallOptimization::GetPrototypeDepthOfExpectedType(JSObject* object,
- JSObject* holder) const {
- ASSERT(is_simple_api_call_);
- if (expected_receiver_type_ == NULL) return 0;
+int CallOptimization::GetPrototypeDepthOfExpectedType(
+ Handle<JSObject> object,
+ Handle<JSObject> holder) const {
+ ASSERT(is_simple_api_call());
+ if (expected_receiver_type_.is_null()) return 0;
int depth = 0;
- while (object != holder) {
- if (object->IsInstanceOf(expected_receiver_type_)) return depth;
- object = JSObject::cast(object->GetPrototype());
+ while (!object.is_identical_to(holder)) {
+ if (object->IsInstanceOf(*expected_receiver_type_)) return depth;
+ object = Handle<JSObject>(JSObject::cast(object->GetPrototype()));
++depth;
}
- if (holder->IsInstanceOf(expected_receiver_type_)) return depth;
+ if (holder->IsInstanceOf(*expected_receiver_type_)) return depth;
return kInvalidProtoDepth;
}
-void CallOptimization::Initialize(JSFunction* function) {
- constant_function_ = NULL;
+void CallOptimization::Initialize(Handle<JSFunction> function) {
+ constant_function_ = Handle<JSFunction>::null();
is_simple_api_call_ = false;
- expected_receiver_type_ = NULL;
- api_call_info_ = NULL;
+ expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
+ api_call_info_ = Handle<CallHandlerInfo>::null();
- if (function == NULL || !function->is_compiled()) return;
+ if (function.is_null() || !function->is_compiled()) return;
constant_function_ = function;
AnalyzePossibleApiFunction(function);
}
-void CallOptimization::AnalyzePossibleApiFunction(JSFunction* function) {
- SharedFunctionInfo* sfi = function->shared();
- if (!sfi->IsApiFunction()) return;
- FunctionTemplateInfo* info = sfi->get_api_func_data();
+void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
+ if (!function->shared()->IsApiFunction()) return;
+ Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
// Require a C++ callback.
if (info->call_code()->IsUndefined()) return;
- api_call_info_ = CallHandlerInfo::cast(info->call_code());
+ api_call_info_ =
+ Handle<CallHandlerInfo>(CallHandlerInfo::cast(info->call_code()));
// Accept signatures that either have no restrictions at all or
// only have restrictions on the receiver.
if (!info->signature()->IsUndefined()) {
- SignatureInfo* signature = SignatureInfo::cast(info->signature());
+ Handle<SignatureInfo> signature =
+ Handle<SignatureInfo>(SignatureInfo::cast(info->signature()));
if (!signature->args()->IsUndefined()) return;
if (!signature->receiver()->IsUndefined()) {
expected_receiver_type_ =
- FunctionTemplateInfo::cast(signature->receiver());
+ Handle<FunctionTemplateInfo>(
+ FunctionTemplateInfo::cast(signature->receiver()));
}
}
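
The CallOptimization rewrite above is representative of the whole stub-cache
change: raw JSFunction*/FunctionTemplateInfo* fields become Handle<...>
fields, NULL checks become is_null() checks, and once nothing hands out raw
heap pointers across possible allocations, the Try*/CALL_HEAP_FUNCTION retry
duplicates can be deleted. The point of the extra indirection is GC safety;
a toy sketch of why a handle survives a moving collector while a raw pointer
goes stale (two fixed slots stand in for a real compacting heap):

    #include <cstdio>

    struct Heap {
      int storage[2] = {41, 0};
      int* live = &storage[0];  // the slot handles point through
      void MoveEverything() {   // simulates a compacting GC
        storage[1] = storage[0];
        live = &storage[1];     // slot updated; handles stay valid
      }
    };

    int main() {
      Heap heap;
      int* raw = heap.live;       // raw pointer straight at the object
      int** handle = &heap.live;  // "handle": pointer to the updated slot
      heap.MoveEverything();
      **handle = 42;              // mutate the object through the handle
      // The raw pointer still addresses the old copy.
      std::printf("raw sees %d, handle sees %d\n", *raw, **handle);
    }
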
diff --git a/src/3rdparty/v8/src/stub-cache.h b/src/3rdparty/v8/src/stub-cache.h
index 11fdb89..cc42e05 100644
--- a/src/3rdparty/v8/src/stub-cache.h
+++ b/src/3rdparty/v8/src/stub-cache.h
@@ -230,10 +230,6 @@ class StubCache {
Code::Kind kind,
Code::ExtraICState state);
- MUST_USE_RESULT MaybeObject* TryComputeCallMiss(int argc,
- Code::Kind kind,
- Code::ExtraICState state);
-
// Finds the Code object stored in the Heap::non_monomorphic_cache().
Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind);
@@ -392,8 +388,6 @@ class StubCompiler BASE_EMBEDDED {
Handle<Code> CompileCallArguments(Code::Flags flags);
Handle<Code> CompileCallMiss(Code::Flags flags);
- MUST_USE_RESULT MaybeObject* TryCompileCallMiss(Code::Flags flags);
-
#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<Code> CompileCallDebugBreak(Code::Flags flags);
Handle<Code> CompileCallDebugPrepareStepIn(Code::Flags flags);
@@ -490,41 +484,10 @@ class StubCompiler BASE_EMBEDDED {
int save_at_depth,
Label* miss);
- // TODO(kmillikin): Eliminate this function when the stub cache is fully
- // handlified.
- Register CheckPrototypes(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- String* name,
- Label* miss) {
- return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1,
- scratch2, name, kInvalidProtoDepth, miss);
- }
-
- // TODO(kmillikin): Eliminate this function when the stub cache is fully
- // handlified.
- Register CheckPrototypes(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- String* name,
- int save_at_depth,
- Label* miss);
-
protected:
Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<String> name);
- MUST_USE_RESULT MaybeObject* TryGetCodeWithFlags(Code::Flags flags,
- const char* name);
- MUST_USE_RESULT MaybeObject* TryGetCodeWithFlags(Code::Flags flags,
- String* name);
-
MacroAssembler* masm() { return &masm_; }
void set_failure(Failure* failure) { failure_ = failure; }
@@ -538,16 +501,16 @@ class StubCompiler BASE_EMBEDDED {
Handle<String> name,
Label* miss);
- MaybeObject* GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss);
+ void GenerateLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss);
void GenerateLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
@@ -559,19 +522,19 @@ class StubCompiler BASE_EMBEDDED {
Handle<String> name,
Label* miss);
- void GenerateLoadInterceptor(JSObject* object,
- JSObject* holder,
+ void GenerateLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
+ Handle<String> name,
Label* miss);
- static void LookupPostInterceptor(JSObject* holder,
- String* name,
+ static void LookupPostInterceptor(Handle<JSObject> holder,
+ Handle<String> name,
LookupResult* lookup);
Isolate* isolate() { return isolate_; }
@@ -603,11 +566,6 @@ class LoadStubCompiler: public StubCompiler {
Handle<JSObject> holder,
Handle<AccessorInfo> callback);
- MUST_USE_RESULT MaybeObject* CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback);
-
Handle<Code> CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Object> value,
@@ -617,10 +575,6 @@ class LoadStubCompiler: public StubCompiler {
Handle<JSObject> holder,
Handle<String> name);
- MUST_USE_RESULT MaybeObject* CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name);
-
Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
Handle<GlobalObject> holder,
Handle<JSGlobalPropertyCell> cell,
@@ -628,8 +582,6 @@ class LoadStubCompiler: public StubCompiler {
bool is_dont_delete);
private:
- MUST_USE_RESULT MaybeObject* TryGetCode(PropertyType type, String* name);
-
Handle<Code> GetCode(PropertyType type, Handle<String> name);
};
@@ -648,11 +600,6 @@ class KeyedLoadStubCompiler: public StubCompiler {
Handle<JSObject> holder,
Handle<AccessorInfo> callback);
- MUST_USE_RESULT MaybeObject* CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback);
-
Handle<Code> CompileLoadConstant(Handle<String> name,
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -662,10 +609,6 @@ class KeyedLoadStubCompiler: public StubCompiler {
Handle<JSObject> holder,
Handle<String> name);
- MUST_USE_RESULT MaybeObject* CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name);
-
Handle<Code> CompileLoadArrayLength(Handle<String> name);
Handle<Code> CompileLoadStringLength(Handle<String> name);
@@ -687,10 +630,6 @@ class KeyedLoadStubCompiler: public StubCompiler {
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
private:
- MaybeObject* TryGetCode(PropertyType type,
- String* name,
- InlineCacheState state = MONOMORPHIC);
-
Handle<Code> GetCode(PropertyType type,
Handle<String> name,
InlineCacheState state = MONOMORPHIC);
@@ -796,95 +735,69 @@ class CallStubCompiler: public StubCompiler {
Handle<String> name,
CheckType check);
- MUST_USE_RESULT MaybeObject* CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check);
-
Handle<Code> CompileCallInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<String> name);
- MUST_USE_RESULT MaybeObject* CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name);
-
Handle<Code> CompileCallGlobal(Handle<JSObject> object,
Handle<GlobalObject> holder,
Handle<JSGlobalPropertyCell> cell,
Handle<JSFunction> function,
Handle<String> name);
- MUST_USE_RESULT MaybeObject* CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name);
-
- static bool HasCustomCallGenerator(JSFunction* function);
+ static bool HasCustomCallGenerator(Handle<JSFunction> function);
private:
- // Compiles a custom call constant/global IC. For constant calls
- // cell is NULL. Returns undefined if there is no custom call code
- // for the given function or it can't be generated.
- MUST_USE_RESULT MaybeObject* CompileCustomCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name);
-
-#define DECLARE_CALL_GENERATOR(name) \
- MUST_USE_RESULT MaybeObject* Compile##name##Call(Object* object, \
- JSObject* holder, \
- JSGlobalPropertyCell* cell, \
- JSFunction* function, \
- String* fname);
+ // Compiles a custom call constant/global IC. For constant calls, cell
+ // is NULL. Returns an empty handle if there is no custom call code for
+ // the given function.
+ Handle<Code> CompileCustomCall(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name);
+
+#define DECLARE_CALL_GENERATOR(name) \
+ Handle<Code> Compile##name##Call(Handle<Object> object, \
+ Handle<JSObject> holder, \
+ Handle<JSGlobalPropertyCell> cell, \
+ Handle<JSFunction> function, \
+ Handle<String> fname);
CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
#undef DECLARE_CALL_GENERATOR
- MUST_USE_RESULT MaybeObject* CompileFastApiCall(
- const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name);
-
- const ParameterCount arguments_;
- const Code::Kind kind_;
- const Code::ExtraICState extra_state_;
- const InlineCacheHolderFlag cache_holder_;
-
- const ParameterCount& arguments() { return arguments_; }
+ Handle<Code> CompileFastApiCall(const CallOptimization& optimization,
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name);
Handle<Code> GetCode(PropertyType type, Handle<String> name);
Handle<Code> GetCode(Handle<JSFunction> function);
- // TODO(kmillikin): Eliminate these functions when the stub cache is fully
- // handlified.
- MUST_USE_RESULT MaybeObject* TryGetCode(PropertyType type, String* name);
- MUST_USE_RESULT MaybeObject* TryGetCode(JSFunction* function);
+ const ParameterCount& arguments() { return arguments_; }
void GenerateNameCheck(Handle<String> name, Label* miss);
- void GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
+ void GenerateGlobalReceiverCheck(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
Label* miss);
// Generates code to load the function from the cell checking that
// it still contains the same function.
- void GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
+ void GenerateLoadFunctionFromCell(Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
Label* miss);
// Generates a jump to CallIC miss stub.
void GenerateMissBranch();
- // TODO(kmillikin): Eliminate this function when the stub cache is fully
- // handlified.
- MUST_USE_RESULT MaybeObject* TryGenerateMissBranch();
+ const ParameterCount arguments_;
+ const Code::Kind kind_;
+ const Code::ExtraICState extra_state_;
+ const InlineCacheHolderFlag cache_holder_;
};
@@ -892,10 +805,10 @@ class ConstructStubCompiler: public StubCompiler {
public:
explicit ConstructStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
- MUST_USE_RESULT MaybeObject* CompileConstructStub(JSFunction* function);
+ Handle<Code> CompileConstructStub(Handle<JSFunction> function);
private:
- MaybeObject* GetCode();
+ Handle<Code> GetCode();
};
@@ -904,14 +817,14 @@ class CallOptimization BASE_EMBEDDED {
public:
explicit CallOptimization(LookupResult* lookup);
- explicit CallOptimization(JSFunction* function);
+ explicit CallOptimization(Handle<JSFunction> function);
bool is_constant_call() const {
- return constant_function_ != NULL;
+ return !constant_function_.is_null();
}
- JSFunction* constant_function() const {
- ASSERT(constant_function_ != NULL);
+ Handle<JSFunction> constant_function() const {
+ ASSERT(is_constant_call());
return constant_function_;
}
@@ -919,32 +832,32 @@ class CallOptimization BASE_EMBEDDED {
return is_simple_api_call_;
}
- FunctionTemplateInfo* expected_receiver_type() const {
- ASSERT(is_simple_api_call_);
+ Handle<FunctionTemplateInfo> expected_receiver_type() const {
+ ASSERT(is_simple_api_call());
return expected_receiver_type_;
}
- CallHandlerInfo* api_call_info() const {
- ASSERT(is_simple_api_call_);
+ Handle<CallHandlerInfo> api_call_info() const {
+ ASSERT(is_simple_api_call());
return api_call_info_;
}
// Returns the depth of the object having the expected type in the
// prototype chain between the two arguments.
- int GetPrototypeDepthOfExpectedType(JSObject* object,
- JSObject* holder) const;
+ int GetPrototypeDepthOfExpectedType(Handle<JSObject> object,
+ Handle<JSObject> holder) const;
private:
- void Initialize(JSFunction* function);
+ void Initialize(Handle<JSFunction> function);
// Determines whether the given function can be called using the
// fast api call builtin.
- void AnalyzePossibleApiFunction(JSFunction* function);
+ void AnalyzePossibleApiFunction(Handle<JSFunction> function);
- JSFunction* constant_function_;
+ Handle<JSFunction> constant_function_;
bool is_simple_api_call_;
- FunctionTemplateInfo* expected_receiver_type_;
- CallHandlerInfo* api_call_info_;
+ Handle<FunctionTemplateInfo> expected_receiver_type_;
+ Handle<CallHandlerInfo> api_call_info_;
};
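
A minimal standalone sketch of the empty-handle protocol the handlified stub-cache interface above adopts; Handle, Code and the helper below are stand-ins rather than V8's real classes, so only the is_null() contract carries over:

#include <cstdio>

// Stand-in for V8's Handle<T>; real handles are GC-tracked indirections.
template <typename T>
class Handle {
 public:
  Handle() : location_(nullptr) {}
  explicit Handle(T* object) : location_(object) {}
  static Handle<T> null() { return Handle<T>(); }
  bool is_null() const { return location_ == nullptr; }
 private:
  T* location_;
};

struct Code { const char* tag; };

// Mirrors the contract documented above: an empty handle means "no custom
// call code for this function", replacing a MaybeObject* failure value.
Handle<Code> CompileCustomCall(bool has_custom_generator) {
  static Code custom = {"custom-call-ic"};
  return has_custom_generator ? Handle<Code>(&custom) : Handle<Code>::null();
}

int main() {
  Handle<Code> code = CompileCustomCall(false);
  if (code.is_null()) {
    // The caller falls back to the generic stub path instead of checking
    // IsFailure() and propagating, as the deleted Try* entry points required.
    std::printf("no custom call code; compile the generic stub\n");
  }
  return 0;
}

The same shift from MaybeObject* return values to handles repeats through the rest of this patch.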
diff --git a/src/3rdparty/v8/src/v8natives.js b/src/3rdparty/v8/src/v8natives.js
index e6669d5..9595ad1 100644
--- a/src/3rdparty/v8/src/v8natives.js
+++ b/src/3rdparty/v8/src/v8natives.js
@@ -162,28 +162,23 @@ function GlobalParseFloat(string) {
function GlobalEval(x) {
if (!IS_STRING(x)) return x;
- var receiver = this;
var global_receiver = %GlobalReceiver(global);
-
- if (receiver == null && !IS_UNDETECTABLE(receiver)) {
- receiver = global_receiver;
- }
-
- var this_is_global_receiver = (receiver === global_receiver);
var global_is_detached = (global === global_receiver);
// For consistency with JSC we require the global object passed to
// eval to be the global object from which 'eval' originated. This
// is not mandated by the spec.
- if (!this_is_global_receiver || global_is_detached) {
- throw new $EvalError('The "this" object passed to eval must ' +
+ // We only throw if the global has been detached, since we need the
+ // receiver as this-value for the call.
+ if (global_is_detached) {
+ throw new $EvalError('The "this" value passed to eval must ' +
'be the global object from which eval originated');
}
var f = %CompileString(x);
if (!IS_FUNCTION(f)) return f;
- return %_CallFunction(receiver, f);
+ return %_CallFunction(global_receiver, f);
}
diff --git a/src/3rdparty/v8/src/variables.cc b/src/3rdparty/v8/src/variables.cc
index 1887935..d85e1b2 100644
--- a/src/3rdparty/v8/src/variables.cc
+++ b/src/3rdparty/v8/src/variables.cc
@@ -68,8 +68,7 @@ Variable::Variable(Scope* scope,
local_if_not_shadowed_(NULL),
is_valid_LHS_(is_valid_LHS),
is_accessed_from_inner_scope_(false),
- is_used_(false),
- is_qml_global_(false) {
+ is_used_(false) {
// names must be canonicalized for fast equality checks
ASSERT(name->IsSymbol());
}
diff --git a/src/3rdparty/v8/src/variables.h b/src/3rdparty/v8/src/variables.h
index 33561b0..e23e00b 100644
--- a/src/3rdparty/v8/src/variables.h
+++ b/src/3rdparty/v8/src/variables.h
@@ -134,8 +134,7 @@ class Variable: public ZoneObject {
// True if the variable is named eval and not known to be shadowed.
bool is_possibly_eval() const {
- return IsVariable(FACTORY->eval_symbol()) &&
- (mode_ == DYNAMIC || mode_ == DYNAMIC_GLOBAL);
+ return IsVariable(FACTORY->eval_symbol());
}
Variable* local_if_not_shadowed() const {
@@ -155,8 +154,6 @@ class Variable: public ZoneObject {
index_ = index;
}
- bool is_qml_global() const { return is_qml_global_; }
- void set_is_qml_global(bool is_qml_global) { is_qml_global_ = is_qml_global; }
private:
Scope* scope_;
Handle<String> name_;
@@ -177,9 +174,6 @@ class Variable: public ZoneObject {
// Usage info.
bool is_accessed_from_inner_scope_; // set by variable resolver
bool is_used_;
-
- // QML info
- bool is_qml_global_;
};
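
The is_possibly_eval() relaxation above can be shown with a reduced stand-in; Variable here is a two-field struct rather than the real class, and the commented-out clause is the condition the patch drops:

#include <cstdio>
#include <string>

enum Mode { DYNAMIC, DYNAMIC_GLOBAL, VAR };  // reduced mode set for illustration

struct Variable {
  std::string name;
  Mode mode;
  bool is_possibly_eval() const {
    // Any variable literally named "eval" now counts as possibly eval;
    // was: ... && (mode == DYNAMIC || mode == DYNAMIC_GLOBAL).
    return name == "eval";
  }
};

int main() {
  Variable v{"eval", VAR};
  std::printf("%d\n", v.is_possibly_eval());  // prints 1 even outside dynamic modes
  return 0;
}

Widening the predicate lines up with the removal of the DYNAMIC_GLOBAL fast path for eval in VisitCall later in this patch.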
diff --git a/src/3rdparty/v8/src/version.cc b/src/3rdparty/v8/src/version.cc
index ecb2fbd..f358be7 100644
--- a/src/3rdparty/v8/src/version.cc
+++ b/src/3rdparty/v8/src/version.cc
@@ -34,11 +34,11 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 7
-#define BUILD_NUMBER 3
+#define BUILD_NUMBER 4
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
-#define IS_CANDIDATE_VERSION 0
+#define IS_CANDIDATE_VERSION 1
// Define SONAME to have the SCons build put a specific SONAME into the
// shared library instead of the generic SONAME generated from the V8 version
diff --git a/src/3rdparty/v8/src/x64/code-stubs-x64.cc b/src/3rdparty/v8/src/x64/code-stubs-x64.cc
index ff8337f..8a00b89 100644
--- a/src/3rdparty/v8/src/x64/code-stubs-x64.cc
+++ b/src/3rdparty/v8/src/x64/code-stubs-x64.cc
@@ -139,10 +139,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
- // Copy the qmlglobal object from the previous context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::QML_GLOBAL_INDEX)), rbx);
-
// Initialize the rest of the slots to undefined.
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
@@ -3088,37 +3084,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
- {
- Label not_user_equal, user_equal;
- __ JumpIfSmi(rax, &not_user_equal);
- __ JumpIfSmi(rdx, &not_user_equal);
-
- __ CmpObjectType(rax, JS_OBJECT_TYPE, rbx);
- __ j(not_equal, &not_user_equal);
-
- __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &not_user_equal);
-
- __ testb(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &user_equal);
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &user_equal);
-
- __ jmp(&not_user_equal);
-
- __ bind(&user_equal);
-
- __ pop(rbx); // Return address.
- __ push(rax);
- __ push(rdx);
- __ push(rbx);
- __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
- __ bind(&not_user_equal);
- }
-
// Two identical objects are equal unless they are both NaN or undefined.
{
Label not_identical;
@@ -4185,7 +4150,8 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
Factory* factory = masm->isolate()->factory();
@@ -4261,7 +4227,8 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
@@ -4288,7 +4255,8 @@ void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
char_code_at_generator_.GenerateSlow(masm, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}
@@ -5452,14 +5420,8 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
__ j(not_equal, &miss, Label::kNear);
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &miss, Label::kNear);
__ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
__ j(not_equal, &miss, Label::kNear);
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
__ subq(rax, rdx);
@@ -5561,70 +5523,6 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* StringDictionaryLookupStub::TryGenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- String* name,
- Register r0) {
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // r0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = r0;
- // Capacity is smi 2^n.
- __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
- __ decl(index);
- __ and_(index,
- Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
-
- Register entity_name = r0;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- __ movq(entity_name, Operand(properties,
- index,
- times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
- __ j(equal, done);
-
- // Stop if found the property.
- __ Cmp(entity_name, Handle<String>(name));
- __ j(equal, miss);
-
- // Check if the entry name is not a symbol.
- __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- Immediate(kIsSymbolMask));
- __ j(zero, miss);
- }
-
- StringDictionaryLookupStub stub(properties,
- r0,
- r0,
- StringDictionaryLookupStub::NEGATIVE_LOOKUP);
- __ Push(Handle<Object>(name));
- __ push(Immediate(name->Hash()));
- MaybeObject* result = masm->TryCallStub(&stub);
- if (result->IsFailure()) return result;
- __ testq(r0, r0);
- __ j(not_zero, miss);
- __ jmp(done);
- return result;
-}
-
-
// Probe the string dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found, leaving the
// index into the dictionary in |r1|. Jump to the |miss| label
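
The probe sequence that the deleted TryGenerateNegativeLookup unrolled can be reproduced as plain arithmetic; kEntrySize mirrors the assertion above (StringDictionary::kEntrySize == 3), while kInlinedProbes' value and the function names are illustrative:

#include <cstdint>
#include <cstdio>

const int kEntrySize = 3;      // asserted above for StringDictionary
const int kInlinedProbes = 4;  // illustrative; the real constant lives elsewhere

// Masked index for probe i, as the deleted comment describes:
// (hash + i + i * i) & mask, then scaled by the entry size.
uint32_t ProbeElement(uint32_t hash, uint32_t capacity, int i) {
  uint32_t mask = capacity - 1;   // capacity is a smi 2^n in the real code
  uint32_t index = (hash + i + i * i) & mask;
  return index * kEntrySize;      // the lea above computes index *= 3
}

int main() {
  for (int i = 0; i < kInlinedProbes; ++i)
    std::printf("probe %d -> element %u\n", i, ProbeElement(0x1234u, 64, i));
  return 0;
}

A probe that lands on undefined proves the name is absent (jump to done); finding the name itself means the property exists, so the negative lookup jumps to miss.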
diff --git a/src/3rdparty/v8/src/x64/code-stubs-x64.h b/src/3rdparty/v8/src/x64/code-stubs-x64.h
index ffa3f4d..34435d7 100644
--- a/src/3rdparty/v8/src/x64/code-stubs-x64.h
+++ b/src/3rdparty/v8/src/x64/code-stubs-x64.h
@@ -430,16 +430,6 @@ class StringDictionaryLookupStub: public CodeStub {
Handle<String> name,
Register r0);
- // TODO(kmillikin): Eliminate this function when the stub cache is fully
- // handlified.
- MUST_USE_RESULT static MaybeObject* TryGenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- String* name,
- Register r0);
-
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
diff --git a/src/3rdparty/v8/src/x64/full-codegen-x64.cc b/src/3rdparty/v8/src/x64/full-codegen-x64.cc
index b1e5d35..e8fdef2 100644
--- a/src/3rdparty/v8/src/x64/full-codegen-x64.cc
+++ b/src/3rdparty/v8/src/x64/full-codegen-x64.cc
@@ -44,11 +44,6 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-static unsigned GetPropertyId(Property* property) {
- return property->id();
-}
-
-
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -173,13 +168,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Possibly allocate a local context.
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
+ if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
__ push(rdi);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+ FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -384,7 +378,7 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
void FullCodeGenerator::TestContext::Plug(Variable* var) const {
codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -406,7 +400,7 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -439,7 +433,7 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -498,7 +492,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -562,7 +556,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -654,7 +648,7 @@ void FullCodeGenerator::SetVar(Variable* var,
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
bool should_normalize,
Label* if_true,
Label* if_false) {
@@ -665,13 +659,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
Label skip;
if (should_normalize) __ jmp(&skip, Label::kNear);
-
- ForwardBailoutStack* current = forward_bailout_stack_;
- while (current != NULL) {
- PrepareForBailout(current->expr(), state);
- current = current->parent();
- }
-
+ PrepareForBailout(expr, TOS_REG);
if (should_normalize) {
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, NULL);
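
A reduced sketch of the simplification above: instead of draining a stack of pending forward-bailout expressions, the code generator records one bailout for the expression its caller names (all types below are stand-ins):

struct Expression { int ast_id; };

struct ForwardBailoutEntry {
  Expression* expr;
  ForwardBailoutEntry* parent;
};

void PrepareForBailout(Expression* e) { (void)e; /* record a deopt point for e */ }

// Before: walk the whole forward-bailout chain.
void PrepareBeforeSplitOld(ForwardBailoutEntry* stack) {
  for (ForwardBailoutEntry* cur = stack; cur != nullptr; cur = cur->parent)
    PrepareForBailout(cur->expr);
}

// After: the caller passes the one expression that needs the deopt point.
void PrepareBeforeSplitNew(Expression* expr) {
  PrepareForBailout(expr);
}

int main() {
  Expression e{42};
  PrepareBeforeSplitNew(&e);
  return 0;
}

This is why every PrepareForBailoutBeforeSplit call site below changes its first argument from TOS_REG to the expression at hand.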
@@ -1156,10 +1144,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// All extension objects were empty and it is safe to use a global
// load IC call.
- __ movq(rax, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ movq(rax, GlobalObjectOperand());
__ Move(rcx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF || var->is_qml_global())
+ RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
__ call(ic, mode);
@@ -1241,9 +1229,9 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ Move(rcx, var->name());
- __ movq(rax, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ movq(rax, GlobalObjectOperand());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ call(ic, var->is_qml_global() ? RelocInfo::CODE_TARGET : RelocInfo::CODE_TARGET_CONTEXT);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(rax);
break;
}
@@ -1689,14 +1677,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, prop->id());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1835,7 +1823,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Move(rcx, var->name());
- __ movq(rdx, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ movq(rdx, GlobalObjectOperand());
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
@@ -2100,8 +2088,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
- int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ push(Operand(rsp, arg_count * kPointerSize));
@@ -2118,12 +2105,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
FLAG_harmony_scoping ? kStrictMode : strict_mode_flag();
__ Push(Smi::FromInt(strict_mode));
- // Push the qml mode flag
- __ Push(Smi::FromInt(is_qml_mode()));
-
- __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
- ? Runtime::kResolvePossiblyDirectEvalNoLookup
- : Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
}
@@ -2154,27 +2136,10 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForStackValue(args->at(i));
}
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly in
- // generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- Label done;
- Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
- Label slow;
- EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
- // Push the function and resolve eval.
- __ push(rax);
- EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
- __ jmp(&done);
- __ bind(&slow);
- }
-
// Push a copy of the function (found below the arguments) and resolve
// eval.
__ push(Operand(rsp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
- __ bind(&done);
+ EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in rax (function) and
// rdx (receiver). Touch up the stack with the right values.
@@ -2192,8 +2157,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else if (proxy != NULL && proxy->var()->IsUnallocated()) {
// Call to a global variable. Push global object as receiver for the
// call IC lookup.
- __ push(proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
- EmitCallWithIC(expr, proxy->name(), proxy->var()->is_qml_global() ? RelocInfo::CODE_TARGET : RelocInfo::CODE_TARGET_CONTEXT);
+ __ push(GlobalObjectOperand());
+ EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
// Call to a lookup slot (dynamically introduced variable).
Label slow, done;
@@ -2293,7 +2258,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2305,7 +2271,7 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ JumpIfSmi(rax, if_true);
__ jmp(if_false);
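
The long run of hunks that follows applies one mechanical change: each inlined-runtime Emit* helper now takes the CallRuntime expression instead of its bare argument list, so the bailout above can be keyed to the expression. A stand-in of the pattern (none of these are the real V8 types):

struct Expression { int ast_id; };

template <typename T>
struct ZoneList {
  T at(int) const { return T(); }
  int length() const { return 1; }
};

struct CallRuntime : Expression {
  ZoneList<Expression*>* arguments() { return &args_; }
  ZoneList<Expression*> args_;
};

void PrepareForBailoutBeforeSplit(Expression* expr) { (void)expr; }

// Was: void EmitIsSmi(ZoneList<Expression*>* args), with no expression to key on.
void EmitIsSmi(CallRuntime* expr) {
  ZoneList<Expression*>* args = expr->arguments();  // same list as before
  (void)args;
  PrepareForBailoutBeforeSplit(expr);  // now has the expression available
}

int main() {
  CallRuntime call{};
  EmitIsSmi(&call);
  return 0;
}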
@@ -2313,7 +2279,8 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2325,7 +2292,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Condition non_negative_smi = masm()->CheckNonNegativeSmi(rax);
Split(non_negative_smi, if_true, if_false, fall_through);
@@ -2333,7 +2300,8 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2357,14 +2325,15 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, if_false);
__ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2378,14 +2347,15 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2401,7 +2371,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(not_zero, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2409,7 +2379,8 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2485,12 +2456,13 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ jmp(if_true);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2504,14 +2476,15 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2525,14 +2498,15 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2546,7 +2520,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2554,8 +2528,8 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -2578,14 +2552,15 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
Smi::FromInt(StackFrame::CONSTRUCT));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
@@ -2601,14 +2576,15 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
__ pop(rbx);
__ cmpq(rax, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
// ArgumentsAccessStub expects the key in rdx and the formal
@@ -2622,8 +2598,8 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label exit;
// Get the number of formal parameters.
@@ -2645,7 +2621,8 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
Label done, null, function, non_function_constructor;
@@ -2705,7 +2682,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// Conditionally generate a log call.
// Args:
// 0 (literal string): The type of logging (corresponds to the flags).
@@ -2713,6 +2690,7 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
// 1 (string): Format string. Access the string at argument index 2
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
@@ -2725,8 +2703,8 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
@@ -2770,9 +2748,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2782,9 +2761,10 @@ void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2795,7 +2775,8 @@ void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -2813,8 +2794,9 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2824,7 +2806,8 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0)); // Load the object.
@@ -2851,7 +2834,8 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
// Load the argument on the stack and call the stub.
@@ -2863,7 +2847,8 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2881,7 +2866,8 @@ void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
@@ -2928,7 +2914,8 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
@@ -2977,7 +2964,8 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
VisitForStackValue(args->at(0));
@@ -2989,7 +2977,8 @@ void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
VisitForStackValue(args->at(0));
@@ -3001,10 +2990,11 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3012,10 +3002,11 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3023,10 +3014,11 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3034,8 +3026,9 @@ void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
// Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3043,7 +3036,8 @@ void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
int arg_count = args->length() - 2; // 2 ~ receiver and function.
@@ -3062,8 +3056,9 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3073,7 +3068,8 @@ void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3168,7 +3164,8 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3224,7 +3221,8 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
Register right = rax;
@@ -3262,7 +3260,8 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3276,7 +3275,7 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
__ testl(FieldOperand(rax, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ j(zero, if_true);
__ jmp(if_false);
@@ -3284,7 +3283,8 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3300,10 +3300,11 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Label bailout, return_result, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// We will leave the separator on the stack until the end of the function.
VisitForStackValue(args->at(1));
@@ -3642,7 +3643,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// but "delete this" is allowed.
ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
if (var->IsUnallocated()) {
- __ push(var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ push(GlobalObjectOperand());
__ Push(var->name());
__ Push(Smi::FromInt(kNonStrictMode));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
@@ -3682,17 +3683,41 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Unary NOT has no side effects so it's only necessary to visit the
// subexpression. Match the optimizing compiler by not branching.
VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
} else {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ if (context()->IsAccumulatorValue()) {
+ __ LoadRoot(rax, Heap::kTrueValueRootIndex);
+ } else {
+ __ PushRoot(Heap::kTrueValueRootIndex);
+ }
+ __ jmp(&done, Label::kNear);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ if (context()->IsAccumulatorValue()) {
+ __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+ } else {
+ __ PushRoot(Heap::kFalseValueRootIndex);
+ }
+ __ bind(&done);
}
break;
}
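
The test-context arm above relies on a label swap that is easy to see in miniature; VisitForControl below is a print-only stand-in for the real code generator:

#include <cstdio>

// Stand-in: "emit" a jump to whichever target the condition selects.
void VisitForControl(bool e, const char* if_true, const char* if_false) {
  std::printf("jump to %s\n", e ? if_true : if_false);
}

// Unary NOT in a test context: the recursive visit swaps the targets.
void VisitNot(bool sub_expression) {
  VisitForControl(sub_expression, /*if_true=*/"false_label",
                  /*if_false=*/"true_label");
}

int main() {
  VisitNot(true);  // prints "jump to false_label"
  return 0;
}

The value-context arm cannot use the same trick because the optimizing compiler needs the MaterializeTrueId/MaterializeFalseId bailout points, which is exactly what the new materialize_true/materialize_false blocks provide.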
@@ -3940,7 +3965,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
__ Move(rcx, proxy->name());
- __ movq(rax, proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ movq(rax, GlobalObjectOperand());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
@@ -3964,12 +3989,13 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
context()->Plug(rax);
} else {
// This expression cannot throw a reference error at the top level.
- VisitInCurrentContext(expr);
+ VisitInDuplicateContext(expr);
}
}
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
Handle<String> check) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -3979,9 +4005,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
&if_true, &if_false, &fall_through);
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(expr);
+ VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (check->Equals(isolate()->heap()->number_symbol())) {
__ JumpIfSmi(rax, if_true);
@@ -4065,7 +4091,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
break;
@@ -4074,7 +4100,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ testq(rax, rax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
@@ -4126,7 +4152,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ testq(rax, rax);
Split(cc, if_true, if_false, fall_through);
}
@@ -4149,7 +4175,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex;
diff --git a/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc b/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
index 2a95fee..38a8c18 100644
--- a/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
+++ b/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
@@ -196,13 +196,12 @@ bool LCodeGen::GeneratePrologue() {
// Possibly allocate a local context.
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
+ if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
__ push(rdi);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+ FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -2611,7 +2610,7 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result());
- __ movq(result, instr->qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ movq(result, GlobalObjectOperand());
}
@@ -3050,7 +3049,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- RelocInfo::Mode mode = instr->qml_global()?RelocInfo::CODE_TARGET:RelocInfo::CODE_TARGET_CONTEXT;
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ Move(rcx, instr->name());
diff --git a/src/3rdparty/v8/src/x64/lithium-x64.cc b/src/3rdparty/v8/src/x64/lithium-x64.cc
index 9e2731f..598f890 100644
--- a/src/3rdparty/v8/src/x64/lithium-x64.cc
+++ b/src/3rdparty/v8/src/x64/lithium-x64.cc
@@ -745,7 +745,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
instr->MarkAsCall();
instr = AssignPointerMap(instr);
- if (hinstr->HasSideEffects()) {
+ if (hinstr->HasObservableSideEffects()) {
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -757,7 +757,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
// Thus we still need to attach an environment to this call even if
// the call sequence cannot deoptimize eagerly.
bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
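
A stand-in sketch of the predicate swap in both hunks above; the fields are illustrative, the point being that only effects visible to the program now trigger the simulate/environment bookkeeping:

#include <cstdio>

struct HInstruction {
  bool changes_observable_state;  // e.g. a store the program can read back
  bool changes_internal_state;    // internal-only bookkeeping effects

  bool HasSideEffects() const {
    return changes_observable_state || changes_internal_state;
  }
  // The narrower predicate MarkAsCall now consults.
  bool HasObservableSideEffects() const { return changes_observable_state; }
};

// Mirrors the needs_environment computation above.
bool NeedsEnvironment(const HInstruction& h, bool can_deoptimize_eagerly) {
  return can_deoptimize_eagerly || !h.HasObservableSideEffects();
}

int main() {
  HInstruction store{true, false};
  std::printf("%d\n", NeedsEnvironment(store, false));  // 0: lazy deopt suffices
  return 0;
}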
@@ -815,28 +816,6 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
}
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
- }
-}
-
-
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
if (instr->representation().IsTagged()) {
@@ -1123,7 +1102,7 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- return DefineAsRegister(new LGlobalObject(instr->qml_global()));
+ return DefineAsRegister(new LGlobalObject);
}
@@ -1193,7 +1172,7 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallGlobal(instr->qml_global()), rax), instr);
+ return MarkAsCall(DefineFixed(new LCallGlobal, rax), instr);
}
@@ -1239,8 +1218,24 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
}
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
- return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineSameAsFirst(new LBitI(left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
+ LArithmeticT* result = new LArithmeticT(instr->op(), left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
}
@@ -1253,16 +1248,6 @@ LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
}
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
- return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
- return DoBit(Token::BIT_XOR, instr);
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
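
The DoBitAnd/DoBitOr/DoBitXor trio above differed only in the Token::Value they forwarded, which is why one DoBitwise that reads the opcode off the hydrogen instruction can replace them; a reduced stand-in:

namespace Token { enum Value { BIT_AND, BIT_OR, BIT_XOR }; }

struct HBitwise { Token::Value op; };

struct LBitI {
  // The lithium instruction no longer stores its own opcode copy; see
  // the matching LBitI change in lithium-x64.h below.
  const HBitwise* hydrogen;
  Token::Value op() const { return hydrogen->op; }
};

// One builder replaces the three per-opcode ones.
LBitI* DoBitwise(HBitwise* instr) {
  return new LBitI{instr};
}

int main() {
  HBitwise h{Token::BIT_XOR};
  LBitI* l = DoBitwise(&h);
  bool same = (l->op() == Token::BIT_XOR);
  delete l;
  return same ? 0 : 1;
}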
diff --git a/src/3rdparty/v8/src/x64/lithium-x64.h b/src/3rdparty/v8/src/x64/lithium-x64.h
index 8f1a3d8..b542071 100644
--- a/src/3rdparty/v8/src/x64/lithium-x64.h
+++ b/src/3rdparty/v8/src/x64/lithium-x64.h
@@ -793,18 +793,15 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
- LBitI(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
+ LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- Token::Value op() const { return op_; }
+ Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
- Token::Value op_;
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
};
@@ -1303,13 +1300,7 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
public:
- explicit LGlobalObject(bool qml_global) : qml_global_(qml_global) {}
-
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-
- bool qml_global() { return qml_global_; }
- private:
- bool qml_global_;
};
@@ -1399,16 +1390,10 @@ class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- explicit LCallGlobal(bool qml_global) : qml_global_(qml_global) {}
-
virtual void PrintDataTo(StringStream* stream);
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
-
- bool qml_global() { return qml_global_; }
- private:
- bool qml_global_;
};
@@ -2193,7 +2178,6 @@ class LChunkBuilder BASE_EMBEDDED {
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
diff --git a/src/3rdparty/v8/src/x64/macro-assembler-x64.cc b/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
index e3d4634..5b81fa6 100644
--- a/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
+++ b/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
@@ -530,33 +530,12 @@ void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
}
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
- ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
- MaybeObject* result = stub->TryGetCode();
- if (!result->IsFailure()) {
- call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
- RelocInfo::CODE_TARGET);
- }
- return result;
-}
-
-
void MacroAssembler::TailCallStub(CodeStub* stub) {
ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
- MaybeObject* result = stub->TryGetCode();
- if (!result->IsFailure()) {
- jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
- RelocInfo::CODE_TARGET);
- }
- return result;
-}
-
-
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
@@ -610,12 +589,6 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
}
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
- int num_arguments) {
- return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments) {
// If the expected number of arguments of the runtime function is
@@ -637,26 +610,6 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
}
-MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
- int num_arguments) {
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- // Since we did not call the stub, there was no allocation failure.
- // Return some non-failure object.
- return HEAP->undefined_value();
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size);
- return TryCallStub(&ces);
-}
-
-
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
int num_arguments) {
Set(rax, num_arguments);
@@ -686,24 +639,6 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
}
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : argument num_arguments - 1
- // ...
- // -- rsp[8 * num_arguments] : argument 0 (receiver)
- // -----------------------------------
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- return TryJumpToExternalReference(ext, result_size);
-}
-
-
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
@@ -713,15 +648,6 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
}
-MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- return TryTailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
static int Offset(ExternalReference ref0, ExternalReference ref1) {
int64_t offset = (ref0.address() - ref1.address());
  // Check that the offset fits into an int.
@@ -744,8 +670,8 @@ void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
}
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
- ApiFunction* function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
+ int stack_space) {
Label empty_result;
Label prologue;
Label promote_scheduled_exception;
@@ -775,8 +701,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
addl(Operand(base_reg, kLevelOffset), Immediate(1));
// Call the api function!
- movq(rax,
- reinterpret_cast<int64_t>(function->address()),
+ movq(rax, reinterpret_cast<int64_t>(function_address),
RelocInfo::RUNTIME_ENTRY);
call(rax);
@@ -808,11 +733,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- MaybeObject* result = TryTailCallRuntime(Runtime::kPromoteScheduledException,
- 0, 1);
- if (result->IsFailure()) {
- return result;
- }
+ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
bind(&empty_result);
// It was zero; the result is undefined.
@@ -833,8 +754,6 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
call(rax);
movq(rax, prev_limit_reg);
jmp(&leave_exit_frame);
-
- return result;
}
@@ -847,15 +766,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
}
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
- const ExternalReference& ext, int result_size) {
- // Set the entry point and jump to the C entry runtime stub.
- LoadAddress(rbx, ext);
- CEntryStub ces(result_size);
- return TryTailCallStub(&ces);
-}
-
-
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -3112,7 +3022,7 @@ void MacroAssembler::InvokeFunction(Register function,
}
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -3121,7 +3031,7 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
  // Get the function and set up the context.
- Move(rdi, Handle<JSFunction>(function));
+ Move(rdi, function);
movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// We call indirectly through the code field in the function to
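
Every Try* helper deleted from this file had the same shape: return a MaybeObject* that callers must test for a retry-after-GC failure. With handlified code generation the allocation can simply proceed (and GC if needed), so the plumbing disappears. A before/after sketch with simplified stand-ins, not V8's real types:

#include <cstdio>

struct Object { int value; };

// Stand-in for V8's failure-or-object return type.
struct MaybeObject {
  Object* object;
  bool failed;
  bool IsFailure() const { return failed; }
};

// Old shape: an out-of-memory condition travels through the return value,
// and every caller has to check it and pass it upward.
MaybeObject TryCallStub(bool out_of_memory) {
  if (out_of_memory) return MaybeObject{nullptr, true};  // retry-after-GC
  static Object code = {42};
  return MaybeObject{&code, false};
}

// New shape: code generation may allocate (and trigger GC) safely, so
// there is no failure to report and the signature shrinks to void.
void CallStub() { /* emit the call directly */ }

int main() {
  MaybeObject result = TryCallStub(false);
  if (!result.IsFailure()) std::printf("old path: %d\n", result.object->value);
  CallStub();
  return 0;
}
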
diff --git a/src/3rdparty/v8/src/x64/macro-assembler-x64.h b/src/3rdparty/v8/src/x64/macro-assembler-x64.h
index 5caa6cf..3a655ef 100644
--- a/src/3rdparty/v8/src/x64/macro-assembler-x64.h
+++ b/src/3rdparty/v8/src/x64/macro-assembler-x64.h
@@ -357,7 +357,7 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
- void InvokeFunction(JSFunction* function,
+ void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -1107,19 +1107,9 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
- // Call a code stub and return the code object called. Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry
- // after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
-
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
- // Tail call a code stub (jump) and return the code object called. Try to
- // generate the code if necessary. Do not perform a GC but instead return
- // a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
-
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
@@ -1129,19 +1119,9 @@ class MacroAssembler: public Assembler {
// Call a runtime function and save the value of XMM registers.
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
- // Call a runtime function, returning the CodeStub object called.
- // Try to generate the stub code if necessary. Do not perform a GC
- // but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
- int num_arguments);
-
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments);
- // Convenience function: Same as above, but takes the fid instead.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
- int num_arguments);
-
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments);
@@ -1153,38 +1133,26 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
- MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size);
-
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
- MUST_USE_RESULT MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext, int result_size);
- // Jump to a runtime routine.
- MaybeObject* TryJumpToExternalReference(const ExternalReference& ext,
- int result_size);
-
- // Prepares stack to put arguments (aligns and so on).
- // WIN64 calling convention requires to put the pointer to the return value
- // slot into rcx (rcx must be preserverd until TryCallApiFunctionAndReturn).
- // Saves context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
+  // Prepares the stack to receive the arguments (aligns and so on). The
+  // WIN64 calling convention requires the pointer to the return value slot
+  // to be put into rcx (rcx must be preserved until CallApiFunctionAndReturn).
+  // Saves context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
// inside the exit frame (not GCed) accessible via StackSpaceOperand.
void PrepareCallApiFunction(int arg_stack_space);
- // Calls an API function. Allocates HandleScope, extracts
- // returned value from handle and propagates exceptions.
- // Clobbers r14, r15, rbx and caller-save registers. Restores context.
- // On return removes stack_space * kPointerSize (GCed).
- MUST_USE_RESULT MaybeObject* TryCallApiFunctionAndReturn(
- ApiFunction* function, int stack_space);
+  // Calls an API function. Allocates a HandleScope, extracts the returned
+  // value from the handle, and propagates exceptions. Clobbers r14, r15,
+  // rbx, and caller-save registers. Restores context. On return removes
+  // stack_space * kPointerSize (GCed).
+ void CallApiFunctionAndReturn(Address function_address, int stack_space);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
@@ -1418,11 +1386,6 @@ static inline Operand GlobalObjectOperand() {
}
-static inline Operand QmlGlobalObjectOperand() {
- return ContextOperand(rsi, Context::QML_GLOBAL_INDEX);
-}
-
-
// Provides access to exit frame stack space (not GCed).
static inline Operand StackSpaceOperand(int index) {
#ifdef _WIN64
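
In the header, InvokeFunction now takes Handle<JSFunction> where it took a raw JSFunction*. A handle is one extra level of indirection that the garbage collector can patch when it moves an object; a toy model of just that property (V8's real Handle is HandleScope-managed and more involved):

struct JSFunction { int context_offset; };

// Toy handle: a pointer to a slot that the collector rewrites on moves.
template <typename T>
class Handle {
 public:
  explicit Handle(T** location) : location_(location) {}
  T* operator->() const { return *location_; }  // always the current address
 private:
  T** location_;
};

int main() {
  JSFunction old_home = {8};
  JSFunction* slot = &old_home;
  Handle<JSFunction> function(&slot);

  JSFunction new_home = old_home;  // pretend the collector moved the object
  slot = &new_home;                // ...and patched the slot

  return function->context_offset;  // still 8; a raw pointer would be stale
}

This is what lets the handlified call sites allocate, and therefore trigger GC, between receiving the function and using it.
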
diff --git a/src/3rdparty/v8/src/x64/stub-cache-x64.cc b/src/3rdparty/v8/src/x64/stub-cache-x64.cc
index 8af1bf2..867c71a 100644
--- a/src/3rdparty/v8/src/x64/stub-cache-x64.cc
+++ b/src/3rdparty/v8/src/x64/stub-cache-x64.cc
@@ -128,60 +128,6 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateDictionaryNegativeLookup(
- MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register r0,
- Register r1) {
- ASSERT(name->IsSymbol());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
- __ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- __ testb(FieldOperand(r0, Map::kBitFieldOffset),
- Immediate(kInterceptorOrAccessCheckNeededMask));
- __ j(not_zero, miss_label);
-
- // Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, miss_label);
-
- // Load properties array.
- Register properties = r0;
- __ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Check that the properties array is a dictionary.
- __ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, miss_label);
-
- Label done;
- MaybeObject* result = StringDictionaryLookupStub::TryGenerateNegativeLookup(
- masm,
- miss_label,
- &done,
- properties,
- name,
- r1);
- if (result->IsFailure()) return result;
-
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1);
-
- return result;
-}
-
-
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
@@ -259,7 +205,10 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
__ Move(prototype, isolate->global());
@@ -267,8 +216,8 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
prototype);
__ j(not_equal, miss);
// Get the global function with the given index.
- JSFunction* function =
- JSFunction::cast(isolate->global_context()->get(index));
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->global_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -383,11 +332,11 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
- JSObject* holder_obj) {
+ Handle<JSObject> holder_obj) {
__ push(name);
- InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
- __ Move(kScratchRegister, Handle<Object>(interceptor));
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ __ Move(kScratchRegister, interceptor);
__ push(kScratchRegister);
__ push(receiver);
__ push(holder);
@@ -395,11 +344,12 @@ static void PushInterceptorArguments(MacroAssembler* masm,
}
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
ExternalReference ref =
@@ -453,9 +403,9 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// Generates call to API function.
-static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : object passing the type check
@@ -470,29 +420,25 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
// -- rsp[(argc + 4) * 8] : receiver
// -----------------------------------
   // Get the function and set up the context.
- JSFunction* function = optimization.constant_function();
- __ Move(rdi, Handle<JSFunction>(function));
+ Handle<JSFunction> function = optimization.constant_function();
+ __ Move(rdi, function);
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Pass the additional arguments.
__ movq(Operand(rsp, 2 * kPointerSize), rdi);
- Object* call_data = optimization.api_call_info()->data();
- Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (masm->isolate()->heap()->InNewSpace(call_data)) {
- __ Move(rcx, api_call_info_handle);
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data());
+ if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+ __ Move(rcx, api_call_info);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
__ movq(Operand(rsp, 3 * kPointerSize), rbx);
} else {
- __ Move(Operand(rsp, 3 * kPointerSize), Handle<Object>(call_data));
+ __ Move(Operand(rsp, 3 * kPointerSize), call_data);
}
// Prepare arguments.
__ lea(rbx, Operand(rsp, 3 * kPointerSize));
- Object* callback = optimization.api_call_info()->callback();
- Address api_function_address = v8::ToCData<Address>(callback);
- ApiFunction fun(api_function_address);
-
#ifdef _WIN64
   // Win64 uses the first register (rcx) for the returned value.
Register arguments_arg = rdx;
@@ -515,12 +461,11 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
// v8::InvocationCallback's argument.
__ lea(arguments_arg, StackSpaceOperand(0));
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- return masm->TryCallApiFunctionAndReturn(&fun,
- argc + kFastApiCallArguments + 1);
+
+ // Function address is a foreign pointer outside V8's heap.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ __ CallApiFunctionAndReturn(function_address,
+ argc + kFastApiCallArguments + 1);
}
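
The rewritten GenerateFastApiCall drops the ApiFunction wrapper because, as the new comment notes, the callback address is a foreign pointer outside V8's heap: it never moves, so it can be embedded directly. A small sketch of that assumption (the Address alias is a simplification of V8's):

#include <cstdint>

using Address = std::uintptr_t;  // stand-in for V8's raw code address type

int Add(int a, int b) { return a + b; }

int main() {
  // A C/C++ function's address is fixed for the life of the process, so no
  // handle or relocation fix-up is needed, unlike a movable heap object.
  Address function_address = reinterpret_cast<Address>(&Add);
  auto callback = reinterpret_cast<int (*)(int, int)>(function_address);
  return callback(2, 3) == 5 ? 0 : 1;
}
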
@@ -535,16 +480,16 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name_(name),
extra_ic_state_(extra_ic_state) {}
- MaybeObject* Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
+ void Compile(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -552,45 +497,27 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ JumpIfSmi(receiver, miss);
CallOptimization optimization(lookup);
-
if (optimization.is_constant_call()) {
- return CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
+ CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+ holder, lookup, name, optimization, miss);
} else {
- CompileRegular(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- name,
- holder,
- miss);
- return masm->isolate()->heap()->undefined_value(); // Success.
+ CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+ name, holder, miss);
}
}
private:
- MaybeObject* CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
+ void CompileCacheable(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<String> name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
@@ -599,16 +526,14 @@ class CallInterceptorCompiler BASE_EMBEDDED {
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
!lookup->holder()->IsGlobalObject()) {
- depth1 =
- optimization.GetPrototypeDepthOfExpectedType(object,
- interceptor_holder);
+ depth1 = optimization.GetPrototypeDepthOfExpectedType(
+ object, interceptor_holder);
if (depth1 == kInvalidProtoDepth) {
- depth2 =
- optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
- lookup->holder());
+ depth2 = optimization.GetPrototypeDepthOfExpectedType(
+ interceptor_holder, Handle<JSObject>(lookup->holder()));
}
- can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
- (depth2 != kInvalidProtoDepth);
+ can_do_fast_api_call =
+ depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
}
Counters* counters = masm->isolate()->counters();
@@ -624,9 +549,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver,
- interceptor_holder, scratch1,
- scratch2, scratch3, name, depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -639,10 +564,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, scratch3, name, depth2, miss);
+ Handle<JSObject>(lookup->holder()),
+ scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -653,10 +579,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- MaybeObject* result = GenerateFastApiCall(masm,
- optimization,
- arguments_.immediate());
- if (result->IsFailure()) return result;
+ GenerateFastApiCall(masm, optimization, arguments_.immediate());
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -677,33 +600,27 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm, scratch1);
}
-
- return masm->isolate()->heap()->undefined_value(); // Success.
}
void CompileRegular(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
- JSObject* interceptor_holder,
+ Handle<String> name,
+ Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name,
- miss_label);
+ scratch1, scratch2, scratch3,
+ name, miss_label);
FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
- PushInterceptorArguments(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
+ PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
@@ -719,7 +636,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
void LoadWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
- JSObject* holder_obj,
+ Handle<JSObject> holder_obj,
Label* interceptor_succeeded) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -758,10 +675,9 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
- Code* code = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- Handle<Code> ic(code);
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> code =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -867,28 +783,6 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCell(
- MacroAssembler* masm,
- GlobalObject* global,
- String* name,
- Register scratch,
- Label* miss) {
- Object* probe;
- { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
- ASSERT(cell->value()->IsTheHole());
- __ Move(scratch, Handle<Object>(cell));
- __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- masm->isolate()->factory()->the_hole_value());
- __ j(not_equal, miss);
- return cell;
-}
-
-
// Calls GenerateCheckPropertyCell for each global object in the prototype chain
// from object to (but not including) holder.
static void GenerateCheckPropertyCells(MacroAssembler* masm,
@@ -910,35 +804,6 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
}
}
-
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCells(
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- Register scratch,
- Label* miss) {
- JSObject* current = object;
- while (current != holder) {
- if (current->IsGlobalObject()) {
- // Returns a cell or a failure.
- MaybeObject* result = TryGenerateCheckPropertyCell(
- masm,
- GlobalObject::cast(current),
- name,
- scratch,
- miss);
- if (result->IsFailure()) return result;
- }
- ASSERT(current->IsJSObject());
- current = JSObject::cast(current->GetPrototype());
- }
- return NULL;
-}
-
-
#undef __
#define __ ACCESS_MASM((masm()))
@@ -1055,148 +920,6 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-Register StubCompiler::CheckPrototypes(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- String* name,
- int save_at_depth,
- Label* miss) {
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg. On the first
- // iteration, reg is an alias for object_reg, on later iterations,
- // it is an alias for holder_reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), object_reg);
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- JSObject* current = object;
- while (current != holder) {
- depth++;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- JSObject* prototype = JSObject::cast(current->GetPrototype());
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- MaybeObject* lookup_result = heap()->LookupSymbol(name);
- if (lookup_result->IsFailure()) {
- set_failure(Failure::cast(lookup_result));
- return reg;
- } else {
- name = String::cast(lookup_result->ToObjectUnchecked());
- }
- }
- ASSERT(current->property_dictionary()->FindEntry(name) ==
- StringDictionary::kNotFound);
-
- MaybeObject* negative_lookup =
- TryGenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
- if (negative_lookup->IsFailure()) {
- set_failure(Failure::cast(negative_lookup));
- return reg;
- }
-
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // from now the object is in holder_reg
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else if (heap()->InNewSpace(prototype)) {
- // Get the map of the current object.
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ Cmp(scratch1, Handle<Map>(current->map()));
- // Branch on the result of the map check.
- __ j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
-
- // Restore scratch register to be the map of the object.
- // We load the prototype from the map in the scratch register.
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- }
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- reg = holder_reg; // from now the object is in holder_reg
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-
- } else {
- // Check the map of the current object.
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current->map()));
- // Branch on the result of the map check.
- __ j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- __ Move(reg, Handle<JSObject>(prototype));
- }
-
- if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), reg);
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
-
- // Check the holder map.
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
- __ j(not_equal, miss);
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- // Perform security check for access to the global object and return
- // the holder register.
- ASSERT(current == holder);
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // If we've skipped any global objects, it's not enough to verify
- // that their maps haven't changed. We also need to check that the
- // property cell for the property is still empty.
- MaybeObject* result = TryGenerateCheckPropertyCells(masm(),
- object,
- holder,
- name,
- scratch1,
- miss);
- if (result->IsFailure()) set_failure(Failure::cast(result));
-
- // Return the register containing the holder.
- return reg;
-}
-
-
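
For reference, the deleted CheckPrototypes overload above, like its handlified replacement, implements one guard: walk the prototype chain from the receiver to the holder and miss if any map has changed. A hedged sketch of that walk, with toy types standing in for V8's maps and objects:

#include <cstddef>
#include <vector>

struct Map { int id; };
struct Obj { Map* map; Obj* prototype; };

// Returns true when the chain from object to holder still carries the maps
// the stub was compiled against; any mismatch means the stub must miss.
bool CheckPrototypeChain(const Obj* object, const Obj* holder,
                         const std::vector<const Map*>& expected_maps) {
  const Obj* current = object;
  std::size_t depth = 0;
  while (current != holder) {
    if (depth >= expected_maps.size() || current->map != expected_maps[depth])
      return false;  // map changed since compilation: miss
    current = current->prototype;
    ++depth;
  }
  return true;
}

int main() {
  Map m1{1}, m2{2};
  Obj holder{&m2, nullptr};
  Obj receiver{&m1, &holder};
  std::vector<const Map*> expected = {&m1};
  return CheckPrototypeChain(&receiver, &holder, expected) ? 0 : 1;
}
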
void StubCompiler::GenerateLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -1219,25 +942,22 @@ void StubCompiler::GenerateLoadField(Handle<JSObject> object,
}
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
-
- Handle<AccessorInfo> callback_handle(callback);
+ Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+ scratch2, scratch3, name, miss);
   // Insert additional parameters into the stack frame above the return address.
ASSERT(!scratch2.is(reg));
@@ -1245,11 +965,11 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
__ push(receiver); // receiver
__ push(reg); // holder
- if (heap()->InNewSpace(callback_handle->data())) {
- __ Move(scratch1, callback_handle);
+ if (heap()->InNewSpace(callback->data())) {
+ __ Move(scratch1, callback);
__ push(FieldOperand(scratch1, AccessorInfo::kDataOffset)); // data
} else {
- __ Push(Handle<Object>(callback_handle->data()));
+ __ Push(Handle<Object>(callback->data()));
}
__ push(name_reg); // name
// Save a pointer to where we pushed the arguments pointer.
@@ -1268,10 +988,6 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
__ movq(name_arg, rsp);
__ push(scratch2); // Restore return address.
- // Do call through the api.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
-
   // 3-element array for v8::Arguments::values_ and a handle for the name.
const int kStackSpace = 4;
@@ -1288,11 +1004,8 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
// could be used to pass arguments.
__ lea(accessor_info_arg, StackSpaceOperand(0));
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ __ CallApiFunctionAndReturn(getter_address, kStackSpace);
}
@@ -1318,15 +1031,15 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
}
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
+ Handle<String> name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1342,9 +1055,9 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo() &&
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
- compile_followup_inline = true;
+ lookup->GetCallbackObject()->IsAccessorInfo()) {
+ compile_followup_inline =
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
}
}
@@ -1398,10 +1111,10 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
- lookup->holder(),
+ Handle<JSObject>(lookup->holder()),
scratch1,
scratch2,
scratch3,
@@ -1420,9 +1133,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
- ASSERT(callback != NULL);
+ Handle<AccessorInfo> callback(
+ AccessorInfo::cast(lookup->GetCallbackObject()));
ASSERT(callback->getter() != NULL);
// Tail call to runtime.
@@ -1431,7 +1143,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
__ pop(scratch2); // return address
__ push(receiver);
__ push(holder_reg);
- __ Move(holder_reg, Handle<AccessorInfo>(callback));
+ __ Move(holder_reg, callback);
__ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
__ push(holder_reg);
__ push(name_reg);
@@ -1468,9 +1180,9 @@ void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
@@ -1483,7 +1195,7 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
- if (object != holder) {
+ if (!object.is_identical_to(holder)) {
__ JumpIfSmi(rdx, miss);
}
@@ -1492,15 +1204,16 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
}
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Label* miss) {
// Get the value from the cell.
- __ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
+ __ Move(rdi, cell);
__ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (heap()->InNewSpace(function)) {
+ if (heap()->InNewSpace(*function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
@@ -1513,11 +1226,10 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
// Check the shared function info. Make sure it hasn't changed.
__ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
__ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
- __ j(not_equal, miss);
} else {
- __ Cmp(rdi, Handle<JSFunction>(function));
- __ j(not_equal, miss);
+ __ Cmp(rdi, function);
}
+ __ j(not_equal, miss);
}
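
The GenerateLoadFunctionFromCell hunk above also hoists the shared j(not_equal, miss) out of both arms, since each branch ends with a comparison followed by the same miss-on-inequality. The same factoring expressed in plain C++ (a sketch, not V8 code):

// Both arms compute an equality check; the shared failure branch is taken
// once, after the if/else, instead of being duplicated in each arm.
bool CellHoldsFunction(bool in_new_space, bool shared_info_matches,
                       bool function_matches) {
  bool equal;
  if (in_new_space) {
    equal = shared_info_matches;  // compare the shared function info
  } else {
    equal = function_matches;     // compare the function pointer directly
  }
  return equal;  // single "miss if not equal" decision point
}
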
@@ -1530,20 +1242,6 @@ void CallStubCompiler::GenerateMissBranch() {
}
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* CallStubCompiler::TryGenerateMissBranch() {
- MaybeObject* maybe_obj =
- isolate()->stub_cache()->TryComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- Object* obj;
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
- return obj;
-}
-
-
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
int index,
@@ -1602,11 +1300,12 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
@@ -1616,11 +1315,10 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss;
-
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -1629,14 +1327,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
- CheckPrototypes(JSObject::cast(object),
- rdx,
- holder,
- rbx,
- rax,
- rdi,
- name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
if (argc == 0) {
// Noop, return the length.
@@ -1698,8 +1390,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
FixedArray::kHeaderSize - argc * kPointerSize));
__ movq(Operand(rdx, 0), rcx);
- __ RecordWrite(
- rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ RecordWrite(rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
@@ -1779,19 +1471,19 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
}
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
@@ -1801,11 +1493,10 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss, return_undefined, call_builtin;
-
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -1814,9 +1505,8 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
- CheckPrototypes(JSObject::cast(object), rdx,
- holder, rbx,
- rax, rdi, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
// Get the elements array of the object.
__ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1862,20 +1552,19 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
1);
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -1885,7 +1574,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
@@ -1893,23 +1582,21 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
-
if (kind_ == Code::CALL_IC &&
(CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
-
- GenerateNameCheck(Handle<String>(name), &name_miss);
+ GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
rax,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
Register receiver = rbx;
Register index = rdi;
@@ -1922,19 +1609,19 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharCodeAtGenerator char_code_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_code_at_generator.GenerateFast(masm());
+ StringCharCodeAtGenerator generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
StubRuntimeCallHelper call_helper;
- char_code_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -1944,22 +1631,21 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ bind(&miss);
// Restore function name in rcx.
- __ Move(rcx, Handle<String>(name));
+ __ Move(rcx, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -1969,31 +1655,28 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
-
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
-
if (kind_ == Code::CALL_IC &&
(CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
-
- GenerateNameCheck(Handle<String>(name), &name_miss);
+ GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
rax,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
Register receiver = rax;
Register index = rdi;
@@ -2007,45 +1690,43 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_at_generator.GenerateFast(masm());
+ StringCharAtGenerator generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
__ LoadRoot(rax, Heap::kEmptyStringRootIndex);
__ ret((argc + 1) * kPointerSize);
}
-
__ bind(&miss);
// Restore function name in rcx.
- __ Move(rcx, Handle<String>(name));
+ __ Move(rcx, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -2054,25 +1735,23 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
- const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ const int argc = arguments().immediate();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
__ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2087,12 +1766,12 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Convert the smi code to uint16.
__ SmiAndConstant(code, code, Smi::FromInt(0xffff));
- StringCharFromCodeGenerator char_from_code_generator(code, rax);
- char_from_code_generator.GenerateFast(masm());
+ StringCharFromCodeGenerator generator(code, rax);
+ generator.GenerateFast(masm());
__ ret(2 * kPointerSize);
StubRuntimeCallHelper call_helper;
- char_from_code_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
@@ -2105,29 +1784,30 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
__ bind(&miss);
// rcx: function name.
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// TODO(872): implement this.
- return heap()->undefined_value();
+ return Handle<Code>::null();
}
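
CompileMathFloorCall shows the new bail-out convention in its purest form: the handlified custom-call compilers return Handle<Code>::null() instead of heap()->undefined_value() to mean "fall back to the regular compiler". A toy version of that sentinel (a different simplification of Handle than the sketch above: here it is just a nullable wrapper):

template <typename T>
class Handle {
 public:
  Handle() : ptr_(nullptr) {}
  explicit Handle(T* ptr) : ptr_(ptr) {}
  static Handle<T> null() { return Handle<T>(); }
  bool is_null() const { return ptr_ == nullptr; }
 private:
  T* ptr_;
};

struct Code { int id; };

// A custom-call compiler either produces code or signals "not applicable"
// with a null handle; callers test is_null() and fall back.
Handle<Code> CompileCustomCall(bool applicable) {
  if (!applicable) return Handle<Code>::null();  // bail out to regular path
  static Code code = {1};
  return Handle<Code>(&code);
}

int main() {
  Handle<Code> result = CompileCustomCall(false);
  return result.is_null() ? 0 : 1;  // null handle means use the generic stub
}
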
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -2136,28 +1816,25 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
- const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ const int argc = arguments().immediate();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
__ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
-
// Load the (only) argument into rax.
__ movq(rax, Operand(rsp, 1 * kPointerSize));
@@ -2222,34 +1899,32 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
__ bind(&miss);
// rcx: function name.
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
ASSERT(optimization.is_simple_api_call());
// Bail out if object is a global object as we don't want to
// repatch it to global receiver.
- if (object->IsGlobalObject()) return heap()->undefined_value();
- if (cell != NULL) return heap()->undefined_value();
- if (!object->IsJSObject()) return heap()->undefined_value();
+ if (object->IsGlobalObject()) return Handle<Code>::null();
+ if (!cell.is_null()) return Handle<Code>::null();
+ if (!object->IsJSObject()) return Handle<Code>::null();
int depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+ Handle<JSObject>::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return Handle<Code>::null();
Label miss, miss_before_stack_reserved;
-
- GenerateNameCheck(Handle<String>(name), &miss_before_stack_reserved);
+ GenerateNameCheck(name, &miss_before_stack_reserved);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -2267,32 +1942,30 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
__ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, rdi, name, depth, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, depth, &miss);
// Move the return address on top of the stack.
__ movq(rax, Operand(rsp, 3 * kPointerSize));
__ movq(Operand(rsp, 0 * kPointerSize), rax);
- MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
+ GenerateFastApiCall(masm(), optimization, argc);
__ bind(&miss);
__ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
__ bind(&miss_before_stack_reserved);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSFunction> function,
+ Handle<String> name,
CheckType check) {
// ----------- S t a t e -------------
// rcx : function name
@@ -2305,17 +1978,15 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, NULL, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -2331,14 +2002,13 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
Counters* counters = isolate()->counters();
- SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
__ IncrementCounter(counters->call_const(), 1);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, rdi, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax,
+ rdi, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -2349,28 +2019,25 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
+ if (function->IsBuiltin() || function->shared()->strict_mode()) {
// Check that the object is a two-byte string or a symbol.
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
__ j(above_equal, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
- }
- break;
-
- case NUMBER_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case NUMBER_CHECK:
+ if (function->IsBuiltin() || function->shared()->strict_mode()) {
Label fast;
// Check that the object is a smi or a heap number.
__ JumpIfSmi(rdx, &fast);
@@ -2380,18 +2047,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
- }
- break;
- }
-
- case BOOLEAN_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case BOOLEAN_CHECK:
+ if (function->IsBuiltin() || function->shared()->strict_mode()) {
Label fast;
// Check that the object is a boolean.
__ CompareRoot(rdx, Heap::kTrueValueRootIndex);
@@ -2402,14 +2069,15 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
+ } else {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
}
break;
- }
-
- default:
- UNREACHABLE();
}
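// Why each case guards on IsBuiltin()/strict_mode(), sketched: a primitive
// receiver (string, number, boolean) may be passed as-is to a builtin or a
// strict-mode function, but a sloppy-mode user function expects 'this' to
// be a wrapper object, so such calls must miss and take the generic path,
// which boxes the receiver.
//
//   if (!(function->IsBuiltin() || function->shared()->strict_mode())) {
//     __ jmp(&miss);  // receiver needs boxing; bail to the generic stub
//   }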
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
@@ -2420,17 +2088,16 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(function);
+ return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -2441,8 +2108,7 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Label miss;
-
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -2454,17 +2120,8 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
- MaybeObject* result = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- rdx,
- rbx,
- rdi,
- rax,
- &miss);
- if (result->IsFailure()) return result;
+ compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
+ &miss);
// Restore receiver.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -2491,19 +2148,19 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Handle load cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(INTERCEPTOR, name);
+ return GetCode(INTERCEPTOR, name);
}
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -2515,23 +2172,17 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, cell, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
- GenerateNameCheck(Handle<String>(name), &miss);
+ GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
-
GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
GenerateLoadFunctionFromCell(cell, function, &miss);
// Patch the receiver on the stack with the global proxy.
@@ -2560,11 +2211,10 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(counters->call_global_inline_miss(), 1);
- MaybeObject* maybe_result = TryGenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return TryGetCode(NORMAL, name);
+ return GetCode(NORMAL, name);
}
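// GenerateLoadFunctionFromCell, roughly (a sketch; the real helper lives
// earlier in this file and its exact code is elided here): the global
// function is cached in a property cell, so the stub must verify the cell
// still holds the expected function before calling it.
//
//   __ Move(rdi, cell);  // embed the cell handle in the code object
//   __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
//   __ Cmp(rdi, function);   // was the global property reassigned?
//   __ j(not_equal, &miss);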
@@ -2909,29 +2559,24 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
}
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -----------------------------------
Label miss;
-
- MaybeObject* result = GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx,
- rdi, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi, callback,
+ name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return TryGetCode(CALLBACKS, name);
+ return GetCode(CALLBACKS, name);
}
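// Note what the deleted miss.Unuse() was for, sketched: the assembler's
// Label asserts at destruction that it was either bound or explicitly
// unused, so the old failure path had to unuse the label before the early
// return, since &miss was never bound on that path:
//
//   MaybeObject* result = GenerateLoadCallback(...);
//   if (result->IsFailure()) {
//     miss.Unuse();   // label never bound on this path
//     return result;
//   }
//
// With GenerateLoadCallback handlified it no longer reports failure, the
// label is always bound, and the early return disappears.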
@@ -2955,37 +2600,27 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
}
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -----------------------------------
Label miss;
-
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- rax,
- rcx,
- rdx,
- rbx,
- rdi,
- name,
- &miss);
-
+ GenerateLoadInterceptor(receiver, holder, &lookup, rax, rcx, rdx, rbx, rdi,
+ name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return TryGetCode(INTERCEPTOR, name);
+ return GetCode(INTERCEPTOR, name);
}
@@ -3068,39 +2703,32 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label miss;
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_load_callback(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
- MaybeObject* result = GenerateLoadCallback(receiver, holder, rdx, rax, rbx,
- rcx, rdi, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, callback,
+ name, &miss);
__ bind(&miss);
-
__ DecrementCounter(counters->keyed_load_callback(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return TryGetCode(CALLBACKS, name);
+ return GetCode(CALLBACKS, name);
}
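// Keyed stubs are compiled per property name but dispatched on a runtime
// key, so the key must be checked explicitly; the name check now passes
// the handle straight to the macro assembler, sketched:
//
//   __ Cmp(rax, name);       // Cmp(Register, Handle<Object>) overload
//   __ j(not_equal, &miss);  // symbols are unique, so equality is identity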
@@ -3120,7 +2748,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
__ IncrementCounter(counters->keyed_load_constant_function(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
@@ -3134,41 +2762,33 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label miss;
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_load_interceptor(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- rdx,
- rax,
- rcx,
- rbx,
- rdi,
- name,
- &miss);
+ GenerateLoadInterceptor(receiver, holder, &lookup, rdx, rax, rcx, rbx, rdi,
+ name, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_interceptor(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return TryGetCode(INTERCEPTOR, name);
+ return GetCode(INTERCEPTOR, name);
}
@@ -3300,7 +2920,8 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
// Specialized stub for constructing objects from functions which have only
// simple assignments of the form this.x = ...; in their body.
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+ Handle<JSFunction> function) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rdi : constructor
@@ -3343,12 +2964,8 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// rbx: initial map
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ shl(rcx, Immediate(kPointerSizeLog2));
- __ AllocateInNewSpace(rcx,
- rdx,
- rcx,
- no_reg,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(rcx, rdx, rcx, no_reg,
+ &generic_stub_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject; now initialize the fields and add the heap tag.
// rbx: initial map
@@ -3373,7 +2990,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// r9: first in-object property of the JSObject
// Fill the initialized properties with a constant value or a passed argument
// depending on the this.x = ...; assignment in the function.
- SharedFunctionInfo* shared = function->shared();
+ Handle<SharedFunctionInfo> shared(function->shared());
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
// Check if the argument assigned to the property is actually passed.
@@ -3421,10 +3038,8 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
- Code* code =
- isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
- Handle<Code> generic_construct_stub(code);
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(code, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode();
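// Both changes in this function follow the same GC-safety rule, sketched:
// raw SharedFunctionInfo*/Code* pointers can be invalidated if a later
// allocation triggers a moving GC, while handles are updated by the GC.
//
//   Handle<SharedFunctionInfo> shared(function->shared());  // GC-safe
//   Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
//   __ Jump(code, RelocInfo::CODE_TARGET);  // tail-call the generic stub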