Diffstat (limited to 'deps/v8/src')
-rwxr-xr-x deps/v8/src/SConscript | 1
-rw-r--r-- deps/v8/src/api.cc | 87
-rw-r--r-- deps/v8/src/arm/assembler-arm.h | 40
-rw-r--r-- deps/v8/src/arm/builtins-arm.cc | 65
-rw-r--r-- deps/v8/src/arm/codegen-arm.cc | 62
-rw-r--r-- deps/v8/src/arm/codegen-arm.h | 6
-rw-r--r-- deps/v8/src/arm/frames-arm.cc | 13
-rw-r--r-- deps/v8/src/arm/full-codegen-arm.cc | 1037
-rw-r--r-- deps/v8/src/arm/ic-arm.cc | 20
-rw-r--r-- deps/v8/src/arm/simulator-arm.cc | 12
-rw-r--r-- deps/v8/src/arm/stub-cache-arm.cc | 150
-rw-r--r-- deps/v8/src/assembler.cc | 4
-rw-r--r-- deps/v8/src/assembler.h | 59
-rw-r--r-- deps/v8/src/ast.cc | 10
-rw-r--r-- deps/v8/src/ast.h | 254
-rw-r--r-- deps/v8/src/bootstrapper.cc | 63
-rw-r--r-- deps/v8/src/builtins.h | 2
-rw-r--r-- deps/v8/src/codegen.cc | 4
-rw-r--r-- deps/v8/src/compilation-cache.cc | 49
-rwxr-xr-x deps/v8/src/compiler.cc | 47
-rw-r--r-- deps/v8/src/compiler.h | 155
-rw-r--r-- deps/v8/src/contexts.cc | 5
-rw-r--r-- deps/v8/src/conversions.cc | 70
-rw-r--r-- deps/v8/src/cpu-profiler-inl.h | 5
-rw-r--r-- deps/v8/src/cpu-profiler.cc | 66
-rw-r--r-- deps/v8/src/cpu-profiler.h | 18
-rw-r--r-- deps/v8/src/data-flow.cc | 4
-rw-r--r-- deps/v8/src/debug-debugger.js | 36
-rw-r--r-- deps/v8/src/debug.cc | 19
-rw-r--r-- deps/v8/src/debug.h | 1
-rw-r--r-- deps/v8/src/disassembler.cc | 5
-rw-r--r-- deps/v8/src/dtoa.cc | 7
-rw-r--r-- deps/v8/src/fast-dtoa.cc | 347
-rw-r--r-- deps/v8/src/fast-dtoa.h | 47
-rw-r--r-- deps/v8/src/flag-definitions.h | 6
-rw-r--r-- deps/v8/src/frames.cc | 52
-rw-r--r-- deps/v8/src/frames.h | 47
-rw-r--r-- deps/v8/src/full-codegen.cc | 273
-rw-r--r-- deps/v8/src/full-codegen.h | 278
-rw-r--r-- deps/v8/src/global-handles.cc | 8
-rw-r--r-- deps/v8/src/globals.h | 10
-rw-r--r-- deps/v8/src/handles.cc | 37
-rw-r--r-- deps/v8/src/handles.h | 8
-rw-r--r-- deps/v8/src/hashmap.h | 6
-rw-r--r-- deps/v8/src/heap-inl.h | 7
-rw-r--r-- deps/v8/src/heap.cc | 223
-rw-r--r-- deps/v8/src/heap.h | 75
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.cc | 135
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.h | 37
-rw-r--r-- deps/v8/src/ia32/builtins-ia32.cc | 42
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.cc | 102
-rw-r--r-- deps/v8/src/ia32/codegen-ia32.cc | 162
-rw-r--r-- deps/v8/src/ia32/disasm-ia32.cc | 57
-rw-r--r-- deps/v8/src/ia32/frames-ia32.cc | 12
-rw-r--r-- deps/v8/src/ia32/full-codegen-ia32.cc | 1138
-rw-r--r-- deps/v8/src/ia32/ic-ia32.cc | 43
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.cc | 18
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.h | 5
-rw-r--r-- deps/v8/src/ia32/stub-cache-ia32.cc | 266
-rw-r--r-- deps/v8/src/ia32/virtual-frame-ia32.cc | 2
-rw-r--r-- deps/v8/src/ic.cc | 76
-rw-r--r-- deps/v8/src/ic.h | 4
-rw-r--r-- deps/v8/src/list.h | 4
-rw-r--r-- deps/v8/src/liveedit.cc | 10
-rw-r--r-- deps/v8/src/log.cc | 43
-rw-r--r-- deps/v8/src/log.h | 6
-rw-r--r-- deps/v8/src/mark-compact.cc | 40
-rw-r--r-- deps/v8/src/messages.js | 42
-rw-r--r-- deps/v8/src/mips/assembler-mips.h | 17
-rw-r--r-- deps/v8/src/mips/codegen-mips.h | 8
-rw-r--r-- deps/v8/src/mips/frames-mips.cc | 10
-rw-r--r-- deps/v8/src/objects-debug.cc | 23
-rw-r--r-- deps/v8/src/objects-inl.h | 71
-rw-r--r-- deps/v8/src/objects.cc | 287
-rw-r--r-- deps/v8/src/objects.h | 193
-rw-r--r-- deps/v8/src/parser.cc | 167
-rw-r--r-- deps/v8/src/parser.h | 67
-rw-r--r-- deps/v8/src/profile-generator-inl.h | 6
-rw-r--r-- deps/v8/src/profile-generator.cc | 32
-rw-r--r-- deps/v8/src/profile-generator.h | 20
-rw-r--r-- deps/v8/src/regexp-macro-assembler-tracer.cc | 57
-rw-r--r-- deps/v8/src/regexp.js | 8
-rw-r--r-- deps/v8/src/rewriter.cc | 6
-rw-r--r-- deps/v8/src/runtime.cc | 379
-rw-r--r-- deps/v8/src/runtime.h | 2
-rwxr-xr-x deps/v8/src/scanner.cc | 9
-rw-r--r-- deps/v8/src/scanner.h | 12
-rw-r--r-- deps/v8/src/scopeinfo.cc | 16
-rw-r--r-- deps/v8/src/scopeinfo.h | 10
-rw-r--r-- deps/v8/src/scopes.cc | 38
-rw-r--r-- deps/v8/src/scopes.h | 84
-rw-r--r-- deps/v8/src/spaces.cc | 44
-rw-r--r-- deps/v8/src/spaces.h | 102
-rw-r--r-- deps/v8/src/string-search.cc | 40
-rw-r--r-- deps/v8/src/string-search.h | 773
-rw-r--r-- deps/v8/src/stub-cache.cc | 29
-rw-r--r-- deps/v8/src/stub-cache.h | 55
-rw-r--r-- deps/v8/src/utils.h | 20
-rw-r--r-- deps/v8/src/utils.h.orig | 962
-rw-r--r-- deps/v8/src/v8-counters.h | 2
-rw-r--r-- deps/v8/src/variables.cc | 15
-rw-r--r-- deps/v8/src/variables.h | 16
-rw-r--r-- deps/v8/src/version.cc | 6
-rw-r--r-- deps/v8/src/x64/assembler-x64.cc | 54
-rw-r--r-- deps/v8/src/x64/assembler-x64.h | 25
-rw-r--r-- deps/v8/src/x64/builtins-x64.cc | 42
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.cc | 71
-rw-r--r-- deps/v8/src/x64/codegen-x64.cc | 79
-rw-r--r-- deps/v8/src/x64/disasm-x64.cc | 31
-rw-r--r-- deps/v8/src/x64/frames-x64.cc | 14
-rw-r--r-- deps/v8/src/x64/full-codegen-x64.cc | 1088
-rw-r--r-- deps/v8/src/x64/ic-x64.cc | 33
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.cc | 747
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.h | 804
-rw-r--r-- deps/v8/src/x64/stub-cache-x64.cc | 151
-rw-r--r-- deps/v8/src/x64/virtual-frame-x64.cc | 2
116 files changed, 7077 insertions(+), 5694 deletions(-)
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index e2b01aaf9..05ccae4a9 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -100,6 +100,7 @@ SOURCES = {
serialize.cc
snapshot-common.cc
spaces.cc
+ string-search.cc
string-stream.cc
stub-cache.cc
token.cc
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index e09d4c954..b7d85c68a 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -134,27 +134,27 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
heap_stats.new_space_size = &new_space_size;
int new_space_capacity;
heap_stats.new_space_capacity = &new_space_capacity;
- int old_pointer_space_size;
+ intptr_t old_pointer_space_size;
heap_stats.old_pointer_space_size = &old_pointer_space_size;
- int old_pointer_space_capacity;
+ intptr_t old_pointer_space_capacity;
heap_stats.old_pointer_space_capacity = &old_pointer_space_capacity;
- int old_data_space_size;
+ intptr_t old_data_space_size;
heap_stats.old_data_space_size = &old_data_space_size;
- int old_data_space_capacity;
+ intptr_t old_data_space_capacity;
heap_stats.old_data_space_capacity = &old_data_space_capacity;
- int code_space_size;
+ intptr_t code_space_size;
heap_stats.code_space_size = &code_space_size;
- int code_space_capacity;
+ intptr_t code_space_capacity;
heap_stats.code_space_capacity = &code_space_capacity;
- int map_space_size;
+ intptr_t map_space_size;
heap_stats.map_space_size = &map_space_size;
- int map_space_capacity;
+ intptr_t map_space_capacity;
heap_stats.map_space_capacity = &map_space_capacity;
- int cell_space_size;
+ intptr_t cell_space_size;
heap_stats.cell_space_size = &cell_space_size;
- int cell_space_capacity;
+ intptr_t cell_space_capacity;
heap_stats.cell_space_capacity = &cell_space_capacity;
- int lo_space_size;
+ intptr_t lo_space_size;
heap_stats.lo_space_size = &lo_space_size;
int global_handle_count;
heap_stats.global_handle_count = &global_handle_count;
@@ -166,9 +166,9 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
heap_stats.near_death_global_handle_count = &near_death_global_handle_count;
int destroyed_global_handle_count;
heap_stats.destroyed_global_handle_count = &destroyed_global_handle_count;
- int memory_allocator_size;
+ intptr_t memory_allocator_size;
heap_stats.memory_allocator_size = &memory_allocator_size;
- int memory_allocator_capacity;
+ intptr_t memory_allocator_capacity;
heap_stats.memory_allocator_capacity = &memory_allocator_capacity;
int objects_per_type[LAST_TYPE + 1] = {0};
heap_stats.objects_per_type = objects_per_type;
@@ -767,6 +767,12 @@ int TypeSwitch::match(v8::Handle<Value> value) {
}
+#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
+ i::Handle<i::Object> proxy = FromCData(cdata); \
+ (obj)->setter(*proxy); \
+ } while (false)
+
+
void FunctionTemplate::SetCallHandler(InvocationCallback callback,
v8::Handle<Value> data) {
if (IsDeadCheck("v8::FunctionTemplate::SetCallHandler()")) return;
@@ -776,7 +782,7 @@ void FunctionTemplate::SetCallHandler(InvocationCallback callback,
i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
- obj->set_callback(*FromCData(callback));
+ SET_FIELD_WRAPPED(obj, set_callback, callback);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
Utils::OpenHandle(this)->set_call_code(*obj);
@@ -792,8 +798,8 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::PropertyAttribute attributes) {
i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
ASSERT(getter != NULL);
- obj->set_getter(*FromCData(getter));
- obj->set_setter(*FromCData(setter));
+ SET_FIELD_WRAPPED(obj, set_getter, getter);
+ SET_FIELD_WRAPPED(obj, set_setter, setter);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
obj->set_name(*Utils::OpenHandle(*name));
@@ -877,11 +883,13 @@ void FunctionTemplate::SetNamedInstancePropertyHandler(
i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj);
- if (getter != 0) obj->set_getter(*FromCData(getter));
- if (setter != 0) obj->set_setter(*FromCData(setter));
- if (query != 0) obj->set_query(*FromCData(query));
- if (remover != 0) obj->set_deleter(*FromCData(remover));
- if (enumerator != 0) obj->set_enumerator(*FromCData(enumerator));
+
+ if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
+ if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
+ if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
+ if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
+ if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
+
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
Utils::OpenHandle(this)->set_named_property_handler(*obj);
@@ -905,11 +913,13 @@ void FunctionTemplate::SetIndexedInstancePropertyHandler(
i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj);
- if (getter != 0) obj->set_getter(*FromCData(getter));
- if (setter != 0) obj->set_setter(*FromCData(setter));
- if (query != 0) obj->set_query(*FromCData(query));
- if (remover != 0) obj->set_deleter(*FromCData(remover));
- if (enumerator != 0) obj->set_enumerator(*FromCData(enumerator));
+
+ if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
+ if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
+ if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
+ if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
+ if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
+
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
Utils::OpenHandle(this)->set_indexed_property_handler(*obj);
@@ -928,7 +938,7 @@ void FunctionTemplate::SetInstanceCallAsFunctionHandler(
i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
- obj->set_callback(*FromCData(callback));
+ SET_FIELD_WRAPPED(obj, set_callback, callback);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
Utils::OpenHandle(this)->set_instance_call_handler(*obj);
@@ -1043,8 +1053,10 @@ void ObjectTemplate::SetAccessCheckCallbacks(
i::Factory::NewStruct(i::ACCESS_CHECK_INFO_TYPE);
i::Handle<i::AccessCheckInfo> info =
i::Handle<i::AccessCheckInfo>::cast(struct_info);
- info->set_named_callback(*FromCData(named_callback));
- info->set_indexed_callback(*FromCData(indexed_callback));
+
+ SET_FIELD_WRAPPED(info, set_named_callback, named_callback);
+ SET_FIELD_WRAPPED(info, set_indexed_callback, indexed_callback);
+
if (data.IsEmpty()) data = v8::Undefined();
info->set_data(*Utils::OpenHandle(*data));
@@ -2646,8 +2658,9 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
return;
}
i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
- self->set_map(
- *i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map())));
+ i::Handle<i::Map> slow_map =
+ i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map()));
+ self->set_map(*slow_map);
self->set_elements(*pixels);
}
@@ -2701,8 +2714,9 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
}
i::Handle<i::ExternalArray> array =
i::Factory::NewExternalArray(length, array_type, data);
- self->set_map(
- *i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map())));
+ i::Handle<i::Map> slow_map =
+ i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map()));
+ self->set_map(*slow_map);
self->set_elements(*array);
}
@@ -4251,6 +4265,11 @@ void Debug::DebugBreak() {
}
+void Debug::CancelDebugBreak() {
+ i::StackGuard::Continue(i::DEBUGBREAK);
+}
+
+
void Debug::DebugBreakForCommand(ClientData* data) {
if (!i::V8::IsRunning()) return;
i::Debugger::EnqueueDebugCommand(data);
@@ -4433,7 +4452,7 @@ double CpuProfileNode::GetSelfSamplesCount() const {
unsigned CpuProfileNode::GetCallUid() const {
IsDeadCheck("v8::CpuProfileNode::GetCallUid");
- return reinterpret_cast<const i::ProfileNode*>(this)->entry()->call_uid();
+ return reinterpret_cast<const i::ProfileNode*>(this)->entry()->GetCallUid();
}
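Note on the SET_FIELD_WRAPPED change above: FromCData() allocates a heap object to box the raw C function pointer, so the old pattern obj->setter(*FromCData(cdata)) dereferenced a handle inside an expression whose evaluation could trigger a GC. Binding the wrapper to a named handle first forces the allocation to complete before the store begins. A minimal before/after sketch in C++ (receiver and setter are illustrative, not quoted from the patch):

  // Unsafe: the allocation inside FromCData() may move objects while
  // the rest of the call expression is still being evaluated.
  obj->set_callback(*FromCData(callback));

  // Safe: pin the wrapped pointer in a handle, then store it. This is
  // what SET_FIELD_WRAPPED(obj, set_callback, callback) expands to.
  i::Handle<i::Object> proxy = FromCData(callback);
  obj->set_callback(*proxy);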
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index be9aa92f1..1c4fd6094 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -69,13 +69,13 @@ namespace internal {
//
// Core register
struct Register {
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(Register reg) const { return code_ == reg.code_; }
- int code() const {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(Register reg) const { return code_ == reg.code_; }
+ int code() const {
ASSERT(is_valid());
return code_;
}
- int bit() const {
+ int bit() const {
ASSERT(is_valid());
return 1 << code_;
}
@@ -110,17 +110,17 @@ const Register pc = { 15 };
// Single word VFP register.
struct SwVfpRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 32; }
- bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
- int code() const {
+ bool is_valid() const { return 0 <= code_ && code_ < 32; }
+ bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
+ int code() const {
ASSERT(is_valid());
return code_;
}
- int bit() const {
+ int bit() const {
ASSERT(is_valid());
return 1 << code_;
}
- void split_code(int* vm, int* m) const {
+ void split_code(int* vm, int* m) const {
ASSERT(is_valid());
*m = code_ & 0x1;
*vm = code_ >> 1;
@@ -133,31 +133,31 @@ struct SwVfpRegister {
// Double word VFP register.
struct DwVfpRegister {
// Supporting d0 to d15, can be later extended to d31.
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
- SwVfpRegister low() const {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+ SwVfpRegister low() const {
SwVfpRegister reg;
reg.code_ = code_ * 2;
ASSERT(reg.is_valid());
return reg;
}
- SwVfpRegister high() const {
+ SwVfpRegister high() const {
SwVfpRegister reg;
reg.code_ = (code_ * 2) + 1;
ASSERT(reg.is_valid());
return reg;
}
- int code() const {
+ int code() const {
ASSERT(is_valid());
return code_;
}
- int bit() const {
+ int bit() const {
ASSERT(is_valid());
return 1 << code_;
}
- void split_code(int* vm, int* m) const {
+ void split_code(int* vm, int* m) const {
ASSERT(is_valid());
*m = (code_ & 0x10) >> 4;
*vm = code_ & 0x0F;
@@ -222,13 +222,13 @@ const DwVfpRegister d15 = { 15 };
// Coprocessor register
struct CRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(CRegister creg) const { return code_ == creg.code_; }
- int code() const {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(CRegister creg) const { return code_ == creg.code_; }
+ int code() const {
ASSERT(is_valid());
return code_;
}
- int bit() const {
+ int bit() const {
ASSERT(is_valid());
return 1 << code_;
}
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 8b2155816..cf2f42624 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -521,7 +521,11 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool count_constructions) {
+ // Should never count constructions for api objects.
+ ASSERT(!is_api_function || !count_constructions);
+
// Enter a construct frame.
__ EnterConstructFrame();
@@ -530,9 +534,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(r0); // Smi-tagged arguments count.
__ push(r1); // Constructor function.
- // Use r7 for holding undefined which is used in several places below.
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-
// Try to allocate the object without transitioning into C code. If any of the
// preconditions is not met, the code bails out to the runtime call.
Label rt_call, allocated;
@@ -549,7 +550,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
- // r7: undefined value
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &rt_call);
@@ -561,14 +561,35 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// instance type would be JS_FUNCTION_TYPE.
// r1: constructor function
// r2: initial map
- // r7: undefined value
__ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
__ b(eq, &rt_call);
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
+ __ ldrb(r4, constructor_count);
+ __ sub(r4, r4, Operand(1), SetCC);
+ __ strb(r4, constructor_count);
+ __ b(ne, &allocate);
+
+ __ Push(r1, r2);
+
+ __ push(r1); // constructor
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ pop(r2);
+ __ pop(r1);
+
+ __ bind(&allocate);
+ }
+
// Now allocate the JSObject on the heap.
// r1: constructor function
// r2: initial map
- // r7: undefined value
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
@@ -578,7 +599,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r2: initial map
// r3: object size
// r4: JSObject (not tagged)
- // r7: undefined value
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
@@ -588,16 +608,21 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- // Fill all the in-object properties with undefined.
+ // Fill all the in-object properties with the appropriate filler.
// r1: constructor function
// r2: initial map
// r3: object size (in words)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
- // r7: undefined value
__ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
{ Label loop, entry;
+ if (count_constructions) {
+ // To allow for truncation.
+ __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
+ } else {
+ __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+ }
__ b(&entry);
__ bind(&loop);
__ str(r7, MemOperand(r5, kPointerSize, PostIndex));
@@ -617,7 +642,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
// r4: JSObject
// r5: start of next object (not tagged)
- // r7: undefined value
__ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
// The field instance sizes contains both pre-allocated property fields and
// in-object properties.
@@ -637,7 +661,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array
// r4: JSObject
// r5: start of next object
- // r7: undefined value
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
__ AllocateInNewSpace(
r0,
@@ -652,7 +675,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array
// r4: JSObject
// r5: FixedArray (not tagged)
- // r7: undefined value
__ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
__ mov(r2, r5);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
@@ -667,10 +689,16 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array
// r4: JSObject
// r5: FixedArray (not tagged)
- // r7: undefined
__ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
{ Label loop, entry;
+ if (count_constructions) {
+ __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ cmp(r7, r8);
+ __ Assert(eq, "Undefined value not loaded.");
+ }
__ b(&entry);
__ bind(&loop);
__ str(r7, MemOperand(r2, kPointerSize, PostIndex));
@@ -822,13 +850,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true);
+}
+
+
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, false);
}
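The count_constructions path added above implements in-object slack tracking: a countdown byte in the SharedFunctionInfo lets the first few instances be allocated with generously sized maps, with unused in-object slots padded by a one-pointer filler (so they can later be truncated), and once the counter reaches zero Runtime::kFinalizeInstanceSize shrinks the instance size for good. A C++-flavored pseudocode sketch of what the stub does; names follow the offsets and runtime calls in the assembly, and the details are approximate:

  JSObject* ConstructWithCountdown(JSFunction* constructor) {
    SharedFunctionInfo* shared = constructor->shared();
    int count = shared->construction_count() - 1;
    shared->set_construction_count(count);
    if (count == 0) {
      // Runs once: shrinks the instance size and replaces this
      // countdown stub with the generic construct stub.
      FinalizeInstanceSize(constructor);
    }
    JSObject* obj = AllocateRaw(constructor->initial_map());
    // Pad in-object slots with a filler instead of undefined so the
    // unused tail can be chopped off when the map is finalized.
    FillInObjectProperties(obj, Heap::OnePointerFillerMap());
    return obj;
  }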
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 6ba166f44..d273e7598 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -246,7 +246,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
frame_->AssertIsSpilled();
for (int i = 0; i < scope()->num_parameters(); i++) {
Variable* par = scope()->parameter(i);
- Slot* slot = par->slot();
+ Slot* slot = par->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
ASSERT(!scope()->is_global_scope()); // No params in global scope.
__ ldr(r1, frame_->ParameterAt(i));
@@ -270,7 +270,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Initialize ThisFunction reference if present.
if (scope()->is_function_scope() && scope()->function() != NULL) {
frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
- StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
+ StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
}
// Initialize the function return target after the locals are set
@@ -608,24 +608,24 @@ void CodeGenerator::StoreArgumentsObject(bool initial) {
frame_->EmitPush(r0);
}
- Variable* arguments = scope()->arguments()->var();
- Variable* shadow = scope()->arguments_shadow()->var();
- ASSERT(arguments != NULL && arguments->slot() != NULL);
- ASSERT(shadow != NULL && shadow->slot() != NULL);
+ Variable* arguments = scope()->arguments();
+ Variable* shadow = scope()->arguments_shadow();
+ ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
+ ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
JumpTarget done;
if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
// We have to skip storing into the arguments slot if it has
// already been written to. This can happen if a function
// has a local variable named 'arguments'.
- LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
Register arguments = frame_->PopToRegister();
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(arguments, ip);
done.Branch(ne);
}
- StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- StoreToSlot(shadow->slot(), NOT_CONST_INIT);
+ StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
}
@@ -641,10 +641,10 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
Property property(&global, &key, RelocInfo::kNoPosition);
Reference ref(this, &property);
ref.GetValue();
- } else if (variable != NULL && variable->slot() != NULL) {
+ } else if (variable != NULL && variable->AsSlot() != NULL) {
// For a variable that rewrites to a slot, we signal it is the immediate
// subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+ LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
} else {
// Anything else can be handled normally.
Load(expr);
@@ -695,7 +695,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
LoadGlobal();
ref->set_type(Reference::NAMED);
} else {
- ASSERT(var->slot() != NULL);
+ ASSERT(var->AsSlot() != NULL);
ref->set_type(Reference::SLOT);
}
} else {
@@ -1718,7 +1718,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
Load(receiver);
- LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
// At this point the top two stack elements are probably in registers
// since they were just loaded. Ensure they are in regs and get the
@@ -1950,7 +1950,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
// If it was not possible to allocate the variable at compile time,
// we need to "declare" it at runtime to make sure it actually
@@ -2480,8 +2480,8 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// the bottom check of the loop condition.
TypeInfoCodeGenState type_info_scope(this,
node->is_fast_smi_loop() ?
- node->loop_variable()->slot() :
- NULL,
+ node->loop_variable()->AsSlot() :
+ NULL,
TypeInfo::Smi());
// If there is no update statement, label the top of the loop with the
@@ -2794,8 +2794,8 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
// Store the caught exception in the catch variable.
Variable* catch_var = node->catch_var()->var();
- ASSERT(catch_var != NULL && catch_var->slot() != NULL);
- StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
+ ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
+ StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
// Remove the exception from the stack.
frame_->Drop();
@@ -3420,7 +3420,7 @@ void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
frame_->SpillAll();
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
if (potential_slot != NULL) {
// Generate fast case for locals that rewrite to slots.
@@ -3449,7 +3449,7 @@ void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
// variables. Then load the argument from the arguments
// object using keyed load.
__ ldr(r0,
- ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+ ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
r1,
r2,
slow));
@@ -3735,7 +3735,7 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
Comment cmnt(masm(), "[ Variable Assignment");
Variable* var = node->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
ASSERT(slot != NULL);
// Evaluate the right-hand side.
@@ -4136,14 +4136,14 @@ void CodeGenerator::VisitCall(Call* node) {
// in generated code. If we succeed, there is no need to perform a
// context lookup in the runtime system.
JumpTarget done;
- if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- ASSERT(var->slot()->type() == Slot::LOOKUP);
+ if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
JumpTarget slow;
// Prepare the stack for the call to
// ResolvePossiblyDirectEvalNoLookup by pushing the loaded
// function, the first argument to the eval call and the
// receiver.
- LoadFromGlobalSlotCheckExtensions(var->slot(),
+ LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
NOT_INSIDE_TYPEOF,
&slow);
frame_->EmitPush(r0);
@@ -4225,8 +4225,8 @@ void CodeGenerator::VisitCall(Call* node) {
__ ldr(cp, frame_->Context());
frame_->EmitPush(r0);
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
+ } else if (var != NULL && var->AsSlot() != NULL &&
+ var->AsSlot()->type() == Slot::LOOKUP) {
// ----------------------------------
// JavaScript examples:
//
@@ -4244,7 +4244,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Generate fast case for loading functions from slots that
// correspond to local/global variables or arguments unless they
// are shadowed by eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(var->slot(),
+ EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
NOT_INSIDE_TYPEOF,
&slow,
&done);
@@ -5928,7 +5928,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0);
} else if (variable != NULL) {
- Slot* slot = variable->slot();
+ Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
frame_->EmitPush(Operand(variable->name()));
@@ -6062,7 +6062,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
bool is_const = (var != NULL && var->mode() == Variable::CONST);
bool is_slot = (var != NULL && var->mode() == Variable::VAR);
- if (!is_const && is_slot && type_info(var->slot()).IsSmi()) {
+ if (!is_const && is_slot && type_info(var->AsSlot()).IsSmi()) {
// The type info declares that this variable is always a Smi. That
// means it is a Smi both before and after the increment/decrement.
// Lets make use of that to make a very minimal count.
@@ -7207,7 +7207,7 @@ void Reference::GetValue() {
switch (type_) {
case SLOT: {
Comment cmnt(masm, "[ Load from Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
ASSERT(slot != NULL);
DupIfPersist();
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
@@ -7251,7 +7251,7 @@ void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
switch (type_) {
case SLOT: {
Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
cgen_->StoreToSlot(slot, init_state);
set_unloaded();
break;
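The mechanical slot() -> AsSlot() renames throughout this file track a Variable API change (see variables.h/variables.cc and ast.h in the diffstat): the slot is no longer exposed through a dedicated accessor but recovered by downcasting the variable's rewrite expression. A plausible shape for the new accessor, stated as an assumption rather than a quote from the patch:

  // Assumed definition: ask the rewrite expression whether it is a
  // Slot; returns NULL for variables that do not rewrite to a slot.
  Slot* Variable::AsSlot() const {
    return rewrite_ == NULL ? NULL : rewrite_->AsSlot();
  }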
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index d4c352260..1483c0b54 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -307,9 +307,9 @@ class CodeGenerator: public AstVisitor {
int NumberOfSlot(Slot* slot);
// State
- bool has_cc() const { return cc_reg_ != al; }
- JumpTarget* true_target() const { return state_->true_target(); }
- JumpTarget* false_target() const { return state_->false_target(); }
+ bool has_cc() const { return cc_reg_ != al; }
+ JumpTarget* true_target() const { return state_->true_target(); }
+ JumpTarget* false_target() const { return state_->false_target(); }
// Track loop nesting level.
int loop_nesting() const { return loop_nesting_; }
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index 47434392d..b0c099030 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -37,17 +37,8 @@ namespace v8 {
namespace internal {
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
- if (fp == 0) return NONE;
- // Compute frame type and stack pointer.
- Address sp = fp + ExitFrameConstants::kSPOffset;
-
- // Fill in the state.
- state->sp = sp;
- state->fp = fp;
- state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
- ASSERT(*state->pc_address != NULL);
- return EXIT;
+Address ExitFrame::ComputeStackPointer(Address fp) {
+ return fp + ExitFrameConstants::kSPOffset;
}
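Only the platform-specific ingredient (deriving sp from fp) stays in this file; the rest of the deleted GetStateForFramePointer logic presumably moves to the shared frames.cc (52 lines changed in the diffstat). Given this hook, the shared implementation would look roughly like the code removed here:

  // Presumed shared implementation in frames.cc, built on the
  // per-platform ComputeStackPointer() hook kept above.
  StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp,
                                                      State* state) {
    if (fp == 0) return NONE;
    Address sp = ComputeStackPointer(fp);
    state->sp = sp;
    state->fp = fp;
    state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
    return EXIT;
  }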
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index c776d67cc..9fc0c096b 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -100,7 +100,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
- Slot* slot = scope()->parameter(i)->slot();
+ Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -118,7 +118,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
}
- Variable* arguments = scope()->arguments()->AsVariable();
+ Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
@@ -143,9 +143,8 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ CallStub(&stub);
// Duplicate the value; move-to-slot operation might clobber registers.
__ mov(r3, r0);
- Move(arguments->slot(), r0, r1, r2);
- Slot* dot_arguments_slot =
- scope()->arguments_shadow()->AsVariable()->slot();
+ Move(arguments->AsSlot(), r0, r1, r2);
+ Slot* dot_arguments_slot = scope()->arguments_shadow()->AsSlot();
Move(dot_arguments_slot, r3, r1, r2);
}
@@ -253,205 +252,202 @@ FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
}
-void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
+void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
+}
- case Expression::kEffect:
- // Nothing to do.
- break;
- case Expression::kValue:
- // Move value into place.
- switch (location_) {
- case kAccumulator:
- if (!reg.is(result_register())) __ mov(result_register(), reg);
- break;
- case kStack:
- __ push(reg);
- break;
- }
- break;
+void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
+ codegen()->Move(result_register(), slot);
+}
- case Expression::kTest:
- // For simplicity we always test the accumulator register.
- if (!reg.is(result_register())) __ mov(result_register(), reg);
- DoTest(true_label_, false_label_, fall_through_);
- break;
- }
+
+void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
+ codegen()->Move(result_register(), slot);
+ __ push(result_register());
}
-void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Nothing to do.
- break;
- case Expression::kValue:
- case Expression::kTest:
- // On ARM we have to move the value into a register to do anything
- // with it.
- Move(result_register(), slot);
- Apply(context, result_register());
- break;
- }
+void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
+ // For simplicity we always test the accumulator register.
+ codegen()->Move(result_register(), slot);
+ codegen()->DoTest(true_label_, false_label_, fall_through_);
}
-void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- break;
- // Nothing to do.
- case Expression::kValue:
- case Expression::kTest:
- // On ARM we have to move the value into a register to do anything
- // with it.
- __ mov(result_register(), Operand(lit->handle()));
- Apply(context, result_register());
- break;
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ if (index == Heap::kUndefinedValueRootIndex ||
+ index == Heap::kNullValueRootIndex ||
+ index == Heap::kFalseValueRootIndex) {
+ __ b(false_label_);
+ } else if (index == Heap::kTrueValueRootIndex) {
+ __ b(true_label_);
+ } else {
+ __ LoadRoot(result_register(), index);
+ codegen()->DoTest(true_label_, false_label_, fall_through_);
}
}
-void FullCodeGenerator::ApplyTOS(Expression::Context context) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
- case Expression::kEffect:
- __ Drop(1);
- break;
- case Expression::kValue:
- switch (location_) {
- case kAccumulator:
- __ pop(result_register());
- break;
- case kStack:
- break;
- }
- break;
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ __ mov(result_register(), Operand(lit));
+}
- case Expression::kTest:
- __ pop(result_register());
- DoTest(true_label_, false_label_, fall_through_);
- break;
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ // Immediates can be pushed directly.
+ __ mov(result_register(), Operand(lit));
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ __ b(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ __ b(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ __ b(false_label_);
+ } else {
+ __ b(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ __ b(false_label_);
+ } else {
+ __ b(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ mov(result_register(), Operand(lit));
+ codegen()->DoTest(true_label_, false_label_, fall_through_);
}
}
-void FullCodeGenerator::DropAndApply(int count,
- Expression::Context context,
- Register reg) {
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
ASSERT(count > 0);
- ASSERT(!reg.is(sp));
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
+ __ Drop(count);
+}
- case Expression::kEffect:
- __ Drop(count);
- break;
- case Expression::kValue:
- switch (location_) {
- case kAccumulator:
- __ Drop(count);
- if (!reg.is(result_register())) __ mov(result_register(), reg);
- break;
- case kStack:
- if (count > 1) __ Drop(count - 1);
- __ str(reg, MemOperand(sp));
- break;
- }
- break;
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+ __ Move(result_register(), reg);
+}
- case Expression::kTest:
- __ Drop(count);
- if (!reg.is(result_register())) __ mov(result_register(), reg);
- DoTest(true_label_, false_label_, fall_through_);
- break;
- }
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ if (count > 1) __ Drop(count - 1);
+ __ str(reg, MemOperand(sp, 0));
}
-void FullCodeGenerator::Apply(Expression::Context context,
- Label* materialize_true,
- Label* materialize_false) {
- switch (context) {
- case Expression::kUninitialized:
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ __ Move(result_register(), reg);
+ codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
- case Expression::kEffect:
- ASSERT_EQ(materialize_true, materialize_false);
- __ bind(materialize_true);
- break;
- case Expression::kValue: {
- Label done;
- switch (location_) {
- case kAccumulator:
- __ bind(materialize_true);
- __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(materialize_false);
- __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- break;
- case kStack:
- __ bind(materialize_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(&done);
- __ bind(materialize_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- break;
- }
- __ bind(&done);
- break;
- }
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT_EQ(materialize_true, materialize_false);
+ __ bind(materialize_true);
+}
- case Expression::kTest:
- break;
- }
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
-// Convert constant control flow (true or false) to the result expected for
-// a given expression context.
-void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- break;
- case Expression::kValue: {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- switch (location_) {
- case kAccumulator:
- __ LoadRoot(result_register(), value_root_index);
- break;
- case kStack:
- __ LoadRoot(ip, value_root_index);
- __ push(ip);
- break;
- }
- break;
- }
- case Expression::kTest:
- if (flag) {
- if (true_label_ != fall_through_) __ b(true_label_);
- } else {
- if (false_label_ != fall_through_) __ b(false_label_);
- }
- break;
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ push(ip);
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ push(ip);
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_false == false_label_);
+ ASSERT(materialize_true == true_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(result_register(), value_root_index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(ip, value_root_index);
+ __ push(ip);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ if (flag) {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ b(false_label_);
}
}
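The hunk above replaces the Expression::Context enum switches (Apply, ApplyTOS, DropAndApply) with a family of context objects: each expression context (effect, accumulator value, stack value, test) knows how to "plug" a freshly computed value into itself, so the per-context switch disappears from every call site. A minimal sketch of the hierarchy these overloads imply; the exact base-class layout in full-codegen.h is assumed:

  class ExpressionContext {
   public:
    explicit ExpressionContext(FullCodeGenerator* codegen)
        : codegen_(codegen) {}
    virtual ~ExpressionContext() {}
    // Emit code that consumes a value of the given kind in a way
    // appropriate for this context: drop it, keep it in the
    // accumulator, push it, or branch on it.
    virtual void Plug(Register reg) const = 0;
    virtual void Plug(Slot* slot) const = 0;
    virtual void Plug(Handle<Object> lit) const = 0;
    virtual void Plug(bool flag) const = 0;
   protected:
    FullCodeGenerator* codegen_;
  };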
@@ -533,7 +529,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
FunctionLiteral* function) {
Comment cmnt(masm_, "[ Declaration");
ASSERT(variable != NULL); // Must have been resolved.
- Slot* slot = variable->slot();
+ Slot* slot = variable->AsSlot();
Property* prop = variable->AsProperty();
if (slot != NULL) {
@@ -544,7 +540,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, MemOperand(fp, SlotOffset(slot)));
} else if (function != NULL) {
- VisitForValue(function, kAccumulator);
+ VisitForAccumulatorValue(function);
__ str(result_register(), MemOperand(fp, SlotOffset(slot)));
}
break;
@@ -566,7 +562,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
__ str(ip, ContextOperand(cp, slot->index()));
// No write barrier since the_hole_value is in old space.
} else if (function != NULL) {
- VisitForValue(function, kAccumulator);
+ VisitForAccumulatorValue(function);
__ str(result_register(), ContextOperand(cp, slot->index()));
int offset = Context::SlotOffset(slot->index());
// We know that we have written a function, which is not a smi.
@@ -593,7 +589,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
} else if (function != NULL) {
__ Push(cp, r2, r1);
// Push initial value for function declaration.
- VisitForValue(function, kStack);
+ VisitForStackValue(function);
} else {
__ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
__ Push(cp, r2, r1, r0);
@@ -607,20 +603,20 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
if (function != NULL || mode == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
- VisitForValue(prop->obj(), kStack);
+ VisitForStackValue(prop->obj());
if (function != NULL) {
- VisitForValue(prop->key(), kStack);
- VisitForValue(function, kAccumulator);
+ VisitForStackValue(prop->key());
+ VisitForAccumulatorValue(function);
__ pop(r1); // Key.
} else {
- VisitForValue(prop->key(), kAccumulator);
+ VisitForAccumulatorValue(prop->key());
__ mov(r1, result_register()); // Key.
__ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
}
__ pop(r2); // Receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Value in r0 is ignored (declarations are statements).
}
}
@@ -648,7 +644,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
// Keep the switch value on the stack until a case matches.
- VisitForValue(stmt->tag(), kStack);
+ VisitForStackValue(stmt->tag());
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -668,7 +664,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
next_test.Unuse();
// Compile the label expression.
- VisitForValue(clause->label(), kAccumulator);
+ VisitForAccumulatorValue(clause->label());
// Perform the comparison as if via '==='.
__ ldr(r1, MemOperand(sp, 0)); // Switch value.
@@ -729,7 +725,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the object to enumerate over. Both SpiderMonkey and JSC
// ignore null and undefined in contrast to the specification; see
// ECMA-262 section 12.6.4.
- VisitForValue(stmt->enumerable(), kAccumulator);
+ VisitForAccumulatorValue(stmt->enumerable());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
__ b(eq, &exit);
@@ -872,13 +868,13 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info) {
__ Push(cp, r0);
__ CallRuntime(Runtime::kNewClosure, 2);
}
- Apply(context_, r0);
+ context()->Plug(r0);
}
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var(), context_);
+ EmitVariableLoad(expr->var());
}
@@ -927,7 +923,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
__ jmp(done);
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
if (potential_slot != NULL) {
// Generate fast case for locals that rewrite to slots.
@@ -952,11 +948,11 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
// variables. Then load the argument from the arguments
// object using keyed load.
__ ldr(r1,
- ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+ ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
slow));
__ mov(r0, Operand(key_literal->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
__ jmp(done);
}
}
@@ -1022,16 +1018,15 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, mode);
+ EmitCallIC(ic, mode);
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var,
- Expression::Context context) {
+void FullCodeGenerator::EmitVariableLoad(Variable* var) {
// Four cases: non-this global variables, lookup slots, all other
// types of slots, and parameters that rewrite to explicit property
// accesses on the arguments object.
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
Property* property = var->AsProperty();
if (var->is_global() && !var->is_this()) {
@@ -1041,8 +1036,8 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
__ ldr(r0, CodeGenerator::GlobalObject());
__ mov(r2, Operand(var->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- Apply(context, r0);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ context()->Plug(r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
Label done, slow;
@@ -1058,24 +1053,24 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
__ CallRuntime(Runtime::kLoadContextSlot, 2);
__ bind(&done);
- Apply(context, r0);
+ context()->Plug(r0);
} else if (slot != NULL) {
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
? "Context slot"
: "Stack slot");
if (var->mode() == Variable::CONST) {
- // Constants may be the hole value if they have not been initialized.
- // Unhole them.
- MemOperand slot_operand = EmitSlotSearch(slot, r0);
- __ ldr(r0, slot_operand);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- Apply(context, r0);
- } else {
- Apply(context, slot);
- }
+ // Constants may be the hole value if they have not been initialized.
+ // Unhole them.
+ MemOperand slot_operand = EmitSlotSearch(slot, r0);
+ __ ldr(r0, slot_operand);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r0, ip);
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ context()->Plug(r0);
+ } else {
+ context()->Plug(slot);
+ }
} else {
Comment cmnt(masm_, "Rewritten parameter");
ASSERT_NOT_NULL(property);
@@ -1084,7 +1079,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->slot();
+ Slot* object_slot = object_var->AsSlot();
ASSERT_NOT_NULL(object_slot);
// Load the object.
@@ -1100,8 +1095,8 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- Apply(context, r0);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ context()->Plug(r0);
}
}
@@ -1145,7 +1140,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// r2: temp.
__ pop(r1);
__ CopyFields(r0, r1, r2.bit(), size / kPointerSize);
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -1185,11 +1180,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Fall through.
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
- VisitForValue(value, kAccumulator);
+ VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->handle()));
__ ldr(r1, MemOperand(sp));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
// Fall through.
@@ -1197,8 +1192,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Duplicate receiver on stack.
__ ldr(r0, MemOperand(sp));
__ push(r0);
- VisitForValue(key, kStack);
- VisitForValue(value, kStack);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
__ CallRuntime(Runtime::kSetProperty, 3);
break;
case ObjectLiteral::Property::GETTER:
@@ -1206,21 +1201,21 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Duplicate receiver on stack.
__ ldr(r0, MemOperand(sp));
__ push(r0);
- VisitForValue(key, kStack);
+ VisitForStackValue(key);
__ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
Smi::FromInt(1) :
Smi::FromInt(0)));
__ push(r1);
- VisitForValue(value, kStack);
+ VisitForStackValue(value);
__ CallRuntime(Runtime::kDefineAccessor, 4);
break;
}
}
if (result_saved) {
- ApplyTOS(context_);
+ context()->PlugTOS();
} else {
- Apply(context_, r0);
+ context()->Plug(r0);
}
}
@@ -1268,7 +1263,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ push(r0);
result_saved = true;
}
- VisitForValue(subexpr, kAccumulator);
+ VisitForAccumulatorValue(subexpr);
// Store the subexpression value in the array's elements.
__ ldr(r1, MemOperand(sp)); // Copy of array literal.
@@ -1282,9 +1277,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
- ApplyTOS(context_);
+ context()->PlugTOS();
} else {
- Apply(context_, r0);
+ context()->Plug(r0);
}
}
@@ -1317,39 +1312,38 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
- VisitForValue(property->obj(), kAccumulator);
+ VisitForAccumulatorValue(property->obj());
__ push(result_register());
} else {
- VisitForValue(property->obj(), kStack);
+ VisitForStackValue(property->obj());
}
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
- VisitForValue(property->obj(), kStack);
- VisitForValue(property->key(), kAccumulator);
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
} else {
- VisitForValue(property->obj(), kStack);
- VisitForValue(property->key(), kStack);
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
}
break;
}
if (expr->is_compound()) {
- Location saved_location = location_;
- location_ = kAccumulator;
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
- Expression::kValue);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- break;
+ { AccumulatorValueContext context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ break;
+ }
}
Token::Value op = expr->binary_op();
@@ -1359,28 +1353,26 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
ASSERT(constant == kRightConstant || constant == kNoConstants);
if (constant == kNoConstants) {
__ push(r0); // Left operand goes on the stack.
- VisitForValue(expr->value(), kAccumulator);
+ VisitForAccumulatorValue(expr->value());
}
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
? OVERWRITE_RIGHT
: NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
+ AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr,
op,
- Expression::kValue,
mode,
expr->target(),
expr->value(),
constant);
} else {
- EmitBinaryOp(op, Expression::kValue, mode);
+ EmitBinaryOp(op, mode);
}
- location_ = saved_location;
-
} else {
- VisitForValue(expr->value(), kAccumulator);
+ VisitForAccumulatorValue(expr->value());
}
// Record source position before possible IC call.
@@ -1390,8 +1382,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op(),
- context_);
+ expr->op());
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
@@ -1409,7 +1400,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
@@ -1417,29 +1408,27 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op,
- Expression::Context context,
OverwriteMode mode,
Expression* left,
Expression* right,
ConstantOperand constant) {
ASSERT(constant == kNoConstants); // Only handled case.
- EmitBinaryOp(op, context, mode);
+ EmitBinaryOp(op, mode);
}
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- Expression::Context context,
OverwriteMode mode) {
__ pop(r1);
GenericBinaryOpStub stub(op, mode, r1, r0);
__ CallStub(&stub);
- Apply(context, r0);
+ context()->Plug(r0);
}
@@ -1465,28 +1454,29 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
- EmitVariableAssignment(var, Token::ASSIGN, Expression::kEffect);
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN);
break;
}
case NAMED_PROPERTY: {
__ push(r0); // Preserve value.
- VisitForValue(prop->obj(), kAccumulator);
+ VisitForAccumulatorValue(prop->obj());
__ mov(r1, r0);
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
case KEYED_PROPERTY: {
__ push(r0); // Preserve value.
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ mov(r1, r0);
__ pop(r2);
__ pop(r0); // Restore value.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
}
@@ -1494,12 +1484,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op,
- Expression::Context context) {
+ Token::Value op) {
// Left-hand sides that rewrite to explicit property accesses do not reach
// here.
ASSERT(var != NULL);
- ASSERT(var->is_global() || var->slot() != NULL);
+ ASSERT(var->is_global() || var->AsSlot() != NULL);
if (var->is_global()) {
ASSERT(!var->is_this());
@@ -1509,13 +1498,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(r2, Operand(var->name()));
__ ldr(r1, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization
// of const variables. Const assignments are simply skipped.
Label done;
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
@@ -1565,7 +1554,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ bind(&done);
}
- Apply(context, result_register());
+ context()->Plug(result_register());
}
@@ -1598,7 +1587,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1608,9 +1597,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ push(ip);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(r0);
- DropAndApply(1, context_, r0);
+ context()->DropAndPlug(1, r0);
} else {
- Apply(context_, r0);
+ context()->Plug(r0);
}
}
@@ -1642,7 +1631,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1652,9 +1641,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ push(ip);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(r0);
- DropAndApply(1, context_, r0);
+ context()->DropAndPlug(1, r0);
} else {
- Apply(context_, r0);
+ context()->Plug(r0);
}
}
@@ -1664,16 +1653,15 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Expression* key = expr->key();
if (key->IsPropertyName()) {
- VisitForValue(expr->obj(), kAccumulator);
+ VisitForAccumulatorValue(expr->obj());
EmitNamedPropertyLoad(expr);
- Apply(context_, r0);
} else {
- VisitForValue(expr->obj(), kStack);
- VisitForValue(expr->key(), kAccumulator);
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
__ pop(r1);
EmitKeyedPropertyLoad(expr);
- Apply(context_, r0);
}
+ context()->Plug(r0);
}
void FullCodeGenerator::EmitCallWithIC(Call* expr,
@@ -1683,7 +1671,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
__ mov(r2, Operand(name));
// Record source position for debugger.
@@ -1691,10 +1679,10 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
- __ Call(ic, mode);
+ EmitCallIC(ic, mode);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -1705,9 +1693,9 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
- VisitForValue(key, kAccumulator);
+ VisitForAccumulatorValue(key);
__ mov(r2, r0);
// Record source position for debugger.
SetSourcePosition(expr->position());
@@ -1715,10 +1703,10 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
in_loop);
- __ Call(ic, mode);
+ EmitCallIC(ic, mode);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -1727,7 +1715,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
// Record source position for debugger.
SetSourcePosition(expr->position());
@@ -1736,7 +1724,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
__ CallStub(&stub);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- DropAndApply(1, context_, r0);
+ context()->DropAndPlug(1, r0);
}
@@ -1750,7 +1738,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
- VisitForValue(fun, kStack);
+ VisitForStackValue(fun);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ push(r2); // Reserved receiver slot.
@@ -1758,7 +1746,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
// Push copy of the function - found below the arguments.
@@ -1790,26 +1778,26 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ CallStub(&stub);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- DropAndApply(1, context_, r0);
+ context()->DropAndPlug(1, r0);
} else if (var != NULL && !var->is_this() && var->is_global()) {
// Push global object as receiver for the call IC.
__ ldr(r0, CodeGenerator::GlobalObject());
__ push(r0);
EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
+ } else if (var != NULL && var->AsSlot() != NULL &&
+ var->AsSlot()->type() == Slot::LOOKUP) {
// Call to a lookup slot (dynamically introduced variable).
Label slow, done;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLoadFromSlotFastCase(var->slot(),
+ EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
NOT_INSIDE_TYPEOF,
&slow,
&done);
__ bind(&slow);
- // Call the runtime to find the function to call (returned in eax)
- // and the object holding it (returned in edx).
+ // Call the runtime to find the function to call (returned in r0)
+ // and the object holding it (returned in r1).
__ push(context_register());
__ mov(r2, Operand(var->name()));
@@ -1840,21 +1828,21 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsSymbol()) {
// Call to a named property, use call IC.
- VisitForValue(prop->obj(), kStack);
+ VisitForStackValue(prop->obj());
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
// for a regular property use keyed CallIC.
- VisitForValue(prop->obj(), kStack);
+ VisitForStackValue(prop->obj());
if (prop->is_synthetic()) {
- VisitForValue(prop->key(), kAccumulator);
+ VisitForAccumulatorValue(prop->key());
// Record source code position for IC call.
SetSourcePosition(prop->position());
__ pop(r1); // We do not need to keep the receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
__ ldr(r1, CodeGenerator::GlobalObject());
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ Push(r0, r1); // Function, receiver.
@@ -1873,7 +1861,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
loop_depth() == 0) {
lit->set_try_full_codegen(true);
}
- VisitForValue(fun, kStack);
+ VisitForStackValue(fun);
// Load global receiver object.
__ ldr(r1, CodeGenerator::GlobalObject());
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
@@ -1893,13 +1881,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- VisitForValue(expr->expression(), kStack);
+ VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
// Call the construct call builtin that handles allocation and
@@ -1912,59 +1900,59 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
- Apply(context_, r0);
+ context()->Plug(r0);
}
void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_true);
__ b(if_false);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ tst(r0, Operand(kSmiTagMask | 0x80000000));
Split(eq, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
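
The single tst in EmitIsNonNegativeSmi folds two checks into one: with kSmiTag == 0, a word is a non-negative smi exactly when both the tag bit (bit 0) and the sign bit (bit 31) are clear. The same predicate in portable C++:

    #include <cstdint>

    bool IsNonNegativeSmi(uint32_t word) {
      const uint32_t kSmiTagMask  = 1u;           // smi tag lives in bit 0
      const uint32_t kSmiSignMask = 0x80000000u;  // sign bit of the word
      return (word & (kSmiTagMask | kSmiSignMask)) == 0;
    }

The keyed-load interceptor change in ic-arm.cc further down uses the identical mask to require that a key is a non-negative (array-index) smi.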
void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ LoadRoot(ip, Heap::kNullValueRootIndex);
@@ -1981,41 +1969,41 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
Split(le, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
Split(ge, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -2023,7 +2011,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
__ tst(r1, Operand(1 << Map::kIsUndetectable));
Split(ne, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -2032,80 +2020,80 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
// used in a few functions in runtime.js which should not normally be hit by
// this compiler.
__ jmp(if_false);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
Split(eq, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
Split(eq, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
Split(eq, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -2117,8 +2105,8 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Get the frame pointer for the calling frame.
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2136,7 +2124,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
Split(eq, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -2144,21 +2132,21 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kAccumulator);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ pop(r1);
__ cmp(r0, r1);
Split(eq, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -2166,13 +2154,13 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in eax.
- VisitForValue(args->at(0), kAccumulator);
+ // ArgumentsAccessStub expects the key in r1 and the formal
+ // parameter count in r0.
+ VisitForAccumulatorValue(args->at(0));
__ mov(r1, r0);
__ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -2194,7 +2182,7 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
__ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -2202,7 +2190,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Label done, null, function, non_function_constructor;
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
// If the object is a smi, we return null.
__ BranchOnSmi(r0, &null);
@@ -2248,7 +2236,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// All done.
__ bind(&done);
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -2263,14 +2251,14 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
}
#endif
// Finally, we're expected to leave a value on the top of the stack.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -2320,7 +2308,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
ExternalReference::fill_heap_number_with_random_function(), 1);
}
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -2328,11 +2316,11 @@ void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
ASSERT(args->length() == 3);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
__ CallStub(&stub);
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -2340,19 +2328,19 @@ void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub;
ASSERT(args->length() == 4);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
- VisitForValue(args->at(3), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ VisitForStackValue(args->at(3));
__ CallStub(&stub);
- Apply(context_, r0);
+ context()->Plug(r0);
}
void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator); // Load the object.
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
Label done;
// If the object is a smi return the object.
@@ -2363,25 +2351,25 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
__ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
__ bind(&done);
- Apply(context_, r0);
+ context()->Plug(r0);
}
void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
// Load the arguments on the stack and call the runtime function.
ASSERT(args->length() == 2);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
__ CallRuntime(Runtime::kMath_pow, 2);
- Apply(context_, r0);
+ context()->Plug(r0);
}
void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
- VisitForValue(args->at(0), kStack); // Load the object.
- VisitForValue(args->at(1), kAccumulator); // Load the value.
+ VisitForStackValue(args->at(0)); // Load the object.
+ VisitForAccumulatorValue(args->at(1)); // Load the value.
__ pop(r1); // r0 = value. r1 = object.
Label done;
@@ -2399,7 +2387,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
__ bind(&done);
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -2407,18 +2395,18 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
// Load the argument on the stack and call the stub.
- VisitForValue(args->at(0), kStack);
+ VisitForStackValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
- Apply(context_, r0);
+ context()->Plug(r0);
}
void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label done;
StringCharFromCodeGenerator generator(r0, r1);
@@ -2429,15 +2417,15 @@ void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, r1);
+ context()->Plug(r1);
}
void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kAccumulator);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
Register object = r1;
Register index = r0;
@@ -2476,15 +2464,15 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, result);
+ context()->Plug(result);
}
void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kAccumulator);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
Register object = r1;
Register index = r0;
@@ -2525,58 +2513,58 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, result);
+ context()->Plug(result);
}
void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
StringAddStub stub(NO_STRING_ADD_FLAGS);
__ CallStub(&stub);
- Apply(context_, r0);
+ context()->Plug(r0);
}
void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
StringCompareStub stub;
__ CallStub(&stub);
- Apply(context_, r0);
+ context()->Plug(r0);
}
void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the runtime.
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
+ VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sin, 1);
- Apply(context_, r0);
+ context()->Plug(r0);
}
void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the runtime.
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
+ VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_cos, 1);
- Apply(context_, r0);
+ context()->Plug(r0);
}
void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the runtime function.
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
+ VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -2584,38 +2572,38 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() >= 2);
int arg_count = args->length() - 2; // For receiver and function.
- VisitForValue(args->at(0), kStack); // Receiver.
+ VisitForStackValue(args->at(0)); // Receiver.
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i + 1), kStack);
+ VisitForStackValue(args->at(i + 1));
}
- VisitForValue(args->at(arg_count + 1), kAccumulator); // Function.
+ VisitForAccumulatorValue(args->at(arg_count + 1)); // Function.
// InvokeFunction requires function in r1. Move it in there.
if (!result_register().is(r1)) __ mov(r1, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(r1, count, CALL_FUNCTION);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- Apply(context_, r0);
+ context()->Plug(r0);
}
void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
ASSERT(args->length() == 3);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kRegExpConstructResult, 3);
- Apply(context_, r0);
+ context()->Plug(r0);
}
void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
ASSERT(args->length() == 3);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kSwapElements, 3);
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -2630,11 +2618,11 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- Apply(context_, r0);
+ context()->Plug(r0);
return;
}
- VisitForValue(args->at(1), kAccumulator);
+ VisitForAccumulatorValue(args->at(1));
Register key = r0;
Register cache = r1;
@@ -2666,7 +2654,7 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
__ CallRuntime(Runtime::kGetFromCache, 2);
__ bind(&done);
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -2678,8 +2666,8 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
Register tmp = r2;
Register tmp2 = r3;
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kAccumulator);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
__ pop(left);
Label done, fail, ok;
@@ -2707,19 +2695,19 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
__ LoadRoot(r0, Heap::kTrueValueRootIndex);
__ bind(&done);
- Apply(context_, r0);
+ context()->Plug(r0);
}
void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
__ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
@@ -2727,16 +2715,16 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
__ b(eq, if_true);
__ b(if_false);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
__ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
__ IndexFromHash(r0, r0);
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -2761,7 +2749,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
if (expr->is_jsruntime()) {
@@ -2769,14 +2757,14 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ mov(r2, Operand(expr->name()));
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
- __ Call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
}
- Apply(context_, r0);
+ context()->Plug(r0);
}
@@ -2790,20 +2778,20 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Result of deleting non-property, non-variable reference is true.
// The subexpression may have side effects.
VisitForEffect(expr->expression());
- Apply(context_, true);
+ context()->Plug(true);
} else if (var != NULL &&
!var->is_global() &&
- var->slot() != NULL &&
- var->slot()->type() != Slot::LOOKUP) {
+ var->AsSlot() != NULL &&
+ var->AsSlot()->type() != Slot::LOOKUP) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
- Apply(context_, false);
+ context()->Plug(false);
} else {
// Property or variable reference. Call the delete builtin with
// object and property name as arguments.
if (prop != NULL) {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ VisitForStackValue(prop->obj());
+ VisitForStackValue(prop->key());
} else if (var->is_global()) {
__ ldr(r1, CodeGenerator::GlobalObject());
__ mov(r0, Operand(var->name()));
@@ -2820,7 +2808,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ push(r2);
}
__ InvokeBuiltin(Builtins::DELETE, CALL_JS);
- Apply(context_, r0);
+ context()->Plug(r0);
}
break;
}
@@ -2828,26 +2816,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
VisitForEffect(expr->expression());
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- break;
- case Expression::kValue:
- __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- break;
- case Expression::kTest:
- __ jmp(false_label_);
- break;
- }
+ context()->Plug(Heap::kUndefinedValueRootIndex);
break;
}
@@ -2859,31 +2828,33 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Label* fall_through = NULL;
// Notice that the labels are swapped.
- PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
VisitForControl(expr->expression(), if_true, if_false, fall_through);
- Apply(context_, if_false, if_true); // Labels swapped.
+ context()->Plug(if_false, if_true); // Labels swapped.
break;
}
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- VisitForTypeofValue(expr->expression(), kStack);
+ { StackValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
__ CallRuntime(Runtime::kTypeof, 1);
- Apply(context_, r0);
+ context()->Plug(r0);
break;
}
case Token::ADD: {
Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForValue(expr->expression(), kAccumulator);
+ VisitForAccumulatorValue(expr->expression());
Label no_conversion;
__ tst(result_register(), Operand(kSmiTagMask));
__ b(eq, &no_conversion);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
__ bind(&no_conversion);
- Apply(context_, result_register());
+ context()->Plug(result_register());
break;
}
@@ -2897,9 +2868,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
NO_UNARY_FLAGS);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register r0.
- VisitForValue(expr->expression(), kAccumulator);
+ VisitForAccumulatorValue(expr->expression());
__ CallStub(&stub);
- Apply(context_, r0);
+ context()->Plug(r0);
break;
}
@@ -2907,7 +2878,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
// The generic unary operation stub expects the argument to be
// in the accumulator register r0.
- VisitForValue(expr->expression(), kAccumulator);
+ VisitForAccumulatorValue(expr->expression());
Label done;
bool inline_smi_code = ShouldInlineSmiCase(expr->op());
if (inline_smi_code) {
@@ -2928,7 +2899,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
__ CallStub(&stub);
__ bind(&done);
- Apply(context_, r0);
+ context()->Plug(r0);
break;
}
@@ -2964,25 +2935,22 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- Location saved_location = location_;
- location_ = kAccumulator;
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
- Expression::kValue);
- location_ = saved_location;
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
} else {
// Reserve space for result of postfix operation.
- if (expr->is_postfix() && context_ != Expression::kEffect) {
+ if (expr->is_postfix() && !context()->IsEffect()) {
__ mov(ip, Operand(Smi::FromInt(0)));
__ push(ip);
}
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the accumulator.
- VisitForValue(prop->obj(), kAccumulator);
+ VisitForAccumulatorValue(prop->obj());
__ push(r0);
EmitNamedPropertyLoad(prop);
} else {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
EmitKeyedPropertyLoad(prop);
@@ -2998,29 +2966,21 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Save result for postfix expressions.
if (expr->is_postfix()) {
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Do not save result.
- break;
- case Expression::kValue:
- case Expression::kTest:
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(r0);
- break;
- case NAMED_PROPERTY:
- __ str(r0, MemOperand(sp, kPointerSize));
- break;
- case KEYED_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
- break;
- }
- break;
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(r0);
+ break;
+ case NAMED_PROPERTY:
+ __ str(r0, MemOperand(sp, kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ str(r0, MemOperand(sp, 2 * kPointerSize));
+ break;
+ }
}
}
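
For a postfix operation in a non-effect context the expression's value is the old value, so it must survive the increment. Reconstructing the stack layouts implied by the pushes above (the "result slot" is the Smi 0 reserved near the top of this function):

    // sp -> | old value |                        VARIABLE (plain push)
    // sp -> | receiver | result slot |           NAMED_PROPERTY
    //         str r0, [sp + 4] fills the slot
    // sp -> | key | receiver | result slot |     KEYED_PROPERTY
    //         str r0, [sp + 8] fills the slot

After the store IC runs, PlugTOS() pops the saved old value as the result.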
@@ -3047,31 +3007,31 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
switch (assign_type) {
case VARIABLE:
if (expr->is_postfix()) {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN,
- Expression::kEffect);
- // For all contexts except kEffect: We have the result on
+ { EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ }
+ // For all contexts except EffectContext, we have the result on
// top of the stack.
- if (context_ != Expression::kEffect) {
- ApplyTOS(context_);
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
}
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN,
- context_);
+ Token::ASSIGN);
}
break;
case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
__ pop(r1);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
if (expr->is_postfix()) {
- if (context_ != Expression::kEffect) {
- ApplyTOS(context_);
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
}
} else {
- Apply(context_, r0);
+ context()->Plug(r0);
}
break;
}
@@ -3079,13 +3039,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(r1); // Key.
__ pop(r2); // Receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
if (expr->is_postfix()) {
- if (context_ != Expression::kEffect) {
- ApplyTOS(context_);
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
}
} else {
- Apply(context_, r0);
+ context()->Plug(r0);
}
break;
}
@@ -3093,7 +3053,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+ ASSERT(!context()->IsEffect());
+ ASSERT(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
Comment cmnt(masm_, "Global variable");
@@ -3102,16 +3064,16 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
- __ Call(ic, RelocInfo::CODE_TARGET);
- if (where == kStack) __ push(r0);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ context()->Plug(r0);
} else if (proxy != NULL &&
- proxy->var()->slot() != NULL &&
- proxy->var()->slot()->type() == Slot::LOOKUP) {
+ proxy->var()->AsSlot() != NULL &&
+ proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- Slot* slot = proxy->var()->slot();
+ Slot* slot = proxy->var()->AsSlot();
EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
@@ -3120,10 +3082,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
__ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
__ bind(&done);
- if (where == kStack) __ push(r0);
+ context()->Plug(r0);
} else {
// This expression cannot throw a reference error at the top level.
- VisitForValue(expr, where);
+ Visit(expr);
}
}
@@ -3145,7 +3107,9 @@ bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
Handle<String> check = Handle<String>::cast(right_literal_value);
- VisitForTypeofValue(left_unary->expression(), kAccumulator);
+ { AccumulatorValueContext context(this);
+ VisitForTypeofValue(left_unary->expression());
+ }
if (check->Equals(Heap::number_symbol())) {
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, if_true);
@@ -3231,8 +3195,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -3240,14 +3204,14 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Expression* left = expr->left();
Expression* right = expr->right();
if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
return;
}
- VisitForValue(expr->left(), kStack);
+ VisitForStackValue(expr->left());
switch (op) {
case Token::IN:
- VisitForValue(expr->right(), kStack);
+ VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_JS);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
@@ -3255,7 +3219,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
break;
case Token::INSTANCEOF: {
- VisitForValue(expr->right(), kStack);
+ VisitForStackValue(expr->right());
InstanceofStub stub;
__ CallStub(&stub);
// The stub returns 0 for true.
@@ -3265,7 +3229,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
default: {
- VisitForValue(expr->right(), kAccumulator);
+ VisitForAccumulatorValue(expr->right());
Condition cc = eq;
bool strict = false;
switch (op) {
@@ -3323,7 +3287,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Convert the result of the comparison into one expected for this
// expression's context.
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -3333,10 +3297,10 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
- VisitForValue(expr->expression(), kAccumulator);
+ VisitForAccumulatorValue(expr->expression());
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
if (expr->is_strict()) {
@@ -3355,20 +3319,31 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
__ cmp(r1, Operand(1 << Map::kIsUndetectable));
Split(eq, if_true, if_false, fall_through);
}
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- Apply(context_, r0);
+ context()->Plug(r0);
}
-Register FullCodeGenerator::result_register() { return r0; }
+Register FullCodeGenerator::result_register() {
+ return r0;
+}
-Register FullCodeGenerator::context_register() { return cp; }
+Register FullCodeGenerator::context_register() {
+ return cp;
+}
+
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+ ASSERT(mode == RelocInfo::CODE_TARGET ||
+ mode == RelocInfo::CODE_TARGET_CONTEXT);
+ __ Call(ic, mode);
+}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 1a76db2ce..eab4c6e8e 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -967,6 +967,14 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
}
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+ Object* map,
+ Object* cell) {
+ // TODO(<bug#>): implement this.
+ return false;
+}
+
+
bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
// Find the end of the inlined code for the store if there is an
// inlined version of the store.
@@ -1236,7 +1244,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- r1 : receiver
// -----------------------------------
Label miss;
- Label index_out_of_range;
Register receiver = r1;
Register index = r0;
@@ -1251,7 +1258,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
result,
&miss, // When not a string.
&miss, // When not a number.
- &index_out_of_range,
+ &miss, // When index out of range.
STRING_INDEX_IS_ARRAY_INDEX);
char_at_generator.GenerateFast(masm);
__ Ret();
@@ -1259,10 +1266,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
ICRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ Ret();
-
__ bind(&miss);
GenerateMiss(masm);
}
@@ -1581,8 +1584,9 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// Check that the receiver isn't a smi.
__ BranchOnSmi(r1, &slow);
- // Check that the key is a smi.
- __ BranchOnNotSmi(r0, &slow);
+ // Check that the key is an array index, that is, a Uint32.
+ __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
+ __ b(ne, &slow);
// Get the map of the receiver.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 64262b2b8..40be9bb81 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -294,7 +294,7 @@ void Debugger::Debug() {
} else if (GetVFPSingleValue(arg1, &svalue)) {
PrintF("%s: %f \n", arg1, svalue);
} else if (GetVFPDoubleValue(arg1, &dvalue)) {
- PrintF("%s: %lf \n", arg1, dvalue);
+ PrintF("%s: %f \n", arg1, dvalue);
} else {
PrintF("%s unrecognized\n", arg1);
}
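
The %lf → %f change is a genuine portability fix rather than cosmetics: in a printf-style variadic call a float argument is promoted to double, so %f already consumes a double, and %lf was undefined for printf in C89 (the l length modifier is only meaningful for scanf). A minimal illustration:

    #include <cstdio>

    int main() {
      double d = 1.5;
      std::printf("%f\n", d);   // correct: %f consumes a double
      std::scanf("%lf", &d);    // scanf is where the 'l' is required
      return 0;
    }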
@@ -349,7 +349,8 @@ void Debugger::Debug() {
end = cur + words;
while (cur < end) {
- PrintF(" 0x%08x: 0x%08x %10d\n", cur, *cur, *cur);
+ PrintF(" 0x%08x: 0x%08x %10d\n",
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
cur++;
}
} else if (strcmp(cmd, "disasm") == 0) {
@@ -382,7 +383,8 @@ void Debugger::Debug() {
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n", cur, buffer.start());
+ PrintF(" 0x%08x %s\n",
+ reinterpret_cast<intptr_t>(cur), buffer.start());
cur += Instr::kInstrSize;
}
} else if (strcmp(cmd, "gdb") == 0) {
@@ -1061,7 +1063,7 @@ uintptr_t Simulator::StackLimit() const {
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instr* instr, const char* format) {
PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
- instr, format);
+ reinterpret_cast<intptr_t>(instr), format);
UNIMPLEMENTED();
}
@@ -2650,7 +2652,7 @@ void Simulator::InstructionDecode(Instr* instr) {
v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer,
reinterpret_cast<byte*>(instr));
- PrintF(" 0x%08x %s\n", instr, buffer.start());
+ PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
}
if (instr->ConditionField() == special_condition) {
DecodeUnconditional(instr);
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 0da5f6469..97f94952d 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -266,7 +266,12 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype) {
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ // Check we're still in the same context.
+ __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ Move(ip, Top::global());
+ __ cmp(prototype, ip);
+ __ b(ne, miss);
// Get the global function with the given index.
JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
@@ -1434,7 +1439,8 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
- r0);
+ r0,
+ &miss);
ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
r1, r3, r4, name, &miss);
@@ -1505,7 +1511,8 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
- r0);
+ r0,
+ &miss);
ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
r1, r3, r4, name, &miss);
@@ -1626,6 +1633,118 @@ Object* CallStubCompiler::CompileStringFromCharCodeCall(
}
+Object* CallStubCompiler::CompileMathFloorCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // TODO(872): implement this.
+ return Heap::undefined_value();
+}
+
+
+Object* CallStubCompiler::CompileMathAbsCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r2 : function name
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into r0.
+ __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
+
+ // Check if the argument is a smi.
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ BranchOnNotSmi(r0, &not_smi);
+
+ // Do bitwise not or do nothing depending on the sign of the
+ // argument.
+ __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
+
+ // Add 1 or do nothing depending on the sign of the argument.
+ __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
+
+ // If the result is still negative, go to the slow case.
+ // This only happens for the most negative smi.
+ Label slow;
+ __ b(mi, &slow);
+
+ // Smi case done.
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // Check if the argument is a heap number and load its exponent and
+ // sign.
+ __ bind(&not_smi);
+ __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true);
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+
+ // Check the sign of the argument. If the argument is positive,
+ // just return it.
+ Label negative_sign;
+ __ tst(r1, Operand(HeapNumber::kSignMask));
+ __ b(ne, &negative_sign);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // If the argument is negative, clear the sign, and return a new
+ // number.
+ __ bind(&negative_sign);
+ __ eor(r1, r1, Operand(HeapNumber::kSignMask));
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r0, r4, r5, r6, &slow);
+ __ str(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ str(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+ __ bind(&miss);
+ // r2: function name.
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+}
+
+
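
The smi fast path of CompileMathAbsCall is the classic branch-free absolute value: an arithmetic right shift by 31 produces a mask of all sign bits (0 for non-negative input, -1 for negative), and (x ^ mask) - mask flips the bits and adds one exactly when the input was negative. The same trick in C++ (assuming arithmetic right shift of signed values, as on ARM):

    #include <cstdint>

    int32_t BranchFreeAbs(int32_t x) {
      int32_t mask = x >> 31;      // 0 if x >= 0, -1 if x < 0
      return (x ^ mask) - mask;    // identity, or two's-complement negate
    }

The one input this cannot handle is the most negative value, whose negation overflows back to a negative number; that is exactly why the stub's sub sets the condition codes and branches to the slow path on mi.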
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@@ -1705,7 +1824,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ b(hs, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, r0);
+ masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, r4, name, &miss);
}
@@ -1725,7 +1844,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, r0);
+ masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, r4, name, &miss);
}
@@ -1748,7 +1867,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, r0);
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, r4, name, &miss);
}
@@ -2067,7 +2186,10 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
name,
r1,
&miss);
- if (cell->IsFailure()) return cell;
+ if (cell->IsFailure()) {
+ miss.Unuse();
+ return cell;
+ }
}
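
The added miss.Unuse() calls before the early returns are not cosmetic. A Label is "linked" while jumps to it are still unresolved, and its destructor (like the NearLabel destructor shown further down) asserts in debug builds that no label is destroyed while still linked; bailing out of compilation on failure without unusing the miss label would trip that assert. A minimal sketch of the invariant, mirroring the pos_ encoding in the assembler.h hunk below:

    #include <cassert>

    struct LabelLike {
      int pos_ = 0;   // 0: unused, > 0: linked (pending jumps), < 0: bound
      void Unuse() { pos_ = 0; }
      bool is_linked() const { return pos_ > 0; }
      ~LabelLike() { assert(!is_linked()); }  // no pending jumps may leak
    };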
// Return undefined if maps of the full prototype chain are still the
@@ -2117,7 +2239,10 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
callback, name, &miss, &failure);
- if (!success) return failure;
+ if (!success) {
+ miss.Unuse();
+ return failure;
+ }
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2212,11 +2337,11 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
__ mov(r0, r4);
- __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
+ __ IncrementCounter(&Counters::named_load_global_stub, 1, r1, r3);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3);
+ __ IncrementCounter(&Counters::named_load_global_stub_miss, 1, r1, r3);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
@@ -2265,7 +2390,10 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4,
callback, name, &miss, &failure);
- if (!success) return failure;
+ if (!success) {
+ miss.Unuse();
+ return failure;
+ }
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 6a46f6159..b6efdb97e 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -465,7 +465,7 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
void RelocInfo::Print() {
PrintF("%p %s", pc_, RelocModeName(rmode_));
if (IsComment(rmode_)) {
- PrintF(" (%s)", data_);
+ PrintF(" (%s)", reinterpret_cast<char*>(data_));
} else if (rmode_ == EMBEDDED_OBJECT) {
PrintF(" (");
target_object()->ShortPrint();
@@ -479,7 +479,7 @@ void RelocInfo::Print() {
Code* code = Code::GetCodeFromTargetAddress(target_address());
PrintF(" (%s) (%p)", Code::Kind2String(code->kind()), target_address());
} else if (IsPosition(rmode_)) {
- PrintF(" (%d)", data());
+ PrintF(" (%" V8_PTR_PREFIX "d)", data());
}
PrintF("\n");
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 157743359..d28bf4370 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -57,7 +57,7 @@ class Label BASE_EMBEDDED {
INLINE(void Unuse()) { pos_ = 0; }
- INLINE(bool is_bound() const) { return pos_ < 0; }
+ INLINE(bool is_bound() const) { return pos_ < 0; }
INLINE(bool is_unused() const) { return pos_ == 0; }
INLINE(bool is_linked() const) { return pos_ > 0; }
@@ -92,6 +92,57 @@ class Label BASE_EMBEDDED {
// -----------------------------------------------------------------------------
+// NearLabels are labels used for short jumps (in Intel jargon).
+// Use a NearLabel only if the jump range is guaranteed to be within
+// -128 to +127 bytes. Backward jumps are already emitted as short jumps,
+// so a NearLabel only improves performance when used for forward jumps.
+class NearLabel BASE_EMBEDDED {
+ public:
+ NearLabel() { Unuse(); }
+ ~NearLabel() { ASSERT(!is_linked()); }
+
+ void Unuse() {
+ pos_ = -1;
+ unresolved_branches_ = 0;
+#ifdef DEBUG
+ for (int i = 0; i < kMaxUnresolvedBranches; i++) {
+ unresolved_positions_[i] = -1;
+ }
+#endif
+ }
+
+ int pos() {
+ ASSERT(is_bound());
+ return pos_;
+ }
+
+ bool is_bound() { return pos_ >= 0; }
+ bool is_linked() { return !is_bound() && unresolved_branches_ > 0; }
+ bool is_unused() { return !is_bound() && unresolved_branches_ == 0; }
+
+ void bind_to(int position) {
+ ASSERT(!is_bound());
+ pos_ = position;
+ }
+
+ void link_to(int position) {
+ ASSERT(!is_bound());
+ ASSERT(unresolved_branches_ < kMaxUnresolvedBranches);
+ unresolved_positions_[unresolved_branches_++] = position;
+ }
+
+ private:
+ static const int kMaxUnresolvedBranches = 8;
+ int pos_;
+ int unresolved_branches_;
+ int unresolved_positions_[kMaxUnresolvedBranches];
+
+ friend class Assembler;
+};
+
+
+// -----------------------------------------------------------------------------
// Relocation information
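
    A minimal model of the bookkeeping this class does (illustrative only,
    not the real assembler): a forward short jump emits a placeholder
    displacement byte and records its site through link_to(); bind() later
    patches each recorded site with an 8-bit displacement, which must stay
    in the -128..+127 range the class comment requires.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      std::vector<int8_t> code;
      std::vector<int> unresolved;                 // link_to() bookkeeping

      code.push_back(0);                           // placeholder displacement
      unresolved.push_back(static_cast<int>(code.size()) - 1);

      for (int i = 0; i < 10; i++) code.push_back(0x00);  // intervening code

      int target = static_cast<int>(code.size()); // bind_to(position)
      for (int site : unresolved) {
        int disp = target - (site + 1);           // relative to the next byte
        assert(disp >= -128 && disp <= 127);      // short-jump guarantee
        code[site] = static_cast<int8_t>(disp);
      }
      assert(code[0] == 10);
      return 0;
    }
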
@@ -181,10 +232,10 @@ class RelocInfo BASE_EMBEDDED {
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
- byte* pc() const { return pc_; }
+ byte* pc() const { return pc_; }
void set_pc(byte* pc) { pc_ = pc; }
Mode rmode() const { return rmode_; }
- intptr_t data() const { return data_; }
+ intptr_t data() const { return data_; }
// Apply a relocation by delta bytes
INLINE(void apply(intptr_t delta));
@@ -339,7 +390,7 @@ class RelocIterator: public Malloced {
explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
// Iteration
- bool done() const { return done_; }
+ bool done() const { return done_; }
void next();
// Return pointer valid until next next().
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 9ff1be73b..f47dffd8d 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -70,6 +70,16 @@ CountOperation* ExpressionStatement::StatementAsCountOperation() {
}
+VariableProxy::VariableProxy(Variable* var)
+ : name_(var->name()),
+ var_(NULL), // Will be set by the call to BindTo.
+ is_this_(var->is_this()),
+ inside_with_(false),
+ is_trivial_(false) {
+ BindTo(var);
+}
+
+
VariableProxy::VariableProxy(Handle<String> name,
bool is_this,
bool inside_with)
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 9fcf25672..fc34fd461 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -118,35 +118,38 @@ typedef ZoneList<Handle<String> > ZoneStringList;
typedef ZoneList<Handle<Object> > ZoneObjectList;
+#define DECLARE_NODE_TYPE(type) \
+ virtual void Accept(AstVisitor* v); \
+ virtual AstNode::Type node_type() const { return AstNode::k##type; } \
+ virtual type* As##type() { return this; }
+
+
class AstNode: public ZoneObject {
public:
+#define DECLARE_TYPE_ENUM(type) k##type,
+ enum Type {
+ AST_NODE_LIST(DECLARE_TYPE_ENUM)
+ kInvalid = -1
+ };
+#undef DECLARE_TYPE_ENUM
+
virtual ~AstNode() { }
+
virtual void Accept(AstVisitor* v) = 0;
+ virtual Type node_type() const { return kInvalid; }
+
+ // Type testing & conversion functions overridden by concrete subclasses.
+#define DECLARE_NODE_FUNCTIONS(type) \
+ virtual type* As##type() { return NULL; }
+ AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
+#undef DECLARE_NODE_FUNCTIONS
- // Type testing & conversion.
virtual Statement* AsStatement() { return NULL; }
- virtual Block* AsBlock() { return NULL; }
- virtual ExpressionStatement* AsExpressionStatement() { return NULL; }
- virtual EmptyStatement* AsEmptyStatement() { return NULL; }
virtual Expression* AsExpression() { return NULL; }
- virtual Literal* AsLiteral() { return NULL; }
- virtual Slot* AsSlot() { return NULL; }
- virtual VariableProxy* AsVariableProxy() { return NULL; }
- virtual Property* AsProperty() { return NULL; }
- virtual Call* AsCall() { return NULL; }
virtual TargetCollector* AsTargetCollector() { return NULL; }
virtual BreakableStatement* AsBreakableStatement() { return NULL; }
virtual IterationStatement* AsIterationStatement() { return NULL; }
- virtual ForStatement* AsForStatement() { return NULL; }
- virtual UnaryOperation* AsUnaryOperation() { return NULL; }
- virtual CountOperation* AsCountOperation() { return NULL; }
- virtual BinaryOperation* AsBinaryOperation() { return NULL; }
- virtual Assignment* AsAssignment() { return NULL; }
- virtual FunctionLiteral* AsFunctionLiteral() { return NULL; }
virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
- virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
- virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
- virtual CompareOperation* AsCompareOperation() { return NULL; }
};
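
    The same X-macro pattern in a self-contained sketch (Accept() and the
    visitor are omitted): one node list drives both the Type enum and the
    As##type() downcast defaults, and DECLARE_NODE_TYPE supplies the
    per-class overrides.

    #include <cstddef>

    #define NODE_LIST(V) V(Literal) V(Block)

    class Literal;
    class Block;

    class AstNode {
     public:
    #define DECLARE_TYPE_ENUM(type) k##type,
      enum Type { NODE_LIST(DECLARE_TYPE_ENUM) kInvalid = -1 };
    #undef DECLARE_TYPE_ENUM
      virtual ~AstNode() {}
      virtual Type node_type() const { return kInvalid; }
    #define DECLARE_NODE_FUNCTIONS(type) virtual type* As##type() { return NULL; }
      NODE_LIST(DECLARE_NODE_FUNCTIONS)
    #undef DECLARE_NODE_FUNCTIONS
    };

    #define DECLARE_NODE_TYPE(type)                                        \
      virtual AstNode::Type node_type() const { return AstNode::k##type; } \
      virtual type* As##type() { return this; }

    class Literal : public AstNode {
     public:
      DECLARE_NODE_TYPE(Literal)
    };

    class Block : public AstNode {
     public:
      DECLARE_NODE_TYPE(Block)
    };

    int main() {
      Literal lit;
      AstNode* node = &lit;
      return (node->AsLiteral() != NULL && node->AsBlock() == NULL) ? 0 : 1;
    }
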
@@ -155,7 +158,6 @@ class Statement: public AstNode {
Statement() : statement_pos_(RelocInfo::kNoPosition) {}
virtual Statement* AsStatement() { return this; }
- virtual ReturnStatement* AsReturnStatement() { return NULL; }
virtual Assignment* StatementAsSimpleAssignment() { return NULL; }
virtual CountOperation* StatementAsCountOperation() { return NULL; }
@@ -172,18 +174,6 @@ class Statement: public AstNode {
class Expression: public AstNode {
public:
- enum Context {
- // Not assigned a context yet, or else will not be visited during
- // code generation.
- kUninitialized,
- // Evaluated for its side effects.
- kEffect,
- // Evaluated for its value (and side effects).
- kValue,
- // Evaluated for control flow (and side effects).
- kTest
- };
-
Expression() : bitfields_(0) {}
virtual Expression* AsExpression() { return this; }
@@ -325,9 +315,7 @@ class Block: public BreakableStatement {
public:
inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block);
- virtual void Accept(AstVisitor* v);
-
- virtual Block* AsBlock() { return this; }
+ DECLARE_NODE_TYPE(Block)
virtual Assignment* StatementAsSimpleAssignment() {
if (statements_.length() != 1) return NULL;
@@ -342,7 +330,7 @@ class Block: public BreakableStatement {
void AddStatement(Statement* statement) { statements_.Add(statement); }
ZoneList<Statement*>* statements() { return &statements_; }
- bool is_initializer_block() const { return is_initializer_block_; }
+ bool is_initializer_block() const { return is_initializer_block_; }
private:
ZoneList<Statement*> statements_;
@@ -361,11 +349,11 @@ class Declaration: public AstNode {
ASSERT(fun == NULL || mode == Variable::VAR);
}
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(Declaration)
- VariableProxy* proxy() const { return proxy_; }
- Variable::Mode mode() const { return mode_; }
- FunctionLiteral* fun() const { return fun_; } // may be NULL
+ VariableProxy* proxy() const { return proxy_; }
+ Variable::Mode mode() const { return mode_; }
+ FunctionLiteral* fun() const { return fun_; } // may be NULL
private:
VariableProxy* proxy_;
@@ -402,13 +390,13 @@ class DoWhileStatement: public IterationStatement {
public:
explicit inline DoWhileStatement(ZoneStringList* labels);
+ DECLARE_NODE_TYPE(DoWhileStatement)
+
void Initialize(Expression* cond, Statement* body) {
IterationStatement::Initialize(body);
cond_ = cond;
}
- virtual void Accept(AstVisitor* v);
-
Expression* cond() const { return cond_; }
// Position where condition expression starts. We need it to make
@@ -426,13 +414,13 @@ class WhileStatement: public IterationStatement {
public:
explicit WhileStatement(ZoneStringList* labels);
+ DECLARE_NODE_TYPE(WhileStatement)
+
void Initialize(Expression* cond, Statement* body) {
IterationStatement::Initialize(body);
cond_ = cond;
}
- virtual void Accept(AstVisitor* v);
-
Expression* cond() const { return cond_; }
bool may_have_function_literal() const {
return may_have_function_literal_;
@@ -452,7 +440,7 @@ class ForStatement: public IterationStatement {
public:
explicit inline ForStatement(ZoneStringList* labels);
- virtual ForStatement* AsForStatement() { return this; }
+ DECLARE_NODE_TYPE(ForStatement)
void Initialize(Statement* init,
Expression* cond,
@@ -464,13 +452,11 @@ class ForStatement: public IterationStatement {
next_ = next;
}
- virtual void Accept(AstVisitor* v);
-
- Statement* init() const { return init_; }
+ Statement* init() const { return init_; }
void set_init(Statement* stmt) { init_ = stmt; }
- Expression* cond() const { return cond_; }
+ Expression* cond() const { return cond_; }
void set_cond(Expression* expr) { cond_ = expr; }
- Statement* next() const { return next_; }
+ Statement* next() const { return next_; }
void set_next(Statement* stmt) { next_ = stmt; }
bool may_have_function_literal() const {
@@ -498,14 +484,14 @@ class ForInStatement: public IterationStatement {
public:
explicit inline ForInStatement(ZoneStringList* labels);
+ DECLARE_NODE_TYPE(ForInStatement)
+
void Initialize(Expression* each, Expression* enumerable, Statement* body) {
IterationStatement::Initialize(body);
each_ = each;
enumerable_ = enumerable;
}
- virtual void Accept(AstVisitor* v);
-
Expression* each() const { return each_; }
Expression* enumerable() const { return enumerable_; }
@@ -520,10 +506,7 @@ class ExpressionStatement: public Statement {
explicit ExpressionStatement(Expression* expression)
: expression_(expression) { }
- virtual void Accept(AstVisitor* v);
-
- // Type testing & conversion.
- virtual ExpressionStatement* AsExpressionStatement() { return this; }
+ DECLARE_NODE_TYPE(ExpressionStatement)
virtual Assignment* StatementAsSimpleAssignment();
virtual CountOperation* StatementAsCountOperation();
@@ -541,9 +524,9 @@ class ContinueStatement: public Statement {
explicit ContinueStatement(IterationStatement* target)
: target_(target) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(ContinueStatement)
- IterationStatement* target() const { return target_; }
+ IterationStatement* target() const { return target_; }
private:
IterationStatement* target_;
@@ -555,9 +538,9 @@ class BreakStatement: public Statement {
explicit BreakStatement(BreakableStatement* target)
: target_(target) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(BreakStatement)
- BreakableStatement* target() const { return target_; }
+ BreakableStatement* target() const { return target_; }
private:
BreakableStatement* target_;
@@ -569,10 +552,7 @@ class ReturnStatement: public Statement {
explicit ReturnStatement(Expression* expression)
: expression_(expression) { }
- virtual void Accept(AstVisitor* v);
-
- // Type testing & conversion.
- virtual ReturnStatement* AsReturnStatement() { return this; }
+ DECLARE_NODE_TYPE(ReturnStatement)
Expression* expression() { return expression_; }
@@ -586,9 +566,9 @@ class WithEnterStatement: public Statement {
explicit WithEnterStatement(Expression* expression, bool is_catch_block)
: expression_(expression), is_catch_block_(is_catch_block) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(WithEnterStatement)
- Expression* expression() const { return expression_; }
+ Expression* expression() const { return expression_; }
bool is_catch_block() const { return is_catch_block_; }
@@ -602,7 +582,7 @@ class WithExitStatement: public Statement {
public:
WithExitStatement() { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(WithExitStatement)
};
@@ -610,13 +590,13 @@ class CaseClause: public ZoneObject {
public:
CaseClause(Expression* label, ZoneList<Statement*>* statements);
- bool is_default() const { return label_ == NULL; }
- Expression* label() const {
+ bool is_default() const { return label_ == NULL; }
+ Expression* label() const {
CHECK(!is_default());
return label_;
}
JumpTarget* body_target() { return &body_target_; }
- ZoneList<Statement*>* statements() const { return statements_; }
+ ZoneList<Statement*>* statements() const { return statements_; }
private:
Expression* label_;
@@ -629,15 +609,15 @@ class SwitchStatement: public BreakableStatement {
public:
explicit inline SwitchStatement(ZoneStringList* labels);
+ DECLARE_NODE_TYPE(SwitchStatement)
+
void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
tag_ = tag;
cases_ = cases;
}
- virtual void Accept(AstVisitor* v);
-
- Expression* tag() const { return tag_; }
- ZoneList<CaseClause*>* cases() const { return cases_; }
+ Expression* tag() const { return tag_; }
+ ZoneList<CaseClause*>* cases() const { return cases_; }
private:
Expression* tag_;
@@ -659,7 +639,7 @@ class IfStatement: public Statement {
then_statement_(then_statement),
else_statement_(else_statement) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(IfStatement)
bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
@@ -729,10 +709,10 @@ class TryCatchStatement: public TryStatement {
catch_block_(catch_block) {
}
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(TryCatchStatement)
- VariableProxy* catch_var() const { return catch_var_; }
- Block* catch_block() const { return catch_block_; }
+ VariableProxy* catch_var() const { return catch_var_; }
+ Block* catch_block() const { return catch_block_; }
private:
VariableProxy* catch_var_;
@@ -746,7 +726,7 @@ class TryFinallyStatement: public TryStatement {
: TryStatement(try_block),
finally_block_(finally_block) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(TryFinallyStatement)
Block* finally_block() const { return finally_block_; }
@@ -757,18 +737,13 @@ class TryFinallyStatement: public TryStatement {
class DebuggerStatement: public Statement {
public:
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(DebuggerStatement)
};
class EmptyStatement: public Statement {
public:
- EmptyStatement() {}
-
- virtual void Accept(AstVisitor* v);
-
- // Type testing & conversion.
- virtual EmptyStatement* AsEmptyStatement() { return this; }
+ DECLARE_NODE_TYPE(EmptyStatement)
};
@@ -776,13 +751,11 @@ class Literal: public Expression {
public:
explicit Literal(Handle<Object> handle) : handle_(handle) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(Literal)
+
virtual bool IsTrivial() { return true; }
virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
- // Type testing & conversion.
- virtual Literal* AsLiteral() { return this; }
-
// Check if this literal is identical to the other literal.
bool IsIdenticalTo(const Literal* other) const {
return handle_.is_identical_to(other->handle_);
@@ -876,8 +849,7 @@ class ObjectLiteral: public MaterializedLiteral {
properties_(properties),
fast_elements_(fast_elements) {}
- virtual ObjectLiteral* AsObjectLiteral() { return this; }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(ObjectLiteral)
Handle<FixedArray> constant_properties() const {
return constant_properties_;
@@ -903,7 +875,7 @@ class RegExpLiteral: public MaterializedLiteral {
pattern_(pattern),
flags_(flags) {}
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(RegExpLiteral)
Handle<String> pattern() const { return pattern_; }
Handle<String> flags() const { return flags_; }
@@ -926,8 +898,7 @@ class ArrayLiteral: public MaterializedLiteral {
constant_elements_(constant_elements),
values_(values) {}
- virtual void Accept(AstVisitor* v);
- virtual ArrayLiteral* AsArrayLiteral() { return this; }
+ DECLARE_NODE_TYPE(ArrayLiteral)
Handle<FixedArray> constant_elements() const { return constant_elements_; }
ZoneList<Expression*>* values() const { return values_; }
@@ -947,7 +918,7 @@ class CatchExtensionObject: public Expression {
: key_(key), value_(value) {
}
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(CatchExtensionObject)
Literal* key() const { return key_; }
VariableProxy* value() const { return value_; }
@@ -960,19 +931,20 @@ class CatchExtensionObject: public Expression {
class VariableProxy: public Expression {
public:
- virtual void Accept(AstVisitor* v);
+ explicit VariableProxy(Variable* var);
+
+ DECLARE_NODE_TYPE(VariableProxy)
// Type testing & conversion
virtual Property* AsProperty() {
return var_ == NULL ? NULL : var_->AsProperty();
}
- virtual VariableProxy* AsVariableProxy() {
- return this;
- }
-
Variable* AsVariable() {
- return this == NULL || var_ == NULL ? NULL : var_->AsVariable();
+ if (this == NULL || var_ == NULL) return NULL;
+ Expression* rewrite = var_->rewrite();
+ if (rewrite == NULL || rewrite->AsSlot() != NULL) return var_;
+ return NULL;
}
virtual bool IsValidLeftHandSide() {
@@ -994,10 +966,10 @@ class VariableProxy: public Expression {
return (variable == NULL) ? false : variable->is_arguments();
}
- Handle<String> name() const { return name_; }
- Variable* var() const { return var_; }
- bool is_this() const { return is_this_; }
- bool inside_with() const { return inside_with_; }
+ Handle<String> name() const { return name_; }
+ Variable* var() const { return var_; }
+ bool is_this() const { return is_this_; }
+ bool inside_with() const { return inside_with_; }
void MarkAsTrivial() { is_trivial_ = true; }
@@ -1062,10 +1034,7 @@ class Slot: public Expression {
ASSERT(var != NULL);
}
- virtual void Accept(AstVisitor* v);
-
- // Type testing & conversion
- virtual Slot* AsSlot() { return this; }
+ DECLARE_NODE_TYPE(Slot)
bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
@@ -1092,10 +1061,7 @@ class Property: public Expression {
Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
: obj_(obj), key_(key), pos_(pos), type_(type) { }
- virtual void Accept(AstVisitor* v);
-
- // Type testing & conversion
- virtual Property* AsProperty() { return this; }
+ DECLARE_NODE_TYPE(Property)
virtual bool IsValidLeftHandSide() { return true; }
@@ -1124,10 +1090,7 @@ class Call: public Expression {
Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
: expression_(expression), arguments_(arguments), pos_(pos) { }
- virtual void Accept(AstVisitor* v);
-
- // Type testing and conversion.
- virtual Call* AsCall() { return this; }
+ DECLARE_NODE_TYPE(Call)
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1149,7 +1112,7 @@ class CallNew: public Expression {
CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
: expression_(expression), arguments_(arguments), pos_(pos) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(CallNew)
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1173,7 +1136,7 @@ class CallRuntime: public Expression {
ZoneList<Expression*>* arguments)
: name_(name), function_(function), arguments_(arguments) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(CallRuntime)
Handle<String> name() const { return name_; }
Runtime::Function* function() const { return function_; }
@@ -1194,11 +1157,9 @@ class UnaryOperation: public Expression {
ASSERT(Token::IsUnaryOp(op));
}
- virtual void Accept(AstVisitor* v);
- virtual bool ResultOverwriteAllowed();
+ DECLARE_NODE_TYPE(UnaryOperation)
- // Type testing & conversion
- virtual UnaryOperation* AsUnaryOperation() { return this; }
+ virtual bool ResultOverwriteAllowed();
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
@@ -1222,11 +1183,9 @@ class BinaryOperation: public Expression {
// Create the binary operation corresponding to a compound assignment.
explicit BinaryOperation(Assignment* assignment);
- virtual void Accept(AstVisitor* v);
- virtual bool ResultOverwriteAllowed();
+ DECLARE_NODE_TYPE(BinaryOperation)
- // Type testing & conversion
- virtual BinaryOperation* AsBinaryOperation() { return this; }
+ virtual bool ResultOverwriteAllowed();
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
@@ -1248,12 +1207,12 @@ class IncrementOperation: public Expression {
ASSERT(Token::IsCountOp(op));
}
+ DECLARE_NODE_TYPE(IncrementOperation)
+
Token::Value op() const { return op_; }
bool is_increment() { return op_ == Token::INC; }
Expression* expression() const { return expression_; }
- virtual void Accept(AstVisitor* v);
-
private:
Token::Value op_;
Expression* expression_;
@@ -1266,9 +1225,7 @@ class CountOperation: public Expression {
CountOperation(bool is_prefix, IncrementOperation* increment, int pos)
: is_prefix_(is_prefix), increment_(increment), pos_(pos) { }
- virtual void Accept(AstVisitor* v);
-
- virtual CountOperation* AsCountOperation() { return this; }
+ DECLARE_NODE_TYPE(CountOperation)
bool is_prefix() const { return is_prefix_; }
bool is_postfix() const { return !is_prefix_; }
@@ -1301,16 +1258,13 @@ class CompareOperation: public Expression {
ASSERT(Token::IsCompareOp(op));
}
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(CompareOperation)
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
int position() const { return pos_; }
- // Type testing & conversion
- virtual CompareOperation* AsCompareOperation() { return this; }
-
private:
Token::Value op_;
Expression* left_;
@@ -1324,7 +1278,7 @@ class CompareToNull: public Expression {
CompareToNull(bool is_strict, Expression* expression)
: is_strict_(is_strict), expression_(expression) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(CompareToNull)
bool is_strict() const { return is_strict_; }
Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
@@ -1349,7 +1303,7 @@ class Conditional: public Expression {
then_expression_position_(then_expression_position),
else_expression_position_(else_expression_position) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(Conditional)
Expression* condition() const { return condition_; }
Expression* then_expression() const { return then_expression_; }
@@ -1375,8 +1329,7 @@ class Assignment: public Expression {
ASSERT(Token::IsAssignmentOp(op));
}
- virtual void Accept(AstVisitor* v);
- virtual Assignment* AsAssignment() { return this; }
+ DECLARE_NODE_TYPE(Assignment)
Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
@@ -1413,7 +1366,7 @@ class Throw: public Expression {
Throw(Expression* exception, int pos)
: exception_(exception), pos_(pos) {}
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(Throw)
Expression* exception() const { return exception_; }
int position() const { return pos_; }
@@ -1459,14 +1412,11 @@ class FunctionLiteral: public Expression {
#endif
}
- virtual void Accept(AstVisitor* v);
-
- // Type testing & conversion
- virtual FunctionLiteral* AsFunctionLiteral() { return this; }
+ DECLARE_NODE_TYPE(FunctionLiteral)
- Handle<String> name() const { return name_; }
- Scope* scope() const { return scope_; }
- ZoneList<Statement*>* body() const { return body_; }
+ Handle<String> name() const { return name_; }
+ Scope* scope() const { return scope_; }
+ ZoneList<Statement*>* body() const { return body_; }
void set_function_token_position(int pos) { function_token_position_ = pos; }
int function_token_position() const { return function_token_position_; }
int start_position() const { return start_position_; }
@@ -1486,7 +1436,7 @@ class FunctionLiteral: public Expression {
bool AllowsLazyCompilation();
- Handle<String> inferred_name() const { return inferred_name_; }
+ Handle<String> inferred_name() const { return inferred_name_; }
void set_inferred_name(Handle<String> inferred_name) {
inferred_name_ = inferred_name;
}
@@ -1529,12 +1479,12 @@ class SharedFunctionInfoLiteral: public Expression {
Handle<SharedFunctionInfo> shared_function_info)
: shared_function_info_(shared_function_info) { }
+ DECLARE_NODE_TYPE(SharedFunctionInfoLiteral)
+
Handle<SharedFunctionInfo> shared_function_info() const {
return shared_function_info_;
}
- virtual void Accept(AstVisitor* v);
-
private:
Handle<SharedFunctionInfo> shared_function_info_;
};
@@ -1542,7 +1492,7 @@ class SharedFunctionInfoLiteral: public Expression {
class ThisFunction: public Expression {
public:
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(ThisFunction)
};
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 6e6c2c639..aa8d8e5ac 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -1064,8 +1064,11 @@ bool Genesis::InstallNatives() {
// global object.
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- SetProperty(builtins, Factory::LookupAsciiSymbol("global"),
- Handle<Object>(global_context()->global()), attributes);
+ Handle<String> global_symbol = Factory::LookupAsciiSymbol("global");
+ SetProperty(builtins,
+ global_symbol,
+ Handle<Object>(global_context()->global()),
+ attributes);
// Setup the reference from the global object to the builtins object.
JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
@@ -1344,33 +1347,41 @@ bool Genesis::InstallNatives() {
}
-static void InstallCustomCallGenerator(
- Handle<JSFunction> holder_function,
- CallStubCompiler::CustomGeneratorOwner owner_flag,
- const char* function_name,
- int id) {
- Handle<JSObject> owner;
- if (owner_flag == CallStubCompiler::FUNCTION) {
- owner = Handle<JSObject>::cast(holder_function);
- } else {
- ASSERT(owner_flag == CallStubCompiler::INSTANCE_PROTOTYPE);
- owner = Handle<JSObject>(
- JSObject::cast(holder_function->instance_prototype()));
+static Handle<JSObject> ResolveCustomCallGeneratorHolder(
+ Handle<Context> global_context,
+ const char* holder_expr) {
+ Handle<GlobalObject> global(global_context->global());
+ const char* period_pos = strchr(holder_expr, '.');
+ if (period_pos == NULL) {
+ return Handle<JSObject>::cast(
+ GetProperty(global, Factory::LookupAsciiSymbol(holder_expr)));
}
+ ASSERT_EQ(".prototype", period_pos);
+ Vector<const char> property(holder_expr,
+ static_cast<int>(period_pos - holder_expr));
+ Handle<JSFunction> function = Handle<JSFunction>::cast(
+ GetProperty(global, Factory::LookupSymbol(property)));
+ return Handle<JSObject>(JSObject::cast(function->prototype()));
+}
+
+
+static void InstallCustomCallGenerator(Handle<JSObject> holder,
+ const char* function_name,
+ int id) {
Handle<String> name = Factory::LookupAsciiSymbol(function_name);
- Handle<JSFunction> function(JSFunction::cast(owner->GetProperty(*name)));
+ Handle<JSFunction> function(JSFunction::cast(holder->GetProperty(*name)));
function->shared()->set_function_data(Smi::FromInt(id));
}
void Genesis::InstallCustomCallGenerators() {
HandleScope scope;
-#define INSTALL_CALL_GENERATOR(holder_fun, owner_flag, fun_name, name) \
- { \
- Handle<JSFunction> holder(global_context()->holder_fun##_function()); \
- const int id = CallStubCompiler::k##name##CallGenerator; \
- InstallCustomCallGenerator(holder, CallStubCompiler::owner_flag, \
- #fun_name, id); \
+#define INSTALL_CALL_GENERATOR(holder_expr, fun_name, name) \
+ { \
+ Handle<JSObject> holder = ResolveCustomCallGeneratorHolder( \
+ global_context(), #holder_expr); \
+ const int id = CallStubCompiler::k##name##CallGenerator; \
+ InstallCustomCallGenerator(holder, #fun_name, id); \
}
CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR)
#undef INSTALL_CALL_GENERATOR
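
    A sketch of the holder-expression convention this helper parses: either
    a bare global name ("Math") or "<Constructor>.prototype". Strings stand
    in for handles and property lookups; only the strchr-based split is shown.

    #include <cassert>
    #include <cstring>
    #include <string>
    #include <utility>

    static std::pair<std::string, bool> ParseHolderExpr(const char* expr) {
      const char* period_pos = strchr(expr, '.');
      if (period_pos == NULL) {
        return std::make_pair(std::string(expr), false);   // global property
      }
      assert(strcmp(period_pos, ".prototype") == 0);       // only allowed suffix
      std::string function_name(expr, period_pos - expr);  // look up function,
      return std::make_pair(function_name, true);          // then its prototype
    }

    int main() {
      assert(ParseHolderExpr("Math").second == false);
      assert(ParseHolderExpr("String.prototype").first == "String");
      return 0;
    }
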
@@ -1405,8 +1416,14 @@ void Genesis::InstallJSFunctionResultCaches() {
Handle<FixedArray> caches = Factory::NewFixedArray(kNumberOfCaches, TENURED);
int index = 0;
-#define F(size, func) caches->set(index++, CreateCache(size, func));
- JSFUNCTION_RESULT_CACHE_LIST(F)
+
+#define F(size, func) do { \
+ FixedArray* cache = CreateCache((size), (func)); \
+ caches->set(index++, cache); \
+ } while (false)
+
+ JSFUNCTION_RESULT_CACHE_LIST(F);
+
#undef F
global_context()->set_jsfunction_result_caches(*caches);
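
    The rewritten F macro uses the do { ... } while (false) idiom, which
    makes each expansion a single statement that finishes the allocating
    CreateCache call before the store and composes cleanly with if/else and
    the trailing semicolon. A generic illustration of the idiom (not V8's
    macro):

    #include <cstdio>

    #define SET_NEXT(array, index, value) do {   \
        int computed = (value);                  \
        (array)[(index)++] = computed;           \
      } while (false)

    int main() {
      int caches[4];
      int index = 0;
      if (index == 0)
        SET_NEXT(caches, index, 1 + 2);   // expands to exactly one statement
      else
        SET_NEXT(caches, index, 0);
      std::printf("%d %d\n", index, caches[0]);  // prints "1 3"
      return 0;
    }
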
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 7e49f3133..b5e8c4e8f 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -65,6 +65,7 @@ enum BuiltinExtraArguments {
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructCall, BUILTIN, UNINITIALIZED) \
+ V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED) \
V(JSConstructStubApi, BUILTIN, UNINITIALIZED) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \
@@ -249,6 +250,7 @@ class Builtins : public AllStatic {
CFunctionId id,
BuiltinExtraArguments extra_args);
static void Generate_JSConstructCall(MacroAssembler* masm);
+ static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index daf1c0db6..92241d1c5 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -289,7 +289,7 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
for (int i = 0; i < length; i++) {
Declaration* node = declarations->at(i);
Variable* var = node->proxy()->var();
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
// If it was not possible to allocate the variable at compile
// time, we need to "declare" it at runtime to make sure it
@@ -310,7 +310,7 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
for (int j = 0, i = 0; i < length; i++) {
Declaration* node = declarations->at(i);
Variable* var = node->proxy()->var();
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
// Skip - already processed.
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 7402e6857..b0449c47f 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -110,6 +110,9 @@ class CompilationCacheScript : public CompilationSubCache {
void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
private:
+ MUST_USE_RESULT Object* TryTablePut(
+ Handle<String> source, Handle<SharedFunctionInfo> function_info);
+
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(
Handle<String> source, Handle<SharedFunctionInfo> function_info);
@@ -137,6 +140,12 @@ class CompilationCacheEval: public CompilationSubCache {
Handle<SharedFunctionInfo> function_info);
private:
+ MUST_USE_RESULT Object* TryTablePut(
+ Handle<String> source,
+ Handle<Context> context,
+ Handle<SharedFunctionInfo> function_info);
+
+
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(
Handle<String> source,
@@ -159,6 +168,10 @@ class CompilationCacheRegExp: public CompilationSubCache {
JSRegExp::Flags flags,
Handle<FixedArray> data);
private:
+ MUST_USE_RESULT Object* TryTablePut(Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data);
+
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(Handle<String> source,
JSRegExp::Flags flags,
@@ -320,11 +333,18 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
}
+Object* CompilationCacheScript::TryTablePut(
+ Handle<String> source,
+ Handle<SharedFunctionInfo> function_info) {
+ Handle<CompilationCacheTable> table = GetFirstTable();
+ return table->Put(*source, *function_info);
+}
+
+
Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
Handle<String> source,
Handle<SharedFunctionInfo> function_info) {
- CALL_HEAP_FUNCTION(GetFirstTable()->Put(*source, *function_info),
- CompilationCacheTable);
+ CALL_HEAP_FUNCTION(TryTablePut(source, function_info), CompilationCacheTable);
}
@@ -366,13 +386,20 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
}
+Object* CompilationCacheEval::TryTablePut(
+ Handle<String> source,
+ Handle<Context> context,
+ Handle<SharedFunctionInfo> function_info) {
+ Handle<CompilationCacheTable> table = GetFirstTable();
+ return table->PutEval(*source, *context, *function_info);
+}
+
+
Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
- CALL_HEAP_FUNCTION(GetFirstTable()->PutEval(*source,
- *context,
- *function_info),
+ CALL_HEAP_FUNCTION(TryTablePut(source, context, function_info),
CompilationCacheTable);
}
@@ -415,12 +442,20 @@ Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
}
+Object* CompilationCacheRegExp::TryTablePut(
+ Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data) {
+ Handle<CompilationCacheTable> table = GetFirstTable();
+ return table->PutRegExp(*source, flags, *data);
+}
+
+
Handle<CompilationCacheTable> CompilationCacheRegExp::TablePut(
Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
- CALL_HEAP_FUNCTION(GetFirstTable()->PutRegExp(*source, flags, *data),
- CompilationCacheTable);
+ CALL_HEAP_FUNCTION(TryTablePut(source, flags, data), CompilationCacheTable);
}
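
    The shape of this refactoring, sketched with stand-in types (none of
    these are V8's): the MUST_USE_RESULT Try* helpers return a raw result
    that may signal a retryable allocation failure, and the
    CALL_HEAP_FUNCTION-style wrapper retries once after a (simulated) GC.

    #include <cassert>

    struct Object {
      bool is_failure;
    };

    static Object kFailure = { true };
    static Object kTable = { false };
    static bool gc_performed = false;

    static Object* TryTablePut() {          // MUST_USE_RESULT raw operation
      if (!gc_performed) return &kFailure;  // first attempt: out of space
      return &kTable;
    }

    static Object* CallHeapFunction() {     // handle-returning wrapper
      Object* result = TryTablePut();
      if (result->is_failure) {
        gc_performed = true;                // "collect garbage", then retry
        result = TryTablePut();
      }
      return result;
    }

    int main() {
      Object* table = CallHeapFunction();
      assert(!table->is_failure);
      return 0;
    }
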
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index f65f94156..6ef5a1c50 100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -120,8 +120,9 @@ Handle<Code> MakeCodeForLiveEdit(CompilationInfo* info) {
Handle<Context> context = Handle<Context>::null();
Handle<Code> code = MakeCode(context, info);
if (!info->shared_info().is_null()) {
- info->shared_info()->set_scope_info(
- *SerializedScopeInfo::Create(info->scope()));
+ Handle<SerializedScopeInfo> scope_info =
+ SerializedScopeInfo::Create(info->scope());
+ info->shared_info()->set_scope_info(*scope_info);
}
return code;
}
@@ -145,9 +146,10 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
bool is_json = (validate == Compiler::VALIDATE_JSON);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (is_eval || is_json) {
- script->set_compilation_type(
- is_json ? Smi::FromInt(Script::COMPILATION_TYPE_JSON) :
- Smi::FromInt(Script::COMPILATION_TYPE_EVAL));
+ Script::CompilationType compilation_type = is_json
+ ? Script::COMPILATION_TYPE_JSON
+ : Script::COMPILATION_TYPE_EVAL;
+ script->set_compilation_type(Smi::FromInt(compilation_type));
// For eval scripts add information on the function from which eval was
// called.
if (is_eval) {
@@ -170,16 +172,16 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
ASSERT(is_eval || is_global);
// Build AST.
+ EagerCompilationInfo info(script, is_eval);
FunctionLiteral* lit =
MakeAST(is_global, script, extension, pre_data, is_json);
- LiveEditFunctionTracker live_edit_tracker(lit);
-
// Check for parse errors.
if (lit == NULL) {
ASSERT(Top::has_pending_exception());
return Handle<SharedFunctionInfo>::null();
}
+ info.set_function(lit);
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
@@ -190,7 +192,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
HistogramTimerScope timer(rate);
// Compile the code.
- CompilationInfo info(lit, script, is_eval);
+ LiveEditFunctionTracker live_edit_tracker(lit);
Handle<Code> code = MakeCode(context, &info);
// Check for stack-overflow exceptions.
@@ -375,20 +377,12 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
// Compute name, source code and script data.
Handle<SharedFunctionInfo> shared = info->shared_info();
- Handle<String> name(String::cast(shared->name()));
-
- int start_position = shared->start_position();
- int end_position = shared->end_position();
- bool is_expression = shared->is_expression();
- Counters::total_compile_size.Increment(end_position - start_position);
+ int compiled_size = shared->end_position() - shared->start_position();
+ Counters::total_compile_size.Increment(compiled_size);
// Generate the AST for the lazily compiled function. The AST may be
// NULL in case of parser stack overflow.
- FunctionLiteral* lit = MakeLazyAST(info->script(),
- name,
- start_position,
- end_position,
- is_expression);
+ FunctionLiteral* lit = MakeLazyAST(shared);
// Check for parse errors.
if (lit == NULL) {
@@ -412,18 +406,20 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
}
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
- name,
+ Handle<String>(String::cast(shared->name())),
Handle<String>(shared->inferred_name()),
- start_position,
+ shared->start_position(),
info->script(),
code);
// Update the shared function info with the compiled code and the scope info.
// Please note, that the order of the sharedfunction initialization is
- // important since set_scope_info might trigger a GC, causing the ASSERT
- // below to be invalid if the code was flushed. By settting the code
+ // important since SerializedScopeInfo::Create might trigger a GC, causing
+ // the ASSERT below to be invalid if the code was flushed. By setting the code
// object last we avoid this.
- shared->set_scope_info(*SerializedScopeInfo::Create(info->scope()));
+ Handle<SerializedScopeInfo> scope_info =
+ SerializedScopeInfo::Create(info->scope());
+ shared->set_scope_info(*scope_info);
shared->set_code(*code);
if (!info->closure().is_null()) {
info->closure()->set_code(*code);
@@ -479,7 +475,8 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// Generate code and return it. The way that the compilation mode
// is controlled by the command-line flags is described in
// the static helper function MakeCode.
- CompilationInfo info(literal, script, false);
+ EagerCompilationInfo info(script, false);
+ info.set_function(literal);
bool is_run_once = literal->try_full_codegen();
bool use_full = FLAG_full_compiler && !literal->contains_loops();
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index ed26603f4..ae0d6def6 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -41,118 +41,109 @@ namespace internal {
// is constructed based on the resources available at compile-time.
class CompilationInfo BASE_EMBEDDED {
public:
- // Lazy compilation of a JSFunction.
- CompilationInfo(Handle<JSFunction> closure,
- int loop_nesting,
- Handle<Object> receiver)
- : closure_(closure),
- function_(NULL),
- is_eval_(false),
- loop_nesting_(loop_nesting),
- receiver_(receiver) {
- Initialize();
- ASSERT(!closure_.is_null() &&
- shared_info_.is_null() &&
- script_.is_null());
- }
+ virtual ~CompilationInfo() {}
+
+ // Dispatched behavior.
+ virtual Handle<SharedFunctionInfo> shared_info() const = 0;
- // Lazy compilation based on SharedFunctionInfo.
- explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info)
- : shared_info_(shared_info),
- function_(NULL),
- is_eval_(false),
- loop_nesting_(0) {
- Initialize();
- ASSERT(closure_.is_null() &&
- !shared_info_.is_null() &&
- script_.is_null());
+ virtual Handle<Script> script() const {
+ return Handle<Script>(Script::cast(shared_info()->script()));
}
- // Eager compilation.
- CompilationInfo(FunctionLiteral* literal, Handle<Script> script, bool is_eval)
- : script_(script),
- function_(literal),
- is_eval_(is_eval),
- loop_nesting_(0) {
- Initialize();
- ASSERT(closure_.is_null() &&
- shared_info_.is_null() &&
- !script_.is_null());
+ virtual Handle<JSFunction> closure() const {
+ return Handle<JSFunction>::null();
}
- // We can only get a JSFunction if we actually have one.
- Handle<JSFunction> closure() { return closure_; }
+ virtual bool is_eval() const { return false; }
- // We can get a SharedFunctionInfo from a JSFunction or if we actually
- // have one.
- Handle<SharedFunctionInfo> shared_info() {
- if (!closure().is_null()) {
- return Handle<SharedFunctionInfo>(closure()->shared());
- } else {
- return shared_info_;
- }
- }
+ virtual int loop_nesting() const { return 0; }
- // We can always get a script. Either we have one or we can get a shared
- // function info.
- Handle<Script> script() {
- if (!script_.is_null()) {
- return script_;
- } else {
- ASSERT(shared_info()->script()->IsScript());
- return Handle<Script>(Script::cast(shared_info()->script()));
- }
- }
+ virtual bool has_global_object() const { return false; }
+ virtual GlobalObject* global_object() const { return NULL; }
// There should always be a function literal, but it may be set after
// construction (for lazy compilation).
FunctionLiteral* function() { return function_; }
void set_function(FunctionLiteral* literal) { function_ = literal; }
- // Simple accessors.
- bool is_eval() { return is_eval_; }
- int loop_nesting() { return loop_nesting_; }
- bool has_receiver() { return !receiver_.is_null(); }
- Handle<Object> receiver() { return receiver_; }
+ // Derived accessors.
+ Scope* scope() { return function()->scope(); }
+
+ protected:
+ CompilationInfo() : function_(NULL) {}
+
+ private:
+ FunctionLiteral* function_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
+};
- bool has_this_properties() { return has_this_properties_; }
- void set_has_this_properties(bool flag) { has_this_properties_ = flag; }
- bool has_global_object() {
- return !closure().is_null() && (closure()->context()->global() != NULL);
+class EagerCompilationInfo: public CompilationInfo {
+ public:
+ EagerCompilationInfo(Handle<Script> script, bool is_eval)
+ : script_(script), is_eval_(is_eval) {
+ ASSERT(!script.is_null());
}
- GlobalObject* global_object() {
- return has_global_object() ? closure()->context()->global() : NULL;
+ // Overridden functions from the base class.
+ virtual Handle<SharedFunctionInfo> shared_info() const {
+ return Handle<SharedFunctionInfo>::null();
}
- bool has_globals() { return has_globals_; }
- void set_has_globals(bool flag) { has_globals_ = flag; }
+ virtual Handle<Script> script() const { return script_; }
- // Derived accessors.
- Scope* scope() { return function()->scope(); }
+ virtual bool is_eval() const { return is_eval_; }
private:
- void Initialize() {
- has_this_properties_ = false;
- has_globals_ = false;
+ Handle<Script> script_;
+ bool is_eval_;
+};
+
+
+class LazySharedCompilationInfo: public CompilationInfo {
+ public:
+ explicit LazySharedCompilationInfo(Handle<SharedFunctionInfo> shared_info)
+ : shared_info_(shared_info) {
+ ASSERT(!shared_info.is_null());
}
- Handle<JSFunction> closure_;
+ // Overridden functions from the base class.
+ virtual Handle<SharedFunctionInfo> shared_info() const {
+ return shared_info_;
+ }
+
+ private:
Handle<SharedFunctionInfo> shared_info_;
- Handle<Script> script_;
+};
- FunctionLiteral* function_;
- bool is_eval_;
- int loop_nesting_;
+class LazyFunctionCompilationInfo: public CompilationInfo {
+ public:
+ LazyFunctionCompilationInfo(Handle<JSFunction> closure,
+ int loop_nesting)
+ : closure_(closure), loop_nesting_(loop_nesting) {
+ ASSERT(!closure.is_null());
+ }
+
+ // Overridden functions from the base class.
+ virtual Handle<SharedFunctionInfo> shared_info() const {
+ return Handle<SharedFunctionInfo>(closure_->shared());
+ }
- Handle<Object> receiver_;
+ virtual int loop_nesting() const { return loop_nesting_; }
- bool has_this_properties_;
- bool has_globals_;
+ virtual bool has_global_object() const {
+ return closure_->context()->global() != NULL;
+ }
- DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
+ virtual GlobalObject* global_object() const {
+ return closure_->context()->global();
+ }
+
+ private:
+ Handle<JSFunction> closure_;
+ int loop_nesting_;
};
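
    A compact sketch of the new hierarchy's idea, with illustrative names
    rather than V8's: the old constructor flags become virtual accessors
    with safe defaults in the base class, and each compilation mode
    overrides only what it actually knows.

    #include <cassert>

    class Info {
     public:
      virtual ~Info() {}
      virtual bool is_eval() const { return false; }
      virtual int loop_nesting() const { return 0; }
    };

    class EagerInfo : public Info {
     public:
      explicit EagerInfo(bool is_eval) : is_eval_(is_eval) {}
      virtual bool is_eval() const { return is_eval_; }
     private:
      bool is_eval_;
    };

    class LazyInfo : public Info {
     public:
      explicit LazyInfo(int loop_nesting) : loop_nesting_(loop_nesting) {}
      virtual int loop_nesting() const { return loop_nesting_; }
     private:
      int loop_nesting_;
    };

    int main() {
      EagerInfo eager(true);
      LazyInfo lazy(2);
      Info* info = &eager;
      assert(info->is_eval() && info->loop_nesting() == 0);
      info = &lazy;
      assert(!info->is_eval() && info->loop_nesting() == 2);
      return 0;
    }
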
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 723354fc8..1ce5007d8 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -90,7 +90,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
do {
if (FLAG_trace_contexts) {
- PrintF(" - looking in context %p", *context);
+ PrintF(" - looking in context %p", reinterpret_cast<void*>(*context));
if (context->IsGlobalContext()) PrintF(" (global context)");
PrintF("\n");
}
@@ -110,7 +110,8 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
if (*attributes != ABSENT) {
// property found
if (FLAG_trace_contexts) {
- PrintF("=> found property in context object %p\n", *extension);
+ PrintF("=> found property in context object %p\n",
+ reinterpret_cast<void*>(*extension));
}
return extension;
}
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 90cdc773e..f15a804ef 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -956,8 +956,9 @@ static char* CreateExponentialRepresentation(char* decimal_rep,
char* DoubleToExponentialCString(double value, int f) {
+ const int kMaxDigitsAfterPoint = 20;
// f might be -1 to signal that f was undefined in JavaScript.
- ASSERT(f >= -1 && f <= 20);
+ ASSERT(f >= -1 && f <= kMaxDigitsAfterPoint);
bool negative = false;
if (value < 0) {
@@ -969,29 +970,60 @@ char* DoubleToExponentialCString(double value, int f) {
int decimal_point;
int sign;
char* decimal_rep = NULL;
+ bool used_gay_dtoa = false;
+  // f corresponds to the digits after the point. There is always one digit
+  // before the point, so the number of requested digits is f + 1, plus one
+  // character for the null terminator.
+ const int kV8DtoaBufferCapacity = kMaxDigitsAfterPoint + 1 + 1;
+ // Make sure that the buffer is big enough, even if we fall back to the
+ // shortest representation (which happens when f equals -1).
+ ASSERT(kBase10MaximalLength <= kMaxDigitsAfterPoint + 1);
+ char v8_dtoa_buffer[kV8DtoaBufferCapacity];
+ int decimal_rep_length;
+
if (f == -1) {
- decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
- f = StrLength(decimal_rep) - 1;
+ if (DoubleToAscii(value, DTOA_SHORTEST, 0,
+ Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
+ &sign, &decimal_rep_length, &decimal_point)) {
+ f = decimal_rep_length - 1;
+ decimal_rep = v8_dtoa_buffer;
+ } else {
+ decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
+ decimal_rep_length = StrLength(decimal_rep);
+ f = decimal_rep_length - 1;
+ used_gay_dtoa = true;
+ }
} else {
- decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
+ if (DoubleToAscii(value, DTOA_PRECISION, f + 1,
+ Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
+ &sign, &decimal_rep_length, &decimal_point)) {
+ decimal_rep = v8_dtoa_buffer;
+ } else {
+ decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
+ decimal_rep_length = StrLength(decimal_rep);
+ used_gay_dtoa = true;
+ }
}
- int decimal_rep_length = StrLength(decimal_rep);
ASSERT(decimal_rep_length > 0);
ASSERT(decimal_rep_length <= f + 1);
- USE(decimal_rep_length);
int exponent = decimal_point - 1;
char* result =
CreateExponentialRepresentation(decimal_rep, exponent, negative, f+1);
- freedtoa(decimal_rep);
+ if (used_gay_dtoa) {
+ freedtoa(decimal_rep);
+ }
return result;
}
char* DoubleToPrecisionCString(double value, int p) {
- ASSERT(p >= 1 && p <= 21);
+ const int kMinimalDigits = 1;
+ const int kMaximalDigits = 21;
+ ASSERT(p >= kMinimalDigits && p <= kMaximalDigits);
+ USE(kMinimalDigits);
bool negative = false;
if (value < 0) {
@@ -1002,8 +1034,22 @@ char* DoubleToPrecisionCString(double value, int p) {
// Find a sufficiently precise decimal representation of n.
int decimal_point;
int sign;
- char* decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
- int decimal_rep_length = StrLength(decimal_rep);
+ char* decimal_rep = NULL;
+ bool used_gay_dtoa = false;
+ // Add one for the terminating null character.
+ const int kV8DtoaBufferCapacity = kMaximalDigits + 1;
+ char v8_dtoa_buffer[kV8DtoaBufferCapacity];
+ int decimal_rep_length;
+
+ if (DoubleToAscii(value, DTOA_PRECISION, p,
+ Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
+ &sign, &decimal_rep_length, &decimal_point)) {
+ decimal_rep = v8_dtoa_buffer;
+ } else {
+ decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
+ decimal_rep_length = StrLength(decimal_rep);
+ used_gay_dtoa = true;
+ }
ASSERT(decimal_rep_length <= p);
int exponent = decimal_point - 1;
@@ -1047,7 +1093,9 @@ char* DoubleToPrecisionCString(double value, int p) {
result = builder.Finalize();
}
- freedtoa(decimal_rep);
+ if (used_gay_dtoa) {
+ freedtoa(decimal_rep);
+ }
return result;
}
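
    Both conversion functions now share this control flow; a sketch with
    stand-in formatters (FastFormat/SlowFormat are not DoubleToAscii/dtoa):
    try the fixed-size fast path first, fall back to the allocating path on
    failure, and free only in the fallback case, mirroring the
    used_gay_dtoa bookkeeping.

    #include <cstdio>
    #include <cstdlib>

    static bool FastFormat(double v, char* buf, int capacity) {
      int n = std::snprintf(buf, capacity, "%.17g", v);
      return n > 0 && n < capacity;
    }

    static char* SlowFormat(double v) {       // allocating fallback
      char* buf = static_cast<char*>(std::malloc(32));
      std::snprintf(buf, 32, "%g", v);
      return buf;
    }

    int main() {
      char stack_buffer[4];                   // deliberately too small
      char* rep = stack_buffer;
      bool used_fallback = false;
      if (!FastFormat(3.14159265358979, stack_buffer, sizeof(stack_buffer))) {
        rep = SlowFormat(3.14159265358979);
        used_fallback = true;
      }
      std::printf("%s\n", rep);
      if (used_fallback) std::free(rep);      // mirrors the freedtoa() guard
      return 0;
    }
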
diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h
index cb7fdd8ff..5df5893f8 100644
--- a/deps/v8/src/cpu-profiler-inl.h
+++ b/deps/v8/src/cpu-profiler-inl.h
@@ -82,14 +82,11 @@ TickSample* ProfilerEventsProcessor::TickSampleEvent() {
bool ProfilerEventsProcessor::FilterOutCodeCreateEvent(
Logger::LogEventsAndTags tag) {
- // In browser mode, leave only callbacks and non-native JS entries.
- // We filter out regular expressions as currently we can't tell
- // whether they origin from native scripts, so let's not confise people by
- // showing them weird regexes they didn't wrote.
return FLAG_prof_browser_mode
&& (tag != Logger::CALLBACK_TAG
&& tag != Logger::FUNCTION_TAG
&& tag != Logger::LAZY_COMPILE_TAG
+ && tag != Logger::REG_EXP_TAG
&& tag != Logger::SCRIPT_TAG);
}
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index 4248a6433..acf3349be 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -32,6 +32,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "frames-inl.h"
+#include "hashmap.h"
#include "log-inl.h"
#include "../include/v8-profiler.h"
@@ -50,7 +51,13 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
- enqueue_order_(0) {
+ enqueue_order_(0),
+ known_functions_(new HashMap(AddressesMatch)) {
+}
+
+
+ProfilerEventsProcessor::~ProfilerEventsProcessor() {
+ delete known_functions_;
}
@@ -152,16 +159,32 @@ void ProfilerEventsProcessor::FunctionCreateEvent(Address alias,
rec->entry = generator_->NewCodeEntry(security_token_id);
rec->code_start = start;
events_buffer_.Enqueue(evt_rec);
+
+ known_functions_->Lookup(alias, AddressHash(alias), true);
}
void ProfilerEventsProcessor::FunctionMoveEvent(Address from, Address to) {
CodeMoveEvent(from, to);
+
+ if (IsKnownFunction(from)) {
+ known_functions_->Remove(from, AddressHash(from));
+ known_functions_->Lookup(to, AddressHash(to), true);
+ }
}
void ProfilerEventsProcessor::FunctionDeleteEvent(Address from) {
CodeDeleteEvent(from);
+
+ known_functions_->Remove(from, AddressHash(from));
+}
+
+
+bool ProfilerEventsProcessor::IsKnownFunction(Address start) {
+ HashMap::Entry* entry =
+ known_functions_->Lookup(start, AddressHash(start), false);
+ return entry != NULL;
}
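
    The bookkeeping kept in known_functions_, sketched with
    std::unordered_set standing in for V8's HashMap: create registers an
    address, move re-keys it, delete removes it, and IsKnownFunction is a
    plain membership test.

    #include <cassert>
    #include <unordered_set>

    typedef const void* Address;

    int main() {
      std::unordered_set<Address> known_functions;
      Address from = reinterpret_cast<Address>(0x1000);
      Address to = reinterpret_cast<Address>(0x2000);

      known_functions.insert(from);              // FunctionCreateEvent
      if (known_functions.erase(from) > 0) {     // FunctionMoveEvent re-keys
        known_functions.insert(to);
      }
      assert(known_functions.count(to) == 1);    // IsKnownFunction(to)
      known_functions.erase(to);                 // FunctionDeleteEvent
      assert(known_functions.empty());
      return 0;
    }
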
@@ -403,6 +426,40 @@ void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
}
+void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function,
+ HeapObject* source) {
+ // This function is called from GC iterators (during Scavenge,
+ // MC, and MS), so marking bits can be set on objects. That's
+ // why unchecked accessors are used here.
+
+ // The same function can be reported several times.
+ if (function->unchecked_code() == Builtins::builtin(Builtins::LazyCompile)
+ || singleton_->processor_->IsKnownFunction(function->address())) return;
+
+ int security_token_id = TokenEnumerator::kNoSecurityToken;
+ // In debug mode, assertions may fail for contexts,
+ // and we can live without security tokens in debug mode.
+#ifndef DEBUG
+ if (function->unchecked_context()->IsContext()) {
+ security_token_id = singleton_->token_enumerator_->GetTokenId(
+ function->context()->global_context()->security_token());
+ }
+ // Security token may not be moved yet.
+ if (security_token_id == TokenEnumerator::kNoSecurityToken) {
+ JSFunction* old_function = reinterpret_cast<JSFunction*>(source);
+ if (old_function->unchecked_context()->IsContext()) {
+ security_token_id = singleton_->token_enumerator_->GetTokenId(
+ old_function->context()->global_context()->security_token());
+ }
+ }
+#endif
+ singleton_->processor_->FunctionCreateEvent(
+ function->address(),
+ function->unchecked_code()->address(),
+ security_token_id);
+}
+
+
void CpuProfiler::FunctionMoveEvent(Address from, Address to) {
singleton_->processor_->FunctionMoveEvent(from, to);
}
@@ -473,7 +530,12 @@ void CpuProfiler::StartProcessorIfNotStarted() {
processor_->Start();
// Enumerate stuff we already have in the heap.
if (Heap::HasBeenSetup()) {
- Logger::LogCodeObjects();
+ if (!FLAG_prof_browser_mode) {
+ bool saved_log_code_flag = FLAG_log_code;
+ FLAG_log_code = true;
+ Logger::LogCodeObjects();
+ FLAG_log_code = saved_log_code_flag;
+ }
Logger::LogCompiledFunctions();
Logger::LogFunctionObjects();
Logger::LogAccessorCallbacks();
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 4d5559e4f..86f9f6711 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -41,6 +41,7 @@ class CodeEntry;
class CodeMap;
class CpuProfile;
class CpuProfilesCollection;
+class HashMap;
class ProfileGenerator;
class TokenEnumerator;
@@ -132,7 +133,7 @@ class TickSampleEventRecord BASE_EMBEDDED {
class ProfilerEventsProcessor : public Thread {
public:
explicit ProfilerEventsProcessor(ProfileGenerator* generator);
- virtual ~ProfilerEventsProcessor() { }
+ virtual ~ProfilerEventsProcessor();
// Thread control.
virtual void Run();
@@ -163,6 +164,7 @@ class ProfilerEventsProcessor : public Thread {
Address start, unsigned size);
// Puts current stack into tick sample events buffer.
void AddCurrentStack();
+ bool IsKnownFunction(Address start);
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
@@ -183,6 +185,13 @@ class ProfilerEventsProcessor : public Thread {
bool ProcessTicks(unsigned dequeue_order);
INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
+ INLINE(static bool AddressesMatch(void* key1, void* key2)) {
+ return key1 == key2;
+ }
+ INLINE(static uint32_t AddressHash(Address addr)) {
+ return ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)));
+ }
ProfileGenerator* generator_;
bool running_;
@@ -190,6 +199,9 @@ class ProfilerEventsProcessor : public Thread {
SamplingCircularQueue ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned enqueue_order_;
+
+ // Used from the VM thread.
+ HashMap* known_functions_;
};
} } // namespace v8::internal
@@ -242,6 +254,10 @@ class CpuProfiler {
static void CodeMoveEvent(Address from, Address to);
static void CodeDeleteEvent(Address from);
static void FunctionCreateEvent(JSFunction* function);
+ // Reports function creation in case we had missed it (e.g.
+ // if it was created from compiled code).
+ static void FunctionCreateEventFromMove(JSFunction* function,
+ HeapObject* source);
static void FunctionMoveEvent(Address from, Address to);
static void FunctionDeleteEvent(Address from);
static void GetterCallbackEvent(String* name, Address entry_point);
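
FunctionCreateEventFromMove relies on the processor's new known_functions_ map to skip functions it has already reported. A sketch of that bookkeeping, assuming a set keyed by code address is all that is needed; std::unordered_set stands in for v8::internal::HashMap, and the hash mirrors AddressHash's pointer-to-uint32 squeeze:

#include <cstdint>
#include <cstdio>
#include <unordered_set>

typedef unsigned char* Address;

struct AddressHasher {
  size_t operator()(Address addr) const {
    // Same idea as AddressHash above: truncate the pointer to 32 bits, mix.
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)) *
           2654435761u;  // Knuth's multiplicative constant
  }
};

class FunctionRegistry {
 public:
  bool IsKnownFunction(Address start) const {
    return known_.count(start) != 0;
  }
  void FunctionCreateEvent(Address start) { known_.insert(start); }
 private:
  std::unordered_set<Address, AddressHasher> known_;
};

int main() {
  unsigned char code[2];
  FunctionRegistry registry;
  registry.FunctionCreateEvent(&code[0]);
  // A later move event consults the registry before re-reporting.
  std::printf("%d %d\n", registry.IsKnownFunction(&code[0]),
              registry.IsKnownFunction(&code[1]));  // prints: 1 0
}
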
diff --git a/deps/v8/src/data-flow.cc b/deps/v8/src/data-flow.cc
index d480c1bcf..02aacb5b8 100644
--- a/deps/v8/src/data-flow.cc
+++ b/deps/v8/src/data-flow.cc
@@ -42,7 +42,7 @@ void BitVector::Print() {
if (Contains(i)) {
if (!first) PrintF(",");
first = false;
- PrintF("%d");
+ PrintF("%d", i);
}
}
PrintF("}");
@@ -125,7 +125,7 @@ Variable* AssignedVariablesAnalyzer::FindSmiLoopVariable(ForStatement* stmt) {
int AssignedVariablesAnalyzer::BitIndex(Variable* var) {
ASSERT(var != NULL);
ASSERT(var->IsStackAllocated());
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
if (slot->type() == Slot::PARAMETER) {
return slot->index();
} else {
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index 0b02e2102..34eb0f0ec 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -45,7 +45,7 @@ Debug.DebugEvent = { Break: 1,
ScriptCollected: 6 };
// Types of exceptions that can be broken upon.
-Debug.ExceptionBreak = { All : 0,
+Debug.ExceptionBreak = { Caught : 0,
Uncaught: 1 };
// The different types of steps.
@@ -87,7 +87,27 @@ var debugger_flags = {
this.value = !!value;
%SetDisableBreak(!this.value);
}
- }
+ },
+ breakOnCaughtException: {
+ getValue: function() { return Debug.isBreakOnException(); },
+ setValue: function(value) {
+ if (value) {
+ Debug.setBreakOnException();
+ } else {
+ Debug.clearBreakOnException();
+ }
+ }
+ },
+ breakOnUncaughtException: {
+ getValue: function() { return Debug.isBreakOnUncaughtException(); },
+ setValue: function(value) {
+ if (value) {
+ Debug.setBreakOnUncaughtException();
+ } else {
+ Debug.clearBreakOnUncaughtException();
+ }
+ }
+ },
};
@@ -781,11 +801,15 @@ Debug.clearStepping = function() {
}
Debug.setBreakOnException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.All, true);
+ return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, true);
};
Debug.clearBreakOnException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.All, false);
+ return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, false);
+};
+
+Debug.isBreakOnException = function() {
+ return !!%IsBreakOnException(Debug.ExceptionBreak.Caught);
};
Debug.setBreakOnUncaughtException = function() {
@@ -796,6 +820,10 @@ Debug.clearBreakOnUncaughtException = function() {
return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
};
+Debug.isBreakOnUncaughtException = function() {
+ return !!%IsBreakOnException(Debug.ExceptionBreak.Uncaught);
+};
+
Debug.showBreakPoints = function(f, full) {
if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
var source = full ? this.scriptSource(f) : this.source(f);
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 87780d350..53773acaf 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -1034,10 +1034,12 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
if (!break_point_object->IsJSObject()) return true;
// Get the function CheckBreakPoint (defined in debug.js).
+ Handle<String> is_break_point_triggered_symbol =
+ Factory::LookupAsciiSymbol("IsBreakPointTriggered");
Handle<JSFunction> check_break_point =
Handle<JSFunction>(JSFunction::cast(
- debug_context()->global()->GetProperty(
- *Factory::LookupAsciiSymbol("IsBreakPointTriggered"))));
+ debug_context()->global()->GetProperty(
+ *is_break_point_triggered_symbol)));
// Get the break id as an object.
Handle<Object> break_id = Factory::NewNumberFromInt(Debug::break_id());
@@ -1200,6 +1202,15 @@ void Debug::ChangeBreakOnException(ExceptionBreakType type, bool enable) {
}
+bool Debug::IsBreakOnException(ExceptionBreakType type) {
+ if (type == BreakUncaughtException) {
+ return break_on_uncaught_exception_;
+ } else {
+ return break_on_exception_;
+ }
+}
+
+
void Debug::PrepareStep(StepAction step_action, int step_count) {
HandleScope scope;
ASSERT(Debug::InDebugger());
@@ -2167,9 +2178,11 @@ void Debugger::OnAfterCompile(Handle<Script> script,
// script. Make sure that these break points are set.
// Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
+ Handle<String> update_script_break_points_symbol =
+ Factory::LookupAsciiSymbol("UpdateScriptBreakPoints");
Handle<Object> update_script_break_points =
Handle<Object>(Debug::debug_context()->global()->GetProperty(
- *Factory::LookupAsciiSymbol("UpdateScriptBreakPoints")));
+ *update_script_break_points_symbol));
if (!update_script_break_points->IsJSFunction()) {
return;
}
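
The new Debug::IsBreakOnException is a read-only counterpart to ChangeBreakOnException, which is what lets the script-visible getValue/setValue pairs above round-trip. A self-contained sketch of that pairing (the statics stand in for break_on_exception_ and break_on_uncaught_exception_):

#include <cassert>

enum ExceptionBreakType { BreakException = 0, BreakUncaughtException = 1 };

static bool break_on_exception_ = false;
static bool break_on_uncaught_exception_ = false;

void ChangeBreakOnException(ExceptionBreakType type, bool enable) {
  if (type == BreakUncaughtException) {
    break_on_uncaught_exception_ = enable;
  } else {
    break_on_exception_ = enable;
  }
}

bool IsBreakOnException(ExceptionBreakType type) {
  return type == BreakUncaughtException ? break_on_uncaught_exception_
                                        : break_on_exception_;
}

int main() {
  ChangeBreakOnException(BreakUncaughtException, true);
  assert(IsBreakOnException(BreakUncaughtException));
  assert(!IsBreakOnException(BreakException));  // caught flag untouched
  return 0;
}
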
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 8b3b29e63..0d63085f1 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -236,6 +236,7 @@ class Debug {
static void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
static void FloodHandlerWithOneShot();
static void ChangeBreakOnException(ExceptionBreakType type, bool enable);
+ static bool IsBreakOnException(ExceptionBreakType type);
static void PrepareStep(StepAction step_action, int step_count);
static void ClearStepping();
static bool StepNextContinue(BreakLocationIterator* break_location_iterator,
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index e79421fe2..2a4ea74e7 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -44,7 +44,10 @@ namespace internal {
void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
for (byte* pc = begin; pc < end; pc++) {
if (f == NULL) {
- PrintF("%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n", pc, pc - begin, *pc);
+ PrintF("%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
+ reinterpret_cast<intptr_t>(pc),
+ pc - begin,
+ *pc);
} else {
fprintf(f, "%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);
diff --git a/deps/v8/src/dtoa.cc b/deps/v8/src/dtoa.cc
index e3dcbf2d6..f4141eb61 100644
--- a/deps/v8/src/dtoa.cc
+++ b/deps/v8/src/dtoa.cc
@@ -65,11 +65,12 @@ bool DoubleToAscii(double v, DtoaMode mode, int requested_digits,
switch (mode) {
case DTOA_SHORTEST:
- return FastDtoa(v, buffer, length, point);
+ return FastDtoa(v, FAST_DTOA_SHORTEST, 0, buffer, length, point);
case DTOA_FIXED:
return FastFixedDtoa(v, requested_digits, buffer, length, point);
- default:
- break;
+ case DTOA_PRECISION:
+ return FastDtoa(v, FAST_DTOA_PRECISION, requested_digits,
+ buffer, length, point);
}
return false;
}
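
For intuition about the two fast-path modes dispatched above: DTOA_SHORTEST asks for the fewest digits that still round-trip to the same double, while DTOA_PRECISION asks for a fixed number of significant digits regardless of where the decimal point falls. printf is only a rough analogy here (the real routines return a raw digit buffer plus a decimal-point position), used because it runs anywhere:

#include <cstdio>

int main() {
  // Shortest-style: one digit already identifies the double 0.1.
  std::printf("%.17g\n", 0.1);     // 0.10000000000000001 (full precision)
  std::printf("%.1g\n", 0.1);      // 0.1
  // Precision-style: exactly the requested significant digits.
  std::printf("%.3g\n", 3.14159);  // 3.14
  std::printf("%.7g\n", 3.14159);  // 3.14159
}
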
diff --git a/deps/v8/src/fast-dtoa.cc b/deps/v8/src/fast-dtoa.cc
index b4b7be053..d2a00cc62 100644
--- a/deps/v8/src/fast-dtoa.cc
+++ b/deps/v8/src/fast-dtoa.cc
@@ -42,8 +42,8 @@ namespace internal {
//
// A different range might be chosen on a different platform, to optimize digit
// generation, but a smaller range requires more powers of ten to be cached.
-static const int minimal_target_exponent = -60;
-static const int maximal_target_exponent = -32;
+static const int kMinimalTargetExponent = -60;
+static const int kMaximalTargetExponent = -32;
// Adjusts the last digit of the generated number, and screens out generated
@@ -61,13 +61,13 @@ static const int maximal_target_exponent = -32;
// Output: returns true if the buffer is guaranteed to contain the closest
// representable number to the input.
// Modifies the generated digits in the buffer to approach (round towards) w.
-bool RoundWeed(Vector<char> buffer,
- int length,
- uint64_t distance_too_high_w,
- uint64_t unsafe_interval,
- uint64_t rest,
- uint64_t ten_kappa,
- uint64_t unit) {
+static bool RoundWeed(Vector<char> buffer,
+ int length,
+ uint64_t distance_too_high_w,
+ uint64_t unsafe_interval,
+ uint64_t rest,
+ uint64_t ten_kappa,
+ uint64_t unit) {
uint64_t small_distance = distance_too_high_w - unit;
uint64_t big_distance = distance_too_high_w + unit;
// Let w_low = too_high - big_distance, and
@@ -75,7 +75,7 @@ bool RoundWeed(Vector<char> buffer,
// Note: w_low < w < w_high
//
// The real w (* unit) must lie somewhere inside the interval
- // ]w_low; w_low[ (often written as "(w_low; w_low)")
+ // ]w_low; w_high[ (often written as "(w_low; w_high)")
// Basically the buffer currently contains a number in the unsafe interval
// ]too_low; too_high[ with too_low < w < too_high
@@ -122,10 +122,10 @@ bool RoundWeed(Vector<char> buffer,
// inside the safe interval then we simply do not know and bail out (returning
// false).
//
- // Similarly we have to take into account the imprecision of 'w' when rounding
- // the buffer. If we have two potential representations we need to make sure
- // that the chosen one is closer to w_low and w_high since v can be anywhere
- // between them.
+ // Similarly we have to take into account the imprecision of 'w' when finding
+ // the closest representation of 'w'. If we have two potential
+ // representations, and one is closer to both w_low and w_high, then we know
+ // it is closer to the actual value v.
//
// By generating the digits of too_high we got the largest (closest to
// too_high) buffer that is still in the unsafe interval. In the case where
@@ -139,6 +139,9 @@ bool RoundWeed(Vector<char> buffer,
// (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high
// Instead of using the buffer directly we use its distance to too_high.
// Conceptually rest ~= too_high - buffer
+ // We need to do the following tests in this order to avoid over- and
+ // underflows.
+ ASSERT(rest <= unsafe_interval);
while (rest < small_distance && // Negated condition 1
unsafe_interval - rest >= ten_kappa && // Negated condition 2
(rest + ten_kappa < small_distance || // buffer{-1} > w_high
@@ -166,6 +169,62 @@ bool RoundWeed(Vector<char> buffer,
}
+// Rounds the buffer upwards (by adding 1 to its last digit) if that brings
+// the result closer to v. If the precision of the calculation is not
+// sufficient to round correctly, returns false.
+// The rounding might shift the whole buffer, in which case kappa is
+// adjusted. For example "99", kappa = 3 might become "10", kappa = 4.
+//
+// If 2*rest > ten_kappa then the buffer needs to be rounded up.
+// rest can have an error of +/- 1 unit. This function accounts for that
+// imprecision and returns false if the rounding direction cannot be
+// unambiguously determined.
+//
+// Precondition: rest < ten_kappa.
+static bool RoundWeedCounted(Vector<char> buffer,
+ int length,
+ uint64_t rest,
+ uint64_t ten_kappa,
+ uint64_t unit,
+ int* kappa) {
+ ASSERT(rest < ten_kappa);
+ // The following tests are done in a specific order to avoid overflows. They
+ // will work correctly with any uint64 values of rest < ten_kappa and unit.
+ //
+ // If the unit is too big, then we don't know which way to round. For example
+ // a unit of 50 means that the real number lies within rest +/- 50. If
+ // 10^kappa == 40 then there is no way to tell which way to round.
+ if (unit >= ten_kappa) return false;
+ // Even if unit is just half the size of 10^kappa we are already completely
+ // lost. (And after the previous test we know that the expression will not
+ // over/underflow.)
+ if (ten_kappa - unit <= unit) return false;
+ // If 2 * (rest + unit) <= 10^kappa we can safely round down.
+ if ((ten_kappa - rest > rest) && (ten_kappa - 2 * rest >= 2 * unit)) {
+ return true;
+ }
+ // If 2 * (rest - unit) >= 10^kappa, then we can safely round up.
+ if ((rest > unit) && (ten_kappa - (rest - unit) <= (rest - unit))) {
+ // Increment the last digit recursively until we find a non '9' digit.
+ buffer[length - 1]++;
+ for (int i = length - 1; i > 0; --i) {
+ if (buffer[i] != '0' + 10) break;
+ buffer[i] = '0';
+ buffer[i - 1]++;
+ }
+ // If the first digit is now '0' + 10, we had a buffer with all '9's. With
+ // the exception of the first digit all digits are now '0'. Simply switch the
+ // first digit to '1' and adjust the kappa. Example: "99" becomes "10" and
+ // the power (the kappa) is increased.
+ if (buffer[0] == '0' + 10) {
+ buffer[0] = '1';
+ (*kappa) += 1;
+ }
+ return true;
+ }
+ return false;
+}
+
static const uint32_t kTen4 = 10000;
static const uint32_t kTen5 = 100000;
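
The guards in RoundWeedCounted are ordered so that no subtraction can underflow for any uint64 inputs with rest < ten_kappa. A runnable walk-through of that decision logic with illustrative numbers; the helper below restates the four tests but is not V8 code:

#include <cstdint>
#include <cstdio>

// Returns +1 (round up), 0 (round down), -1 (cannot decide).
int Decide(uint64_t rest, uint64_t ten_kappa, uint64_t unit) {
  if (unit >= ten_kappa) return -1;         // error swamps the whole digit
  if (ten_kappa - unit <= unit) return -1;  // unit is at least half 10^kappa
  if ((ten_kappa - rest > rest) && (ten_kappa - 2 * rest >= 2 * unit)) {
    return 0;                               // 2 * (rest + unit) <= 10^kappa
  }
  if ((rest > unit) && (ten_kappa - (rest - unit) <= (rest - unit))) {
    return +1;                              // 2 * (rest - unit) >= 10^kappa
  }
  return -1;
}

int main() {
  std::printf("%d\n", Decide(2, 10, 1));  // 0: clearly below the midpoint
  std::printf("%d\n", Decide(8, 10, 1));  // 1: clearly above the midpoint
  std::printf("%d\n", Decide(5, 10, 1));  // -1: too close to call with +/-1
}
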
@@ -178,7 +237,7 @@ static const uint32_t kTen9 = 1000000000;
// number. We furthermore receive the maximum number of bits 'number' has.
// If number_bits == 0 then 0^-1 is returned
// The number of bits must be <= 32.
-// Precondition: (1 << number_bits) <= number < (1 << (number_bits + 1)).
+// Precondition: number < (1 << (number_bits + 1)).
static void BiggestPowerTen(uint32_t number,
int number_bits,
uint32_t* power,
@@ -281,18 +340,18 @@ static void BiggestPowerTen(uint32_t number,
// Generates the digits of input number w.
// w is a floating-point number (DiyFp), consisting of a significand and an
-// exponent. Its exponent is bounded by minimal_target_exponent and
-// maximal_target_exponent.
+// exponent. Its exponent is bounded by kMinimalTargetExponent and
+// kMaximalTargetExponent.
// Hence -60 <= w.e() <= -32.
//
// Returns false if it fails, in which case the generated digits in the buffer
// should not be used.
// Preconditions:
// * low, w and high are correct up to 1 ulp (unit in the last place). That
-// is, their error must be less that a unit of their last digits.
+// is, their error must be less than a unit of their last digits.
// * low.e() == w.e() == high.e()
// * low < w < high, and taking into account their error: low~ <= high~
-// * minimal_target_exponent <= w.e() <= maximal_target_exponent
+// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
// Postconditions: returns false if procedure fails.
// otherwise:
// * buffer is not null-terminated, but len contains the number of digits.
@@ -321,15 +380,15 @@ static void BiggestPowerTen(uint32_t number,
// represent 'w' we can stop. Everything inside the interval low - high
// represents w. However we have to pay attention to low, high and w's
// imprecision.
-bool DigitGen(DiyFp low,
- DiyFp w,
- DiyFp high,
- Vector<char> buffer,
- int* length,
- int* kappa) {
+static bool DigitGen(DiyFp low,
+ DiyFp w,
+ DiyFp high,
+ Vector<char> buffer,
+ int* length,
+ int* kappa) {
ASSERT(low.e() == w.e() && w.e() == high.e());
ASSERT(low.f() + 1 <= high.f() - 1);
- ASSERT(minimal_target_exponent <= w.e() && w.e() <= maximal_target_exponent);
+ ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
// low, w and high are imprecise, but by less than one ulp (unit in the last
// place).
// If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
@@ -359,23 +418,23 @@ bool DigitGen(DiyFp low,
uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
// Modulo by one is an and.
uint64_t fractionals = too_high.f() & (one.f() - 1);
- uint32_t divider;
- int divider_exponent;
+ uint32_t divisor;
+ int divisor_exponent;
BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
- &divider, &divider_exponent);
- *kappa = divider_exponent + 1;
+ &divisor, &divisor_exponent);
+ *kappa = divisor_exponent + 1;
*length = 0;
// Loop invariant: buffer = too_high / 10^kappa (integer division)
// The invariant holds for the first iteration: kappa has been initialized
- // with the divider exponent + 1. And the divider is the biggest power of ten
+ // with the divisor exponent + 1. And the divisor is the biggest power of ten
// that is smaller than integrals.
while (*kappa > 0) {
- int digit = integrals / divider;
+ int digit = integrals / divisor;
buffer[*length] = '0' + digit;
(*length)++;
- integrals %= divider;
+ integrals %= divisor;
(*kappa)--;
- // Note that kappa now equals the exponent of the divider and that the
+ // Note that kappa now equals the exponent of the divisor and that the
// invariant thus holds again.
uint64_t rest =
(static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
@@ -386,32 +445,24 @@ bool DigitGen(DiyFp low,
// that lies within the unsafe interval.
return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
unsafe_interval.f(), rest,
- static_cast<uint64_t>(divider) << -one.e(), unit);
+ static_cast<uint64_t>(divisor) << -one.e(), unit);
}
- divider /= 10;
+ divisor /= 10;
}
// The integrals have been generated. We are at the point of the decimal
// separator. In the following loop we simply multiply the remaining digits by
// 10 and divide by one. We just need to remember to multiply the associated
// data (like the interval or 'unit') as well.
- // Instead of multiplying by 10 we multiply by 5 (cheaper operation) and
- // increase its (imaginary) exponent. At the same time we decrease the
- // divider's (one's) exponent and shift its significand.
- // Basically, if fractionals was a DiyFp (with fractionals.e == one.e):
- // fractionals.f *= 10;
- // fractionals.f >>= 1; fractionals.e++; // value remains unchanged.
- // one.f >>= 1; one.e++; // value remains unchanged.
- // and we have again fractionals.e == one.e which allows us to divide
- // fractionals.f() by one.f()
- // We simply combine the *= 10 and the >>= 1.
+ // Note that the multiplication by 10 does not overflow, because w.e >= -60
+ // and thus one.e >= -60.
+ ASSERT(one.e() >= -60);
+ ASSERT(fractionals < one.f());
+ ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
while (true) {
- fractionals *= 5;
- unit *= 5;
- unsafe_interval.set_f(unsafe_interval.f() * 5);
- unsafe_interval.set_e(unsafe_interval.e() + 1); // Will be optimized out.
- one.set_f(one.f() >> 1);
- one.set_e(one.e() + 1);
+ fractionals *= 10;
+ unit *= 10;
+ unsafe_interval.set_f(unsafe_interval.f() * 10);
// Integer division by one.
int digit = static_cast<int>(fractionals >> -one.e());
buffer[*length] = '0' + digit;
@@ -426,6 +477,113 @@ bool DigitGen(DiyFp low,
}
+
+// Generates (at most) requested_digits of input number w.
+// w is a floating-point number (DiyFp), consisting of a significand and an
+// exponent. Its exponent is bounded by kMinimalTargetExponent and
+// kMaximalTargetExponent.
+// Hence -60 <= w.e() <= -32.
+//
+// Returns false if it fails, in which case the generated digits in the buffer
+// should not be used.
+// Preconditions:
+// * w is correct up to 1 ulp (unit in the last place). That
+// is, its error must be strictly less than a unit of its last digit.
+// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
+//
+// Postconditions: returns false if procedure fails.
+// otherwise:
+// * buffer is not null-terminated, but length contains the number of
+// digits.
+// * the representation in buffer is the most precise representation of
+// requested_digits digits.
+// * buffer contains at most requested_digits digits of w. If there are fewer
+// than requested_digits digits then some trailing '0's have been removed.
+// * kappa is such that
+// w = buffer * 10^kappa + eps with |eps| < 10^kappa / 2.
+//
+// Remark: This procedure takes into account the imprecision of its input
+// numbers. If the precision is not enough to guarantee all the postconditions
+// then false is returned. This happens rarely, but the failure rate
+// increases with higher requested_digits.
+static bool DigitGenCounted(DiyFp w,
+ int requested_digits,
+ Vector<char> buffer,
+ int* length,
+ int* kappa) {
+ ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
+ ASSERT(kMinimalTargetExponent >= -60);
+ ASSERT(kMaximalTargetExponent <= -32);
+ // w is assumed to have an error less than 1 unit. Whenever w is scaled we
+ // also scale its error.
+ uint64_t w_error = 1;
+ // We cut the input number into two parts: the integral digits and the
+ // fractional digits. We don't emit any decimal separator, but adapt kappa
+ // instead. Example: instead of writing "1.2" we put "12" into the buffer and
+ // increase kappa by 1.
+ DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
+ // Division by one is a shift.
+ uint32_t integrals = static_cast<uint32_t>(w.f() >> -one.e());
+ // Modulo by one is an and.
+ uint64_t fractionals = w.f() & (one.f() - 1);
+ uint32_t divisor;
+ int divisor_exponent;
+ BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
+ &divisor, &divisor_exponent);
+ *kappa = divisor_exponent + 1;
+ *length = 0;
+
+ // Loop invariant: buffer = w / 10^kappa (integer division)
+ // The invariant holds for the first iteration: kappa has been initialized
+ // with the divisor exponent + 1. And the divisor is the biggest power of ten
+ // that is smaller than 'integrals'.
+ while (*kappa > 0) {
+ int digit = integrals / divisor;
+ buffer[*length] = '0' + digit;
+ (*length)++;
+ requested_digits--;
+ integrals %= divisor;
+ (*kappa)--;
+ // Note that kappa now equals the exponent of the divisor and that the
+ // invariant thus holds again.
+ if (requested_digits == 0) break;
+ divisor /= 10;
+ }
+
+ if (requested_digits == 0) {
+ uint64_t rest =
+ (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
+ return RoundWeedCounted(buffer, *length, rest,
+ static_cast<uint64_t>(divisor) << -one.e(), w_error,
+ kappa);
+ }
+
+ // The integrals have been generated. We are at the point of the decimal
+ // separator. In the following loop we simply multiply the remaining digits by
+ // 10 and divide by one. We just need to remember to multiply the associated
+ // data (the 'unit') as well.
+ // Note that the multiplication by 10 does not overflow, because w.e >= -60
+ // and thus one.e >= -60.
+ ASSERT(one.e() >= -60);
+ ASSERT(fractionals < one.f());
+ ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
+ while (requested_digits > 0 && fractionals > w_error) {
+ fractionals *= 10;
+ w_error *= 10;
+ // Integer division by one.
+ int digit = static_cast<int>(fractionals >> -one.e());
+ buffer[*length] = '0' + digit;
+ (*length)++;
+ requested_digits--;
+ fractionals &= one.f() - 1; // Modulo by one.
+ (*kappa)--;
+ }
+ if (requested_digits != 0) return false;
+ return RoundWeedCounted(buffer, *length, fractionals, one.f(), w_error,
+ kappa);
+}
+
+
// Provides a decimal representation of v.
// Returns true if it succeeds, otherwise the result cannot be trusted.
// There will be *length digits inside the buffer (not null-terminated).
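
Both digit generators begin by cutting the scaled significand at the binary point: 'one' is 1.0 expressed at the same exponent as w, so division by one becomes a shift and modulo by one becomes a mask, exactly as the comments in DigitGen and DigitGenCounted say. A runnable illustration for an exponent of -32 (the significand is made up):

#include <cstdint>
#include <cstdio>

int main() {
  int e = -32;
  uint64_t one_f = static_cast<uint64_t>(1) << -e;         // 1.0 at exponent e
  uint64_t w_f = (static_cast<uint64_t>(42) << -e) + 123;  // 42 plus a fraction
  uint32_t integrals = static_cast<uint32_t>(w_f >> -e);   // division by one
  uint64_t fractionals = w_f & (one_f - 1);                // modulo by one
  std::printf("integrals=%u fractionals=%llu\n", integrals,
              static_cast<unsigned long long>(fractionals));
  // prints: integrals=42 fractionals=123
}
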
@@ -437,7 +595,10 @@ bool DigitGen(DiyFp low,
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the closest will be
// computed.
-bool grisu3(double v, Vector<char> buffer, int* length, int* decimal_exponent) {
+static bool Grisu3(double v,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_exponent) {
DiyFp w = Double(v).AsNormalizedDiyFp();
// boundary_minus and boundary_plus are the boundaries between v and its
// closest floating-point neighbors. Any number strictly between
@@ -448,12 +609,12 @@ bool grisu3(double v, Vector<char> buffer, int* length, int* decimal_exponent) {
ASSERT(boundary_plus.e() == w.e());
DiyFp ten_mk; // Cached power of ten: 10^-k
int mk; // -k
- GetCachedPower(w.e() + DiyFp::kSignificandSize, minimal_target_exponent,
- maximal_target_exponent, &mk, &ten_mk);
- ASSERT(minimal_target_exponent <= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize &&
- maximal_target_exponent >= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize);
+ GetCachedPower(w.e() + DiyFp::kSignificandSize, kMinimalTargetExponent,
+ kMaximalTargetExponent, &mk, &ten_mk);
+ ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize) &&
+ (kMaximalTargetExponent >= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize));
// Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
// 64 bit significand and ten_mk is thus only precise up to 64 bits.
@@ -488,17 +649,75 @@ bool grisu3(double v, Vector<char> buffer, int* length, int* decimal_exponent) {
}
+// The "counted" version of grisu3 (see above) only generates requested_digits
+// number of digits. This version does not generate the shortest representation,
+// and with enough requested digits 0.1 will at some point print as 0.9999999...
+// Grisu3 is too imprecise for real halfway cases (1.5 will not work) and
+// therefore the rounding strategy for halfway cases is irrelevant.
+static bool Grisu3Counted(double v,
+ int requested_digits,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_exponent) {
+ DiyFp w = Double(v).AsNormalizedDiyFp();
+ DiyFp ten_mk; // Cached power of ten: 10^-k
+ int mk; // -k
+ GetCachedPower(w.e() + DiyFp::kSignificandSize, kMinimalTargetExponent,
+ kMaximalTargetExponent, &mk, &ten_mk);
+ ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize) &&
+ (kMaximalTargetExponent >= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize));
+ // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
+ // 64 bit significand and ten_mk is thus only precise up to 64 bits.
+
+ // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
+ // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
+ // off by a small amount.
+ // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
+ // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
+ // (f-1) * 2^e < w*10^k < (f+1) * 2^e
+ DiyFp scaled_w = DiyFp::Times(w, ten_mk);
+
+ // We now have (double) (scaled_w * 10^-mk).
+ // DigitGen will generate the first requested_digits digits of scaled_w and
+ // return together with a kappa such that scaled_w ~= buffer * 10^kappa. (It
+ // will not always be exactly the same since DigitGenCounted only produces a
+ // limited number of digits.)
+ int kappa;
+ bool result = DigitGenCounted(scaled_w, requested_digits,
+ buffer, length, &kappa);
+ *decimal_exponent = -mk + kappa;
+ return result;
+}
+
+
bool FastDtoa(double v,
+ FastDtoaMode mode,
+ int requested_digits,
Vector<char> buffer,
int* length,
- int* point) {
+ int* decimal_point) {
ASSERT(v > 0);
ASSERT(!Double(v).IsSpecial());
- int decimal_exponent;
- bool result = grisu3(v, buffer, length, &decimal_exponent);
- *point = *length + decimal_exponent;
- buffer[*length] = '\0';
+ bool result = false;
+ int decimal_exponent = 0;
+ switch (mode) {
+ case FAST_DTOA_SHORTEST:
+ result = Grisu3(v, buffer, length, &decimal_exponent);
+ break;
+ case FAST_DTOA_PRECISION:
+ result = Grisu3Counted(v, requested_digits,
+ buffer, length, &decimal_exponent);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ if (result) {
+ *decimal_point = *length + decimal_exponent;
+ buffer[*length] = '\0';
+ }
return result;
}
diff --git a/deps/v8/src/fast-dtoa.h b/deps/v8/src/fast-dtoa.h
index 4403a7502..94c22ecd7 100644
--- a/deps/v8/src/fast-dtoa.h
+++ b/deps/v8/src/fast-dtoa.h
@@ -31,27 +31,52 @@
namespace v8 {
namespace internal {
+enum FastDtoaMode {
+ // Computes the shortest representation of the given input. The returned
+ // result will be the most accurate number of this length. Longer
+ // representations might be more accurate.
+ FAST_DTOA_SHORTEST,
+ // Computes a representation where the precision (number of digits) is
+ // given as input. The precision is independent of the decimal point.
+ FAST_DTOA_PRECISION
+};
+
// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
// include the terminating '\0' character.
static const int kFastDtoaMaximalLength = 17;
// Provides a decimal representation of v.
-// v must be a strictly positive finite double.
+// The result should be interpreted as buffer * 10^(point - length).
+//
+// Precondition:
+// * v must be a strictly positive finite double.
+//
// Returns true if it succeeds, otherwise the result cannot be trusted.
// There will be *length digits inside the buffer followed by a null terminator.
-// If the function returns true then
-// v == (double) (buffer * 10^(point - length)).
-// The digits in the buffer are the shortest representation possible: no
-// 0.099999999999 instead of 0.1.
-// The last digit will be closest to the actual v. That is, even if several
-// digits might correctly yield 'v' when read again, the buffer will contain the
-// one closest to v.
-// The variable 'sign' will be '0' if the given number is positive, and '1'
-// otherwise.
+// If the function returns true and mode equals
+// - FAST_DTOA_SHORTEST, then
+// the parameter requested_digits is ignored.
+// The result satisfies
+// v == (double) (buffer * 10^(point - length)).
+// The digits in the buffer are the shortest representation possible. E.g.
+// if 0.099999999999 and 0.1 represent the same double then "1" is returned
+// with point = 0.
+// The last digit will be closest to the actual v. That is, even if several
+// digits might correctly yield 'v' when read again, the buffer will contain
+// the one closest to v.
+// - FAST_DTOA_PRECISION, then
+// the buffer contains requested_digits digits.
+// the difference v - (buffer * 10^(point-length)) is closest to zero for
+// all possible representations of requested_digits digits.
+// If there are two values that are equally close, then FastDtoa returns
+// false.
+// For both modes the buffer must be large enough to hold the result.
bool FastDtoa(double d,
+ FastDtoaMode mode,
+ int requested_digits,
Vector<char> buffer,
int* length,
- int* point);
+ int* decimal_point);
} } // namespace v8::internal
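
To make the buffer/point contract concrete: callers read the result as buffer * 10^(point - length). A hypothetical FAST_DTOA_PRECISION call for v = 3.14159 with requested_digits = 4 could yield buffer = "3142", length = 4, point = 1 (the digits are illustrative, not a recorded output); reassembling the value:

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

double Reassemble(const char* buffer, int point) {
  int length = static_cast<int>(std::strlen(buffer));
  double digits = std::atof(buffer);  // "3142" -> 3142.0
  return digits * std::pow(10.0, point - length);
}

int main() {
  std::printf("%g\n", Reassemble("3142", 1));  // 3142 * 10^-3 = 3.142
}
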
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 263a2a40d..84a0eaaa6 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -108,6 +108,8 @@ DEFINE_bool(enable_sse2, true,
"enable use of SSE2 instructions if available")
DEFINE_bool(enable_sse3, true,
"enable use of SSE3 instructions if available")
+DEFINE_bool(enable_sse4_1, true,
+ "enable use of SSE4.1 instructions if available")
DEFINE_bool(enable_cmov, true,
"enable use of CMOV instruction if available")
DEFINE_bool(enable_rdtsc, true,
@@ -179,8 +181,8 @@ DEFINE_bool(always_inline_smi_code, false,
"always inline smi code in non-opt code")
// heap.cc
-DEFINE_int(max_new_space_size, 0, "max size of the new generation")
-DEFINE_int(max_old_space_size, 0, "max size of the old generation")
+DEFINE_int(max_new_space_size, 0, "max size of the new generation (in kBytes)")
+DEFINE_int(max_old_space_size, 0, "max size of the old generation (in Mbytes)")
DEFINE_bool(gc_global, false, "always perform global GCs")
DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
DEFINE_bool(trace_gc, false,
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 76a441b64..3cdb0157e 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -143,8 +143,8 @@ void StackFrameIterator::Reset() {
state.pc_address =
reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_));
type = StackFrame::ComputeType(&state);
- if (SingletonFor(type) == NULL) return;
}
+ if (SingletonFor(type) == NULL) return;
frame_ = SingletonFor(type, &state);
}
@@ -203,13 +203,24 @@ bool StackTraceFrameIterator::IsValidFrame() {
// -------------------------------------------------------------------------
+bool SafeStackFrameIterator::ExitFrameValidator::IsValidFP(Address fp) {
+ if (!validator_.IsValid(fp)) return false;
+ Address sp = ExitFrame::ComputeStackPointer(fp);
+ if (!validator_.IsValid(sp)) return false;
+ StackFrame::State state;
+ ExitFrame::FillState(fp, sp, &state);
+ if (!validator_.IsValid(reinterpret_cast<Address>(state.pc_address))) {
+ return false;
+ }
+ return *state.pc_address != NULL;
+}
+
+
SafeStackFrameIterator::SafeStackFrameIterator(
Address fp, Address sp, Address low_bound, Address high_bound) :
- maintainer_(), low_bound_(low_bound), high_bound_(high_bound),
- is_valid_top_(
- IsWithinBounds(low_bound, high_bound,
- Top::c_entry_fp(Top::GetCurrentThread())) &&
- Top::handler(Top::GetCurrentThread()) != NULL),
+ maintainer_(),
+ stack_validator_(low_bound, high_bound),
+ is_valid_top_(IsValidTop(low_bound, high_bound)),
is_valid_fp_(IsWithinBounds(low_bound, high_bound, fp)),
is_working_iterator_(is_valid_top_ || is_valid_fp_),
iteration_done_(!is_working_iterator_),
@@ -217,6 +228,14 @@ SafeStackFrameIterator::SafeStackFrameIterator(
}
+bool SafeStackFrameIterator::IsValidTop(Address low_bound, Address high_bound) {
+ Address fp = Top::c_entry_fp(Top::GetCurrentThread());
+ ExitFrameValidator validator(low_bound, high_bound);
+ if (!validator.IsValidFP(fp)) return false;
+ return Top::handler(Top::GetCurrentThread()) != NULL;
+}
+
+
void SafeStackFrameIterator::Advance() {
ASSERT(is_working_iterator_);
ASSERT(!done());
@@ -258,9 +277,8 @@ bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
// sure that caller FP address is valid.
Address caller_fp = Memory::Address_at(
frame->fp() + EntryFrameConstants::kCallerFPOffset);
- if (!IsValidStackAddress(caller_fp)) {
- return false;
- }
+ ExitFrameValidator validator(stack_validator_);
+ if (!validator.IsValidFP(caller_fp)) return false;
} else if (frame->is_arguments_adaptor()) {
// See ArgumentsAdaptorFrame::GetCallerStackPointer. It assumes that
// the number of arguments is stored on stack as Smi. We need to check
@@ -415,6 +433,22 @@ Address ExitFrame::GetCallerStackPointer() const {
}
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+ if (fp == 0) return NONE;
+ Address sp = ComputeStackPointer(fp);
+ FillState(fp, sp, state);
+ ASSERT(*state->pc_address != NULL);
+ return EXIT;
+}
+
+
+void ExitFrame::FillState(Address fp, Address sp, State* state) {
+ state->sp = sp;
+ state->fp = fp;
+ state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+}
+
+
Address StandardFrame::GetExpressionAddress(int n) const {
const int offset = StandardFrameConstants::kExpressionsOffset;
return fp() + offset - n * kPointerSize;
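
The point of ExitFrameValidator::IsValidFP above is that every address derived from a sampled frame pointer is bounds-checked before it is dereferenced, because the tick handler may observe a frame that is only half built. A simplified, runnable restatement of that chain; the stack layout and the ComputeStackPointer offset are invented, only the checking order mirrors the code:

#include <cstddef>

typedef unsigned char* Address;
const int kPointerSize = sizeof(void*);

struct Bounds {
  Address low, high;
  bool IsValid(Address a) const { return low <= a && a < high; }
};

bool IsValidExitFP(Address fp, const Bounds& stack) {
  if (!stack.IsValid(fp)) return false;
  Address sp = fp - 2 * kPointerSize;  // stand-in for ComputeStackPointer
  if (!stack.IsValid(sp)) return false;
  Address* pc_address =
      reinterpret_cast<Address*>(sp - 1 * kPointerSize);  // as in FillState
  if (!stack.IsValid(reinterpret_cast<Address>(pc_address))) return false;
  return *pc_address != NULL;  // only now is the slot safe to read
}

int main() {
  unsigned char stack[64] = {0};
  Bounds bounds = {stack, stack + sizeof(stack)};
  // All derived addresses are in range, but the saved pc slot is zero, so
  // the frame is still rejected -- the same final check as IsValidFP.
  return IsValidExitFP(stack + 32, bounds) ? 0 : 1;
}
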
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 20111904f..2d4f338ae 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -67,7 +67,7 @@ class PcToCodeCache : AllStatic {
static PcToCodeCacheEntry* GetCacheEntry(Address pc);
private:
- static const int kPcToCodeCacheSize = 256;
+ static const int kPcToCodeCacheSize = 1024;
static PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
};
@@ -141,6 +141,13 @@ class StackFrame BASE_EMBEDDED {
NO_ID = 0
};
+ struct State {
+ State() : sp(NULL), fp(NULL), pc_address(NULL) { }
+ Address sp;
+ Address fp;
+ Address* pc_address;
+ };
+
// Copy constructor; it breaks the connection to host iterator.
StackFrame(const StackFrame& original) {
this->state_ = original.state_;
@@ -201,12 +208,6 @@ class StackFrame BASE_EMBEDDED {
int index) const { }
protected:
- struct State {
- Address sp;
- Address fp;
- Address* pc_address;
- };
-
explicit StackFrame(StackFrameIterator* iterator) : iterator_(iterator) { }
virtual ~StackFrame() { }
@@ -318,6 +319,8 @@ class ExitFrame: public StackFrame {
// pointer. Used when constructing the first stack frame seen by an
// iterator and the frames following entry frames.
static Type GetStateForFramePointer(Address fp, State* state);
+ static Address ComputeStackPointer(Address fp);
+ static void FillState(Address fp, Address sp, State* state);
protected:
explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
@@ -443,6 +446,7 @@ class JavaScriptFrame: public StandardFrame {
inline Object* function_slot_object() const;
friend class StackFrameIterator;
+ friend class StackTracer;
};
@@ -654,12 +658,36 @@ class SafeStackFrameIterator BASE_EMBEDDED {
}
private:
+ class StackAddressValidator {
+ public:
+ StackAddressValidator(Address low_bound, Address high_bound)
+ : low_bound_(low_bound), high_bound_(high_bound) { }
+ bool IsValid(Address addr) const {
+ return IsWithinBounds(low_bound_, high_bound_, addr);
+ }
+ private:
+ Address low_bound_;
+ Address high_bound_;
+ };
+
+ class ExitFrameValidator {
+ public:
+ explicit ExitFrameValidator(const StackAddressValidator& validator)
+ : validator_(validator) { }
+ ExitFrameValidator(Address low_bound, Address high_bound)
+ : validator_(low_bound, high_bound) { }
+ bool IsValidFP(Address fp);
+ private:
+ StackAddressValidator validator_;
+ };
+
bool IsValidStackAddress(Address addr) const {
- return IsWithinBounds(low_bound_, high_bound_, addr);
+ return stack_validator_.IsValid(addr);
}
bool CanIterateHandles(StackFrame* frame, StackHandler* handler);
bool IsValidFrame(StackFrame* frame) const;
bool IsValidCaller(StackFrame* frame);
+ static bool IsValidTop(Address low_bound, Address high_bound);
// This is a nasty hack to make sure the active count is incremented
// before the constructor for the embedded iterator is invoked. This
@@ -674,8 +702,7 @@ class SafeStackFrameIterator BASE_EMBEDDED {
ActiveCountMaintainer maintainer_;
static int active_count_;
- Address low_bound_;
- Address high_bound_;
+ StackAddressValidator stack_validator_;
const bool is_valid_top_;
const bool is_valid_fp_;
const bool is_working_iterator_;
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index a1c5ec36a..fa835cb0f 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -332,30 +332,93 @@ bool FullCodeGenerator::ShouldInlineSmiCase(Token::Value op) {
}
-void FullCodeGenerator::PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) {
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- // In an effect context, the true and the false case branch to the
- // same label.
- *if_true = *if_false = *fall_through = materialize_true;
- break;
- case Expression::kValue:
- *if_true = *fall_through = materialize_true;
- *if_false = materialize_false;
- break;
- case Expression::kTest:
- *if_true = true_label_;
- *if_false = false_label_;
- *fall_through = fall_through_;
- break;
- }
+void FullCodeGenerator::EffectContext::Plug(Register reg) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Register reg) const {
+ // Move value into place.
+ __ Move(result_register(), reg);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
+ // Move value into place.
+ __ push(reg);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Register reg) const {
+ // For simplicity we always test the accumulator register.
+ __ Move(result_register(), reg);
+ codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
+
+
+void FullCodeGenerator::EffectContext::PlugTOS() const {
+ __ Drop(1);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
+ __ pop(result_register());
+}
+
+
+void FullCodeGenerator::StackValueContext::PlugTOS() const {
+}
+
+
+void FullCodeGenerator::TestContext::PlugTOS() const {
+ // For simplicity we always test the accumulator register.
+ __ pop(result_register());
+ codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
+
+
+void FullCodeGenerator::EffectContext::PrepareTest(
+ Label* materialize_true,
+ Label* materialize_false,
+ Label** if_true,
+ Label** if_false,
+ Label** fall_through) const {
+ // In an effect context, the true and the false case branch to the
+ // same label.
+ *if_true = *if_false = *fall_through = materialize_true;
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::PrepareTest(
+ Label* materialize_true,
+ Label* materialize_false,
+ Label** if_true,
+ Label** if_false,
+ Label** fall_through) const {
+ *if_true = *fall_through = materialize_true;
+ *if_false = materialize_false;
+}
+
+
+void FullCodeGenerator::StackValueContext::PrepareTest(
+ Label* materialize_true,
+ Label* materialize_false,
+ Label** if_true,
+ Label** if_false,
+ Label** fall_through) const {
+ *if_true = *fall_through = materialize_true;
+ *if_false = materialize_false;
+}
+
+
+void FullCodeGenerator::TestContext::PrepareTest(
+ Label* materialize_true,
+ Label* materialize_false,
+ Label** if_true,
+ Label** if_false,
+ Label** fall_through) const {
+ *if_true = true_label_;
+ *if_false = false_label_;
+ *fall_through = fall_through_;
}
@@ -366,7 +429,7 @@ void FullCodeGenerator::VisitDeclarations(
for (int i = 0; i < length; i++) {
Declaration* decl = declarations->at(i);
Variable* var = decl->proxy()->var();
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
// If it was not possible to allocate the variable at compile
// time, we need to "declare" it at runtime to make sure it
@@ -386,7 +449,7 @@ void FullCodeGenerator::VisitDeclarations(
for (int j = 0, i = 0; i < length; i++) {
Declaration* decl = declarations->at(i);
Variable* var = decl->proxy()->var();
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
array->set(j++, *(var->name()));
@@ -576,20 +639,20 @@ void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
// Load only the operands that we need to materialize.
if (constant == kNoConstants) {
- VisitForValue(left, kStack);
- VisitForValue(right, kAccumulator);
+ VisitForStackValue(left);
+ VisitForAccumulatorValue(right);
} else if (constant == kRightConstant) {
- VisitForValue(left, kAccumulator);
+ VisitForAccumulatorValue(left);
} else {
ASSERT(constant == kLeftConstant);
- VisitForValue(right, kAccumulator);
+ VisitForAccumulatorValue(right);
}
SetSourcePosition(expr->position());
if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr, op, context_, mode, left, right, constant);
+ EmitInlineSmiBinaryOp(expr, op, mode, left, right, constant);
} else {
- EmitBinaryOp(op, context_, mode);
+ EmitBinaryOp(op, mode);
}
break;
}
@@ -603,39 +666,7 @@ void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
Label eval_right, done;
- // Set up the appropriate context for the left subexpression based
- // on the operation and our own context. Initially assume we can
- // inherit both true and false labels from our context.
- if (expr->op() == Token::OR) {
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- VisitForControl(expr->left(), &done, &eval_right, &eval_right);
- break;
- case Expression::kValue:
- VisitLogicalForValue(expr->left(), expr->op(), location_, &done);
- break;
- case Expression::kTest:
- VisitForControl(expr->left(), true_label_, &eval_right, &eval_right);
- break;
- }
- } else {
- ASSERT_EQ(Token::AND, expr->op());
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- VisitForControl(expr->left(), &eval_right, &done, &eval_right);
- break;
- case Expression::kValue:
- VisitLogicalForValue(expr->left(), expr->op(), location_, &done);
- break;
- case Expression::kTest:
- VisitForControl(expr->left(), &eval_right, false_label_, &eval_right);
- break;
- }
- }
+ context()->EmitLogicalLeft(expr, &eval_right, &done);
__ bind(&eval_right);
Visit(expr->right());
@@ -644,43 +675,75 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
}
-void FullCodeGenerator::VisitLogicalForValue(Expression* expr,
- Token::Value op,
- Location where,
- Label* done) {
- ASSERT(op == Token::AND || op == Token::OR);
- VisitForValue(expr, kAccumulator);
+void FullCodeGenerator::EffectContext::EmitLogicalLeft(BinaryOperation* expr,
+ Label* eval_right,
+ Label* done) const {
+ if (expr->op() == Token::OR) {
+ codegen()->VisitForControl(expr->left(), done, eval_right, eval_right);
+ } else {
+ ASSERT(expr->op() == Token::AND);
+ codegen()->VisitForControl(expr->left(), eval_right, done, eval_right);
+ }
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::EmitLogicalLeft(
+ BinaryOperation* expr,
+ Label* eval_right,
+ Label* done) const {
+ codegen()->Visit(expr->left());
+ // We want the value in the accumulator for the test, and on the stack in case
+ // we need it.
__ push(result_register());
+ Label discard, restore;
+ if (expr->op() == Token::OR) {
+ codegen()->DoTest(&restore, &discard, &restore);
+ } else {
+ ASSERT(expr->op() == Token::AND);
+ codegen()->DoTest(&discard, &restore, &restore);
+ }
+ __ bind(&restore);
+ __ pop(result_register());
+ __ jmp(done);
+ __ bind(&discard);
+ __ Drop(1);
+}
+
+void FullCodeGenerator::StackValueContext::EmitLogicalLeft(
+ BinaryOperation* expr,
+ Label* eval_right,
+ Label* done) const {
+ codegen()->VisitForAccumulatorValue(expr->left());
+ // We want the value in the accumulator for the test, and on the stack in case
+ // we need it.
+ __ push(result_register());
Label discard;
- switch (where) {
- case kAccumulator: {
- Label restore;
- if (op == Token::OR) {
- DoTest(&restore, &discard, &restore);
- } else {
- DoTest(&discard, &restore, &restore);
- }
- __ bind(&restore);
- __ pop(result_register());
- __ jmp(done);
- break;
- }
- case kStack: {
- if (op == Token::OR) {
- DoTest(done, &discard, &discard);
- } else {
- DoTest(&discard, done, &discard);
- }
- break;
- }
+ if (expr->op() == Token::OR) {
+ codegen()->DoTest(done, &discard, &discard);
+ } else {
+ ASSERT(expr->op() == Token::AND);
+ codegen()->DoTest(&discard, done, &discard);
}
-
__ bind(&discard);
__ Drop(1);
}
+void FullCodeGenerator::TestContext::EmitLogicalLeft(BinaryOperation* expr,
+ Label* eval_right,
+ Label* done) const {
+ if (expr->op() == Token::OR) {
+ codegen()->VisitForControl(expr->left(),
+ true_label_, eval_right, eval_right);
+ } else {
+ ASSERT(expr->op() == Token::AND);
+ codegen()->VisitForControl(expr->left(),
+ eval_right, false_label_, eval_right);
+ }
+}
+
+
void FullCodeGenerator::VisitBlock(Block* stmt) {
Comment cmnt(masm_, "[ Block");
Breakable nested_statement(this, stmt);
@@ -761,7 +824,7 @@ void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
SetStatementPosition(stmt);
Expression* expr = stmt->expression();
- VisitForValue(expr, kAccumulator);
+ VisitForAccumulatorValue(expr);
// Exit all nested statements.
NestedStatement* current = nesting_stack_;
@@ -780,7 +843,7 @@ void FullCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
Comment cmnt(masm_, "[ WithEnterStatement");
SetStatementPosition(stmt);
- VisitForValue(stmt->expression(), kStack);
+ VisitForStackValue(stmt->expression());
if (stmt->is_catch_block()) {
__ CallRuntime(Runtime::kPushCatchContext, 1);
} else {
@@ -955,7 +1018,7 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
// The catch variable is *always* a variable proxy for a local variable.
Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
ASSERT_NOT_NULL(catch_var);
- Slot* variable_slot = catch_var->slot();
+ Slot* variable_slot = catch_var->AsSlot();
ASSERT_NOT_NULL(variable_slot);
ASSERT_EQ(Slot::LOCAL, variable_slot->type());
StoreToFrameField(SlotOffset(variable_slot), result_register());
@@ -1061,7 +1124,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
expr->then_expression_position());
Visit(expr->then_expression());
// If control flow falls through Visit, jump to done.
- if (context_ == Expression::kEffect || context_ == Expression::kValue) {
+ if (!context()->IsTest()) {
__ jmp(&done);
}
@@ -1070,7 +1133,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
expr->else_expression_position());
Visit(expr->else_expression());
// If control flow falls through Visit, merge it with true case here.
- if (context_ == Expression::kEffect || context_ == Expression::kValue) {
+ if (!context()->IsTest()) {
__ bind(&done);
}
}
@@ -1084,7 +1147,7 @@ void FullCodeGenerator::VisitSlot(Slot* expr) {
void FullCodeGenerator::VisitLiteral(Literal* expr) {
Comment cmnt(masm_, "[ Literal");
- Apply(context_, expr);
+ context()->Plug(expr->handle());
}
@@ -1110,17 +1173,17 @@ void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
Comment cmnt(masm_, "[ CatchExtensionObject");
- VisitForValue(expr->key(), kStack);
- VisitForValue(expr->value(), kStack);
+ VisitForStackValue(expr->key());
+ VisitForStackValue(expr->value());
// Create catch extension object.
__ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- Apply(context_, result_register());
+ context()->Plug(result_register());
}
void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
- VisitForValue(expr->exception(), kStack);
+ VisitForStackValue(expr->exception());
__ CallRuntime(Runtime::kThrow, 1);
// Never returns here.
}
@@ -1150,9 +1213,9 @@ int FullCodeGenerator::TryCatch::Exit(int stack_depth) {
void FullCodeGenerator::EmitRegExpCloneResult(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
+ VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kRegExpCloneResult, 1);
- Apply(context_, result_register());
+ context()->Plug(result_register());
}
#undef __
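
The refactoring above replaces switches over Expression::Context with a small class hierarchy: each context installs itself on the code generator in its constructor and restores the previous one in its destructor, so the VisitFor*Value helpers shrink to one-liners and per-context behavior moves into virtual methods. A stripped-down sketch of that RAII shape (names simplified; plain ints stand in for registers):

#include <cstddef>
#include <cstdio>

class Generator;

class Context {
 public:
  explicit Context(Generator* gen);
  virtual ~Context();
  virtual void Plug(int value) const = 0;  // consume a pure value
 protected:
  Generator* gen_;
 private:
  const Context* old_;
};

class Generator {
 public:
  Generator() : context_(NULL) {}
  const Context* context() const { return context_; }
  void set_context(const Context* context) { context_ = context; }
  void VisitLiteral(int value) { context_->Plug(value); }
 private:
  const Context* context_;
};

Context::Context(Generator* gen) : gen_(gen), old_(gen->context()) {
  gen->set_context(this);  // install self, remembering the previous context
}

Context::~Context() { gen_->set_context(old_); }  // restore on scope exit

class EffectContext : public Context {
 public:
  explicit EffectContext(Generator* gen) : Context(gen) {}
  virtual void Plug(int) const {}  // result is discarded
};

class PrintContext : public Context {  // stands in for AccumulatorValue
 public:
  explicit PrintContext(Generator* gen) : Context(gen) {}
  virtual void Plug(int value) const { std::printf("%d\n", value); }
};

int main() {
  Generator gen;
  {
    PrintContext context(&gen);
    gen.VisitLiteral(42);  // prints 42
  }  // previous context restored here, as in ~ExpressionContext
  {
    EffectContext context(&gen);
    gen.VisitLiteral(7);   // evaluated for effect only
  }
}
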
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 9db233c22..03024e190 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -71,10 +71,7 @@ class FullCodeGenerator: public AstVisitor {
info_(NULL),
nesting_stack_(NULL),
loop_depth_(0),
- location_(kStack),
- true_label_(NULL),
- false_label_(NULL),
- fall_through_(NULL) {
+ context_(NULL) {
}
static Handle<Code> MakeCode(CompilationInfo* info);
@@ -232,11 +229,6 @@ class FullCodeGenerator: public AstVisitor {
DISALLOW_COPY_AND_ASSIGN(ForIn);
};
- enum Location {
- kAccumulator,
- kStack
- };
-
enum ConstantOperand {
kNoConstants,
kLeftConstant,
@@ -262,39 +254,6 @@ class FullCodeGenerator: public AstVisitor {
Expression* left,
Expression* right);
- // Emit code to convert a pure value (in a register, slot, as a literal,
- // or on top of the stack) into the result expected according to an
- // expression context.
- void Apply(Expression::Context context, Register reg);
-
- // Slot cannot have type Slot::LOOKUP.
- void Apply(Expression::Context context, Slot* slot);
-
- void Apply(Expression::Context context, Literal* lit);
- void ApplyTOS(Expression::Context context);
-
- // Emit code to discard count elements from the top of stack, then convert
- // a pure value into the result expected according to an expression
- // context.
- void DropAndApply(int count, Expression::Context context, Register reg);
-
- // Set up branch labels for a test expression.
- void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through);
-
- // Emit code to convert pure control flow to a pair of labels into the
- // result expected according to an expression context.
- void Apply(Expression::Context context,
- Label* materialize_true,
- Label* materialize_false);
-
- // Emit code to convert constant control flow (true or false) into
- // the result expected according to an expression context.
- void Apply(Expression::Context context, bool flag);
-
// Helper function to convert a pure value into a test context. The value
// is expected on the stack or the accumulator, depending on the platform.
// See the platform-specific implementation for details.
@@ -316,39 +275,26 @@ class FullCodeGenerator: public AstVisitor {
MemOperand EmitSlotSearch(Slot* slot, Register scratch);
void VisitForEffect(Expression* expr) {
- Expression::Context saved_context = context_;
- context_ = Expression::kEffect;
+ EffectContext context(this);
+ Visit(expr);
+ }
+
+ void VisitForAccumulatorValue(Expression* expr) {
+ AccumulatorValueContext context(this);
Visit(expr);
- context_ = saved_context;
}
- void VisitForValue(Expression* expr, Location where) {
- Expression::Context saved_context = context_;
- Location saved_location = location_;
- context_ = Expression::kValue;
- location_ = where;
+ void VisitForStackValue(Expression* expr) {
+ StackValueContext context(this);
Visit(expr);
- context_ = saved_context;
- location_ = saved_location;
}
void VisitForControl(Expression* expr,
Label* if_true,
Label* if_false,
Label* fall_through) {
- Expression::Context saved_context = context_;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
- Label* saved_fall_through = fall_through_;
- context_ = Expression::kTest;
- true_label_ = if_true;
- false_label_ = if_false;
- fall_through_ = fall_through;
+ TestContext context(this, if_true, if_false, fall_through);
Visit(expr);
- context_ = saved_context;
- true_label_ = saved_true;
- false_label_ = saved_false;
- fall_through_ = saved_fall_through;
}
void VisitDeclarations(ZoneList<Declaration*>* declarations);
@@ -398,7 +344,7 @@ class FullCodeGenerator: public AstVisitor {
TypeofState typeof_state,
Label* slow,
Label* done);
- void EmitVariableLoad(Variable* expr, Expression::Context context);
+ void EmitVariableLoad(Variable* expr);
// Platform-specific support for allocating a new closure based on
// the given function info.
@@ -417,14 +363,12 @@ class FullCodeGenerator: public AstVisitor {
// Apply the compound assignment operator. Expects the left operand on top
// of the stack and the right one in the accumulator.
void EmitBinaryOp(Token::Value op,
- Expression::Context context,
OverwriteMode mode);
// Helper functions for generating inlined smi code for certain
// binary operations.
void EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op,
- Expression::Context context,
OverwriteMode mode,
Expression* left,
Expression* right,
@@ -432,31 +376,26 @@ class FullCodeGenerator: public AstVisitor {
void EmitConstantSmiBinaryOp(Expression* expr,
Token::Value op,
- Expression::Context context,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value);
void EmitConstantSmiBitOp(Expression* expr,
Token::Value op,
- Expression::Context context,
OverwriteMode mode,
Smi* value);
void EmitConstantSmiShiftOp(Expression* expr,
Token::Value op,
- Expression::Context context,
OverwriteMode mode,
Smi* value);
void EmitConstantSmiAdd(Expression* expr,
- Expression::Context context,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value);
void EmitConstantSmiSub(Expression* expr,
- Expression::Context context,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value);
@@ -468,8 +407,7 @@ class FullCodeGenerator: public AstVisitor {
// Complete a variable assignment. The right-hand-side value is expected
// in the accumulator.
void EmitVariableAssignment(Variable* var,
- Token::Value op,
- Expression::Context context);
+ Token::Value op);
// Complete a named property assignment. The receiver is expected on top
// of the stack and the right-hand-side value in the accumulator.
@@ -501,6 +439,10 @@ class FullCodeGenerator: public AstVisitor {
MacroAssembler* masm() { return masm_; }
+ class ExpressionContext;
+ const ExpressionContext* context() { return context_; }
+ void set_new_context(const ExpressionContext* context) { context_ = context; }
+
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
FunctionLiteral* function() { return info_->function(); }
@@ -509,6 +451,9 @@ class FullCodeGenerator: public AstVisitor {
static Register result_register();
static Register context_register();
+ // Helper for calling an IC stub.
+ void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
+
// Set fields in the stack frame. Offsets are the frame pointer relative
// offsets defined in, e.g., StandardFrameConstants.
void StoreToFrameField(int frame_offset, Register value);
@@ -527,13 +472,7 @@ class FullCodeGenerator: public AstVisitor {
// Handles the shortcutted logical binary operations in VisitBinaryOperation.
void EmitLogicalOperation(BinaryOperation* expr);
- void VisitForTypeofValue(Expression* expr, Location where);
-
- void VisitLogicalForValue(Expression* expr,
- Token::Value op,
- Location where,
- Label* done);
-
+ void VisitForTypeofValue(Expression* expr);
MacroAssembler* masm_;
CompilationInfo* info_;
@@ -542,11 +481,178 @@ class FullCodeGenerator: public AstVisitor {
NestedStatement* nesting_stack_;
int loop_depth_;
- Expression::Context context_;
- Location location_;
- Label* true_label_;
- Label* false_label_;
- Label* fall_through_;
+ class ExpressionContext {
+ public:
+ explicit ExpressionContext(FullCodeGenerator* codegen)
+ : masm_(codegen->masm()), old_(codegen->context()), codegen_(codegen) {
+ codegen->set_new_context(this);
+ }
+
+ virtual ~ExpressionContext() {
+ codegen_->set_new_context(old_);
+ }
+
+ // Convert constant control flow (true or false) to the result expected for
+ // this expression context.
+ virtual void Plug(bool flag) const = 0;
+
+ // Emit code to convert a pure value (in a register, slot, as a literal,
+ // or on top of the stack) into the result expected according to this
+ // expression context.
+ virtual void Plug(Register reg) const = 0;
+ virtual void Plug(Slot* slot) const = 0;
+ virtual void Plug(Handle<Object> lit) const = 0;
+ virtual void Plug(Heap::RootListIndex index) const = 0;
+ virtual void PlugTOS() const = 0;
+
+ // Emit code to convert pure control flow, expressed as a pair of unbound
+ // labels, into the result expected according to this expression context.
+ // The implementation may decide to bind either of the labels.
+ virtual void Plug(Label* materialize_true,
+ Label* materialize_false) const = 0;
+
+ // Emit code to discard count elements from the top of stack, then convert
+ // a pure value into the result expected according to this expression
+ // context.
+ virtual void DropAndPlug(int count, Register reg) const = 0;
+
+ // For shortcutting operations || and &&.
+ virtual void EmitLogicalLeft(BinaryOperation* expr,
+ Label* eval_right,
+ Label* done) const = 0;
+
+ // Set up branch labels for a test expression. The three Label** parameters
+ // are output parameters.
+ virtual void PrepareTest(Label* materialize_true,
+ Label* materialize_false,
+ Label** if_true,
+ Label** if_false,
+ Label** fall_through) const = 0;
+
+    // Returns true if we are evaluating only for side effects (i.e., if the
+    // result will be discarded).
+ virtual bool IsEffect() const { return false; }
+
+ // Returns true if we are branching on the value rather than materializing
+ // it.
+ virtual bool IsTest() const { return false; }
+
+ protected:
+ FullCodeGenerator* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return masm_; }
+ MacroAssembler* masm_;
+
+ private:
+ const ExpressionContext* old_;
+ FullCodeGenerator* codegen_;
+ };
+
+ class AccumulatorValueContext : public ExpressionContext {
+ public:
+ explicit AccumulatorValueContext(FullCodeGenerator* codegen)
+ : ExpressionContext(codegen) { }
+
+ virtual void Plug(bool flag) const;
+ virtual void Plug(Register reg) const;
+ virtual void Plug(Label* materialize_true, Label* materialize_false) const;
+ virtual void Plug(Slot* slot) const;
+ virtual void Plug(Handle<Object> lit) const;
+ virtual void Plug(Heap::RootListIndex) const;
+ virtual void PlugTOS() const;
+ virtual void DropAndPlug(int count, Register reg) const;
+ virtual void EmitLogicalLeft(BinaryOperation* expr,
+ Label* eval_right,
+ Label* done) const;
+ virtual void PrepareTest(Label* materialize_true,
+ Label* materialize_false,
+ Label** if_true,
+ Label** if_false,
+ Label** fall_through) const;
+ };
+
+ class StackValueContext : public ExpressionContext {
+ public:
+ explicit StackValueContext(FullCodeGenerator* codegen)
+ : ExpressionContext(codegen) { }
+
+ virtual void Plug(bool flag) const;
+ virtual void Plug(Register reg) const;
+ virtual void Plug(Label* materialize_true, Label* materialize_false) const;
+ virtual void Plug(Slot* slot) const;
+ virtual void Plug(Handle<Object> lit) const;
+ virtual void Plug(Heap::RootListIndex) const;
+ virtual void PlugTOS() const;
+ virtual void DropAndPlug(int count, Register reg) const;
+ virtual void EmitLogicalLeft(BinaryOperation* expr,
+ Label* eval_right,
+ Label* done) const;
+ virtual void PrepareTest(Label* materialize_true,
+ Label* materialize_false,
+ Label** if_true,
+ Label** if_false,
+ Label** fall_through) const;
+ };
+
+ class TestContext : public ExpressionContext {
+ public:
+ explicit TestContext(FullCodeGenerator* codegen,
+ Label* true_label,
+ Label* false_label,
+ Label* fall_through)
+ : ExpressionContext(codegen),
+ true_label_(true_label),
+ false_label_(false_label),
+ fall_through_(fall_through) { }
+
+ virtual void Plug(bool flag) const;
+ virtual void Plug(Register reg) const;
+ virtual void Plug(Label* materialize_true, Label* materialize_false) const;
+ virtual void Plug(Slot* slot) const;
+ virtual void Plug(Handle<Object> lit) const;
+ virtual void Plug(Heap::RootListIndex) const;
+ virtual void PlugTOS() const;
+ virtual void DropAndPlug(int count, Register reg) const;
+ virtual void EmitLogicalLeft(BinaryOperation* expr,
+ Label* eval_right,
+ Label* done) const;
+ virtual void PrepareTest(Label* materialize_true,
+ Label* materialize_false,
+ Label** if_true,
+ Label** if_false,
+ Label** fall_through) const;
+ virtual bool IsTest() const { return true; }
+
+ private:
+ Label* true_label_;
+ Label* false_label_;
+ Label* fall_through_;
+ };
+
+ class EffectContext : public ExpressionContext {
+ public:
+ explicit EffectContext(FullCodeGenerator* codegen)
+ : ExpressionContext(codegen) { }
+
+ virtual void Plug(bool flag) const;
+ virtual void Plug(Register reg) const;
+ virtual void Plug(Label* materialize_true, Label* materialize_false) const;
+ virtual void Plug(Slot* slot) const;
+ virtual void Plug(Handle<Object> lit) const;
+ virtual void Plug(Heap::RootListIndex) const;
+ virtual void PlugTOS() const;
+ virtual void DropAndPlug(int count, Register reg) const;
+ virtual void EmitLogicalLeft(BinaryOperation* expr,
+ Label* eval_right,
+ Label* done) const;
+ virtual void PrepareTest(Label* materialize_true,
+ Label* materialize_false,
+ Label** if_true,
+ Label** if_false,
+ Label** fall_through) const;
+ virtual bool IsEffect() const { return true; }
+ };
+
+ const ExpressionContext* context_;
friend class NestedStatement;
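Note on the ExpressionContext hierarchy introduced above: each context is a stack-allocated RAII object that registers itself as the code generator's current context in its constructor and restores the previous one in its destructor, replacing the old explicit (context_, location_, true/false/fall-through label) state. A minimal usage sketch, where the method name VisitForAccumulatorValue is illustrative rather than necessarily the exact call site in this patch:

    // Evaluate a subexpression so that its value ends up in the accumulator.
    void FullCodeGenerator::VisitForAccumulatorValue(Expression* expr) {
      AccumulatorValueContext context(this);  // installs itself via set_new_context()
      Visit(expr);
    }  // ~ExpressionContext() restores the previous context here.

Whatever the visited expression produces (a register, a slot, a literal, or control flow) is then coerced through the virtual Plug() overloads of the currently active context.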
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index a909caf3d..020732275 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -486,7 +486,7 @@ void GlobalHandles::PrintStats() {
}
PrintF("Global Handle Statistics:\n");
- PrintF(" allocated memory = %dB\n", sizeof(Node) * total);
+ PrintF(" allocated memory = %" V8_PTR_PREFIX "dB\n", sizeof(Node) * total);
PrintF(" # weak = %d\n", weak);
PrintF(" # pending = %d\n", pending);
PrintF(" # near_death = %d\n", near_death);
@@ -497,8 +497,10 @@ void GlobalHandles::PrintStats() {
void GlobalHandles::Print() {
PrintF("Global handles:\n");
for (Node* current = head_; current != NULL; current = current->next()) {
- PrintF(" handle %p to %p (weak=%d)\n", current->handle().location(),
- *current->handle(), current->state_ == Node::WEAK);
+ PrintF(" handle %p to %p (weak=%d)\n",
+ reinterpret_cast<void*>(current->handle().location()),
+ reinterpret_cast<void*>(*current->handle()),
+ current->state_ == Node::WEAK);
}
}
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index f168d6eb1..fbc749da8 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -214,6 +214,12 @@ const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
const intptr_t kMapAlignmentMask = kMapAlignment - 1;
+// Desired alignment for generated code is 32 bytes (to improve cache line
+// utilization).
+const int kCodeAlignmentBits = 5;
+const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
+const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
+
// Tag information for Failure.
const int kFailureTag = 3;
const int kFailureTagSize = 2;
@@ -588,6 +594,10 @@ enum StateTag {
#define MAP_POINTER_ALIGN(value) \
(((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
+// CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
+#define CODE_POINTER_ALIGN(value) \
+ (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
+
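A quick check of the CODE_POINTER_ALIGN arithmetic: with kCodeAlignmentBits = 5, kCodeAlignmentMask is 31, and rounding up works by adding the mask and clearing the low five bits:

    // CODE_POINTER_ALIGN(33) == (33 + 31) & ~31 == 64
    // CODE_POINTER_ALIGN(64) == (64 + 31) & ~31 == 64   (aligned input is unchanged)
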
// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 655254c95..02074925e 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -142,6 +142,13 @@ Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
+  // If objects constructed from this function exist, then changing
+  // 'estimated_nof_properties' is dangerous since the previous value might
+  // have been compiled into the fast construct stub. Moreover, the inobject
+ // slack tracking logic might have adjusted the previous value, so even
+ // passing the same value is risky.
+ if (func->shared()->live_objects_may_exist()) return;
+
func->shared()->set_expected_nof_properties(nof);
if (func->has_initial_map()) {
Handle<Map> new_initial_map =
@@ -158,16 +165,25 @@ void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value) {
static int ExpectedNofPropertiesFromEstimate(int estimate) {
- // TODO(1231235): We need dynamic feedback to estimate the number
- // of expected properties in an object. The static hack below
- // is barely a solution.
- if (estimate == 0) return 4;
- return estimate + 2;
+ // If no properties are added in the constructor, they are more likely
+ // to be added later.
+ if (estimate == 0) estimate = 2;
+
+ // We do not shrink objects that go into a snapshot (yet), so we adjust
+ // the estimate conservatively.
+ if (Serializer::enabled()) return estimate + 2;
+
+ // Inobject slack tracking will reclaim redundant inobject space later,
+ // so we can afford to adjust the estimate generously.
+ return estimate + 6;
}
void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
int estimate) {
+ // See the comment in SetExpectedNofProperties.
+ if (shared->live_objects_may_exist()) return;
+
shared->set_expected_nof_properties(
ExpectedNofPropertiesFromEstimate(estimate));
}
@@ -466,7 +482,8 @@ void InitScriptLineEnds(Handle<Script> script) {
if (!script->source()->IsString()) {
ASSERT(script->source()->IsUndefined());
- script->set_line_ends(*(Factory::NewFixedArray(0)));
+ Handle<FixedArray> empty = Factory::NewFixedArray(0);
+ script->set_line_ends(*empty);
ASSERT(script->line_ends()->IsFixedArray());
return;
}
@@ -762,20 +779,19 @@ static bool CompileLazyHelper(CompilationInfo* info,
bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag) {
- CompilationInfo info(shared);
+ LazySharedCompilationInfo info(shared);
return CompileLazyHelper(&info, flag);
}
bool CompileLazy(Handle<JSFunction> function,
- Handle<Object> receiver,
ClearExceptionFlag flag) {
if (function->shared()->is_compiled()) {
function->set_code(function->shared()->code());
function->shared()->set_code_age(0);
return true;
} else {
- CompilationInfo info(function, 0, receiver);
+ LazyFunctionCompilationInfo info(function, 0);
bool result = CompileLazyHelper(&info, flag);
PROFILE(FunctionCreateEvent(*function));
return result;
@@ -784,14 +800,13 @@ bool CompileLazy(Handle<JSFunction> function,
bool CompileLazyInLoop(Handle<JSFunction> function,
- Handle<Object> receiver,
ClearExceptionFlag flag) {
if (function->shared()->is_compiled()) {
function->set_code(function->shared()->code());
function->shared()->set_code_age(0);
return true;
} else {
- CompilationInfo info(function, 1, receiver);
+ LazyFunctionCompilationInfo info(function, 1);
bool result = CompileLazyHelper(&info, flag);
PROFILE(FunctionCreateEvent(*function));
return result;
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 135dbfb5b..69170ff20 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -345,13 +345,9 @@ bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag);
-bool CompileLazy(Handle<JSFunction> function,
- Handle<Object> receiver,
- ClearExceptionFlag flag);
+bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
-bool CompileLazyInLoop(Handle<JSFunction> function,
- Handle<Object> receiver,
- ClearExceptionFlag flag);
+bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
class NoHandleAllocation BASE_EMBEDDED {
public:
diff --git a/deps/v8/src/hashmap.h b/deps/v8/src/hashmap.h
index b92c71573..3b947beb5 100644
--- a/deps/v8/src/hashmap.h
+++ b/deps/v8/src/hashmap.h
@@ -83,12 +83,12 @@ class HashMap {
void Clear();
// The number of (non-empty) entries in the table.
- uint32_t occupancy() const { return occupancy_; }
+ uint32_t occupancy() const { return occupancy_; }
// The capacity of the table. The implementation
// makes sure that occupancy is at most 80% of
// the table capacity.
- uint32_t capacity() const { return capacity_; }
+ uint32_t capacity() const { return capacity_; }
// Iteration
//
@@ -108,7 +108,7 @@ class HashMap {
uint32_t capacity_;
uint32_t occupancy_;
- Entry* map_end() const { return map_ + capacity_; }
+ Entry* map_end() const { return map_ + capacity_; }
Entry* Probe(void* key, uint32_t hash);
void Initialize(uint32_t capacity);
void Resize();
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 8f7dd3bab..27a14bc25 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -36,7 +36,7 @@ namespace v8 {
namespace internal {
void Heap::UpdateOldSpaceLimits() {
- int old_gen_size = PromotedSpaceSize();
+ intptr_t old_gen_size = PromotedSpaceSize();
old_gen_promotion_limit_ =
old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
old_gen_allocation_limit_ =
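Plugging numbers into the promotion-limit line above: with kMinimumPromotionLimit = 2 MB (per the heap.h hunk later in this diff), an old generation of 30 MB yields

    // old_gen_promotion_limit_ = 30 MB + Max(2 MB, 30 MB / 3) = 40 MB

so the next normally-triggered GC after promoted space passes 40 MB is escalated to a full collection. The intptr_t widening matters precisely in sums like this, which can exceed the 32-bit int range on 64-bit hosts.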
@@ -59,6 +59,11 @@ Object* Heap::AllocateSymbol(Vector<const char> str,
}
+Object* Heap::CopyFixedArray(FixedArray* src) {
+ return CopyFixedArrayWithMap(src, src->map());
+}
+
+
Object* Heap::AllocateRaw(int size_in_bytes,
AllocationSpace space,
AllocationSpace retry_space) {
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 650800fa4..23bfbd807 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -63,8 +63,8 @@ MapSpace* Heap::map_space_ = NULL;
CellSpace* Heap::cell_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;
-int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
-int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
+intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
+intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
int Heap::old_gen_exhausted_ = false;
@@ -75,19 +75,19 @@ int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
// a multiple of Page::kPageSize.
#if defined(ANDROID)
int Heap::max_semispace_size_ = 2*MB;
-int Heap::max_old_generation_size_ = 192*MB;
+intptr_t Heap::max_old_generation_size_ = 192*MB;
int Heap::initial_semispace_size_ = 128*KB;
-size_t Heap::code_range_size_ = 0;
+intptr_t Heap::code_range_size_ = 0;
#elif defined(V8_TARGET_ARCH_X64)
int Heap::max_semispace_size_ = 16*MB;
-int Heap::max_old_generation_size_ = 1*GB;
+intptr_t Heap::max_old_generation_size_ = 1*GB;
int Heap::initial_semispace_size_ = 1*MB;
-size_t Heap::code_range_size_ = 512*MB;
+intptr_t Heap::code_range_size_ = 512*MB;
#else
int Heap::max_semispace_size_ = 8*MB;
-int Heap::max_old_generation_size_ = 512*MB;
+intptr_t Heap::max_old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 512*KB;
-size_t Heap::code_range_size_ = 0;
+intptr_t Heap::code_range_size_ = 0;
#endif
// The snapshot semispace size will be the default semispace size if
@@ -108,7 +108,7 @@ HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
int Heap::survived_since_last_expansion_ = 0;
-int Heap::external_allocation_limit_ = 0;
+intptr_t Heap::external_allocation_limit_ = 0;
Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
@@ -137,13 +137,13 @@ int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif // DEBUG
-int GCTracer::alive_after_last_gc_ = 0;
+intptr_t GCTracer::alive_after_last_gc_ = 0;
double GCTracer::last_gc_end_timestamp_ = 0.0;
int GCTracer::max_gc_pause_ = 0;
-int GCTracer::max_alive_after_gc_ = 0;
+intptr_t GCTracer::max_alive_after_gc_ = 0;
int GCTracer::min_in_mutator_ = kMaxInt;
-int Heap::Capacity() {
+intptr_t Heap::Capacity() {
if (!HasBeenSetup()) return 0;
return new_space_.Capacity() +
@@ -155,7 +155,7 @@ int Heap::Capacity() {
}
-int Heap::CommittedMemory() {
+intptr_t Heap::CommittedMemory() {
if (!HasBeenSetup()) return 0;
return new_space_.CommittedMemory() +
@@ -168,7 +168,7 @@ int Heap::CommittedMemory() {
}
-int Heap::Available() {
+intptr_t Heap::Available() {
if (!HasBeenSetup()) return 0;
return new_space_.Available() +
@@ -289,33 +289,46 @@ void Heap::ReportStatisticsBeforeGC() {
#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
- PrintF("Memory allocator, used: %8d, available: %8d\n",
+ PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d\n",
MemoryAllocator::Size(),
MemoryAllocator::Available());
- PrintF("New space, used: %8d, available: %8d\n",
+ PrintF("New space, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d\n",
Heap::new_space_.Size(),
new_space_.Available());
- PrintF("Old pointers, used: %8d, available: %8d, waste: %8d\n",
+ PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d"
+ ", waste: %8" V8_PTR_PREFIX "d\n",
old_pointer_space_->Size(),
old_pointer_space_->Available(),
old_pointer_space_->Waste());
- PrintF("Old data space, used: %8d, available: %8d, waste: %8d\n",
+ PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d"
+ ", waste: %8" V8_PTR_PREFIX "d\n",
old_data_space_->Size(),
old_data_space_->Available(),
old_data_space_->Waste());
- PrintF("Code space, used: %8d, available: %8d, waste: %8d\n",
+ PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d"
+ ", waste: %8" V8_PTR_PREFIX "d\n",
code_space_->Size(),
code_space_->Available(),
code_space_->Waste());
- PrintF("Map space, used: %8d, available: %8d, waste: %8d\n",
+ PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d"
+ ", waste: %8" V8_PTR_PREFIX "d\n",
map_space_->Size(),
map_space_->Available(),
map_space_->Waste());
- PrintF("Cell space, used: %8d, available: %8d, waste: %8d\n",
+ PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d"
+ ", waste: %8" V8_PTR_PREFIX "d\n",
cell_space_->Size(),
cell_space_->Available(),
cell_space_->Waste());
- PrintF("Large object space, used: %8d, avaialble: %8d\n",
+ PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d\n",
lo_space_->Size(),
lo_space_->Available());
}
@@ -364,8 +377,8 @@ void Heap::GarbageCollectionPrologue() {
#endif
}
-int Heap::SizeOfObjects() {
- int total = 0;
+intptr_t Heap::SizeOfObjects() {
+ intptr_t total = 0;
AllSpaces spaces;
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
total += space->Size();
@@ -388,7 +401,7 @@ void Heap::GarbageCollectionEpilogue() {
if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif
- Counters::alive_after_last_gc.Set(SizeOfObjects());
+ Counters::alive_after_last_gc.Set(static_cast<int>(SizeOfObjects()));
Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
@@ -690,7 +703,7 @@ void Heap::PerformGarbageCollection(GarbageCollector collector,
EnsureFromSpaceIsCommitted();
- int start_new_space_size = Heap::new_space()->Size();
+ int start_new_space_size = Heap::new_space()->SizeAsInt();
if (collector == MARK_COMPACTOR) {
// Perform mark-sweep with optional compaction.
@@ -962,7 +975,7 @@ void Heap::Scavenge() {
DescriptorLookupCache::Clear();
// Used for updating survived_since_last_expansion_ at function end.
- int survived_watermark = PromotedSpaceSize();
+ intptr_t survived_watermark = PromotedSpaceSize();
CheckNewSpaceExpansionCriteria();
@@ -1032,8 +1045,8 @@ void Heap::Scavenge() {
new_space_.set_age_mark(new_space_.top());
// Update how much has survived scavenge.
- IncrementYoungSurvivorsCounter(
- (PromotedSpaceSize() - survived_watermark) + new_space_.Size());
+ IncrementYoungSurvivorsCounter(static_cast<int>(
+ (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
LOG(ResourceEvent("scavenge", "end"));
@@ -1218,7 +1231,14 @@ class ScavengingVisitor : public StaticVisitorBase {
RecordCopiedObject(target);
#endif
HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
-
+#if defined(ENABLE_LOGGING_AND_PROFILING)
+ if (Logger::is_logging() || CpuProfiler::is_profiling()) {
+ if (target->IsJSFunction()) {
+ PROFILE(FunctionMoveEvent(source->address(), target->address()));
+ PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target), source));
+ }
+ }
+#endif
return target;
}
@@ -2068,6 +2088,7 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_debug_info(undefined_value());
share->set_inferred_name(empty_string());
share->set_compiler_hints(0);
+ share->set_initial_map(undefined_value());
share->set_this_property_assignments_count(0);
share->set_this_property_assignments(undefined_value());
share->set_num_literals(0);
@@ -2436,7 +2457,7 @@ Object* Heap::CreateCode(const CodeDesc& desc,
// Compute size
int body_size = RoundUp(desc.instr_size, kObjectAlignment);
int obj_size = Code::SizeFor(body_size);
- ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
+ ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
Object* result;
if (obj_size > MaxObjectSizeInPagedSpace()) {
result = lo_space_->AllocateRawCode(obj_size);
@@ -2650,6 +2671,20 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
}
+static bool HasDuplicates(DescriptorArray* descriptors) {
+ int count = descriptors->number_of_descriptors();
+ if (count > 1) {
+ String* prev_key = descriptors->GetKey(0);
+ for (int i = 1; i != count; i++) {
+ String* current_key = descriptors->GetKey(i);
+ if (prev_key == current_key) return true;
+ prev_key = current_key;
+ }
+ }
+ return false;
+}
+
+
Object* Heap::AllocateInitialMap(JSFunction* fun) {
ASSERT(!fun->has_initial_map());
@@ -2683,24 +2718,38 @@ Object* Heap::AllocateInitialMap(JSFunction* fun) {
if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
int count = fun->shared()->this_property_assignments_count();
if (count > in_object_properties) {
- count = in_object_properties;
- }
- Object* descriptors_obj = DescriptorArray::Allocate(count);
- if (descriptors_obj->IsFailure()) return descriptors_obj;
- DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
- for (int i = 0; i < count; i++) {
- String* name = fun->shared()->GetThisPropertyAssignmentName(i);
- ASSERT(name->IsSymbol());
- FieldDescriptor field(name, i, NONE);
- field.SetEnumerationIndex(i);
- descriptors->Set(i, &field);
+ // Inline constructor can only handle inobject properties.
+ fun->shared()->ForbidInlineConstructor();
+ } else {
+ Object* descriptors_obj = DescriptorArray::Allocate(count);
+ if (descriptors_obj->IsFailure()) return descriptors_obj;
+ DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
+ for (int i = 0; i < count; i++) {
+ String* name = fun->shared()->GetThisPropertyAssignmentName(i);
+ ASSERT(name->IsSymbol());
+ FieldDescriptor field(name, i, NONE);
+ field.SetEnumerationIndex(i);
+ descriptors->Set(i, &field);
+ }
+ descriptors->SetNextEnumerationIndex(count);
+ descriptors->SortUnchecked();
+
+ // The descriptors may contain duplicates because the compiler does not
+ // guarantee the uniqueness of property names (it would have required
+ // quadratic time). Once the descriptors are sorted we can check for
+ // duplicates in linear time.
+ if (HasDuplicates(descriptors)) {
+ fun->shared()->ForbidInlineConstructor();
+ } else {
+ map->set_instance_descriptors(descriptors);
+ map->set_pre_allocated_property_fields(count);
+ map->set_unused_property_fields(in_object_properties - count);
+ }
}
- descriptors->SetNextEnumerationIndex(count);
- descriptors->Sort();
- map->set_instance_descriptors(descriptors);
- map->set_pre_allocated_property_fields(count);
- map->set_unused_property_fields(in_object_properties - count);
}
+
+ fun->shared()->StartInobjectSlackTracking(map);
+
return map;
}
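The SortUnchecked() + HasDuplicates() pairing above is the standard way to avoid quadratic duplicate detection: once the descriptors are ordered by key, any duplicate property names must be adjacent, so a single pass comparing each key to its predecessor suffices. A standalone illustration of the same idea, with raw pointers standing in for the symbol keys (symbols are uniqued, so identity comparison is enough):

    bool HasAdjacentDuplicates(String** sorted_keys, int count) {
      for (int i = 1; i < count; i++) {
        if (sorted_keys[i - 1] == sorted_keys[i]) return true;  // dups are neighbors
      }
      return false;
    }
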
@@ -2717,7 +2766,20 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
// fixed array (eg, Heap::empty_fixed_array()). Currently, the object
// verification code has to cope with (temporarily) invalid objects. See,
// for example, JSArray::JSArrayVerify.
- obj->InitializeBody(map->instance_size());
+ Object* filler;
+ // We cannot always fill with one_pointer_filler_map because objects
+ // created from API functions expect their internal fields to be initialized
+ // with undefined_value.
+ if (map->constructor()->IsJSFunction() &&
+ JSFunction::cast(map->constructor())->shared()->
+ IsInobjectSlackTrackingInProgress()) {
+ // We might want to shrink the object later.
+ ASSERT(obj->GetInternalFieldCount() == 0);
+ filler = Heap::one_pointer_filler_map();
+ } else {
+ filler = Heap::undefined_value();
+ }
+ obj->InitializeBody(map->instance_size(), filler);
}
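The filler choice above is what later makes shrinking safe: while slack tracking is in progress, every not-yet-assigned field holds the one-pointer filler map, so truncating the instance simply leaves behind a valid filler region, whereas objects created from API functions need undefined in their internal fields and therefore keep the old behavior. Note the signature change this requires:

    // Before: obj->InitializeBody(size);          // implicitly undefined everywhere
    // After:  obj->InitializeBody(size, filler);  // caller picks the filler value
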
@@ -2900,19 +2962,13 @@ Object* Heap::CopyJSObject(JSObject* source) {
Object* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
JSGlobalProxy* object) {
- // Allocate initial map if absent.
- if (!constructor->has_initial_map()) {
- Object* initial_map = AllocateInitialMap(constructor);
- if (initial_map->IsFailure()) return initial_map;
- constructor->set_initial_map(Map::cast(initial_map));
- Map::cast(initial_map)->set_constructor(constructor);
- }
-
+ ASSERT(constructor->has_initial_map());
Map* map = constructor->initial_map();
- // Check that the already allocated object has the same size as
+ // Check that the already allocated object has the same size and type as
// objects allocated using the constructor.
ASSERT(map->instance_size() == object->map()->instance_size());
+ ASSERT(map->instance_type() == object->map()->instance_type());
// Allocate the backing storage for the properties.
int prop_size = map->unused_property_fields() - map->inobject_properties();
@@ -3159,6 +3215,7 @@ Object* Heap::AllocateRawFixedArray(int length) {
if (length < 0 || length > FixedArray::kMaxLength) {
return Failure::OutOfMemoryException();
}
+ ASSERT(length > 0);
// Use the general function if we're forced to always allocate.
if (always_allocate()) return AllocateFixedArray(length, TENURED);
// Allocate the raw data for a fixed array.
@@ -3169,16 +3226,19 @@ Object* Heap::AllocateRawFixedArray(int length) {
}
-Object* Heap::CopyFixedArray(FixedArray* src) {
+Object* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
int len = src->length();
Object* obj = AllocateRawFixedArray(len);
if (obj->IsFailure()) return obj;
if (Heap::InNewSpace(obj)) {
HeapObject* dst = HeapObject::cast(obj);
- CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len));
+ dst->set_map(map);
+ CopyBlock(dst->address() + kPointerSize,
+ src->address() + kPointerSize,
+ FixedArray::SizeFor(len) - kPointerSize);
return obj;
}
- HeapObject::cast(obj)->set_map(src->map());
+ HeapObject::cast(obj)->set_map(map);
FixedArray* result = FixedArray::cast(obj);
result->set_length(len);
@@ -3449,8 +3509,10 @@ void Heap::ReportHeapStatistics(const char* title) {
PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
title, gc_count_);
PrintF("mark-compact GC : %d\n", mc_count_);
- PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_);
- PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_);
+ PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
+ old_gen_promotion_limit_);
+ PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
+ old_gen_allocation_limit_);
PrintF("\n");
PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
@@ -4022,15 +4084,16 @@ bool Heap::ConfigureHeap(int max_semispace_size, int max_old_gen_size) {
bool Heap::ConfigureHeapDefault() {
- return ConfigureHeap(FLAG_max_new_space_size / 2, FLAG_max_old_space_size);
+ return ConfigureHeap(
+ FLAG_max_new_space_size * (KB / 2), FLAG_max_old_space_size * MB);
}
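The rewritten default suggests the heap-size flags are now given in kBytes and MBytes respectively: new space is two semispaces, so half of FLAG_max_new_space_size (in kB) becomes the semispace budget, while FLAG_max_old_space_size scales by MB. For example (assuming those flag units, which this hunk implies but does not show):

    // --max_new_space_size=2048  =>  ConfigureHeap(2048 * 512, ...)   // 1 MB semispace
    // --max_old_space_size=512   =>  ConfigureHeap(..., 512 * MB)     // 512 MB old gen
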
void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->start_marker = HeapStats::kStartMarker;
*stats->end_marker = HeapStats::kEndMarker;
- *stats->new_space_size = new_space_.Size();
- *stats->new_space_capacity = new_space_.Capacity();
+ *stats->new_space_size = new_space_.SizeAsInt();
+ *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
*stats->old_pointer_space_size = old_pointer_space_->Size();
*stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
*stats->old_data_space_size = old_data_space_->Size();
@@ -4064,7 +4127,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
}
-int Heap::PromotedSpaceSize() {
+intptr_t Heap::PromotedSpaceSize() {
return old_pointer_space_->Size()
+ old_data_space_->Size()
+ code_space_->Size()
@@ -4175,8 +4238,8 @@ bool Heap::Setup(bool create_heap_objects) {
if (!CreateInitialObjects()) return false;
}
- LOG(IntEvent("heap-capacity", Capacity()));
- LOG(IntEvent("heap-available", Available()));
+ LOG(IntPtrTEvent("heap-capacity", Capacity()));
+ LOG(IntPtrTEvent("heap-available", Available()));
#ifdef ENABLE_LOGGING_AND_PROFILING
// This should be called only after initial objects have been created.
@@ -4210,7 +4273,8 @@ void Heap::TearDown() {
PrintF("mark_compact_count=%d ", mc_count_);
PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
- PrintF("max_alive_after_gc=%d ", GCTracer::get_max_alive_after_gc());
+ PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
+ GCTracer::get_max_alive_after_gc());
PrintF("\n\n");
}
@@ -4336,7 +4400,9 @@ class PrintHandleVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++)
- PrintF(" handle %p to %p\n", p, *p);
+ PrintF(" handle %p to %p\n",
+ reinterpret_cast<void*>(p),
+ reinterpret_cast<void*>(*p));
}
};
@@ -4689,8 +4755,8 @@ void Heap::TracePathToGlobal() {
#endif
-static int CountTotalHolesSize() {
- int holes_size = 0;
+static intptr_t CountTotalHolesSize() {
+ intptr_t holes_size = 0;
OldSpaces spaces;
for (OldSpace* space = spaces.next();
space != NULL;
@@ -4788,13 +4854,14 @@ GCTracer::~GCTracer() {
PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
- PrintF("total_size_before=%d ", start_size_);
- PrintF("total_size_after=%d ", Heap::SizeOfObjects());
- PrintF("holes_size_before=%d ", in_free_list_or_wasted_before_gc_);
- PrintF("holes_size_after=%d ", CountTotalHolesSize());
+ PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
+ PrintF("total_size_after=%" V8_PTR_PREFIX "d ", Heap::SizeOfObjects());
+ PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
+ in_free_list_or_wasted_before_gc_);
+ PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
- PrintF("allocated=%d ", allocated_since_last_gc_);
- PrintF("promoted=%d ", promoted_objects_size_);
+ PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
+ PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
PrintF("\n");
}
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index cfb3b6a03..b1ef19f53 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -245,31 +245,31 @@ class Heap : public AllStatic {
// semi space. The young generation consists of two semi spaces and
// we reserve twice the amount needed for those in order to ensure
// that new space can be aligned to its size.
- static int MaxReserved() {
+ static intptr_t MaxReserved() {
return 4 * reserved_semispace_size_ + max_old_generation_size_;
}
static int MaxSemiSpaceSize() { return max_semispace_size_; }
static int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
static int InitialSemiSpaceSize() { return initial_semispace_size_; }
- static int MaxOldGenerationSize() { return max_old_generation_size_; }
+ static intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
- static int Capacity();
+ static intptr_t Capacity();
// Returns the amount of memory currently committed for the heap.
- static int CommittedMemory();
+ static intptr_t CommittedMemory();
// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
- static int Available();
+ static intptr_t Available();
// Returns the maximum object size in paged space.
static inline int MaxObjectSizeInPagedSpace();
// Returns of size of all objects residing in the heap.
- static int SizeOfObjects();
+ static intptr_t SizeOfObjects();
// Return the starting address and a mask for the new space. And-masking an
// address with the mask will result in the start address of the new space
@@ -498,7 +498,12 @@ class Heap : public AllStatic {
// Make a copy of src and return it. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT static Object* CopyFixedArray(FixedArray* src);
+ MUST_USE_RESULT static inline Object* CopyFixedArray(FixedArray* src);
+
+ // Make a copy of src, set the map, and return the copy. Returns
+ // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ MUST_USE_RESULT static Object* CopyFixedArrayWithMap(FixedArray* src,
+ Map* map);
// Allocates a fixed array initialized with the hole values.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -1064,8 +1069,8 @@ class Heap : public AllStatic {
static int reserved_semispace_size_;
static int max_semispace_size_;
static int initial_semispace_size_;
- static int max_old_generation_size_;
- static size_t code_range_size_;
+ static intptr_t max_old_generation_size_;
+ static intptr_t code_range_size_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
@@ -1093,7 +1098,7 @@ class Heap : public AllStatic {
static HeapState gc_state_;
// Returns the size of object residing in non new spaces.
- static int PromotedSpaceSize();
+ static intptr_t PromotedSpaceSize();
// Returns the amount of external memory registered since last global gc.
static int PromotedExternalMemorySize();
@@ -1128,16 +1133,16 @@ class Heap : public AllStatic {
// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
// which collector to invoke.
- static int old_gen_promotion_limit_;
+ static intptr_t old_gen_promotion_limit_;
// Limit that triggers a global GC as soon as is reasonable. This is
// checked before expanding a paged space in the old generation and on
// every allocation in large object space.
- static int old_gen_allocation_limit_;
+ static intptr_t old_gen_allocation_limit_;
// Limit on the amount of externally allocated memory allowed
// between global GCs. If reached a global GC is forced.
- static int external_allocation_limit_;
+ static intptr_t external_allocation_limit_;
// The amount of external memory registered through the API kept alive
// by global handles
@@ -1226,8 +1231,8 @@ class Heap : public AllStatic {
GCTracer* tracer,
CollectionPolicy collectionPolicy);
- static const int kMinimumPromotionLimit = 2 * MB;
- static const int kMinimumAllocationLimit = 8 * MB;
+ static const intptr_t kMinimumPromotionLimit = 2 * MB;
+ static const intptr_t kMinimumAllocationLimit = 8 * MB;
inline static void UpdateOldSpaceLimits();
@@ -1380,24 +1385,24 @@ class HeapStats {
int* start_marker; // 0
int* new_space_size; // 1
int* new_space_capacity; // 2
- int* old_pointer_space_size; // 3
- int* old_pointer_space_capacity; // 4
- int* old_data_space_size; // 5
- int* old_data_space_capacity; // 6
- int* code_space_size; // 7
- int* code_space_capacity; // 8
- int* map_space_size; // 9
- int* map_space_capacity; // 10
- int* cell_space_size; // 11
- int* cell_space_capacity; // 12
- int* lo_space_size; // 13
+ intptr_t* old_pointer_space_size; // 3
+ intptr_t* old_pointer_space_capacity; // 4
+ intptr_t* old_data_space_size; // 5
+ intptr_t* old_data_space_capacity; // 6
+ intptr_t* code_space_size; // 7
+ intptr_t* code_space_capacity; // 8
+ intptr_t* map_space_size; // 9
+ intptr_t* map_space_capacity; // 10
+ intptr_t* cell_space_size; // 11
+ intptr_t* cell_space_capacity; // 12
+ intptr_t* lo_space_size; // 13
int* global_handle_count; // 14
int* weak_global_handle_count; // 15
int* pending_global_handle_count; // 16
int* near_death_global_handle_count; // 17
int* destroyed_global_handle_count; // 18
- int* memory_allocator_size; // 19
- int* memory_allocator_capacity; // 20
+ intptr_t* memory_allocator_size; // 19
+ intptr_t* memory_allocator_capacity; // 20
int* objects_per_type; // 21
int* size_per_type; // 22
int* os_error; // 23
@@ -1832,7 +1837,7 @@ class GCTracer BASE_EMBEDDED {
static int get_max_gc_pause() { return max_gc_pause_; }
// Returns maximum size of objects alive after GC.
- static int get_max_alive_after_gc() { return max_alive_after_gc_; }
+ static intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
// Returns minimal interval between two subsequent collections.
static int get_min_in_mutator() { return min_in_mutator_; }
@@ -1847,7 +1852,7 @@ class GCTracer BASE_EMBEDDED {
}
double start_time_; // Timestamp set in the constructor.
- int start_size_; // Size of objects in heap set in constructor.
+ intptr_t start_size_; // Size of objects in heap set in constructor.
GarbageCollector collector_; // Type of collector.
// A count (including this one, eg, the first collection is 1) of the
@@ -1879,30 +1884,30 @@ class GCTracer BASE_EMBEDDED {
// Total amount of space either wasted or contained in one of free lists
// before the current GC.
- int in_free_list_or_wasted_before_gc_;
+ intptr_t in_free_list_or_wasted_before_gc_;
// Difference between space used in the heap at the beginning of the current
// collection and the end of the previous collection.
- int allocated_since_last_gc_;
+ intptr_t allocated_since_last_gc_;
// Amount of time spent in mutator that is time elapsed between end of the
// previous collection and the beginning of the current one.
double spent_in_mutator_;
// Size of objects promoted during the current collection.
- int promoted_objects_size_;
+ intptr_t promoted_objects_size_;
// Maximum GC pause.
static int max_gc_pause_;
// Maximum size of objects alive after GC.
- static int max_alive_after_gc_;
+ static intptr_t max_alive_after_gc_;
// Minimal interval between two subsequent collections.
static int min_in_mutator_;
// Size of objects alive after last GC.
- static int alive_after_last_gc_;
+ static intptr_t alive_after_last_gc_;
static double last_gc_end_timestamp_;
};
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index eef307d7e..e201179cd 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -993,6 +993,14 @@ void Assembler::dec_b(Register dst) {
}
+void Assembler::dec_b(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xFE);
+ emit_operand(ecx, dst);
+}
+
+
void Assembler::dec(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1511,32 +1519,6 @@ void Assembler::bind_to(Label* L, int pos) {
}
-void Assembler::link_to(Label* L, Label* appendix) {
- EnsureSpace ensure_space(this);
- last_pc_ = NULL;
- if (appendix->is_linked()) {
- if (L->is_linked()) {
- // Append appendix to L's list.
- Label p;
- Label q = *L;
- do {
- p = q;
- Displacement disp = disp_at(&q);
- disp.next(&q);
- } while (q.is_linked());
- Displacement disp = disp_at(&p);
- disp.link_to(appendix);
- disp_at_put(&p, disp);
- p.Unuse(); // to avoid assertion failure in ~Label
- } else {
- // L is empty, simply use appendix.
- *L = *appendix;
- }
- }
- appendix->Unuse(); // appendix should not be used anymore
-}
-
-
void Assembler::bind(Label* L) {
EnsureSpace ensure_space(this);
last_pc_ = NULL;
@@ -1545,6 +1527,19 @@ void Assembler::bind(Label* L) {
}
+void Assembler::bind(NearLabel* L) {
+ ASSERT(!L->is_bound());
+ last_pc_ = NULL;
+ while (L->unresolved_branches_ > 0) {
+ int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
+ int disp = pc_offset() - branch_pos;
+ ASSERT(is_int8(disp));
+ set_byte_at(branch_pos - sizeof(int8_t), disp);
+ L->unresolved_branches_--;
+ }
+ L->bind_to(pc_offset());
+}
+
void Assembler::call(Label* L) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1641,6 +1636,24 @@ void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
}
+void Assembler::jmp(NearLabel* L) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (L->is_bound()) {
+ const int short_size = 2;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ ASSERT(is_int8(offs - short_size));
+ // 1110 1011 #8-bit disp.
+ EMIT(0xEB);
+ EMIT((offs - short_size) & 0xFF);
+ } else {
+ EMIT(0xEB);
+ EMIT(0x00); // The displacement will be resolved later.
+ L->link_to(pc_offset());
+ }
+}
+
void Assembler::j(Condition cc, Label* L, Hint hint) {
EnsureSpace ensure_space(this);
@@ -1696,6 +1709,27 @@ void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
}
+void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(0 <= cc && cc < 16);
+ if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+ if (L->is_bound()) {
+ const int short_size = 2;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ ASSERT(is_int8(offs - short_size));
+ // 0111 tttn #8-bit disp
+ EMIT(0x70 | cc);
+ EMIT((offs - short_size) & 0xFF);
+ } else {
+ EMIT(0x70 | cc);
+ EMIT(0x00); // The displacement will be resolved later.
+ L->link_to(pc_offset());
+ }
+}
+
+
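Both short-jump emitters above use the same two-byte pattern: one opcode byte, then a signed 8-bit displacement measured from the end of the instruction, which is why the bound case subtracts short_size before range-checking. A worked example for a backward jmp:

    // NearLabel bound at offset 0x10, jmp emitted starting at offset 0x20:
    //   offs = 0x10 - 0x20 = -16; emitted disp = offs - 2 = -18 = 0xEE
    //   bytes: EB EE   (jumps from 0x22 back to 0x10)
    // Unbound case: EB 00 is emitted, and bind(NearLabel*) later patches the
    //   zero byte via set_byte_at(branch_pos - 1, pc_offset() - branch_pos).
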
// FPU instructions.
void Assembler::fld(int i) {
@@ -2179,6 +2213,16 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2201,7 +2245,29 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
}
-void Assembler::movdqa(const Operand& dst, XMMRegister src ) {
+void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0xC2);
+ emit_sse_operand(dst, src);
+ EMIT(1); // LT == 1
+}
+
+
+void Assembler::movaps(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0x28);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movdqa(const Operand& dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2348,7 +2414,7 @@ void Assembler::pxor(XMMRegister dst, XMMRegister src) {
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2358,6 +2424,19 @@ void Assembler::ptest(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
+
+void Assembler::psllq(XMMRegister reg, int8_t imm8) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x73);
+ emit_sse_operand(esi, reg); // esi == 6
+ EMIT(imm8);
+}
+
+
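The emit_sse_operand(esi, reg) call in psllq is the usual ia32 idiom for opcode extensions: PSLLQ-by-immediate is encoded as 66 0F 73 /6, and the /6 extension occupies the reg field of the ModR/M byte, so passing esi (general-purpose register code 6) produces exactly that bit pattern without a dedicated helper. For instance:

    // psllq xmm2, 17  =>  66 0F 73 F2 11
    //   ModR/M 0xF2 = mod 11 | reg 110 (/6, from esi) | rm 010 (xmm2)
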
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 928f17289..d8051c871 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -64,15 +64,15 @@ namespace internal {
// and best performance in optimized code.
//
struct Register {
- bool is_valid() const { return 0 <= code_ && code_ < 8; }
- bool is(Register reg) const { return code_ == reg.code_; }
+ bool is_valid() const { return 0 <= code_ && code_ < 8; }
+ bool is(Register reg) const { return code_ == reg.code_; }
// eax, ebx, ecx and edx are byte registers, the rest are not.
- bool is_byte_register() const { return code_ <= 3; }
- int code() const {
+ bool is_byte_register() const { return code_ <= 3; }
+ int code() const {
ASSERT(is_valid());
return code_;
}
- int bit() const {
+ int bit() const {
ASSERT(is_valid());
return 1 << code_;
}
@@ -93,8 +93,8 @@ const Register no_reg = { -1 };
struct XMMRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 8; }
- int code() const {
+ bool is_valid() const { return 0 <= code_ && code_ < 8; }
+ int code() const {
ASSERT(is_valid());
return code_;
}
@@ -376,6 +376,7 @@ class CpuFeatures : public AllStatic {
static bool IsSupported(CpuFeature f) {
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
+ if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
if (f == RDTSC && !FLAG_enable_rdtsc) return false;
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
@@ -595,6 +596,7 @@ class Assembler : public Malloced {
void cmp(const Operand& op, Handle<Object> handle);
void dec_b(Register dst);
+ void dec_b(const Operand& dst);
void dec(Register dst);
void dec(const Operand& dst);
@@ -687,6 +689,7 @@ class Assembler : public Malloced {
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
+ void bind(NearLabel* L);
// Calls
void call(Label* L);
@@ -701,11 +704,17 @@ class Assembler : public Malloced {
void jmp(const Operand& adr);
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
+ // Short jump
+ void jmp(NearLabel* L);
+
// Conditional jumps
void j(Condition cc, Label* L, Hint hint = no_hint);
void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
+ // Conditional short jump
+ void j(Condition cc, NearLabel* L, Hint hint = no_hint);
+
// Floating-point operations
void fld(int i);
void fstp(int i);
@@ -788,9 +797,15 @@ class Assembler : public Malloced {
void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
+ void andpd(XMMRegister dst, XMMRegister src);
+
void ucomisd(XMMRegister dst, XMMRegister src);
void movmskpd(Register dst, XMMRegister src);
+ void cmpltsd(XMMRegister dst, XMMRegister src);
+
+ void movaps(XMMRegister dst, XMMRegister src);
+
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void movdqu(XMMRegister dst, const Operand& src);
@@ -806,6 +821,8 @@ class Assembler : public Malloced {
void pxor(XMMRegister dst, XMMRegister src);
void ptest(XMMRegister dst, XMMRegister src);
+ void psllq(XMMRegister reg, int8_t imm8);
+
// Parallel XMM operations.
void movntdqa(XMMRegister src, const Operand& dst);
void movntdq(const Operand& dst, XMMRegister src);
@@ -839,9 +856,9 @@ class Assembler : public Malloced {
// Used for inline tables, e.g., jump-tables.
void dd(uint32_t data, RelocInfo::Mode reloc_info);
- int pc_offset() const { return pc_ - buffer_; }
+ int pc_offset() const { return pc_ - buffer_; }
int current_statement_position() const { return current_statement_position_; }
- int current_position() const { return current_position_; }
+ int current_position() const { return current_position_; }
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
@@ -868,6 +885,7 @@ class Assembler : public Malloced {
private:
byte* addr_at(int pos) { return buffer_ + pos; }
byte byte_at(int pos) { return buffer_[pos]; }
+ void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
uint32_t long_at(int pos) {
return *reinterpret_cast<uint32_t*>(addr_at(pos));
}
@@ -902,7 +920,6 @@ class Assembler : public Malloced {
// labels
void print(Label* L);
void bind_to(Label* L, int pos);
- void link_to(Label* L, Label* appendix);
// displacements
inline Displacement disp_at(Label* L);
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index a095ef7bf..0ad3e6d48 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -105,7 +105,11 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool count_constructions) {
+ // Should never count constructions for api objects.
+ ASSERT(!is_api_function || !count_constructions);
+
// Enter a construct frame.
__ EnterConstructFrame();
@@ -148,6 +152,26 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CmpInstanceType(eax, JS_FUNCTION_TYPE);
__ j(equal, &rt_call);
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ dec_b(FieldOperand(ecx, SharedFunctionInfo::kConstructionCountOffset));
+ __ j(not_zero, &allocate);
+
+ __ push(eax);
+ __ push(edi);
+
+ __ push(edi); // constructor
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ pop(edi);
+ __ pop(eax);
+
+ __ bind(&allocate);
+ }
+
// Now allocate the JSObject on the heap.
// edi: constructor
// eax: initial map
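Taken together, the countdown block above is the ia32 arm of inobject slack tracking: each generous allocation decrements a one-byte counter in the SharedFunctionInfo, and only when it reaches zero does the comparatively expensive Runtime::kFinalizeInstanceSize call run and replace the construct stub, so steady-state construction pays a single dec_b plus a not-taken branch. In rough pseudocode (a sketch, not the literal assembly):

    if (--shared->construction_count != 0) goto allocate;   // common case
    FinalizeInstanceSize(constructor);  // once per function: shrink map, swap stub
    allocate:
      // proceed with the (possibly now smaller) instance size
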
@@ -167,7 +191,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// ebx: JSObject
// edi: start of next object
{ Label loop, entry;
- __ mov(edx, Factory::undefined_value());
+    // Use the one-pointer filler map so the object can be truncated later
+    // if inobject slack tracking finds unused fields.
+ if (count_constructions) {
+ __ mov(edx, Factory::one_pointer_filler_map());
+ } else {
+ __ mov(edx, Factory::undefined_value());
+ }
__ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
@@ -351,13 +380,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true);
+}
+
+
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, false);
}
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index dccf36b25..3e2b7ae1b 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -208,7 +208,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
void ToBooleanStub::Generate(MacroAssembler* masm) {
- Label false_result, true_result, not_string;
+ NearLabel false_result, true_result, not_string;
__ mov(eax, Operand(esp, 1 * kPointerSize));
// 'null' => false.
@@ -966,7 +966,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
__ mov(ebx, Operand(eax)); // ebx: result
- Label skip_allocation;
+ NearLabel skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
case OVERWRITE_RIGHT:
@@ -1036,7 +1036,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
// Test if left operand is a string.
- Label lhs_not_string;
+ NearLabel lhs_not_string;
__ test(lhs, Immediate(kSmiTagMask));
__ j(zero, &lhs_not_string);
__ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
@@ -1045,7 +1045,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
__ TailCallStub(&string_add_left_stub);
- Label call_runtime_with_args;
+ NearLabel call_runtime_with_args;
// Left operand is not a string, test right.
__ bind(&lhs_not_string);
__ test(rhs, Immediate(kSmiTagMask));
@@ -1221,8 +1221,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Test that eax is a number.
Label runtime_call;
Label runtime_call_clear_stack;
- Label input_not_smi;
- Label loaded;
+ NearLabel input_not_smi;
+ NearLabel loaded;
__ mov(eax, Operand(esp, kPointerSize));
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &input_not_smi);
@@ -1295,7 +1295,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ lea(ecx, Operand(ecx, ecx, times_2, 0));
__ lea(ecx, Operand(eax, ecx, times_4, 0));
// Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
+ NearLabel cache_miss;
__ cmp(ebx, Operand(ecx, 0));
__ j(not_equal, &cache_miss);
__ cmp(edx, Operand(ecx, kIntSize));
@@ -1338,7 +1338,7 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// Only free register is edi.
- Label done;
+ NearLabel done;
ASSERT(type_ == TranscendentalCache::SIN ||
type_ == TranscendentalCache::COS);
// More transcendental types can be added later.
@@ -1346,7 +1346,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// Both fsin and fcos require arguments in the range +/-2^63 and
// return NaN for infinities and NaN. They can share all code except
// the actual fsin/fcos operation.
- Label in_range;
+ NearLabel in_range;
// If argument is outside the range -2^63..2^63, fsin/cos doesn't
// work. We must reduce it to the appropriate range.
__ mov(edi, edx);
@@ -1357,7 +1357,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
__ j(below, &in_range, taken);
// Check for infinity and NaN. Both return NaN for sin.
__ cmp(Operand(edi), Immediate(0x7ff00000));
- Label non_nan_result;
+ NearLabel non_nan_result;
__ j(not_equal, &non_nan_result, taken);
// Input is +/-Infinity or NaN. Result is NaN.
__ fstp(0);
@@ -1377,7 +1377,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
__ fld(1);
// FPU Stack: input, 2*pi, input.
{
- Label no_exceptions;
+ NearLabel no_exceptions;
__ fwait();
__ fnstsw_ax();
// Clear if Illegal Operand or Zero Division exceptions are set.
@@ -1389,7 +1389,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// Compute st(0) % st(1)
{
- Label partial_remainder_loop;
+ NearLabel partial_remainder_loop;
__ bind(&partial_remainder_loop);
__ fprem1();
__ fwait();
@@ -1552,7 +1552,7 @@ void IntegerConvert(MacroAssembler* masm,
__ shr_cl(scratch2);
// Now the unsigned answer is in scratch2. We need to move it to ecx and
// we may need to fix the sign.
- Label negative;
+ NearLabel negative;
__ xor_(ecx, Operand(ecx));
__ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
__ j(greater, &negative);
@@ -1702,7 +1702,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
- Label load_smi, done;
+ NearLabel load_smi, done;
__ test(number, Immediate(kSmiTagMask));
__ j(zero, &load_smi, not_taken);
@@ -1720,7 +1720,7 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
- Label load_smi_edx, load_eax, load_smi_eax, done;
+ NearLabel load_smi_edx, load_eax, load_smi_eax, done;
// Load operand in edx into xmm0.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
@@ -1750,7 +1750,7 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Label* not_numbers) {
- Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
+ NearLabel load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
// Load operand in edx into xmm0, or branch to not_numbers.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
@@ -1798,7 +1798,7 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
Register scratch,
ArgLocation arg_location) {
- Label load_smi_1, load_smi_2, done_load_1, done;
+ NearLabel load_smi_1, load_smi_2, done_load_1, done;
if (arg_location == ARGS_IN_REGISTERS) {
__ mov(scratch, edx);
} else {
@@ -1857,7 +1857,7 @@ void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch) {
- Label test_other, done;
+ NearLabel test_other, done;
// Test if both operands are floats or smi -> scratch=k_is_float;
// Otherwise scratch = k_not_float.
__ test(edx, Immediate(kSmiTagMask));
@@ -1884,7 +1884,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
if (op_ == Token::SUB) {
if (include_smi_code_) {
// Check whether the value is a smi.
- Label try_float;
+ NearLabel try_float;
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &try_float, not_taken);
@@ -1953,7 +1953,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
&slow);
// Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
+ NearLabel try_float;
__ not_(ecx);
__ cmp(ecx, 0xc0000000);
__ j(sign, &try_float, not_taken);
@@ -2026,7 +2026,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ j(not_zero, &slow, not_taken);
// Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
+ NearLabel adaptor;
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
__ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -2103,7 +2103,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
- Label add_arguments_object;
+ NearLabel add_arguments_object;
__ bind(&try_allocate);
__ test(ecx, Operand(ecx));
__ j(zero, &add_arguments_object);
@@ -2155,7 +2155,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ SmiUntag(ecx);
// Copy the fixed array slots.
- Label loop;
+ NearLabel loop;
__ bind(&loop);
__ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
__ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
@@ -2383,7 +2383,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 4: End of string data
// Argument 3: Start of string data
- Label setup_two_byte, setup_rest;
+ NearLabel setup_two_byte, setup_rest;
__ test(edi, Operand(edi));
__ mov(edi, FieldOperand(eax, String::kLengthOffset));
__ j(zero, &setup_two_byte);
@@ -2477,7 +2477,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ebx: last_match_info backing store (FixedArray)
// ecx: offsets vector
// edx: number of capture registers
- Label next_capture, done;
+ NearLabel next_capture, done;
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
@@ -2533,13 +2533,13 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// number string cache for smis is just the smi value, and the hash for
// doubles is the xor of the upper and lower words. See
// Heap::GetNumberStringCache.
- Label smi_hash_calculated;
- Label load_result_from_cache;
+ NearLabel smi_hash_calculated;
+ NearLabel load_result_from_cache;
if (object_is_smi) {
__ mov(scratch, object);
__ SmiUntag(scratch);
} else {
- Label not_smi, hash_calculated;
+ NearLabel not_smi, hash_calculated;
STATIC_ASSERT(kSmiTag == 0);
__ test(object, Immediate(kSmiTagMask));
__ j(not_zero, &not_smi);
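
The cache hash referred to above is deliberately cheap: a smi indexes the cache with its own value, and a heap number xors the two 32-bit halves of its IEEE 754 bit pattern. The double case as a standalone sketch:

    #include <cstdint>
    #include <cstring>

    // Number-string cache hash for a heap number: xor of the upper
    // and lower words of the double (cf. Heap::GetNumberStringCache).
    static uint32_t NumberCacheHash(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
    }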
@@ -2663,7 +2663,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
if (cc_ != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
- Label check_for_nan;
+ NearLabel check_for_nan;
__ cmp(edx, Factory::undefined_value());
__ j(not_equal, &check_for_nan);
__ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
@@ -2678,7 +2678,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
} else {
- Label heap_number;
+ NearLabel heap_number;
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
__ j(equal, &heap_number);
@@ -2713,7 +2713,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ setcc(above_equal, eax);
__ ret(0);
} else {
- Label nan;
+ NearLabel nan;
__ j(above_equal, &nan);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
@@ -2730,7 +2730,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Non-strict object equality is slower, so it is handled later in the stub.
if (cc_ == equal && strict_) {
Label slow; // Fallthrough label.
- Label not_smis;
+ NearLabel not_smis;
// If we're doing a strict equality comparison, we don't have to do
// type conversion, so we generate code to do fast comparison for objects
// and oddballs. Non-smi numbers and strings still go through the usual
@@ -2771,13 +2771,13 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Get the type of the first operand.
// If the first object is a JS object, we have done pointer comparison.
- Label first_non_object;
+ NearLabel first_non_object;
STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(below, &first_non_object);
// Return non-zero (eax is not zero)
- Label return_not_equal;
+ NearLabel return_not_equal;
STATIC_ASSERT(kHeapObjectTag != 0);
__ bind(&return_not_equal);
__ ret(0);
@@ -2828,7 +2828,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, not_taken);
- Label below_label, above_label;
+ NearLabel below_label, above_label;
// Return a result of -1, 0, or 1, based on EFLAGS.
__ j(below, &below_label, not_taken);
__ j(above, &above_label, not_taken);
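
ucomisd raises the parity flag for an unordered result, i.e. when either operand is NaN, so testing parity_even before below/above keeps a NaN from being classified as a numeric ordering. The same guard in portable C++:

    #include <cmath>

    // Three-way compare that routes NaN to a separate 'unordered'
    // outcome first, mirroring the parity_even branch above.
    static int CompareDoubles(double lhs, double rhs, bool* unordered) {
      *unordered = std::isunordered(lhs, rhs);
      if (*unordered) return 0;
      if (lhs < rhs) return -1;
      if (lhs > rhs) return 1;
      return 0;
    }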
@@ -2893,8 +2893,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
- Label not_both_objects;
- Label return_unequal;
+ NearLabel not_both_objects;
+ NearLabel return_unequal;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
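
The addition trick works because of the tagging scheme: smis carry a low tag bit of 0 and heap object pointers a low tag bit of 1, so the sum of the two words is odd exactly when one operand is a smi and the other a heap object. A minimal model, assuming the standard kSmiTag == 0 / kHeapObjectTag == 1 encoding:

    #include <cstdint>

    // smi + heap object  -> low bit set   (0 + 1)
    // heap + heap object -> low bit clear (1 + 1)
    static bool ExactlyOneIsSmi(uintptr_t a, uintptr_t b) {
      return ((a + b) & 1) != 0;
    }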
@@ -3056,7 +3056,7 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// not NULL. The frame pointer is NULL in the exception handler of
// a JS entry frame.
__ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
- Label skip;
+ NearLabel skip;
__ cmp(ebp, 0);
__ j(equal, &skip, not_taken);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -3188,7 +3188,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Make sure we're not trying to return 'the hole' from the runtime
// call as this may lead to crashes in the IC code later.
if (FLAG_debug_code) {
- Label okay;
+ NearLabel okay;
__ cmp(eax, Factory::the_hole_value());
__ j(not_equal, &okay);
__ int3();
@@ -3250,7 +3250,7 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
__ mov(esp, Operand::StaticVariable(handler_address));
// Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
+ NearLabel loop, done;
__ bind(&loop);
// Load the type of the current stack handler.
const int kStateOffset = StackHandlerConstants::kStateOffset;
@@ -3468,7 +3468,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// edx is function, eax is map.
// Look up the function and the map in the instanceof cache.
- Label miss;
+ NearLabel miss;
ExternalReference roots_address = ExternalReference::roots_address();
__ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
@@ -3500,7 +3500,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
// Loop through the prototype chain looking for the function prototype.
- Label loop, is_instance, is_not_instance;
+ NearLabel loop, is_instance, is_not_instance;
__ bind(&loop);
__ cmp(ecx, Operand(ebx));
__ j(equal, &is_instance);
@@ -3837,7 +3837,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// eax: first string
// edx: second string
// Check if either of the strings are empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
+ NearLabel second_not_zero_length, both_not_zero_length;
__ mov(ecx, FieldOperand(edx, String::kLengthOffset));
STATIC_ASSERT(kSmiTag == 0);
__ test(ecx, Operand(ecx));
@@ -4123,7 +4123,7 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register count,
Register scratch,
bool ascii) {
- Label loop;
+ NearLabel loop;
__ bind(&loop);
// This loop just copies one character at a time, as it is only used for very
// short strings.
@@ -4170,7 +4170,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
}
// Don't enter the rep movs if there are less than 4 bytes to copy.
- Label last_bytes;
+ NearLabel last_bytes;
__ test(count, Immediate(~3));
__ j(zero, &last_bytes);
@@ -4190,7 +4190,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ j(zero, &done);
// Copy remaining characters.
- Label loop;
+ NearLabel loop;
__ bind(&loop);
__ mov_b(scratch, Operand(src, 0));
__ mov_b(Operand(dest, 0), scratch);
@@ -4216,7 +4216,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Make sure that both characters are not digits, as such strings have a
// different hash algorithm. Don't try to look for these in the symbol table.
- Label not_array_index;
+ NearLabel not_array_index;
__ mov(scratch, c1);
__ sub(Operand(scratch), Immediate(static_cast<int>('0')));
__ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
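
The subtract-and-compare pair is the classic unsigned range check: after c - '0', any character below '0' wraps to a huge unsigned value, so a single unsigned comparison against '9' - '0' tests both bounds at once. In C++:

    // Single-comparison digit test: values outside ['0', '9'] fall
    // out of range after the unsigned subtraction.
    static bool IsAsciiDigit(unsigned c) {
      return c - '0' <= static_cast<unsigned>('9' - '0');
    }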
@@ -4374,7 +4374,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
__ add(hash, Operand(scratch));
// if (hash == 0) hash = 27;
- Label hash_not_zero;
+ NearLabel hash_not_zero;
__ test(hash, Operand(hash));
__ j(not_zero, &hash_not_zero);
__ mov(hash, Immediate(27));
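
A hash of 0 is reserved to mean "not yet computed" in the string's hash field, hence the remap to the arbitrary non-zero constant 27. A sketch of the final mixing with that guard (the shift amounts are a model of V8's one-at-a-time hash finalization surrounding this code, not a spec):

    #include <cstdint>

    // Final avalanche of the running string hash; 0 is reserved for
    // "hash not computed", so it is remapped to 27.
    static uint32_t FinalizeStringHash(uint32_t hash) {
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      if (hash == 0) hash = 27;
      return hash;
    }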
@@ -4543,7 +4543,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ IncrementCounter(&Counters::string_compare_native, 1);
// Find minimum length.
- Label left_shorter;
+ NearLabel left_shorter;
__ mov(scratch1, FieldOperand(left, String::kLengthOffset));
__ mov(scratch3, scratch1);
__ sub(scratch3, FieldOperand(right, String::kLengthOffset));
@@ -4579,7 +4579,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
{
// Compare loop.
- Label loop;
+ NearLabel loop;
__ bind(&loop);
// Compare characters.
__ mov_b(scratch2, Operand(left, index, times_1, 0));
@@ -4625,7 +4625,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ mov(edx, Operand(esp, 2 * kPointerSize)); // left
__ mov(eax, Operand(esp, 1 * kPointerSize)); // right
- Label not_same;
+ NearLabel not_same;
__ cmp(edx, Operand(eax));
__ j(not_equal, &not_same);
STATIC_ASSERT(EQUAL == 0);
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 86f3877c7..9c8573cea 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -249,7 +249,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// the function.
for (int i = 0; i < scope()->num_parameters(); i++) {
Variable* par = scope()->parameter(i);
- Slot* slot = par->slot();
+ Slot* slot = par->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
// The use of SlotOperand below is safe in unspilled code
// because the slot is guaranteed to be a context slot.
@@ -285,7 +285,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Initialize ThisFunction reference if present.
if (scope()->is_function_scope() && scope()->function() != NULL) {
frame_->Push(Factory::the_hole_value());
- StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
+ StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
}
@@ -717,10 +717,10 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
Property property(&global, &key, RelocInfo::kNoPosition);
Reference ref(this, &property);
ref.GetValue();
- } else if (variable != NULL && variable->slot() != NULL) {
+ } else if (variable != NULL && variable->AsSlot() != NULL) {
// For a variable that rewrites to a slot, we signal it is the immediate
// subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+ LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
} else {
// Anything else can be handled normally.
Load(expr);
@@ -759,17 +759,17 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
frame_->Push(&result);
}
- Variable* arguments = scope()->arguments()->var();
- Variable* shadow = scope()->arguments_shadow()->var();
- ASSERT(arguments != NULL && arguments->slot() != NULL);
- ASSERT(shadow != NULL && shadow->slot() != NULL);
+ Variable* arguments = scope()->arguments();
+ Variable* shadow = scope()->arguments_shadow();
+ ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
+ ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
JumpTarget done;
bool skip_arguments = false;
if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
// We have to skip storing into the arguments slot if it has
// already been written to. This can happen if a function
// has a local variable named 'arguments'.
- LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
+ LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
Result probe = frame_->Pop();
if (probe.is_constant()) {
// We have to skip updating the arguments object if it has
@@ -782,10 +782,10 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
}
}
if (!skip_arguments) {
- StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
}
- StoreToSlot(shadow->slot(), NOT_CONST_INIT);
+ StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
return frame_->Pop();
}
@@ -842,7 +842,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
LoadGlobal();
ref->set_type(Reference::NAMED);
} else {
- ASSERT(var->slot() != NULL);
+ ASSERT(var->AsSlot() != NULL);
ref->set_type(Reference::SLOT);
}
} else {
@@ -3274,7 +3274,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
Load(receiver);
- LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
// Emit the source position information after having loaded the
// receiver and the arguments.
@@ -3536,7 +3536,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
// If it was not possible to allocate the variable at compile time,
// we need to "declare" it at runtime to make sure it actually
@@ -4252,7 +4252,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// the bottom check of the loop condition.
if (node->is_fast_smi_loop()) {
// Set number type of the loop variable to smi.
- SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
+ SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
}
Visit(node->body());
@@ -4278,7 +4278,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// expression if we are in a fast smi loop condition.
if (node->is_fast_smi_loop() && has_valid_frame()) {
// Set number type of the loop variable to smi.
- SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
+ SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
}
// Based on the condition analysis, compile the backward jump as
@@ -4577,8 +4577,8 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
// Store the caught exception in the catch variable.
Variable* catch_var = node->catch_var()->var();
- ASSERT(catch_var != NULL && catch_var->slot() != NULL);
- StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
+ ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
+ StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
// Remove the exception from the stack.
frame_->Drop();
@@ -5173,7 +5173,7 @@ void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
done->Jump(result);
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
if (potential_slot != NULL) {
// Generate fast case for locals that rewrite to slots.
@@ -5206,7 +5206,7 @@ void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
Result arguments = allocator()->Allocate();
ASSERT(arguments.is_valid());
__ mov(arguments.reg(),
- ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+ ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
arguments,
slow));
frame_->Push(&arguments);
@@ -5714,7 +5714,7 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
Comment cmnt(masm(), "[ Variable Assignment");
Variable* var = node->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
ASSERT(slot != NULL);
// Evaluate the right-hand side.
@@ -6063,14 +6063,14 @@ void CodeGenerator::VisitCall(Call* node) {
// in generated code. If we succeed, there is no need to perform a
// context lookup in the runtime system.
JumpTarget done;
- if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- ASSERT(var->slot()->type() == Slot::LOOKUP);
+ if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
JumpTarget slow;
// Prepare the stack for the call to
// ResolvePossiblyDirectEvalNoLookup by pushing the loaded
// function, the first argument to the eval call and the
// receiver.
- Result fun = LoadFromGlobalSlotCheckExtensions(var->slot(),
+ Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
NOT_INSIDE_TYPEOF,
&slow);
frame_->Push(&fun);
@@ -6153,8 +6153,8 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->RestoreContextRegister();
frame_->Push(&result);
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
+ } else if (var != NULL && var->AsSlot() != NULL &&
+ var->AsSlot()->type() == Slot::LOOKUP) {
// ----------------------------------
// JavaScript examples:
//
@@ -6173,7 +6173,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Generate fast case for loading functions from slots that
// correspond to local/global variables or arguments unless they
// are shadowed by eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(var->slot(),
+ EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
NOT_INSIDE_TYPEOF,
&function,
&slow,
@@ -8053,7 +8053,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (variable != NULL) {
- Slot* slot = variable->slot();
+ Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
frame_->Push(variable->name());
@@ -9144,9 +9144,15 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
public:
DeferredReferenceGetNamedValue(Register dst,
Register receiver,
- Handle<String> name)
- : dst_(dst), receiver_(receiver), name_(name) {
- set_comment("[ DeferredReferenceGetNamedValue");
+ Handle<String> name,
+ bool is_contextual)
+ : dst_(dst),
+ receiver_(receiver),
+ name_(name),
+ is_contextual_(is_contextual) {
+ set_comment(is_contextual
+ ? "[ DeferredReferenceGetNamedValue (contextual)"
+ : "[ DeferredReferenceGetNamedValue");
}
virtual void Generate();
@@ -9158,6 +9164,7 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
Register dst_;
Register receiver_;
Handle<String> name_;
+ bool is_contextual_;
};
@@ -9167,9 +9174,15 @@ void DeferredReferenceGetNamedValue::Generate() {
}
__ Set(ecx, Immediate(name_));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // The call must be followed by a test eax instruction to indicate
- // that the inobject property case was inlined.
+ RelocInfo::Mode mode = is_contextual_
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ __ call(ic, mode);
+ // The call must be followed by:
+ // - a test eax instruction to indicate that the inobject property
+ // case was inlined.
+ // - a mov ecx instruction to indicate that the contextual property
+ // load was inlined.
//
// Store the delta to the map check instruction here in the test
// instruction. Use masm_-> instead of the __ macro since the
@@ -9177,8 +9190,13 @@ void DeferredReferenceGetNamedValue::Generate() {
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
// Here we use masm_-> instead of the __ macro because this is the
// instruction that gets patched and coverage code gets in the way.
- masm_->test(eax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+ if (is_contextual_) {
+ masm_->mov(ecx, -delta_to_patch_site);
+ __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+ } else {
+ masm_->test(eax, Immediate(-delta_to_patch_site));
+ __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+ }
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -9349,12 +9367,17 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
#ifdef DEBUG
int original_height = frame()->height();
#endif
+
+ bool contextual_load_in_builtin =
+ is_contextual &&
+ (Bootstrapper::IsActive() ||
+ (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
+
Result result;
- // Do not inline the inobject property case for loads from the global
- // object. Also do not inline for unoptimized code. This saves time in
- // the code generator. Unoptimized code is toplevel code or code that is
- // not in a loop.
- if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+ // Do not inline in global code or when not in a loop.
+ if (scope()->is_global_scope() ||
+ loop_nesting() == 0 ||
+ contextual_load_in_builtin) {
Comment cmnt(masm(), "[ Load from named Property");
frame()->Push(name);
@@ -9367,19 +9390,26 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
// instruction here.
__ nop();
} else {
- // Inline the inobject property case.
- Comment cmnt(masm(), "[ Inlined named property load");
+ // Inline the property load.
+ Comment cmnt(masm(), is_contextual
+ ? "[ Inlined contextual property load"
+ : "[ Inlined named property load");
Result receiver = frame()->Pop();
receiver.ToRegister();
result = allocator()->Allocate();
ASSERT(result.is_valid());
DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
+ new DeferredReferenceGetNamedValue(result.reg(),
+ receiver.reg(),
+ name,
+ is_contextual);
- // Check that the receiver is a heap object.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
+ if (!is_contextual) {
+ // Check that the receiver is a heap object.
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+ }
__ bind(deferred->patch_site());
// This is the map check instruction that will be patched (so we can't
@@ -9391,17 +9421,33 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
// which allows the assert below to succeed and patching to work.
deferred->Branch(not_equal);
- // The delta from the patch label to the load offset must be statically
- // known.
+ // The delta from the patch label to the actual load must be
+ // statically known.
ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
LoadIC::kOffsetToLoadInstruction);
- // The initial (invalid) offset has to be large enough to force a 32-bit
- // instruction encoding to allow patching with an arbitrary offset. Use
- // kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
- __ IncrementCounter(&Counters::named_load_inline, 1);
+ if (is_contextual) {
+ // Load the (initially invalid) cell and get its value.
+ masm()->mov(result.reg(), Factory::null_value());
+ if (FLAG_debug_code) {
+ __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
+ Factory::global_property_cell_map());
+ __ Assert(equal, "Uninitialized inlined contextual load");
+ }
+ __ mov(result.reg(),
+ FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset));
+ __ cmp(result.reg(), Factory::the_hole_value());
+ deferred->Branch(equal);
+ __ IncrementCounter(&Counters::named_load_global_inline, 1);
+ } else {
+ // The initial (invalid) offset has to be large enough to force a 32-bit
+ // instruction encoding to allow patching with an arbitrary offset. Use
+ // kMaxInt (minus kHeapObjectTag).
+ int offset = kMaxInt;
+ masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
+ __ IncrementCounter(&Counters::named_load_inline, 1);
+ }
+
deferred->BindExit();
}
ASSERT(frame()->height() == original_height - 1);
@@ -9741,7 +9787,7 @@ void Reference::GetValue() {
switch (type_) {
case SLOT: {
Comment cmnt(masm, "[ Load from Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
ASSERT(slot != NULL);
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
if (!persist_after_get_) set_unloaded();
@@ -9786,7 +9832,7 @@ void Reference::TakeValue() {
return;
}
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
ASSERT(slot != NULL);
if (slot->type() == Slot::LOOKUP ||
slot->type() == Slot::CONTEXT ||
@@ -9819,7 +9865,7 @@ void Reference::SetValue(InitState init_state) {
switch (type_) {
case SLOT: {
Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
ASSERT(slot != NULL);
cgen_->StoreToSlot(slot, init_state);
set_unloaded();
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 64305ef69..52c2b3848 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -685,7 +685,8 @@ int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
case 0xDD: switch (regop) {
case 0: mnem = "fld_d"; break;
- case 2: mnem = "fstp"; break;
+ case 1: mnem = "fisttp_d"; break;
+ case 2: mnem = "fst_d"; break;
case 3: mnem = "fstp_d"; break;
default: UnimplementedInstruction();
}
@@ -717,6 +718,10 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
case 0xD9:
switch (modrm_byte & 0xF8) {
+ case 0xC0:
+ mnem = "fld";
+ has_register = true;
+ break;
case 0xC8:
mnem = "fxch";
has_register = true;
@@ -957,6 +962,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else if (f0byte == 0xA2 || f0byte == 0x31) {
AppendToBuffer("%s", f0mnem);
data += 2;
+ } else if (f0byte == 0x28) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movaps %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
@@ -1156,6 +1169,23 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0x73) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("psllq %s,%d",
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0x54) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("andpd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else {
UnimplementedInstruction();
}
@@ -1168,12 +1198,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
{ data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- if (mod == 3 && regop == ecx) {
- AppendToBuffer("dec_b %s", NameOfCPURegister(rm));
+ if (regop == ecx) {
+ AppendToBuffer("dec_b ");
+ data += PrintRightOperand(data);
} else {
UnimplementedInstruction();
}
- data++;
}
break;
@@ -1274,6 +1304,23 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm));
data++;
}
+ } else if (b2 == 0xC2) {
+ // Intel manual 2A, Table 3-18.
+ const char* const pseudo_op[] = {
+ "cmpeqsd",
+ "cmpltsd",
+ "cmplesd",
+ "cmpunordsd",
+ "cmpneqsd",
+ "cmpnltsd",
+ "cmpnlesd",
+ "cmpordsd"
+ };
+ AppendToBuffer("%s %s,%s",
+ pseudo_op[data[1]],
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data += 2;
} else {
if (mod != 0x3) {
AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
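
The imm8 trailing F2 0F C2 selects one of eight comparison predicates (Intel manual 2A, Table 3-18), which is why the disassembler indexes the pseudo-op table with data[1]. The same lookup as a small helper:

    #include <cstdint>

    // Maps the imm8 following F2 0F C2 /r to its cmpsd pseudo-op.
    static const char* CmpsdMnemonic(uint8_t imm8) {
      static const char* const kNames[] = {
          "cmpeqsd",  "cmpltsd",  "cmplesd",  "cmpunordsd",
          "cmpneqsd", "cmpnltsd", "cmpnlesd", "cmpordsd"};
      return imm8 < 8 ? kNames[imm8] : "cmpsd";
    }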
@@ -1367,7 +1414,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
" %s",
tmp_buffer_.start());
return instr_len;
-}
+} // NOLINT (function is too long)
//------------------------------------------------------------------------------
diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc
index 9baf76336..dd44f0ee5 100644
--- a/deps/v8/src/ia32/frames-ia32.cc
+++ b/deps/v8/src/ia32/frames-ia32.cc
@@ -35,16 +35,8 @@ namespace v8 {
namespace internal {
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
- if (fp == 0) return NONE;
- // Compute the stack pointer.
- Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
- // Fill in the state.
- state->fp = fp;
- state->sp = sp;
- state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
- ASSERT(*state->pc_address != NULL);
- return EXIT;
+Address ExitFrame::ComputeStackPointer(Address fp) {
+ return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
}
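
Only the stack-pointer computation is architecture-specific now; the state fill-in removed here presumably moves into the shared frames.cc around this new hook, along these lines (a hedged reconstruction from the deleted code, not the actual shared implementation):

    // Sketch: shared ExitFrame::GetStateForFramePointer calling the
    // per-platform ComputeStackPointer() hook.
    StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp,
                                                        State* state) {
      if (fp == 0) return NONE;
      Address sp = ComputeStackPointer(fp);  // Platform-specific.
      state->fp = fp;
      state->sp = sp;
      state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
      return EXIT;
    }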
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 1e65c4b40..cf53f4b81 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -100,7 +100,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Copy parameters into context if necessary.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
- Slot* slot = scope()->parameter(i)->slot();
+ Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -118,7 +118,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
}
- Variable* arguments = scope()->arguments()->AsVariable();
+ Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
@@ -140,9 +140,8 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
__ CallStub(&stub);
__ mov(ecx, eax); // Duplicate result.
- Move(arguments->slot(), eax, ebx, edx);
- Slot* dot_arguments_slot =
- scope()->arguments_shadow()->AsVariable()->slot();
+ Move(arguments->AsSlot(), eax, ebx, edx);
+ Slot* dot_arguments_slot = scope()->arguments_shadow()->AsSlot();
Move(dot_arguments_slot, ecx, ebx, edx);
}
@@ -162,7 +161,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
{ Comment cmnt(masm_, "[ Stack check");
- Label ok;
+ NearLabel ok;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit();
__ cmp(esp, Operand::StaticVariable(stack_limit));
@@ -239,226 +238,191 @@ FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
}
-void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
+void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
+}
- case Expression::kEffect:
- // Nothing to do.
- break;
- case Expression::kValue:
- // Move value into place.
- switch (location_) {
- case kAccumulator:
- if (!reg.is(result_register())) __ mov(result_register(), reg);
- break;
- case kStack:
- __ push(reg);
- break;
- }
- break;
+void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
+ MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
+ __ mov(result_register(), slot_operand);
+}
- case Expression::kTest:
- // For simplicity we always test the accumulator register.
- if (!reg.is(result_register())) __ mov(result_register(), reg);
- DoTest(true_label_, false_label_, fall_through_);
- break;
- }
+
+void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
+ MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
+ // Memory operands can be pushed directly.
+ __ push(slot_operand);
}
-void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Nothing to do.
- break;
- case Expression::kValue: {
- MemOperand slot_operand = EmitSlotSearch(slot, result_register());
- switch (location_) {
- case kAccumulator:
- __ mov(result_register(), slot_operand);
- break;
- case kStack:
- // Memory operands can be pushed directly.
- __ push(slot_operand);
- break;
- }
- break;
- }
+void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
+ // For simplicity we always test the accumulator register.
+ codegen()->Move(result_register(), slot);
+ codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
- case Expression::kTest:
- // For simplicity we always test the accumulator register.
- Move(result_register(), slot);
- DoTest(true_label_, false_label_, fall_through_);
- break;
- }
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+ UNREACHABLE(); // Not used on IA32.
}
-void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Nothing to do.
- break;
- case Expression::kValue:
- switch (location_) {
- case kAccumulator:
- __ mov(result_register(), lit->handle());
- break;
- case kStack:
- // Immediates can be pushed directly.
- __ push(Immediate(lit->handle()));
- break;
- }
- break;
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ UNREACHABLE(); // Not used on IA32.
+}
- case Expression::kTest:
- // For simplicity we always test the accumulator register.
- __ mov(result_register(), lit->handle());
- DoTest(true_label_, false_label_, fall_through_);
- break;
- }
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ UNREACHABLE(); // Not used on IA32.
}
-void FullCodeGenerator::ApplyTOS(Expression::Context context) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ UNREACHABLE(); // Not used on IA32.
+}
- case Expression::kEffect:
- __ Drop(1);
- break;
- case Expression::kValue:
- switch (location_) {
- case kAccumulator:
- __ pop(result_register());
- break;
- case kStack:
- break;
- }
- break;
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
- case Expression::kTest:
- // For simplicity we always test the accumulator register.
- __ pop(result_register());
- DoTest(true_label_, false_label_, fall_through_);
- break;
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ __ mov(result_register(), lit);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ // Immediates can be pushed directly.
+ __ push(Immediate(lit));
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ __ jmp(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ __ jmp(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ __ jmp(false_label_);
+ } else {
+ __ jmp(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ __ jmp(false_label_);
+ } else {
+ __ jmp(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ mov(result_register(), lit);
+ codegen()->DoTest(true_label_, false_label_, fall_through_);
}
}
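
TestContext::Plug above folds a literal's truth value at compile time using JavaScript's ToBoolean rules: undefined, null, false, the empty string, and the smi 0 branch to the false label; true, objects, non-empty strings, and non-zero smis branch to the true label; anything else defers to the generic DoTest. The decision table as plain C++, using a small stand-in struct instead of V8 handles:

    #include <string>

    // Minimal stand-in for the literal kinds decided statically above.
    struct Literal {
      enum Kind { kUndefined, kNull, kBool, kString, kSmi, kOther } kind;
      bool bool_value = false;
      std::string string_value;
      int smi_value = 0;
    };

    // ECMAScript ToBoolean for those literal kinds; mirrors the jump
    // targets chosen by TestContext::Plug.
    static bool ToBoolean(const Literal& lit) {
      switch (lit.kind) {
        case Literal::kUndefined:
        case Literal::kNull:   return false;
        case Literal::kBool:   return lit.bool_value;
        case Literal::kString: return !lit.string_value.empty();
        case Literal::kSmi:    return lit.smi_value != 0;
        default:               return true;  // Other objects are truthy.
      }
    }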
-void FullCodeGenerator::DropAndApply(int count,
- Expression::Context context,
- Register reg) {
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
ASSERT(count > 0);
- ASSERT(!reg.is(esp));
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
+ __ Drop(count);
+}
- case Expression::kEffect:
- __ Drop(count);
- break;
- case Expression::kValue:
- switch (location_) {
- case kAccumulator:
- __ Drop(count);
- if (!reg.is(result_register())) __ mov(result_register(), reg);
- break;
- case kStack:
- if (count > 1) __ Drop(count - 1);
- __ mov(Operand(esp, 0), reg);
- break;
- }
- break;
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+ __ Move(result_register(), reg);
+}
- case Expression::kTest:
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- if (!reg.is(result_register())) __ mov(result_register(), reg);
- DoTest(true_label_, false_label_, fall_through_);
- break;
- }
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ if (count > 1) __ Drop(count - 1);
+ __ mov(Operand(esp, 0), reg);
}
-void FullCodeGenerator::Apply(Expression::Context context,
- Label* materialize_true,
- Label* materialize_false) {
- switch (context) {
- case Expression::kUninitialized:
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ __ Move(result_register(), reg);
+ codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
- case Expression::kEffect:
- ASSERT_EQ(materialize_true, materialize_false);
- __ bind(materialize_true);
- break;
- case Expression::kValue: {
- Label done;
- switch (location_) {
- case kAccumulator:
- __ bind(materialize_true);
- __ mov(result_register(), Factory::true_value());
- __ jmp(&done);
- __ bind(materialize_false);
- __ mov(result_register(), Factory::false_value());
- break;
- case kStack:
- __ bind(materialize_true);
- __ push(Immediate(Factory::true_value()));
- __ jmp(&done);
- __ bind(materialize_false);
- __ push(Immediate(Factory::false_value()));
- break;
- }
- __ bind(&done);
- break;
- }
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT_EQ(materialize_true, materialize_false);
+ __ bind(materialize_true);
+}
- case Expression::kTest:
- break;
- }
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ NearLabel done;
+ __ bind(materialize_true);
+ __ mov(result_register(), Factory::true_value());
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ mov(result_register(), Factory::false_value());
+ __ bind(&done);
}
-// Convert constant control flow (true or false) to the result expected for
-// a given expression context.
-void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- break;
- case Expression::kValue: {
- Handle<Object> value =
- flag ? Factory::true_value() : Factory::false_value();
- switch (location_) {
- case kAccumulator:
- __ mov(result_register(), value);
- break;
- case kStack:
- __ push(Immediate(value));
- break;
- }
- break;
- }
- case Expression::kTest:
- if (flag) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- }
- break;
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ NearLabel done;
+ __ bind(materialize_true);
+ __ push(Immediate(Factory::true_value()));
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ push(Immediate(Factory::false_value()));
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_false == false_label_);
+ ASSERT(materialize_true == true_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Handle<Object> value =
+ flag ? Factory::true_value() : Factory::false_value();
+ __ mov(result_register(), value);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Handle<Object> value =
+ flag ? Factory::true_value() : Factory::false_value();
+ __ push(Immediate(value));
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ if (flag) {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
}
}
@@ -551,7 +515,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
FunctionLiteral* function) {
Comment cmnt(masm_, "[ Declaration");
ASSERT(variable != NULL); // Must have been resolved.
- Slot* slot = variable->slot();
+ Slot* slot = variable->AsSlot();
Property* prop = variable->AsProperty();
if (slot != NULL) {
switch (slot->type()) {
@@ -561,7 +525,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
__ mov(Operand(ebp, SlotOffset(slot)),
Immediate(Factory::the_hole_value()));
} else if (function != NULL) {
- VisitForValue(function, kAccumulator);
+ VisitForAccumulatorValue(function);
__ mov(Operand(ebp, SlotOffset(slot)), result_register());
}
break;
@@ -583,7 +547,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
Immediate(Factory::the_hole_value()));
// No write barrier since the hole value is in old space.
} else if (function != NULL) {
- VisitForValue(function, kAccumulator);
+ VisitForAccumulatorValue(function);
__ mov(ContextOperand(esi, slot->index()), result_register());
int offset = Context::SlotOffset(slot->index());
__ mov(ebx, esi);
@@ -605,7 +569,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
if (mode == Variable::CONST) {
__ push(Immediate(Factory::the_hole_value()));
} else if (function != NULL) {
- VisitForValue(function, kStack);
+ VisitForStackValue(function);
} else {
__ push(Immediate(Smi::FromInt(0))); // No initial value!
}
@@ -618,23 +582,20 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
if (function != NULL || mode == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
- VisitForValue(prop->obj(), kStack);
+ VisitForStackValue(prop->obj());
if (function != NULL) {
- VisitForValue(prop->key(), kStack);
- VisitForValue(function, kAccumulator);
+ VisitForStackValue(prop->key());
+ VisitForAccumulatorValue(function);
__ pop(ecx);
} else {
- VisitForValue(prop->key(), kAccumulator);
+ VisitForAccumulatorValue(prop->key());
__ mov(ecx, result_register());
__ mov(result_register(), Factory::the_hole_value());
}
__ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Absence of a test eax instruction following the call
- // indicates that none of the load was inlined.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
}
}
@@ -660,7 +621,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
// Keep the switch value on the stack until a case matches.
- VisitForValue(stmt->tag(), kStack);
+ VisitForStackValue(stmt->tag());
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -680,13 +641,13 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
next_test.Unuse();
// Compile the label expression.
- VisitForValue(clause->label(), kAccumulator);
+ VisitForAccumulatorValue(clause->label());
// Perform the comparison as if via '==='.
__ mov(edx, Operand(esp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
if (inline_smi_code) {
- Label slow_case;
+ NearLabel slow_case;
__ mov(ecx, edx);
__ or_(ecx, Operand(eax));
__ test(ecx, Immediate(kSmiTagMask));
@@ -742,14 +703,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the object to enumerate over. Both SpiderMonkey and JSC
// ignore null and undefined in contrast to the specification; see
// ECMA-262 section 12.6.4.
- VisitForValue(stmt->enumerable(), kAccumulator);
+ VisitForAccumulatorValue(stmt->enumerable());
__ cmp(eax, Factory::undefined_value());
__ j(equal, &exit);
__ cmp(eax, Factory::null_value());
__ j(equal, &exit);
// Convert the object to a JS object.
- Label convert, done_convert;
+ NearLabel convert, done_convert;
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &convert);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
@@ -790,7 +751,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(zero, &call_runtime);
// For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
+ NearLabel check_prototype;
__ cmp(ecx, Operand(eax));
__ j(equal, &check_prototype);
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -805,7 +766,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
- Label use_cache;
+ NearLabel use_cache;
__ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
__ jmp(&use_cache);
@@ -817,7 +778,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
- Label fixed_array;
+ NearLabel fixed_array;
__ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::meta_map());
__ j(not_equal, &fixed_array);
@@ -859,7 +820,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check if the expected map still matches that of the enumerable.
// If not, we have to filter the key.
- Label update_each;
+ NearLabel update_each;
__ mov(ecx, Operand(esp, 4 * kPointerSize));
__ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
__ j(equal, &update_each);
@@ -882,7 +843,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
EmitAssignment(stmt->each());
// Generate code for the body of the loop.
- Label stack_limit_hit, stack_check_done;
+ Label stack_limit_hit;
+ NearLabel stack_check_done;
Visit(stmt->body());
__ StackLimitCheck(&stack_limit_hit);
@@ -922,13 +884,13 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info) {
__ push(Immediate(info));
__ CallRuntime(Runtime::kNewClosure, 2);
}
- Apply(context_, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var(), context_);
+ EmitVariableLoad(expr->var());
}
@@ -964,7 +926,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
if (s != NULL && s->is_eval_scope()) {
// Loop up the context chain. There is no frame effect so it is
// safe to use raw labels here.
- Label next, fast;
+ NearLabel next, fast;
if (!context.is(temp)) {
__ mov(temp, context);
}
@@ -991,8 +953,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- __ call(ic, mode);
- __ nop(); // Signal no inlined code.
+ EmitCallIC(ic, mode);
}
@@ -1039,7 +1000,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
__ jmp(done);
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
if (potential_slot != NULL) {
// Generate fast case for locals that rewrite to slots.
@@ -1065,11 +1026,11 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
// variables. Then load the argument from the arguments
// object using keyed load.
__ mov(edx,
- ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+ ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
slow));
__ mov(eax, Immediate(key_literal->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
__ jmp(done);
}
}
@@ -1078,12 +1039,11 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var,
- Expression::Context context) {
+void FullCodeGenerator::EmitVariableLoad(Variable* var) {
// Four cases: non-this global variables, lookup slots, all other
// types of slots, and parameters that rewrite to explicit property
// accesses on the arguments object.
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
Property* property = var->AsProperty();
if (var->is_global() && !var->is_this()) {
@@ -1093,13 +1053,8 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
__ mov(eax, CodeGenerator::GlobalObject());
__ mov(ecx, var->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- // By emitting a nop we make sure that we do not have a test eax
- // instruction after the call it is treated specially by the LoadIC code
- // Remember that the assembler may choose to do peephole optimization
- // (eg, push/pop elimination).
- __ nop();
- Apply(context, eax);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ context()->Plug(eax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
Label done, slow;
@@ -1115,7 +1070,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
__ CallRuntime(Runtime::kLoadContextSlot, 2);
__ bind(&done);
- Apply(context, eax);
+ context()->Plug(eax);
} else if (slot != NULL) {
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
@@ -1124,16 +1079,16 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
if (var->mode() == Variable::CONST) {
// Constants may be the hole value if they have not been initialized.
// Unhole them.
- Label done;
+ NearLabel done;
MemOperand slot_operand = EmitSlotSearch(slot, eax);
__ mov(eax, slot_operand);
__ cmp(eax, Factory::the_hole_value());
__ j(not_equal, &done);
__ mov(eax, Factory::undefined_value());
__ bind(&done);
- Apply(context, eax);
+ context()->Plug(eax);
} else {
- Apply(context, slot);
+ context()->Plug(slot);
}
} else {
@@ -1144,7 +1099,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->slot();
+ Slot* object_slot = object_var->AsSlot();
ASSERT_NOT_NULL(object_slot);
// Load the object.
@@ -1161,19 +1116,17 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Notice: We must not have a "test eax, ..." instruction after the
- // call. It is treated specially by the LoadIC code.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
+
// Drop key and object left on the stack by IC.
- Apply(context, eax);
+ context()->Plug(eax);
}
}
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
+ NearLabel materialized;
// Registers will be used as follows:
// edi = JS function.
// ecx = literals array.
@@ -1221,7 +1174,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ mov(edx, FieldOperand(ebx, size - kPointerSize));
__ mov(FieldOperand(eax, size - kPointerSize), edx);
}
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -1258,29 +1211,28 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Fall through.
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
- VisitForValue(value, kAccumulator);
+ VisitForAccumulatorValue(value);
__ mov(ecx, Immediate(key->handle()));
__ mov(edx, Operand(esp, 0));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
// Fall through.
case ObjectLiteral::Property::PROTOTYPE:
__ push(Operand(esp, 0)); // Duplicate receiver.
- VisitForValue(key, kStack);
- VisitForValue(value, kStack);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
__ CallRuntime(Runtime::kSetProperty, 3);
break;
case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
__ push(Operand(esp, 0)); // Duplicate receiver.
- VisitForValue(key, kStack);
+ VisitForStackValue(key);
__ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
Smi::FromInt(1) :
Smi::FromInt(0)));
- VisitForValue(value, kStack);
+ VisitForStackValue(value);
__ CallRuntime(Runtime::kDefineAccessor, 4);
break;
default: UNREACHABLE();
@@ -1288,9 +1240,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
if (result_saved) {
- ApplyTOS(context_);
+ context()->PlugTOS();
} else {
- Apply(context_, eax);
+ context()->Plug(eax);
}
}
@@ -1337,7 +1289,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ push(eax);
result_saved = true;
}
- VisitForValue(subexpr, kAccumulator);
+ VisitForAccumulatorValue(subexpr);
// Store the subexpression value in the array's elements.
__ mov(ebx, Operand(esp, 0)); // Copy of array literal.
@@ -1350,9 +1302,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
- ApplyTOS(context_);
+ context()->PlugTOS();
} else {
- Apply(context_, eax);
+ context()->Plug(eax);
}
}
@@ -1385,39 +1337,38 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
- VisitForValue(property->obj(), kAccumulator);
+ VisitForAccumulatorValue(property->obj());
__ push(result_register());
} else {
- VisitForValue(property->obj(), kStack);
+ VisitForStackValue(property->obj());
}
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
- VisitForValue(property->obj(), kStack);
- VisitForValue(property->key(), kAccumulator);
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
} else {
- VisitForValue(property->obj(), kStack);
- VisitForValue(property->key(), kStack);
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
}
break;
}
if (expr->is_compound()) {
- Location saved_location = location_;
- location_ = kAccumulator;
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
- Expression::kValue);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- break;
+ { AccumulatorValueContext context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ break;
+ }
}
Token::Value op = expr->binary_op();
@@ -1427,28 +1378,26 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
ASSERT(constant == kRightConstant || constant == kNoConstants);
if (constant == kNoConstants) {
__ push(eax); // Left operand goes on the stack.
- VisitForValue(expr->value(), kAccumulator);
+ VisitForAccumulatorValue(expr->value());
}
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
? OVERWRITE_RIGHT
: NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
+ AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr,
op,
- Expression::kValue,
mode,
expr->target(),
expr->value(),
constant);
} else {
- EmitBinaryOp(op, Expression::kValue, mode);
+ EmitBinaryOp(op, mode);
}
- location_ = saved_location;
-
} else {
- VisitForValue(expr->value(), kAccumulator);
+ VisitForAccumulatorValue(expr->value());
}
// Record source position before possible IC call.
@@ -1458,8 +1407,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op(),
- context_);
+ expr->op());
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
@@ -1476,25 +1424,23 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
- Expression::Context context,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
- Label call_stub, done;
+ NearLabel call_stub;
+ Label done;
__ add(Operand(eax), Immediate(value));
__ j(overflow, &call_stub);
__ test(eax, Immediate(kSmiTagMask));
@@ -1514,12 +1460,11 @@ void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
}
__ CallStub(&stub);
__ bind(&done);
- Apply(context, eax);
+ context()->Plug(eax);
}
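The Label to NearLabel change here and in later hunks is a code-size tweak: a NearLabel promises that every jump to it lands within a signed 8-bit displacement, letting the assembler emit the 2-byte short form of a conditional jump instead of the 6-byte form. The byte counts below are x86 encoding facts; the NearLabel contract itself is inferred from its use in this diff:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // x86 conditional jumps have two encodings:
      //   short: opcode 0x7x + disp8          (2 bytes, reach -128..+127)
      //   long:  0x0F, opcode 0x8x + disp32   (6 bytes, 32-bit reach)
      const uint8_t short_jo[] = {0x70, 0x05};              // jo +5
      const uint8_t long_jo[]  = {0x0F, 0x80, 5, 0, 0, 0};  // jo +5
      std::printf("short: %zu bytes, long: %zu bytes\n",
                  sizeof(short_jo), sizeof(long_jo));
      return 0;
    }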
void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
- Expression::Context context,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
@@ -1551,13 +1496,12 @@ void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
__ CallStub(&stub);
__ bind(&done);
- Apply(context, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
Token::Value op,
- Expression::Context context,
OverwriteMode mode,
Smi* value) {
Label call_stub, smi_case, done;
@@ -1614,13 +1558,12 @@ void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
}
__ bind(&done);
- Apply(context, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
Token::Value op,
- Expression::Context context,
OverwriteMode mode,
Smi* value) {
Label smi_case, done;
@@ -1651,13 +1594,12 @@ void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
}
__ bind(&done);
- Apply(context, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
Token::Value op,
- Expression::Context context,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
@@ -1665,19 +1607,19 @@ void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND:
- EmitConstantSmiBitOp(expr, op, context, mode, value);
+ EmitConstantSmiBitOp(expr, op, mode, value);
break;
case Token::SHL:
case Token::SAR:
case Token::SHR:
ASSERT(!left_is_constant_smi);
- EmitConstantSmiShiftOp(expr, op, context, mode, value);
+ EmitConstantSmiShiftOp(expr, op, mode, value);
break;
case Token::ADD:
- EmitConstantSmiAdd(expr, context, mode, left_is_constant_smi, value);
+ EmitConstantSmiAdd(expr, mode, left_is_constant_smi, value);
break;
case Token::SUB:
- EmitConstantSmiSub(expr, context, mode, left_is_constant_smi, value);
+ EmitConstantSmiSub(expr, mode, left_is_constant_smi, value);
break;
default:
UNREACHABLE();
@@ -1687,18 +1629,17 @@ void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op,
- Expression::Context context,
OverwriteMode mode,
Expression* left,
Expression* right,
ConstantOperand constant) {
if (constant == kRightConstant) {
Smi* value = Smi::cast(*right->AsLiteral()->handle());
- EmitConstantSmiBinaryOp(expr, op, context, mode, false, value);
+ EmitConstantSmiBinaryOp(expr, op, mode, false, value);
return;
} else if (constant == kLeftConstant) {
Smi* value = Smi::cast(*left->AsLiteral()->handle());
- EmitConstantSmiBinaryOp(expr, op, context, mode, true, value);
+ EmitConstantSmiBinaryOp(expr, op, mode, true, value);
return;
}
@@ -1792,12 +1733,11 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
}
__ bind(&done);
- Apply(context, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- Expression::Context context,
OverwriteMode mode) {
TypeInfo type = TypeInfo::Unknown();
GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS, type);
@@ -1808,7 +1748,7 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
__ push(result_register());
__ CallStub(&stub);
}
- Apply(context, eax);
+ context()->Plug(eax);
}
@@ -1834,30 +1774,29 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
- EmitVariableAssignment(var, Token::ASSIGN, Expression::kEffect);
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN);
break;
}
case NAMED_PROPERTY: {
__ push(eax); // Preserve value.
- VisitForValue(prop->obj(), kAccumulator);
+ VisitForAccumulatorValue(prop->obj());
__ mov(edx, eax);
__ pop(eax); // Restore value.
__ mov(ecx, prop->key()->AsLiteral()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop(); // Signal no inlined code.
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
case KEYED_PROPERTY: {
__ push(eax); // Preserve value.
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ mov(ecx, eax);
__ pop(edx);
__ pop(eax); // Restore value.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop(); // Signal no inlined code.
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
}
@@ -1865,12 +1804,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op,
- Expression::Context context) {
+ Token::Value op) {
// Left-hand sides that rewrite to explicit property accesses do not reach
// here.
ASSERT(var != NULL);
- ASSERT(var->is_global() || var->slot() != NULL);
+ ASSERT(var->is_global() || var->AsSlot() != NULL);
if (var->is_global()) {
ASSERT(!var->is_this());
@@ -1880,14 +1818,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(ecx, var->name());
__ mov(edx, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization
// of const variables. Const assignments are simply skipped.
Label done;
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
@@ -1936,7 +1873,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ bind(&done);
}
- Apply(context, eax);
+ context()->Plug(eax);
}
@@ -1965,8 +1902,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ pop(edx);
}
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1974,9 +1910,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ push(Operand(esp, kPointerSize)); // Receiver is under value.
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(eax);
- DropAndApply(1, context_, eax);
+ context()->DropAndPlug(1, eax);
} else {
- Apply(context_, eax);
+ context()->Plug(eax);
}
}
@@ -2004,10 +1940,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2018,7 +1951,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(eax);
}
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -2027,16 +1960,15 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Expression* key = expr->key();
if (key->IsPropertyName()) {
- VisitForValue(expr->obj(), kAccumulator);
+ VisitForAccumulatorValue(expr->obj());
EmitNamedPropertyLoad(expr);
- Apply(context_, eax);
} else {
- VisitForValue(expr->obj(), kStack);
- VisitForValue(expr->key(), kAccumulator);
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
__ pop(edx);
EmitKeyedPropertyLoad(expr);
- Apply(context_, eax);
}
+ context()->Plug(eax);
}
@@ -2047,17 +1979,17 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
__ Set(ecx, Immediate(name));
// Record source position of the IC call.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
- __ call(ic, mode);
+ EmitCallIC(ic, mode);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -2068,19 +2000,19 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
- VisitForValue(key, kAccumulator);
+ VisitForAccumulatorValue(key);
__ mov(ecx, eax);
// Record source position of the IC call.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(
arg_count, in_loop);
- __ call(ic, mode);
+ EmitCallIC(ic, mode);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -2089,7 +2021,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
// Record source position for debugger.
SetSourcePosition(expr->position());
@@ -2098,7 +2030,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
__ CallStub(&stub);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- DropAndApply(1, context_, eax);
+ context()->DropAndPlug(1, eax);
}
@@ -2112,14 +2044,14 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
- VisitForValue(fun, kStack);
+ VisitForStackValue(fun);
__ push(Immediate(Factory::undefined_value())); // Reserved receiver slot.
// Push the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
// Push copy of the function - found below the arguments.
@@ -2148,19 +2080,19 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ CallStub(&stub);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- DropAndApply(1, context_, eax);
+ context()->DropAndPlug(1, eax);
} else if (var != NULL && !var->is_this() && var->is_global()) {
// Push global object as receiver for the call IC.
__ push(CodeGenerator::GlobalObject());
EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
+ } else if (var != NULL && var->AsSlot() != NULL &&
+ var->AsSlot()->type() == Slot::LOOKUP) {
// Call to a lookup slot (dynamically introduced variable).
Label slow, done;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLoadFromSlotFastCase(var->slot(),
+ EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
NOT_INSIDE_TYPEOF,
&slow,
&done);
@@ -2196,25 +2128,21 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsSymbol()) {
// Call to a named property, use call IC.
- VisitForValue(prop->obj(), kStack);
+ VisitForStackValue(prop->obj());
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed CallIC.
- VisitForValue(prop->obj(), kStack);
+ // for a regular property use a keyed call IC (via EmitCallIC).
+ VisitForStackValue(prop->obj());
if (prop->is_synthetic()) {
- VisitForValue(prop->key(), kAccumulator);
+ VisitForAccumulatorValue(prop->key());
// Record source code position for IC call.
SetSourcePosition(prop->position());
__ pop(edx); // We do not need to keep the receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // By emitting a nop we make sure that we do not have a "test eax,..."
- // instruction after the call as it is treated specially
- // by the LoadIC code.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Push result (function).
__ push(eax);
// Push Global receiver.
@@ -2235,7 +2163,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
loop_depth() == 0) {
lit->set_try_full_codegen(true);
}
- VisitForValue(fun, kStack);
+ VisitForStackValue(fun);
// Load global receiver object.
__ mov(ebx, CodeGenerator::GlobalObject());
__ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
@@ -2254,13 +2182,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- VisitForValue(expr->expression(), kStack);
+ VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
// Call the construct call builtin that handles allocation and
@@ -2273,59 +2201,59 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
__ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
- Apply(context_, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
Split(zero, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask | 0x80000000));
Split(zero, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
@@ -2342,42 +2270,42 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
Split(below_equal, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(equal, if_false);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
Split(above_equal, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
@@ -2386,7 +2314,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
__ test(ebx, Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -2394,83 +2322,83 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
// used in a few functions in runtime.js which should not normally be hit by
// this compiler.
__ jmp(if_false);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
Split(equal, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(equal, if_false);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
Split(equal, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(equal, if_false);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
Split(equal, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -2482,8 +2410,8 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Get the frame pointer for the calling frame.
__ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2501,7 +2429,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
Split(equal, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -2509,21 +2437,21 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kAccumulator);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ pop(ebx);
__ cmp(eax, Operand(ebx));
Split(equal, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -2532,12 +2460,12 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
// ArgumentsAccessStub expects the key in edx and the formal
// parameter count in eax.
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -2560,7 +2488,7 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
__ bind(&exit);
if (FLAG_debug_code) __ AbortIfNotSmi(eax);
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -2568,7 +2496,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Label done, null, function, non_function_constructor;
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
// If the object is a smi, we return null.
__ test(eax, Immediate(kSmiTagMask));
@@ -2615,7 +2543,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// All done.
__ bind(&done);
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -2630,14 +2558,14 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
}
#endif
// Finally, we're expected to leave a value on the top of the stack.
__ mov(eax, Factory::undefined_value());
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -2685,7 +2613,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
}
__ mov(eax, edi);
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -2693,11 +2621,11 @@ void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
ASSERT(args->length() == 3);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
__ CallStub(&stub);
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -2705,21 +2633,21 @@ void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub;
ASSERT(args->length() == 4);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
- VisitForValue(args->at(3), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ VisitForStackValue(args->at(3));
__ CallStub(&stub);
- Apply(context_, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator); // Load the object.
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label done;
+ NearLabel done;
// If the object is a smi return the object.
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &done);
@@ -2729,28 +2657,28 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
__ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
__ bind(&done);
- Apply(context_, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
// Load the arguments on the stack and call the runtime function.
ASSERT(args->length() == 2);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
__ CallRuntime(Runtime::kMath_pow, 2);
- Apply(context_, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
- VisitForValue(args->at(0), kStack); // Load the object.
- VisitForValue(args->at(1), kAccumulator); // Load the value.
+ VisitForStackValue(args->at(0)); // Load the object.
+ VisitForAccumulatorValue(args->at(1)); // Load the value.
__ pop(ebx); // eax = value. ebx = object.
- Label done;
+ NearLabel done;
// If the object is a smi, return the value.
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &done);
@@ -2767,7 +2695,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx);
__ bind(&done);
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -2775,18 +2703,18 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
// Load the argument on the stack and call the stub.
- VisitForValue(args->at(0), kStack);
+ VisitForStackValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
- Apply(context_, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label done;
StringCharFromCodeGenerator generator(eax, ebx);
@@ -2797,15 +2725,15 @@ void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, ebx);
+ context()->Plug(ebx);
}
void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kAccumulator);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
Register object = ebx;
Register index = eax;
@@ -2844,15 +2772,15 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, result);
+ context()->Plug(result);
}
void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kAccumulator);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
Register object = ebx;
Register index = eax;
@@ -2893,31 +2821,31 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, result);
+ context()->Plug(result);
}
void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
StringAddStub stub(NO_STRING_ADD_FLAGS);
__ CallStub(&stub);
- Apply(context_, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
StringCompareStub stub;
__ CallStub(&stub);
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -2925,9 +2853,9 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::SIN);
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
+ VisitForStackValue(args->at(0));
__ CallStub(&stub);
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -2935,18 +2863,18 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::COS);
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
+ VisitForStackValue(args->at(0));
__ CallStub(&stub);
- Apply(context_, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the runtime function.
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
+ VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -2954,38 +2882,38 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() >= 2);
int arg_count = args->length() - 2; // For receiver and function.
- VisitForValue(args->at(0), kStack); // Receiver.
+ VisitForStackValue(args->at(0)); // Receiver.
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i + 1), kStack);
+ VisitForStackValue(args->at(i + 1));
}
- VisitForValue(args->at(arg_count + 1), kAccumulator); // Function.
+ VisitForAccumulatorValue(args->at(arg_count + 1)); // Function.
// InvokeFunction requires function in edi. Move it in there.
if (!result_register().is(edi)) __ mov(edi, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(edi, count, CALL_FUNCTION);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- Apply(context_, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
ASSERT(args->length() == 3);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kRegExpConstructResult, 3);
- Apply(context_, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
ASSERT(args->length() == 3);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kSwapElements, 3);
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -3000,11 +2928,11 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ mov(eax, Factory::undefined_value());
- Apply(context_, eax);
+ context()->Plug(eax);
return;
}
- VisitForValue(args->at(1), kAccumulator);
+ VisitForAccumulatorValue(args->at(1));
Register key = eax;
Register cache = ebx;
@@ -3033,7 +2961,7 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
__ CallRuntime(Runtime::kGetFromCache, 2);
__ bind(&done);
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -3044,8 +2972,8 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
Register left = ebx;
Register tmp = ecx;
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kAccumulator);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
__ pop(left);
Label done, fail, ok;
@@ -3070,14 +2998,14 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
__ mov(eax, Immediate(Factory::true_value()));
__ bind(&done);
- Apply(context_, eax);
+ context()->Plug(eax);
}
void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
if (FLAG_debug_code) {
__ AbortIfNotString(eax);
@@ -3087,21 +3015,21 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(FieldOperand(eax, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
Split(zero, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
if (FLAG_debug_code) {
__ AbortIfNotString(eax);
@@ -3110,7 +3038,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
__ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
__ IndexFromHash(eax, eax);
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -3134,7 +3062,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
if (expr->is_jsruntime()) {
@@ -3142,14 +3070,14 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ Set(ecx, Immediate(expr->name()));
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
- __ call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
} else {
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
}
- Apply(context_, eax);
+ context()->Plug(eax);
}
@@ -3163,20 +3091,20 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Result of deleting non-property, non-variable reference is true.
// The subexpression may have side effects.
VisitForEffect(expr->expression());
- Apply(context_, true);
+ context()->Plug(true);
} else if (var != NULL &&
!var->is_global() &&
- var->slot() != NULL &&
- var->slot()->type() != Slot::LOOKUP) {
+ var->AsSlot() != NULL &&
+ var->AsSlot()->type() != Slot::LOOKUP) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
- Apply(context_, false);
+ context()->Plug(false);
} else {
// Property or variable reference. Call the delete builtin with
// object and property name as arguments.
if (prop != NULL) {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ VisitForStackValue(prop->obj());
+ VisitForStackValue(prop->key());
} else if (var->is_global()) {
__ push(CodeGenerator::GlobalObject());
__ push(Immediate(var->name()));
@@ -3190,7 +3118,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ push(Immediate(var->name()));
}
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- Apply(context_, eax);
+ context()->Plug(eax);
}
break;
}
@@ -3198,26 +3126,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
VisitForEffect(expr->expression());
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- break;
- case Expression::kValue:
- switch (location_) {
- case kAccumulator:
- __ mov(result_register(), Factory::undefined_value());
- break;
- case kStack:
- __ push(Immediate(Factory::undefined_value()));
- break;
- }
- break;
- case Expression::kTest:
- __ jmp(false_label_);
- break;
- }
+ context()->Plug(Factory::undefined_value());
break;
}
@@ -3229,31 +3138,33 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Label* if_false = NULL;
Label* fall_through = NULL;
// Notice that the labels are swapped.
- PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
VisitForControl(expr->expression(), if_true, if_false, fall_through);
- Apply(context_, if_false, if_true); // Labels swapped.
+ context()->Plug(if_false, if_true); // Labels swapped.
break;
}
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- VisitForTypeofValue(expr->expression(), kStack);
+ { StackValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
__ CallRuntime(Runtime::kTypeof, 1);
- Apply(context_, eax);
+ context()->Plug(eax);
break;
}
case Token::ADD: {
Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForValue(expr->expression(), kAccumulator);
+ VisitForAccumulatorValue(expr->expression());
Label no_conversion;
__ test(result_register(), Immediate(kSmiTagMask));
__ j(zero, &no_conversion);
__ push(result_register());
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
__ bind(&no_conversion);
- Apply(context_, result_register());
+ context()->Plug(result_register());
break;
}
@@ -3265,9 +3176,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register eax.
- VisitForValue(expr->expression(), kAccumulator);
+ VisitForAccumulatorValue(expr->expression());
__ CallStub(&stub);
- Apply(context_, eax);
+ context()->Plug(eax);
break;
}
@@ -3275,11 +3186,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
// The generic unary operation stub expects the argument to be
// in the accumulator register eax.
- VisitForValue(expr->expression(), kAccumulator);
+ VisitForAccumulatorValue(expr->expression());
Label done;
bool inline_smi_case = ShouldInlineSmiCase(expr->op());
if (inline_smi_case) {
- Label call_stub;
+ NearLabel call_stub;
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &call_stub);
__ lea(eax, Operand(eax, kSmiTagMask));
@@ -3296,7 +3207,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
__ CallStub(&stub);
__ bind(&done);
- Apply(context_, eax);
+ context()->Plug(eax);
break;
}
@@ -3332,24 +3243,21 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- Location saved_location = location_;
- location_ = kAccumulator;
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
- Expression::kValue);
- location_ = saved_location;
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
} else {
// Reserve space for result of postfix operation.
- if (expr->is_postfix() && context_ != Expression::kEffect) {
+ if (expr->is_postfix() && !context()->IsEffect()) {
__ push(Immediate(Smi::FromInt(0)));
}
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the accumulator.
- VisitForValue(prop->obj(), kAccumulator);
+ VisitForAccumulatorValue(prop->obj());
__ push(eax);
EmitNamedPropertyLoad(prop);
} else {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
EmitKeyedPropertyLoad(prop);
@@ -3357,7 +3265,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Call ToNumber only if operand is not a smi.
- Label no_conversion;
+ NearLabel no_conversion;
if (ShouldInlineSmiCase(expr->op())) {
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &no_conversion);
@@ -3368,34 +3276,27 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Save result for postfix expressions.
if (expr->is_postfix()) {
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Do not save result.
- break;
- case Expression::kValue:
- case Expression::kTest:
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(eax);
- break;
- case NAMED_PROPERTY:
- __ mov(Operand(esp, kPointerSize), eax);
- break;
- case KEYED_PROPERTY:
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- break;
- }
- break;
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(eax);
+ break;
+ case NAMED_PROPERTY:
+ __ mov(Operand(esp, kPointerSize), eax);
+ break;
+ case KEYED_PROPERTY:
+ __ mov(Operand(esp, 2 * kPointerSize), eax);
+ break;
+ }
}
}
// Inline smi case if we are in a loop.
- Label stub_call, done;
+ NearLabel stub_call;
+ Label done;
if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
__ add(Operand(eax), Immediate(Smi::FromInt(1)));
@@ -3428,35 +3329,32 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case VARIABLE:
if (expr->is_postfix()) {
// Perform the assignment as if via '='.
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN,
- Expression::kEffect);
- // For all contexts except kEffect: We have the result on
+ { EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ }
+ // For all contexts except EffectContext, we have the result on
// top of the stack.
- if (context_ != Expression::kEffect) {
- ApplyTOS(context_);
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
}
} else {
// Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN,
- context_);
+ Token::ASSIGN);
}
break;
case NAMED_PROPERTY: {
__ mov(ecx, prop->key()->AsLiteral()->handle());
__ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
if (expr->is_postfix()) {
- if (context_ != Expression::kEffect) {
- ApplyTOS(context_);
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
}
} else {
- Apply(context_, eax);
+ context()->Plug(eax);
}
break;
}
@@ -3464,17 +3362,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(ecx);
__ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
if (expr->is_postfix()) {
// Result is on the stack.
- if (context_ != Expression::kEffect) {
- ApplyTOS(context_);
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
}
} else {
- Apply(context_, eax);
+ context()->Plug(eax);
}
break;
}
@@ -3482,8 +3377,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
+ ASSERT(!context()->IsEffect());
+ ASSERT(!context()->IsTest());
+
if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
Comment cmnt(masm_, "Global variable");
__ mov(eax, CodeGenerator::GlobalObject());
@@ -3491,17 +3389,16 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop(); // Signal no inlined code.
- if (where == kStack) __ push(eax);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ context()->Plug(eax);
} else if (proxy != NULL &&
- proxy->var()->slot() != NULL &&
- proxy->var()->slot()->type() == Slot::LOOKUP) {
+ proxy->var()->AsSlot() != NULL &&
+ proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- Slot* slot = proxy->var()->slot();
+ Slot* slot = proxy->var()->AsSlot();
EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
@@ -3510,10 +3407,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
__ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
__ bind(&done);
- if (where == kStack) __ push(eax);
+ context()->Plug(eax);
} else {
// This expression cannot throw a reference error at the top level.
- VisitForValue(expr, where);
+ Visit(expr);
}
}
@@ -3535,7 +3432,10 @@ bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
Handle<String> check = Handle<String>::cast(right_literal_value);
- VisitForTypeofValue(left_unary->expression(), kAccumulator);
+ { AccumulatorValueContext context(this);
+ VisitForTypeofValue(left_unary->expression());
+ }
+
if (check->Equals(Heap::number_symbol())) {
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_true);
@@ -3612,8 +3512,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -3621,21 +3521,21 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Expression* left = expr->left();
Expression* right = expr->right();
if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
return;
}
- VisitForValue(expr->left(), kStack);
+ VisitForStackValue(expr->left());
switch (expr->op()) {
case Token::IN:
- VisitForValue(expr->right(), kStack);
+ VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
__ cmp(eax, Factory::true_value());
Split(equal, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
- VisitForValue(expr->right(), kStack);
+ VisitForStackValue(expr->right());
InstanceofStub stub;
__ CallStub(&stub);
__ test(eax, Operand(eax));
@@ -3645,7 +3545,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
default: {
- VisitForValue(expr->right(), kAccumulator);
+ VisitForAccumulatorValue(expr->right());
Condition cc = no_condition;
bool strict = false;
switch (op) {
@@ -3684,7 +3584,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
bool inline_smi_code = ShouldInlineSmiCase(op);
if (inline_smi_code) {
- Label slow_case;
+ NearLabel slow_case;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
__ test(ecx, Immediate(kSmiTagMask));
@@ -3706,7 +3606,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Convert the result of the comparison into one expected for this
// expression's context.
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -3715,10 +3615,10 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
- VisitForValue(expr->expression(), kAccumulator);
+ VisitForAccumulatorValue(expr->expression());
__ cmp(eax, Factory::null_value());
if (expr->is_strict()) {
Split(equal, if_true, if_false, fall_through);
@@ -3734,20 +3634,46 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
__ test(edx, Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
}
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- Apply(context_, eax);
+ context()->Plug(eax);
+}
+
+
+Register FullCodeGenerator::result_register() {
+ return eax;
}
-Register FullCodeGenerator::result_register() { return eax; }
+Register FullCodeGenerator::context_register() {
+ return esi;
+}
-Register FullCodeGenerator::context_register() { return esi; }
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+ ASSERT(mode == RelocInfo::CODE_TARGET ||
+ mode == RelocInfo::CODE_TARGET_CONTEXT);
+ __ call(ic, mode);
+
+ // If we're calling a (keyed) load or store stub, we have to mark
+ // the call as containing no inlined code so we will not attempt to
+ // patch it.
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ case Code::KEYED_LOAD_IC:
+ case Code::STORE_IC:
+ case Code::KEYED_STORE_IC:
+ __ nop(); // Signals no inlined code.
+ break;
+ default:
+ // Do nothing.
+ break;
+ }
+}
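EmitCallIC centralizes a convention that every call site previously had to remember by hand: the IC patching machinery inspects the instruction immediately after a load/store IC call to decide whether fast-path code was inlined there, so calls with no inlined code must be followed by a one-byte nop. A hedged sketch of the consumer side of that protocol (0xA9 is the x86 encoding of test eax, imm32; the helper name is illustrative):

    #include <cstdint>

    const uint8_t kTestEaxByte = 0xA9;  // first byte of "test eax, imm32"

    // The patcher only proceeds when the byte after the IC call site is
    // the test-eax marker; the nop emitted by EmitCallIC above makes it
    // bail out instead.
    bool HasInlinedCodeToPatch(const uint8_t* byte_after_call) {
      return *byte_after_call == kTestEaxByte;
    }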
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 3d0bd796a..a2990a20e 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -692,7 +692,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- esp[0] : return address
// -----------------------------------
Label miss;
- Label index_out_of_range;
Register receiver = edx;
Register index = eax;
@@ -707,7 +706,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
result,
&miss, // When not a string.
&miss, // When not a number.
- &index_out_of_range,
+ &miss, // When index out of range.
STRING_INDEX_IS_ARRAY_INDEX);
char_at_generator.GenerateFast(masm);
__ ret(0);
@@ -715,10 +714,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
ICRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
- __ bind(&index_out_of_range);
- __ Set(eax, Immediate(Factory::undefined_value()));
- __ ret(0);
-
__ bind(&miss);
GenerateMiss(masm);
}
@@ -890,8 +885,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
- // Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
+ // Check that the key is an array index, i.e. a non-negative smi.
+ __ test(eax, Immediate(kSmiTagMask | kSmiSignMask));
__ j(not_zero, &slow, not_taken);
// Get the map of the receiver.
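The widened mask works because an ia32 smi is a 31-bit payload shifted left by one with tag bit 0 clear, so folding the sign bit into the same test instruction rejects negative keys for free. A self-contained model of the check, with constants mirroring that encoding:

    #include <cstdint>

    const uint32_t kSmiTagMask  = 1u;           // tag bit 0 must be clear
    const uint32_t kSmiSignMask = 0x80000000u;  // sign bit of the payload

    // Mirrors: test eax, kSmiTagMask | kSmiSignMask; j not_zero, slow.
    bool IsNonNegativeSmi(uint32_t raw) {
      return (raw & (kSmiTagMask | kSmiSignMask)) == 0;
    }
    // IsNonNegativeSmi(7u << 1) is true; negative or non-smi values fail.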
@@ -1666,6 +1661,38 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
}
+// One-byte opcode for mov ecx, 0xXXXXXXXX.
+static const byte kMovEcxByte = 0xB9;
+
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+ Object* map,
+ Object* cell) {
+ // The address of the instruction following the call.
+ Address mov_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+ // If the instruction following the call is not a mov ecx, nothing
+ // was inlined.
+ if (*mov_instruction_address != kMovEcxByte) return false;
+
+ Address delta_address = mov_instruction_address + 1;
+ // The delta to the start of the map check instruction.
+ int delta = *reinterpret_cast<int*>(delta_address);
+
+ // The map address is the last 4 bytes of the 7-byte
+ // operand-immediate compare instruction, so we add 3 to get the
+ // offset to the last 4 bytes.
+ Address map_address = mov_instruction_address + delta + 3;
+ *(reinterpret_cast<Object**>(map_address)) = map;
+
+ // The cell is in the last 4 bytes of a 5-byte mov reg, imm32
+ // instruction, so we add 1 to get the offset to the last 4 bytes.
+ Address offset_address =
+ mov_instruction_address + delta + kOffsetToLoadInstruction + 1;
+ *reinterpret_cast<Object**>(offset_address) = cell;
+ return true;
+}
+
+
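PatchInlinedContextualLoad rewrites two 32-bit immediates inside already-emitted machine code, and the +3/+1 arithmetic just skips opcode and ModRM bytes to land on the immediate, which occupies the last four bytes of each instruction. The same pointer arithmetic over a plain byte buffer, as a self-contained illustration:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Overwrite the trailing 32-bit immediate of an instruction whose
    // immediate occupies its last four bytes.
    void PatchTrailingImm32(uint8_t* instr, size_t instr_len,
                            uint32_t value) {
      std::memcpy(instr + instr_len - 4, &value, sizeof(value));
    }
    // For the 5-byte "mov ecx, imm32" this writes at offset 1; for the
    // 7-byte operand-immediate cmp it writes at offset 3.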
bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
// The address of the instruction following the call.
Address test_instruction_address =
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 87e25d73d..a62f74b74 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -1361,6 +1361,13 @@ void MacroAssembler::Drop(int stack_elements) {
}
+void MacroAssembler::Move(Register dst, Register src) {
+ if (!dst.is(src)) {
+ mov(dst, src);
+ }
+}
+
+
void MacroAssembler::Move(Register dst, Handle<Object> value) {
mov(dst, value);
}
@@ -1553,6 +1560,17 @@ void MacroAssembler::ConvertToInt32(Register dst,
}
+void MacroAssembler::LoadPowerOf2(XMMRegister dst,
+ Register scratch,
+ int power) {
+ ASSERT(is_uintn(power + HeapNumber::kExponentBias,
+ HeapNumber::kExponentBits));
+ mov(scratch, Immediate(power + HeapNumber::kExponentBias));
+ movd(dst, Operand(scratch));
+ psllq(dst, HeapNumber::kMantissaBits);
+}
+
+
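LoadPowerOf2 builds 2^power without a constant-pool load: a power of two has an all-zero mantissa, so its IEEE-754 bit pattern is just the biased exponent shifted into the top bits, which is exactly what the movd + psllq pair produces (for doubles the bias is 1023 and the mantissa is 52 bits wide). The same computation in scalar C++:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Same bit pattern LoadPowerOf2 materializes in an XMM register.
    double PowerOf2(int power) {
      assert(power >= -1022 && power <= 1023);  // normal exponent range
      uint64_t bits = static_cast<uint64_t>(power + 1023) << 52;
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result;
    }
    // PowerOf2(10) == 1024.0, PowerOf2(-1) == 0.5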
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index a7534cbd4..fe3267c94 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -258,6 +258,8 @@ class MacroAssembler: public Assembler {
TypeInfo info,
Label* on_not_int32);
+ void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
+
// Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object);
@@ -503,6 +505,9 @@ class MacroAssembler: public Assembler {
void Call(Label* target) { call(target); }
+ // Move if the registers are not identical.
+ void Move(Register target, Register source);
+
void Move(Register target, Handle<Object> value);
Handle<Object> CodeObject() { return code_object_; }
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 828e71a8f..bb0a46cd8 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -265,7 +265,11 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype) {
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  // Check that we're still in the same context.
+ __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
+ Top::global());
+ __ j(not_equal, miss);
// Get the global function with the given index.
JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
@@ -1626,7 +1630,8 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
- eax);
+ eax,
+ &miss);
ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, edi, name, &miss);
@@ -1695,7 +1700,8 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
- eax);
+ eax,
+ &miss);
ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, edi, name, &miss);
@@ -1813,6 +1819,234 @@ Object* CallStubCompiler::CompileStringFromCharCodeCall(
}
+Object* CallStubCompiler::CompileMathFloorCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ if (!CpuFeatures::IsSupported(SSE2)) return Heap::undefined_value();
+ CpuFeatures::Scope use_sse2(SSE2);
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into eax.
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+ // Check if the argument is a smi.
+ Label smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &smi);
+
+ // Check if the argument is a heap number and load its value into xmm0.
+ Label slow;
+ __ CheckMap(eax, Factory::heap_number_map(), &slow, true);
+ __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+
+ // Check if the argument is strictly positive. Note this also
+ // discards NaN.
+ __ xorpd(xmm1, xmm1);
+ __ ucomisd(xmm0, xmm1);
+ __ j(below_equal, &slow);
+
+ // Do a truncating conversion.
+ __ cvttsd2si(eax, Operand(xmm0));
+
+ // Check if the result fits into a smi. Note this also checks for
+ // 0x80000000 which signals a failed conversion.
+ Label wont_fit_into_smi;
+ __ test(eax, Immediate(0xc0000000));
+ __ j(not_zero, &wont_fit_into_smi);
+
+ // Smi tag and return.
+ __ SmiTag(eax);
+ __ bind(&smi);
+ __ ret(2 * kPointerSize);
+
+ // Check if the argument is < 2^kMantissaBits.
+ Label already_round;
+ __ bind(&wont_fit_into_smi);
+ __ LoadPowerOf2(xmm1, ebx, HeapNumber::kMantissaBits);
+ __ ucomisd(xmm0, xmm1);
+ __ j(above_equal, &already_round);
+
+ // Save a copy of the argument.
+ __ movaps(xmm2, xmm0);
+
+ // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
+ __ addsd(xmm0, xmm1);
+ __ subsd(xmm0, xmm1);
+
+ // Compare the argument and the tentative result to get the right mask:
+ // if xmm2 < xmm0:
+ // xmm2 = 1...1
+ // else:
+ // xmm2 = 0...0
+ __ cmpltsd(xmm2, xmm0);
+
+ // Subtract 1 if the argument was less than the tentative result.
+ __ LoadPowerOf2(xmm1, ebx, 0);
+ __ andpd(xmm1, xmm2);
+ __ subsd(xmm0, xmm1);
+
+ // Return a new heap number.
+ __ AllocateHeapNumber(eax, ebx, edx, &slow);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ __ ret(2 * kPointerSize);
+
+ // Return the argument (when it's an already round heap number).
+ __ bind(&already_round);
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ ret(2 * kPointerSize);
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+ __ bind(&miss);
+ // ecx: function name.
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+}
+
+
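The stub's rounding core is the classic 2^52 trick: for 0 < x < 2^52, (x + 2^52) - 2^52 loses exactly the fractional bits in the final rounding step, snapping x to the nearest integer; subtracting 1 whenever that tentative result overshot the argument turns round-to-nearest into floor. A scalar C++ model of the fast path (positive finite inputs only, as in the stub; other inputs are returned unchanged where the stub would take its slow or already-round paths):

    static double FloorViaTwo52(double x) {
      const double kTwo52 = 4503599627370496.0;  // 2^52, cf. LoadPowerOf2(kMantissaBits)
      if (!(x > 0.0) || x >= kTwo52) return x;   // NaN, non-positive, or already round
      volatile double rounded = (x + kTwo52) - kTwo52;  // volatile: keep the rounding step
      return (x < rounded) ? rounded - 1.0 : rounded;   // undo round-to-nearest overshoot
    }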
+Object* CallStubCompiler::CompileMathAbsCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into eax.
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+ // Check if the argument is a smi.
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smi);
+
+ // Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
+ // otherwise.
+ __ mov(ebx, eax);
+ __ sar(ebx, kBitsPerInt - 1);
+
+ // Do bitwise not or do nothing depending on ebx.
+ __ xor_(eax, Operand(ebx));
+
+ // Add 1 or do nothing depending on ebx.
+ __ sub(eax, Operand(ebx));
+
+ // If the result is still negative, go to the slow case.
+ // This only happens for the most negative smi.
+ Label slow;
+ __ j(negative, &slow);
+
+ // Smi case done.
+ __ ret(2 * kPointerSize);
+
+ // Check if the argument is a heap number and load its exponent and
+ // sign into ebx.
+ __ bind(&not_smi);
+ __ CheckMap(eax, Factory::heap_number_map(), &slow, true);
+ __ mov(ebx, FieldOperand(eax, HeapNumber::kExponentOffset));
+
+ // Check the sign of the argument. If the argument is positive,
+ // just return it.
+ Label negative_sign;
+ __ test(ebx, Immediate(HeapNumber::kSignMask));
+ __ j(not_zero, &negative_sign);
+ __ ret(2 * kPointerSize);
+
+ // If the argument is negative, clear the sign, and return a new
+ // number.
+ __ bind(&negative_sign);
+ __ and_(ebx, ~HeapNumber::kSignMask);
+ __ mov(ecx, FieldOperand(eax, HeapNumber::kMantissaOffset));
+ __ AllocateHeapNumber(eax, edi, edx, &slow);
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ebx);
+ __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+ __ ret(2 * kPointerSize);
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+ __ bind(&miss);
+ // ecx: function name.
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+}
+
+
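The smi path computes abs without a branch: the arithmetic shift spreads the sign bit into a mask that is -1 for negative inputs and 0 otherwise, so xor-then-subtract is either a two's-complement negation or a no-op. The most negative value stays negative, which is exactly what the stub's sign check afterwards catches before falling back to the slow case. A C++ rendering (unsigned arithmetic avoids signed-overflow UB; an arithmetic right shift is assumed, as on ia32):

    #include <cstdint>

    static int32_t BranchFreeAbs(int32_t x) {
      uint32_t ux = static_cast<uint32_t>(x);
      uint32_t mask = static_cast<uint32_t>(x >> 31);   // 0 or 0xFFFFFFFF
      return static_cast<int32_t>((ux ^ mask) - mask);  // still negative for INT32_MIN
    }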
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@@ -1894,7 +2128,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ j(above_equal, &miss, not_taken);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, eax);
+ masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, edi, name, &miss);
}
@@ -1914,7 +2148,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, eax);
+ masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, edi, name, &miss);
}
@@ -1935,7 +2169,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, eax);
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, edi, name, &miss);
}
@@ -2324,7 +2558,10 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
name,
edx,
&miss);
- if (cell->IsFailure()) return cell;
+ if (cell->IsFailure()) {
+ miss.Unuse();
+ return cell;
+ }
}
// Return undefined if maps of the full prototype chain are still the
@@ -2374,7 +2611,10 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi,
callback, name, &miss, &failure);
- if (!success) return failure;
+ if (!success) {
+ miss.Unuse();
+ return failure;
+ }
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2474,12 +2714,12 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
- __ IncrementCounter(&Counters::named_load_global_inline, 1);
+ __ IncrementCounter(&Counters::named_load_global_stub, 1);
__ mov(eax, ebx);
__ ret(0);
__ bind(&miss);
- __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+ __ IncrementCounter(&Counters::named_load_global_stub_miss, 1);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
@@ -2535,9 +2775,13 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi,
callback, name, &miss, &failure);
- if (!success) return failure;
+ if (!success) {
+ miss.Unuse();
+ return failure;
+ }
__ bind(&miss);
+
__ DecrementCounter(&Counters::keyed_load_callback, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc
index 5f1e1e4e5..a31f6e836 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.cc
+++ b/deps/v8/src/ia32/virtual-frame-ia32.cc
@@ -1313,7 +1313,7 @@ void VirtualFrame::Push(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL) {
- Slot* slot = proxy->var()->slot();
+ Slot* slot = proxy->var()->AsSlot();
if (slot->type() == Slot::LOCAL) {
PushLocalAt(slot->index());
return;
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index b4a333ec9..a9c2a4837 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -299,6 +299,7 @@ void LoadIC::ClearInlinedVersion(Address address) {
// present) to guarantee failure by holding an invalid map (the null
// value). The offset can be patched to anything.
PatchInlinedLoad(address, Heap::null_value(), 0);
+ PatchInlinedContextualLoad(address, Heap::null_value(), Heap::null_value());
}
@@ -720,6 +721,14 @@ Object* KeyedCallIC::LoadFunction(State state,
}
+#ifdef DEBUG
+#define TRACE_IC_NAMED(msg, name) \
+ if (FLAG_trace_ic) PrintF(msg, *(name)->ToCString())
+#else
+#define TRACE_IC_NAMED(msg, name)
+#endif
+
+
Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
  // If the object is undefined or null, it's illegal to try to get any
// of its properties; throw a TypeError in that case.
@@ -797,15 +806,24 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
LOG(SuspectReadEvent(*name, *object));
}
- bool can_be_inlined =
+ bool can_be_inlined_precheck =
FLAG_use_ic &&
- state == PREMONOMORPHIC &&
lookup.IsProperty() &&
lookup.IsCacheable() &&
lookup.holder() == *object &&
- lookup.type() == FIELD &&
!object->IsAccessCheckNeeded();
+ bool can_be_inlined =
+ can_be_inlined_precheck &&
+ state == PREMONOMORPHIC &&
+ lookup.type() == FIELD;
+
+ bool can_be_inlined_contextual =
+ can_be_inlined_precheck &&
+ state == UNINITIALIZED &&
+ lookup.holder()->IsGlobalObject() &&
+ lookup.type() == NORMAL;
+
if (can_be_inlined) {
Map* map = lookup.holder()->map();
// Property's index in the properties array. If negative we have
@@ -816,32 +834,29 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
int offset = map->instance_size() + (index * kPointerSize);
if (PatchInlinedLoad(address(), map, offset)) {
set_target(megamorphic_stub());
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[LoadIC : inline patch %s]\n", *name->ToCString());
- }
-#endif
+ TRACE_IC_NAMED("[LoadIC : inline patch %s]\n", name);
return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
-#ifdef DEBUG
} else {
- if (FLAG_trace_ic) {
- PrintF("[LoadIC : no inline patch %s (patching failed)]\n",
- *name->ToCString());
- }
+ TRACE_IC_NAMED("[LoadIC : no inline patch %s (patching failed)]\n",
+ name);
}
} else {
- if (FLAG_trace_ic) {
- PrintF("[LoadIC : no inline patch %s (not inobject)]\n",
- *name->ToCString());
- }
+ TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inobject)]\n", name);
+ }
+ } else if (can_be_inlined_contextual) {
+ Map* map = lookup.holder()->map();
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
+ lookup.holder()->property_dictionary()->ValueAt(
+ lookup.GetDictionaryEntry()));
+ if (PatchInlinedContextualLoad(address(), map, cell)) {
+ set_target(megamorphic_stub());
+ TRACE_IC_NAMED("[LoadIC : inline contextual patch %s]\n", name);
+ ASSERT(cell->value() != Heap::the_hole_value());
+ return cell->value();
}
} else {
if (FLAG_use_ic && state == PREMONOMORPHIC) {
- if (FLAG_trace_ic) {
- PrintF("[LoadIC : no inline patch %s (not inlinable)]\n",
- *name->ToCString());
-#endif
- }
+ TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inlinable)]\n", name);
}
}
@@ -1526,18 +1541,17 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
// Static IC stub generators.
//
-static Object* CompileFunction(Object* result,
- Handle<Object> object,
- InLoopFlag in_loop) {
+static JSFunction* CompileFunction(JSFunction* function,
+ InLoopFlag in_loop) {
// Compile now with optimization.
HandleScope scope;
- Handle<JSFunction> function = Handle<JSFunction>(JSFunction::cast(result));
+ Handle<JSFunction> function_handle(function);
if (in_loop == IN_LOOP) {
- CompileLazyInLoop(function, object, CLEAR_EXCEPTION);
+ CompileLazyInLoop(function_handle, CLEAR_EXCEPTION);
} else {
- CompileLazy(function, object, CLEAR_EXCEPTION);
+ CompileLazy(function_handle, CLEAR_EXCEPTION);
}
- return *function;
+ return *function_handle;
}
@@ -1560,7 +1574,7 @@ Object* CallIC_Miss(Arguments args) {
if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
return result;
}
- return CompileFunction(result, args.at<Object>(0), ic.target()->ic_in_loop());
+ return CompileFunction(JSFunction::cast(result), ic.target()->ic_in_loop());
}
@@ -1576,7 +1590,7 @@ Object* KeyedCallIC_Miss(Arguments args) {
if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
return result;
}
- return CompileFunction(result, args.at<Object>(0), ic.target()->ic_in_loop());
+ return CompileFunction(JSFunction::cast(result), ic.target()->ic_in_loop());
}
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 17450cc35..a5fada09f 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -298,6 +298,10 @@ class LoadIC: public IC {
static bool PatchInlinedLoad(Address address, Object* map, int index);
+ static bool PatchInlinedContextualLoad(Address address,
+ Object* map,
+ Object* cell);
+
friend class IC;
};
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index 9abf61ce5..5a08212b1 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -67,12 +67,12 @@ class List {
// Returns a reference to the element at index i. This reference is
// not safe to use after operations that can change the list's
  // backing store (e.g., Add).
- inline T& operator[](int i) const {
+ inline T& operator[](int i) const {
ASSERT(0 <= i);
ASSERT(i < length_);
return data_[i];
}
- inline T& at(int i) const { return operator[](i); }
+ inline T& at(int i) const { return operator[](i); }
inline T& last() const { return at(length_ - 1); }
inline T& first() const { return at(0); }
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 41523a82a..2fae3afb1 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -408,6 +408,7 @@ static void CompileScriptForTracker(Handle<Script> script) {
// Build AST.
ScriptDataImpl* pre_data = NULL;
+ EagerCompilationInfo info(script, is_eval);
FunctionLiteral* lit = MakeAST(is_global, script, extension, pre_data);
// Check for parse errors.
@@ -415,10 +416,9 @@ static void CompileScriptForTracker(Handle<Script> script) {
ASSERT(Top::has_pending_exception());
return;
}
+ info.set_function(lit);
// Compile the code.
- CompilationInfo info(lit, script, is_eval);
-
LiveEditFunctionTracker tracker(lit);
Handle<Code> code = MakeCodeForLiveEdit(&info);
@@ -664,7 +664,7 @@ class FunctionInfoListener {
int j = 0;
for (int i = 0; i < list.length(); i++) {
Variable* var1 = list[i];
- Slot* slot = var1->slot();
+ Slot* slot = var1->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
if (j != i) {
list[j] = var1;
@@ -677,7 +677,7 @@ class FunctionInfoListener {
for (int k = 1; k < j; k++) {
int l = k;
for (int m = k + 1; m < j; m++) {
- if (list[l]->slot()->index() > list[m]->slot()->index()) {
+ if (list[l]->AsSlot()->index() > list[m]->AsSlot()->index()) {
l = m;
}
}
@@ -687,7 +687,7 @@ class FunctionInfoListener {
SetElement(scope_info_list, scope_info_length, list[i]->name());
scope_info_length++;
SetElement(scope_info_list, scope_info_length,
- Handle<Smi>(Smi::FromInt(list[i]->slot()->index())));
+ Handle<Smi>(Smi::FromInt(list[i]->AsSlot()->index())));
scope_info_length++;
}
SetElement(scope_info_list, scope_info_length,
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 0bca5ebd8..4230cba91 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -171,7 +171,9 @@ void StackTracer::Trace(TickSample* sample) {
SafeStackTraceFrameIterator it(sample->fp, sample->sp,
sample->sp, js_entry_sp);
while (!it.done() && i < TickSample::kMaxFramesCount) {
- sample->stack[i++] = reinterpret_cast<Address>(it.frame()->function());
+ sample->stack[i++] =
+ reinterpret_cast<Address>(it.frame()->function_slot_object()) -
+ kHeapObjectTag;
it.Advance();
}
sample->frames_count = i;
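The fix stores a raw, untagged address in the sample: a checked function() accessor can trip asserts when a profiler tick interrupts the VM mid-GC, while reading the slot and stripping the low heap-object tag bit is always safe. Tagged-pointer arithmetic in miniature (a sketch; the constant mirrors V8's kHeapObjectTag == 1):

    #include <cstdint>

    static inline void* UntagHeapObject(uintptr_t slot_value) {
      const uintptr_t kHeapObjectTag = 1;  // low bit is set on heap pointers
      return reinterpret_cast<void*>(slot_value - kHeapObjectTag);
    }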
@@ -391,6 +393,13 @@ void Logger::IntEvent(const char* name, int value) {
}
+void Logger::IntPtrTEvent(const char* name, intptr_t value) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (FLAG_log) UncheckedIntPtrTEvent(name, value);
+#endif
+}
+
+
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedIntEvent(const char* name, int value) {
if (!Log::IsEnabled()) return;
@@ -401,6 +410,16 @@ void Logger::UncheckedIntEvent(const char* name, int value) {
#endif
+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
+ if (!Log::IsEnabled()) return;
+ LogMessageBuilder msg;
+ msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value);
+ msg.WriteToLogFile();
+}
+#endif
+
+
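V8_PTR_PREFIX is V8's pointer-width printf length modifier; standard C++ offers the same via <cinttypes>. An equivalent portable sketch of the new event, for readers outside the V8 tree:

    #include <cinttypes>
    #include <cstdio>

    static void LogIntPtrEvent(const char* name, intptr_t value) {
      std::printf("%s,%" PRIdPTR "\n", name, value);  // e.g. "heap-used,1048576"
    }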
void Logger::HandleEvent(const char* name, Object** location) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_handles) return;
@@ -869,14 +888,17 @@ void Logger::SnapshotPositionEvent(Address addr, int pos) {
void Logger::FunctionCreateEvent(JSFunction* function) {
#ifdef ENABLE_LOGGING_AND_PROFILING
+ // This function can be called from GC iterators (during Scavenge,
+ // MC, and MS), so marking bits can be set on objects. That's
+ // why unchecked accessors are used here.
static Address prev_code = NULL;
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
msg.Append("%s,", log_events_[FUNCTION_CREATION_EVENT]);
msg.AppendAddress(function->address());
msg.Append(',');
- msg.AppendAddress(function->code()->address(), prev_code);
- prev_code = function->code()->address();
+ msg.AppendAddress(function->unchecked_code()->address(), prev_code);
+ prev_code = function->unchecked_code()->address();
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
@@ -887,6 +909,16 @@ void Logger::FunctionCreateEvent(JSFunction* function) {
}
+void Logger::FunctionCreateEventFromMove(JSFunction* function,
+ HeapObject*) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile)) {
+ FunctionCreateEvent(function);
+ }
+#endif
+}
+
+
void Logger::FunctionMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
MoveEventInternal(FUNCTION_MOVE_EVENT, from, to);
@@ -990,11 +1022,12 @@ void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
void Logger::HeapSampleStats(const char* space, const char* kind,
- int capacity, int used) {
+ intptr_t capacity, intptr_t used) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_gc) return;
LogMessageBuilder msg;
- msg.Append("heap-sample-stats,\"%s\",\"%s\",%d,%d\n",
+ msg.Append("heap-sample-stats,\"%s\",\"%s\","
+ "%" V8_PTR_PREFIX "d,%" V8_PTR_PREFIX "d\n",
space, kind, capacity, used);
msg.WriteToLogFile();
#endif
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 160072dec..e51373786 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -159,6 +159,7 @@ class Logger {
// Emits an event with an int value -> (name, value).
static void IntEvent(const char* name, int value);
+ static void IntPtrTEvent(const char* name, intptr_t value);
  // Emits an event with a handle value -> (name, location).
static void HandleEvent(const char* name, Object** location);
@@ -216,6 +217,8 @@ class Logger {
static void CodeDeleteEvent(Address from);
// Emits a function object create event.
static void FunctionCreateEvent(JSFunction* function);
+ static void FunctionCreateEventFromMove(JSFunction* function,
+ HeapObject*);
// Emits a function move event.
static void FunctionMoveEvent(Address from, Address to);
// Emits a function delete event.
@@ -235,7 +238,7 @@ class Logger {
static void HeapSampleJSProducerEvent(const char* constructor,
Address* stack);
static void HeapSampleStats(const char* space, const char* kind,
- int capacity, int used);
+ intptr_t capacity, intptr_t used);
static void SharedLibraryEvent(const char* library_path,
uintptr_t start,
@@ -324,6 +327,7 @@ class Logger {
// Logs an IntEvent regardless of whether FLAG_log is true.
static void UncheckedIntEvent(const char* name, int value);
+ static void UncheckedIntPtrTEvent(const char* name, intptr_t value);
// Stops logging and profiling in case of insufficient resources.
static void StopLoggingAndProfiling();
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index a9e852ef3..26f88cfe6 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -167,8 +167,8 @@ void MarkCompactCollector::Finish() {
// reclaiming the waste and free list blocks).
static const int kFragmentationLimit = 15; // Percent.
static const int kFragmentationAllowed = 1 * MB; // Absolute.
- int old_gen_recoverable = 0;
- int old_gen_used = 0;
+ intptr_t old_gen_recoverable = 0;
+ intptr_t old_gen_used = 0;
OldSpaces spaces;
for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
@@ -282,10 +282,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
FixedArray::BodyDescriptor,
void>::Visit);
- table_.Register(kVisitSharedFunctionInfo,
- &FixedBodyVisitor<StaticMarkingVisitor,
- SharedFunctionInfo::BodyDescriptor,
- void>::Visit);
+ table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
@@ -537,6 +534,17 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
+ static void VisitSharedFunctionInfo(Map* map, HeapObject* object) {
+ SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
+ if (shared->IsInobjectSlackTrackingInProgress()) {
+ shared->DetachInitialMap();
+ }
+ FixedBodyVisitor<StaticMarkingVisitor,
+ SharedFunctionInfo::BodyDescriptor,
+ void>::Visit(map, object);
+ }
+
+
static void VisitCodeEntry(Address entry_address) {
Object* code = Code::GetObjectFromEntryAddress(entry_address);
Object* old_code = code;
@@ -1139,6 +1147,14 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// Only JSObject and subtypes have map transitions and back pointers.
if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
if (map->instance_type() > JS_FUNCTION_TYPE) continue;
+
+ if (map->IsMarked() && map->attached_to_shared_function_info()) {
+ // This map is used for inobject slack tracking and has been detached
+ // from SharedFunctionInfo during the mark phase.
+ // Since it survived the GC, reattach it now.
+ map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
+ }
+
// Follow the chain of back pointers to find the prototype.
Map* current = map;
while (SafeIsMap(current)) {
@@ -1992,8 +2008,10 @@ class MapCompact {
#ifdef DEBUG
if (FLAG_gc_verbose) {
- PrintF("update %p : %p -> %p\n", obj->address(),
- map, new_map);
+ PrintF("update %p : %p -> %p\n",
+ obj->address(),
+ reinterpret_cast<void*>(map),
+ reinterpret_cast<void*>(new_map));
}
#endif
}
@@ -2052,8 +2070,8 @@ void MarkCompactCollector::SweepSpaces() {
&UpdatePointerToNewGen,
Heap::WATERMARK_SHOULD_BE_VALID);
- int live_maps_size = Heap::map_space()->Size();
- int live_maps = live_maps_size / Map::kSize;
+ intptr_t live_maps_size = Heap::map_space()->Size();
+ int live_maps = static_cast<int>(live_maps_size / Map::kSize);
ASSERT(live_map_objects_size_ == live_maps_size);
if (Heap::map_space()->NeedsCompaction(live_maps)) {
@@ -2504,6 +2522,7 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsJSFunction()) {
PROFILE(FunctionMoveEvent(old_addr, new_addr));
+ PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to), obj));
}
HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
@@ -2596,6 +2615,7 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsJSFunction()) {
PROFILE(FunctionMoveEvent(old_addr, new_addr));
+ PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to), obj));
}
HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index f26c3b501..4f492bc5e 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -684,6 +684,11 @@ CallSite.prototype.getEvalOrigin = function () {
return FormatEvalOrigin(script);
};
+CallSite.prototype.getScriptNameOrSourceURL = function () {
+ var script = %FunctionGetScript(this.fun);
+ return script ? script.nameOrSourceURL() : null;
+};
+
CallSite.prototype.getFunction = function () {
return this.fun;
};
@@ -775,7 +780,11 @@ CallSite.prototype.isConstructor = function () {
};
function FormatEvalOrigin(script) {
- var eval_origin = "";
+ var sourceURL = script.nameOrSourceURL();
+ if (sourceURL)
+ return sourceURL;
+
+ var eval_origin = "eval at ";
if (script.eval_from_function_name) {
eval_origin += script.eval_from_function_name;
} else {
@@ -786,9 +795,9 @@ function FormatEvalOrigin(script) {
if (eval_from_script) {
if (eval_from_script.compilation_type == COMPILATION_TYPE_EVAL) {
// eval script originated from another eval.
- eval_origin += " (eval at " + FormatEvalOrigin(eval_from_script) + ")";
+ eval_origin += " (" + FormatEvalOrigin(eval_from_script) + ")";
} else {
- // eval script originated from "real" scource.
+ // eval script originated from "real" source.
if (eval_from_script.name) {
eval_origin += " (" + eval_from_script.name;
var location = eval_from_script.locationFromPosition(script.eval_from_script_position, true);
@@ -807,25 +816,30 @@ function FormatEvalOrigin(script) {
};
function FormatSourcePosition(frame) {
+ var fileName;
var fileLocation = "";
if (frame.isNative()) {
fileLocation = "native";
} else if (frame.isEval()) {
- fileLocation = "eval at " + frame.getEvalOrigin();
+ fileName = frame.getScriptNameOrSourceURL();
+ if (!fileName)
+ fileLocation = frame.getEvalOrigin();
} else {
- var fileName = frame.getFileName();
- if (fileName) {
- fileLocation += fileName;
- var lineNumber = frame.getLineNumber();
- if (lineNumber != null) {
- fileLocation += ":" + lineNumber;
- var columnNumber = frame.getColumnNumber();
- if (columnNumber) {
- fileLocation += ":" + columnNumber;
- }
+ fileName = frame.getFileName();
+ }
+
+ if (fileName) {
+ fileLocation += fileName;
+ var lineNumber = frame.getLineNumber();
+ if (lineNumber != null) {
+ fileLocation += ":" + lineNumber;
+ var columnNumber = frame.getColumnNumber();
+ if (columnNumber) {
+ fileLocation += ":" + columnNumber;
}
}
}
+
if (!fileLocation) {
fileLocation = "unknown source";
}
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index b7c3ebcbc..a687c2b8f 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -73,13 +73,13 @@ namespace internal {
// Core register.
struct Register {
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- int code() const {
+ bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+ bool is(Register reg) const { return code_ == reg.code_; }
+ int code() const {
ASSERT(is_valid());
return code_;
}
- int bit() const {
+ int bit() const {
ASSERT(is_valid());
return 1 << code_;
}
@@ -129,13 +129,13 @@ Register ToRegister(int num);
// Coprocessor register.
struct FPURegister {
- bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegister ; }
- bool is(FPURegister creg) const { return code_ == creg.code_; }
- int code() const {
+ bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegister ; }
+ bool is(FPURegister creg) const { return code_ == creg.code_; }
+ int code() const {
ASSERT(is_valid());
return code_;
}
- int bit() const {
+ int bit() const {
ASSERT(is_valid());
return 1 << code_;
}
@@ -665,4 +665,3 @@ class Assembler : public Malloced {
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_MIPS_H_
-
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index 75e7a293f..66f891bd7 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -240,10 +240,10 @@ class CodeGenerator: public AstVisitor {
void ProcessDeferred();
// State
- bool has_cc() const { return cc_reg_ != cc_always; }
+ bool has_cc() const { return cc_reg_ != cc_always; }
TypeofState typeof_state() const { return state_->typeof_state(); }
- JumpTarget* true_target() const { return state_->true_target(); }
- JumpTarget* false_target() const { return state_->false_target(); }
+ JumpTarget* true_target() const { return state_->true_target(); }
+ JumpTarget* false_target() const { return state_->false_target(); }
// We don't track loop nesting level on mips yet.
int loop_nesting() const { return 0; }
@@ -280,7 +280,7 @@ class CodeGenerator: public AstVisitor {
MemOperand SlotOperand(Slot* slot, Register tmp);
// Expressions
- MemOperand GlobalObject() const {
+ MemOperand GlobalObject() const {
return ContextOperand(cp, Context::GLOBAL_INDEX);
}
diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc
index 0fce3cdd9..d63056299 100644
--- a/deps/v8/src/mips/frames-mips.cc
+++ b/deps/v8/src/mips/frames-mips.cc
@@ -52,9 +52,7 @@ StackFrame::Type StackFrame::ComputeType(State* state) {
}
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
- if (fp == 0) return NONE;
- // Compute frame type and stack pointer.
+Address ExitFrame::ComputeStackPointer(Address fp) {
Address sp = fp + ExitFrameConstants::kSPDisplacement;
const int offset = ExitFrameConstants::kCodeOffset;
Object* code = Memory::Object_at(fp + offset);
@@ -62,11 +60,7 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (is_debug_exit) {
sp -= kNumJSCallerSaved * kPointerSize;
}
- // Fill in the state.
- state->sp = sp;
- state->fp = fp;
- state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
- return EXIT;
+ return sp;
}
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 6d49d7503..5883f8b34 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -89,7 +89,7 @@ void Failure::FailureVerify() {
void HeapObject::PrintHeader(const char* id) {
- PrintF("%p: [%s]\n", this, id);
+ PrintF("%p: [%s]\n", reinterpret_cast<void*>(this), id);
}
@@ -522,9 +522,9 @@ void JSObject::PrintElements() {
void JSObject::JSObjectPrint() {
- PrintF("%p: [JSObject]\n", this);
- PrintF(" - map = %p\n", map());
- PrintF(" - prototype = %p\n", GetPrototype());
+ PrintF("%p: [JSObject]\n", reinterpret_cast<void*>(this));
+ PrintF(" - map = %p\n", reinterpret_cast<void*>(map()));
+ PrintF(" - prototype = %p\n", reinterpret_cast<void*>(GetPrototype()));
PrintF(" {\n");
PrintProperties();
PrintElements();
@@ -649,8 +649,9 @@ void Map::MapVerify() {
}
-void Map::NormalizedMapVerify() {
+void Map::SharedMapVerify() {
MapVerify();
+ ASSERT(is_shared());
ASSERT_EQ(Heap::empty_descriptor_array(), instance_descriptors());
ASSERT_EQ(Heap::empty_fixed_array(), code_cache());
ASSERT_EQ(0, pre_allocated_property_fields());
@@ -743,7 +744,7 @@ void String::StringVerify() {
void JSFunction::JSFunctionPrint() {
HeapObject::PrintHeader("Function");
- PrintF(" - map = 0x%p\n", map());
+ PrintF(" - map = 0x%p\n", reinterpret_cast<void*>(map()));
PrintF(" - initial_map = ");
if (has_initial_map()) {
initial_map()->ShortPrint();
@@ -904,7 +905,7 @@ void Code::CodePrint() {
void Code::CodeVerify() {
CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
- static_cast<intptr_t>(kCodeAlignment)));
+ kCodeAlignment));
Address last_gc_pc = NULL;
for (RelocIterator it(this); !it.done(); it.next()) {
it.rinfo()->Verify();
@@ -1223,9 +1224,9 @@ void BreakPointInfo::BreakPointInfoVerify() {
void BreakPointInfo::BreakPointInfoPrint() {
HeapObject::PrintHeader("BreakPointInfo");
- PrintF("\n - code_position: %d", code_position());
- PrintF("\n - source_position: %d", source_position());
- PrintF("\n - statement_position: %d", statement_position());
+ PrintF("\n - code_position: %d", code_position()->value());
+ PrintF("\n - source_position: %d", source_position()->value());
+ PrintF("\n - statement_position: %d", statement_position()->value());
PrintF("\n - break_point_objects: ");
break_point_objects()->ShortPrint();
}
@@ -1381,7 +1382,7 @@ void NormalizedMapCache::NormalizedMapCacheVerify() {
for (int i = 0; i < length(); i++) {
Object* e = get(i);
if (e->IsMap()) {
- Map::cast(e)->NormalizedMapVerify();
+ Map::cast(e)->SharedMapVerify();
} else {
ASSERT(e->IsUndefined());
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index bac224f4e..f63d6725e 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -83,7 +83,6 @@ PropertyDetails PropertyDetails::AsDeleted() {
}
-
#define SMI_ACCESSORS(holder, name, offset) \
int holder::name() { \
Object* value = READ_FIELD(this, offset); \
@@ -1343,8 +1342,8 @@ Object* JSObject::InObjectPropertyAtPut(int index,
-void JSObject::InitializeBody(int object_size) {
- Object* value = Heap::undefined_value();
+void JSObject::InitializeBody(int object_size, Object* value) {
+ ASSERT(!value->IsHeapObject() || !Heap::InNewSpace(value));
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
WRITE_FIELD(this, offset, value);
}
@@ -2279,6 +2278,36 @@ bool Map::is_extensible() {
}
+void Map::set_attached_to_shared_function_info(bool value) {
+ if (value) {
+ set_bit_field2(bit_field2() | (1 << kAttachedToSharedFunctionInfo));
+ } else {
+ set_bit_field2(bit_field2() & ~(1 << kAttachedToSharedFunctionInfo));
+ }
+}
+
+bool Map::attached_to_shared_function_info() {
+ return ((1 << kAttachedToSharedFunctionInfo) & bit_field2()) != 0;
+}
+
+
+void Map::set_is_shared(bool value) {
+ if (value) {
+ set_bit_field2(bit_field2() | (1 << kIsShared));
+ } else {
+ set_bit_field2(bit_field2() & ~(1 << kIsShared));
+ }
+}
+
+bool Map::is_shared() {
+ return ((1 << kIsShared) & bit_field2()) != 0;
+}
+
+
+JSFunction* Map::unchecked_constructor() {
+ return reinterpret_cast<JSFunction*>(READ_FIELD(this, kConstructorOffset));
+}
+
Code::Flags Code::flags() {
return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
@@ -2571,6 +2600,7 @@ ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
+ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
kInstanceClassNameOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
@@ -2662,6 +2692,37 @@ PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, this_property_assignments_count,
kThisPropertyAssignmentsCountOffset)
#endif
+
+int SharedFunctionInfo::construction_count() {
+ return READ_BYTE_FIELD(this, kConstructionCountOffset);
+}
+
+
+void SharedFunctionInfo::set_construction_count(int value) {
+ ASSERT(0 <= value && value < 256);
+ WRITE_BYTE_FIELD(this, kConstructionCountOffset, static_cast<byte>(value));
+}
+
+
+bool SharedFunctionInfo::live_objects_may_exist() {
+ return (compiler_hints() & (1 << kLiveObjectsMayExist)) != 0;
+}
+
+
+void SharedFunctionInfo::set_live_objects_may_exist(bool value) {
+ if (value) {
+ set_compiler_hints(compiler_hints() | (1 << kLiveObjectsMayExist));
+ } else {
+ set_compiler_hints(compiler_hints() & ~(1 << kLiveObjectsMayExist));
+ }
+}
+
+
+bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
+ return initial_map() != Heap::undefined_value();
+}
+
+
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
@@ -3138,9 +3199,9 @@ Object* JSObject::EnsureWritableFastElements() {
ASSERT(HasFastElements());
FixedArray* elems = FixedArray::cast(elements());
if (elems->map() != Heap::fixed_cow_array_map()) return elems;
- Object* writable_elems = Heap::CopyFixedArray(elems);
+ Object* writable_elems = Heap::CopyFixedArrayWithMap(elems,
+ Heap::fixed_array_map());
if (writable_elems->IsFailure()) return writable_elems;
- FixedArray::cast(writable_elems)->set_map(Heap::fixed_array_map());
set_elements(FixedArray::cast(writable_elems));
Counters::cow_arrays_converted.Increment();
return writable_elems;
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index ef5185145..737bf5726 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -1476,8 +1476,8 @@ Object* JSObject::ConvertDescriptorToField(String* name,
FixedArray* new_properties = 0; // Will always be NULL or a valid pointer.
int new_unused_property_fields = map()->unused_property_fields() - 1;
if (map()->unused_property_fields() == 0) {
- new_unused_property_fields = kFieldsAdded - 1;
- Object* new_properties_unchecked =
+ new_unused_property_fields = kFieldsAdded - 1;
+ Object* new_properties_unchecked =
properties()->CopySize(properties()->length() + kFieldsAdded);
if (new_properties_unchecked->IsFailure()) return new_properties_unchecked;
new_properties = FixedArray::cast(new_properties_unchecked);
@@ -2099,61 +2099,34 @@ PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) {
}
-bool NormalizedMapCache::IsCacheable(JSObject* object) {
- // Caching for global objects is not worth it (there are too few of them).
- return !object->IsGlobalObject();
-}
-
-
Object* NormalizedMapCache::Get(JSObject* obj, PropertyNormalizationMode mode) {
- Object* result;
-
Map* fast = obj->map();
- if (!IsCacheable(obj)) {
- result = fast->CopyNormalized(mode);
- if (result->IsFailure()) return result;
- } else {
- int index = Hash(fast) % kEntries;
- result = get(index);
-
- if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
+ int index = Hash(fast) % kEntries;
+ Object* result = get(index);
+ if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- // Make sure that the new slow map has exactly the same hash as the
- // original fast map. This way we can use hash to check if a slow map
- // is already in the hash (see Contains method).
- ASSERT(Hash(fast) == Hash(Map::cast(result)));
- // The cached map should match newly created normalized map bit-by-bit.
- Object* fresh = fast->CopyNormalized(mode);
- if (!fresh->IsFailure()) {
- ASSERT(memcmp(Map::cast(fresh)->address(),
- Map::cast(result)->address(),
- Map::kSize) == 0);
- }
+ if (FLAG_enable_slow_asserts) {
+ // The cached map should match newly created normalized map bit-by-bit.
+ Object* fresh = fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+ if (!fresh->IsFailure()) {
+ ASSERT(memcmp(Map::cast(fresh)->address(),
+ Map::cast(result)->address(),
+ Map::kSize) == 0);
}
-#endif
- return result;
}
-
- result = fast->CopyNormalized(mode);
- if (result->IsFailure()) return result;
- set(index, result);
+#endif
+ return result;
}
+
+ result = fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+ if (result->IsFailure()) return result;
+ set(index, result);
Counters::normalized_maps.Increment();
return result;
}
-bool NormalizedMapCache::Contains(Map* map) {
- // If the map is present in the cache it can only be at one place:
- // at the index calculated from the hash. We assume that a slow map has the
- // same hash as a fast map it has been generated from.
- int index = Hash(map) % kEntries;
- return get(index) == map;
-}
-
-
void NormalizedMapCache::Clear() {
int entries = length();
for (int i = 0; i != entries; i++) {
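The simplified Get is a direct-mapped cache: one slot per hash bucket, overwritten on a miss, with the dropped Contains/IsCacheable machinery no longer needed because sharability is now recorded on the map itself via is_shared(). The structure in isolation (illustrative names, not V8 API; Key and Value are assumed default-constructible):

    #include <cstddef>

    template <typename Key, typename Value, int kEntries = 64>
    class DirectMappedCache {
     public:
      // Returns the cached value or nullptr; a colliding key simply evicts.
      Value* Get(const Key& key, std::size_t hash) {
        Entry& e = entries_[hash % kEntries];
        return (e.valid && e.key == key) ? &e.value : nullptr;
      }
      void Put(const Key& key, std::size_t hash, const Value& value) {
        entries_[hash % kEntries] = Entry{key, value, true};
      }
     private:
      struct Entry { Key key; Value value; bool valid = false; };
      Entry entries_[kEntries];
    };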
@@ -2184,7 +2157,7 @@ bool NormalizedMapCache::CheckHit(Map* slow,
Map* fast,
PropertyNormalizationMode mode) {
#ifdef DEBUG
- slow->NormalizedMapVerify();
+ slow->SharedMapVerify();
#endif
return
slow->constructor() == fast->constructor() &&
@@ -2194,17 +2167,17 @@ bool NormalizedMapCache::CheckHit(Map* slow,
fast->inobject_properties()) &&
slow->instance_type() == fast->instance_type() &&
slow->bit_field() == fast->bit_field() &&
- slow->bit_field2() == fast->bit_field2();
+      (slow->bit_field2() & ~(1 << Map::kIsShared)) == fast->bit_field2();
}
Object* JSObject::UpdateMapCodeCache(String* name, Code* code) {
- if (!HasFastProperties() &&
- NormalizedMapCache::IsCacheable(this) &&
- Top::context()->global_context()->normalized_map_cache()->
- Contains(map())) {
- // Replace the map with the identical copy that can be safely modified.
- Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES);
+ if (map()->is_shared()) {
+ // Fast case maps are never marked as shared.
+ ASSERT(!HasFastProperties());
+ // Replace the map with an identical copy that can be safely modified.
+ Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES,
+ UNIQUE_NORMALIZED_MAP);
if (obj->IsFailure()) return obj;
Counters::normalized_maps.Increment();
@@ -3189,12 +3162,14 @@ Object* Map::CopyDropDescriptors() {
}
Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->set_bit_field2(bit_field2());
+ Map::cast(result)->set_is_shared(false);
Map::cast(result)->ClearCodeCache();
return result;
}
-Object* Map::CopyNormalized(PropertyNormalizationMode mode) {
+Object* Map::CopyNormalized(PropertyNormalizationMode mode,
+ NormalizedMapSharingMode sharing) {
int new_instance_size = instance_size();
if (mode == CLEAR_INOBJECT_PROPERTIES) {
new_instance_size -= inobject_properties() * kPointerSize;
@@ -3213,8 +3188,12 @@ Object* Map::CopyNormalized(PropertyNormalizationMode mode) {
Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->set_bit_field2(bit_field2());
+ Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
+
#ifdef DEBUG
- Map::cast(result)->NormalizedMapVerify();
+ if (Map::cast(result)->is_shared()) {
+ Map::cast(result)->SharedMapVerify();
+ }
#endif
return result;
@@ -3271,6 +3250,47 @@ void Map::RemoveFromCodeCache(String* name, Code* code, int index) {
}
+void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
+ Map* current = this;
+ while (current != Heap::meta_map()) {
+ DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
+ *RawField(current, Map::kInstanceDescriptorsOffset));
+ if (d == Heap::empty_descriptor_array()) {
+ Map* prev = current->map();
+ current->set_map(Heap::meta_map());
+ callback(current, data);
+ current = prev;
+ continue;
+ }
+
+ FixedArray* contents = reinterpret_cast<FixedArray*>(
+ d->get(DescriptorArray::kContentArrayIndex));
+ Object** map_or_index_field = RawField(contents, HeapObject::kMapOffset);
+ Object* map_or_index = *map_or_index_field;
+ bool map_done = true;
+ for (int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : 0;
+ i < contents->length();
+ i += 2) {
+ PropertyDetails details(Smi::cast(contents->get(i + 1)));
+ if (details.IsTransition()) {
+ Map* next = reinterpret_cast<Map*>(contents->get(i));
+ next->set_map(current);
+ *map_or_index_field = Smi::FromInt(i + 2);
+ current = next;
+ map_done = false;
+ break;
+ }
+ }
+ if (!map_done) continue;
+ *map_or_index_field = Heap::fixed_array_map();
+ Map* prev = current->map();
+ current->set_map(Heap::meta_map());
+ callback(current, data);
+ current = prev;
+ }
+}
+
+
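TraverseTransitionTree visits the whole transition tree without a stack or any allocation, which matters because it runs during GC-adjacent bookkeeping when memory is tight: on the way down it threads the parent pointer through the child's map slot, and it keeps a smi cursor in the descriptor contents array's map field; on the way up it restores both. The same shape on a toy tree, with a dedicated cursor field standing in for the smi trick (a sketch, not V8 code):

    #include <cstddef>
    #include <vector>

    struct Node {
      Node* back_link = nullptr;    // overwritten during the walk, like set_map()
      std::vector<Node*> children;
      std::size_t cursor = 0;       // progress marker, like the smi in the map slot
    };

    static void TraversePostOrder(Node* root, void (*callback)(Node*, void*),
                                  void* data) {
      root->back_link = nullptr;    // sentinel, playing the role of meta_map
      Node* current = root;
      while (current != nullptr) {
        if (current->cursor < current->children.size()) {
          Node* next = current->children[current->cursor++];
          next->back_link = current;       // thread the way back down
          current = next;
        } else {
          current->cursor = 0;             // restore state for future walks
          Node* up = current->back_link;
          callback(current, data);         // visit after all transitions
          current = up;
        }
      }
    }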
Object* CodeCache::Update(String* name, Code* code) {
ASSERT(code->ic_state() == MONOMORPHIC);
@@ -3825,7 +3845,7 @@ Object* DescriptorArray::RemoveTransitions() {
}
-void DescriptorArray::Sort() {
+void DescriptorArray::SortUnchecked() {
// In-place heap sort.
int len = number_of_descriptors();
@@ -3875,7 +3895,11 @@ void DescriptorArray::Sort() {
parent_index = child_index;
}
}
+}
+
+void DescriptorArray::Sort() {
+ SortUnchecked();
SLOW_ASSERT(IsSortedNoDuplicates());
}
@@ -5269,6 +5293,13 @@ bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
}
+void SharedFunctionInfo::ForbidInlineConstructor() {
+ set_compiler_hints(BooleanBit::set(compiler_hints(),
+ kHasOnlySimpleThisPropertyAssignments,
+ false));
+}
+
+
void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
bool only_simple_this_property_assignments,
FixedArray* assignments) {
@@ -5366,6 +5397,107 @@ void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
}
+void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
+ ASSERT(!IsInobjectSlackTrackingInProgress());
+
+ // Only initiate the tracking the first time.
+ if (live_objects_may_exist()) return;
+ set_live_objects_may_exist(true);
+
+ // No tracking during the snapshot construction phase.
+ if (Serializer::enabled()) return;
+
+ if (map->unused_property_fields() == 0) return;
+
+  // A nonzero counter is a leftover from a previous attempt that was
+  // interrupted by GC; keep it.
+ if (construction_count() == 0) {
+ set_construction_count(kGenerousAllocationCount);
+ }
+ set_initial_map(map);
+ ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubGeneric),
+ construct_stub());
+ set_construct_stub(Builtins::builtin(Builtins::JSConstructStubCountdown));
+}
+
+
+// Called from GC, hence reinterpret_cast and unchecked accessors.
+void SharedFunctionInfo::DetachInitialMap() {
+ Map* map = reinterpret_cast<Map*>(initial_map());
+
+ // Make the map remember to restore the link if it survives the GC.
+ map->set_bit_field2(
+ map->bit_field2() | (1 << Map::kAttachedToSharedFunctionInfo));
+
+  // Undo the state changes made by StartInobjectSlackTracking (except the
+  // construction_count). This way, if the initial map does not survive the GC,
+  // then StartInobjectSlackTracking will be called again the next time the
+ // constructor is called. The countdown will continue and (possibly after
+ // several more GCs) CompleteInobjectSlackTracking will eventually be called.
+ set_initial_map(Heap::raw_unchecked_undefined_value());
+ ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubCountdown),
+ *RawField(this, kConstructStubOffset));
+ set_construct_stub(Builtins::builtin(Builtins::JSConstructStubGeneric));
+ // It is safe to clear the flag: it will be set again if the map is live.
+ set_live_objects_may_exist(false);
+}
+
+
+// Called from GC, hence reinterpret_cast and unchecked accessors.
+void SharedFunctionInfo::AttachInitialMap(Map* map) {
+ map->set_bit_field2(
+ map->bit_field2() & ~(1 << Map::kAttachedToSharedFunctionInfo));
+
+ // Resume inobject slack tracking.
+ set_initial_map(map);
+ ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubGeneric),
+ *RawField(this, kConstructStubOffset));
+ set_construct_stub(Builtins::builtin(Builtins::JSConstructStubCountdown));
+  // The map survived the GC, so there may be objects referencing it.
+ set_live_objects_may_exist(true);
+}
+
+
+static void GetMinInobjectSlack(Map* map, void* data) {
+ int slack = map->unused_property_fields();
+ if (*reinterpret_cast<int*>(data) > slack) {
+ *reinterpret_cast<int*>(data) = slack;
+ }
+}
+
+
+static void ShrinkInstanceSize(Map* map, void* data) {
+ int slack = *reinterpret_cast<int*>(data);
+ map->set_inobject_properties(map->inobject_properties() - slack);
+ map->set_unused_property_fields(map->unused_property_fields() - slack);
+ map->set_instance_size(map->instance_size() - slack * kPointerSize);
+
+ // Visitor id might depend on the instance size, recalculate it.
+ map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
+}
+
+
+void SharedFunctionInfo::CompleteInobjectSlackTracking() {
+ ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
+ Map* map = Map::cast(initial_map());
+
+ set_initial_map(Heap::undefined_value());
+ ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubCountdown),
+ construct_stub());
+ set_construct_stub(Builtins::builtin(Builtins::JSConstructStubGeneric));
+
+ int slack = map->unused_property_fields();
+ map->TraverseTransitionTree(&GetMinInobjectSlack, &slack);
+ if (slack != 0) {
+ // Resize the initial map and all maps in its transition tree.
+ map->TraverseTransitionTree(&ShrinkInstanceSize, &slack);
+ // Give the correct expected_nof_properties to initial maps created later.
+ ASSERT(expected_nof_properties() >= slack);
+ set_expected_nof_properties(expected_nof_properties() - slack);
+ }
+}
+
+
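End to end, the mechanism is a countdown: the first kGenerousAllocationCount constructions use the generously sized initial map via the countdown construct stub, and when the counter reaches zero CompleteInobjectSlackTracking shrinks every map in the transition tree by the minimum slack still unused anywhere. A toy model of that final step (a sketch; the count of 8 is an assumption for illustration, and real maps shrink in bytes, not words):

    #include <algorithm>
    #include <climits>
    #include <vector>

    struct MapModel { int unused_property_fields; int instance_words; };

    struct SharedModel {
      int construction_count = 8;              // stand-in for kGenerousAllocationCount
      std::vector<MapModel*> transition_tree;  // initial map plus all its transitions

      void OnCountdownConstruct() {            // called by the countdown stub
        if (--construction_count > 0) return;
        int slack = INT_MAX;
        for (MapModel* m : transition_tree)    // cf. GetMinInobjectSlack
          slack = std::min(slack, m->unused_property_fields);
        if (slack == 0 || slack == INT_MAX) return;
        for (MapModel* m : transition_tree) {  // cf. ShrinkInstanceSize
          m->unused_property_fields -= slack;
          m->instance_words -= slack;
        }
      }
    };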
void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -5919,21 +6051,24 @@ bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
}
-bool JSObject::HasLocalElement(uint32_t index) {
+JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
!Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
+ return UNDEFINED_ELEMENT;
}
// Check for lookup interceptor
if (HasIndexedInterceptor()) {
- return HasElementWithInterceptor(this, index);
+ return HasElementWithInterceptor(this, index) ? INTERCEPTED_ELEMENT
+ : UNDEFINED_ELEMENT;
}
// Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) return true;
+ if (this->IsStringObjectWithCharacterAt(index)) {
+ return STRING_CHARACTER_ELEMENT;
+ }
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
@@ -5941,12 +6076,16 @@ bool JSObject::HasLocalElement(uint32_t index) {
static_cast<uint32_t>
(Smi::cast(JSArray::cast(this)->length())->value()) :
static_cast<uint32_t>(FixedArray::cast(elements())->length());
- return (index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole();
+ if ((index < length) &&
+ !FixedArray::cast(elements())->get(index)->IsTheHole()) {
+ return FAST_ELEMENT;
+ }
+ break;
}
case PIXEL_ELEMENTS: {
PixelArray* pixels = PixelArray::cast(elements());
- return (index < static_cast<uint32_t>(pixels->length()));
+ if (index < static_cast<uint32_t>(pixels->length())) return FAST_ELEMENT;
+ break;
}
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
@@ -5956,18 +6095,22 @@ bool JSObject::HasLocalElement(uint32_t index) {
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
ExternalArray* array = ExternalArray::cast(elements());
- return (index < static_cast<uint32_t>(array->length()));
+ if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT;
+ break;
}
case DICTIONARY_ELEMENTS: {
- return element_dictionary()->FindEntry(index)
- != NumberDictionary::kNotFound;
+ if (element_dictionary()->FindEntry(index) !=
+ NumberDictionary::kNotFound) {
+ return DICTIONARY_ELEMENT;
+ }
+ break;
}
default:
UNREACHABLE();
break;
}
- UNREACHABLE();
- return Heap::null_value();
+
+ return UNDEFINED_ELEMENT;
}
@@ -8710,11 +8853,11 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
// No free slot - extend break point info array.
Handle<FixedArray> old_break_points =
Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
- debug_info->set_break_points(*Factory::NewFixedArray(
- old_break_points->length() +
- Debug::kEstimatedNofBreakPointsInFunction));
Handle<FixedArray> new_break_points =
- Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
+ Factory::NewFixedArray(old_break_points->length() +
+ Debug::kEstimatedNofBreakPointsInFunction);
+
+ debug_info->set_break_points(*new_break_points);
for (int i = 0; i < old_break_points->length(); i++) {
new_break_points->set(i, old_break_points->get(i));
}
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 7f6538cf9..7f301b5c0 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -200,6 +200,14 @@ enum PropertyNormalizationMode {
};
+// NormalizedMapSharingMode is used to specify whether a map may be shared
+// by different objects with normalized properties.
+enum NormalizedMapSharingMode {
+ UNIQUE_NORMALIZED_MAP,
+ SHARED_NORMALIZED_MAP
+};
+
+
// Instance size sentinel for objects of variable size.
static const int kVariableSizeSentinel = 0;
@@ -1417,7 +1425,26 @@ class JSObject: public HeapObject {
// Tells whether the index'th element is present.
inline bool HasElement(uint32_t index);
bool HasElementWithReceiver(JSObject* receiver, uint32_t index);
- bool HasLocalElement(uint32_t index);
+
+ // Tells whether the index'th element is present and how it is stored.
+ enum LocalElementType {
+ // There is no element with given index.
+ UNDEFINED_ELEMENT,
+
+ // Element with given index is handled by interceptor.
+ INTERCEPTED_ELEMENT,
+
+ // Element with given index is character in string.
+ STRING_CHARACTER_ELEMENT,
+
+ // Element with given index is stored in fast backing store.
+ FAST_ELEMENT,
+
+ // Element with given index is stored in slow backing store.
+ DICTIONARY_ELEMENT
+ };
+
+ LocalElementType HasLocalElement(uint32_t index);
bool HasElementWithInterceptor(JSObject* receiver, uint32_t index);
bool HasElementPostInterceptor(JSObject* receiver, uint32_t index);
@@ -1576,7 +1603,7 @@ class JSObject: public HeapObject {
// initialized by set_properties
// Note: this call does not update write barrier, it is caller's
  // responsibility to ensure that *v* can be collected without WB here.
- inline void InitializeBody(int object_size);
+ inline void InitializeBody(int object_size, Object* value);
// Check whether this object references another object
bool ReferencesObject(Object* obj);
@@ -1892,6 +1919,11 @@ class DescriptorArray: public FixedArray {
MUST_USE_RESULT Object* RemoveTransitions();
// Sort the instance descriptors by the hash codes of their keys.
+ // Does not check for duplicates.
+ void SortUnchecked();
+
+ // Sort the instance descriptors by the hash codes of their keys.
+ // Checks the result for duplicates.
void Sort();
// Search the instance descriptors for given name.
@@ -2485,12 +2517,8 @@ class NormalizedMapCache: public FixedArray {
public:
static const int kEntries = 64;
- static bool IsCacheable(JSObject* object);
-
Object* Get(JSObject* object, PropertyNormalizationMode mode);
- bool Contains(Map* map);
-
void Clear();
// Casting
@@ -2985,11 +3013,6 @@ class Code: public HeapObject {
void CodePrint();
void CodeVerify();
#endif
- // Code entry points are aligned to 32 bytes.
- static const int kCodeAlignmentBits = 5;
- static const int kCodeAlignment = 1 << kCodeAlignmentBits;
- static const int kCodeAlignmentMask = kCodeAlignment - 1;
-
// Layout description.
static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
@@ -2998,8 +3021,7 @@ class Code: public HeapObject {
// Add padding to align the instruction start, which follows right after
// the Code object header.
static const int kHeaderSize =
- (kKindSpecificFlagsOffset + kIntSize + kCodeAlignmentMask) &
- ~kCodeAlignmentMask;
+ CODE_POINTER_ALIGN(kKindSpecificFlagsOffset + kIntSize);
// Byte offsets within kKindSpecificFlagsOffset.
static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset + 1;
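
The macro replaces hand-written round-up arithmetic; assuming CODE_POINTER_ALIGN expands to the usual (value + mask) & ~mask computation, the two forms are equivalent:

// Round-up-to-alignment, spelled out for kCodeAlignment == 32 (mask 0x1F).
// Assumes CODE_POINTER_ALIGN(value) performs this same computation.
static inline int CodePointerAlign(int value) {
  static const int kAlignment = 32;
  static const int kMask = kAlignment - 1;
  return (value + kMask) & ~kMask;  // e.g. 33 -> 64, 32 -> 32, 1 -> 32
}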
@@ -3146,6 +3168,19 @@ class Map: public HeapObject {
return ((1 << kHasFastElements) & bit_field2()) != 0;
}
+ // Tells whether the map is attached to SharedFunctionInfo
+ // (for inobject slack tracking).
+ inline void set_attached_to_shared_function_info(bool value);
+
+ inline bool attached_to_shared_function_info();
+
+ // Tells whether the map is shared between objects that may have different
+ // behavior. If true, the map should never be modified; instead, a clone
+ // should be created and modified.
+ inline void set_is_shared(bool value);
+
+ inline bool is_shared();
+
// Tells whether the instance needs security checks when accessing its
// properties.
inline void set_is_access_check_needed(bool access_check_needed);
@@ -3157,6 +3192,8 @@ class Map: public HeapObject {
// [constructor]: points back to the function responsible for this map.
DECL_ACCESSORS(constructor, Object)
+ inline JSFunction* unchecked_constructor();
+
// [instance descriptors]: describes the object.
DECL_ACCESSORS(instance_descriptors, DescriptorArray)
@@ -3165,7 +3202,8 @@ class Map: public HeapObject {
MUST_USE_RESULT Object* CopyDropDescriptors();
- MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode);
+ MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode,
+ NormalizedMapSharingMode sharing);
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
@@ -3229,12 +3267,16 @@ class Map: public HeapObject {
#ifdef DEBUG
void MapPrint();
void MapVerify();
- void NormalizedMapVerify();
+ void SharedMapVerify();
#endif
inline int visitor_id();
inline void set_visitor_id(int visitor_id);
+ typedef void (*TraverseCallback)(Map* map, void* data);
+
+ void TraverseTransitionTree(TraverseCallback callback, void* data);
+
static const int kMaxPreAllocatedPropertyFields = 255;
// Layout description.
@@ -3288,6 +3330,8 @@ class Map: public HeapObject {
static const int kFunctionWithPrototype = 1;
static const int kHasFastElements = 2;
static const int kStringWrapperSafeForDefaultValueOf = 3;
+ static const int kAttachedToSharedFunctionInfo = 4;
+ static const int kIsShared = 5;
// Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2;
@@ -3442,6 +3486,100 @@ class SharedFunctionInfo: public HeapObject {
inline int expected_nof_properties();
inline void set_expected_nof_properties(int value);
+ // Inobject slack tracking is the way to reclaim unused inobject space.
+ //
+ // The instance size is initially determined by adding some slack to
+ // expected_nof_properties (to allow for a few extra properties added
+ // after the constructor). There is no guarantee that the extra space
+ // will not be wasted.
+ //
+ // Here is the algorithm to reclaim the unused inobject space:
+ // - Detect the first constructor call for this SharedFunctionInfo.
+ // When it happens, enter the "in progress" state: remember the
+ // constructor's initial_map and install a special construct stub that
+ // counts constructor calls.
+ // - While the tracking is in progress, create objects filled with
+ // one_pointer_filler_map instead of undefined_value. This way they can be
+ // resized quickly and safely.
+ // - Once enough (kGenerousAllocationCount) objects have been created,
+ // compute the 'slack' (traverse the map transition tree starting from the
+ // initial_map and find the lowest value of unused_property_fields).
+ // - Traverse the transition tree again and decrease the instance size
+ // of every map. Existing objects will resize automatically (they are
+ // filled with one_pointer_filler_map). All further allocations will
+ // use the adjusted instance size.
+ // - Decrease expected_nof_properties so that allocations made from
+ // another context will use the adjusted instance size too.
+ // - Exit "in progress" state by clearing the reference to the initial_map
+ // and setting the regular construct stub (generic or inline).
+ //
+ // The above is the main event sequence. Some special cases are possible
+ // while the tracking is in progress:
+ //
+ // - GC occurs.
+ // Check if the initial_map is referenced by any live objects (except this
+ // SharedFunctionInfo). If it is, continue tracking as usual.
+ // If it is not, clear the reference and reset the tracking state. The
+ // tracking will be initiated again on the next constructor call.
+ //
+ // - The constructor is called from another context.
+ // Immediately complete the tracking, perform all the necessary changes
+ // to maps. This is necessary because there is no efficient way to track
+ // multiple initial_maps.
+ // Proceed to create an object in the current context (with the adjusted
+ // size).
+ //
+ // - A different constructor function sharing the same SharedFunctionInfo is
+ // called in the same context. This could be another closure in the same
+ // context, or the first function could have been disposed.
+ // This is handled the same way as the previous case.
+ //
+ // Important: inobject slack tracking is not attempted during the snapshot
+ // creation.
+
+ static const int kGenerousAllocationCount = 16;
+
+ // [construction_count]: Counter for constructor calls made during
+ // the tracking phase.
+ inline int construction_count();
+ inline void set_construction_count(int value);
+
+ // [initial_map]: initial map of the first function called as a constructor.
+ // Saved for the duration of the tracking phase.
+ // This is a weak link (GC resets it to undefined_value if no other live
+ // object references this map).
+ DECL_ACCESSORS(initial_map, Object)
+
+ // True if the initial_map is not undefined and the countdown stub is
+ // installed.
+ inline bool IsInobjectSlackTrackingInProgress();
+
+ // Starts the tracking.
+ // Stores the initial map and installs the countdown stub.
+ // IsInobjectSlackTrackingInProgress is normally true after this call,
+ // except when tracking has not been started (e.g. the map has no unused
+ // properties or the snapshot is being built).
+ void StartInobjectSlackTracking(Map* map);
+
+ // Completes the tracking.
+ // IsInobjectSlackTrackingInProgress is false after this call.
+ void CompleteInobjectSlackTracking();
+
+ // Clears the initial_map before the GC marking phase to ensure the reference
+ // is weak. IsInobjectSlackTrackingInProgress is false after this call.
+ void DetachInitialMap();
+
+ // Restores the link to the initial map after the GC marking phase.
+ // IsInobjectSlackTrackingInProgress is true after this call.
+ void AttachInitialMap(Map* map);
+
+ // False if there are definitely no live objects created from this function.
+ // True if live objects _may_ exist (existence not guaranteed).
+ // May go back from true to false after GC.
+ inline bool live_objects_may_exist();
+
+ inline void set_live_objects_may_exist(bool value);
+
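
A sketch of the two traversal passes described in the comment above, driven by the TraverseTransitionTree hook added to Map in this patch (the pass functions themselves are hypothetical; the real work happens in CompleteInobjectSlackTracking):

// Pass 1: find the smallest number of unused inobject property fields
// anywhere in the transition tree rooted at the initial map.
static void FindMinimumSlack(Map* map, void* data) {
  int* min_slack = reinterpret_cast<int*>(data);
  if (map->unused_property_fields() < *min_slack) {
    *min_slack = map->unused_property_fields();
  }
}

// Pass 2: shrink every map by the computed slack. Existing objects were
// filled with one_pointer_filler_map, so they resize safely in place.
static void ShrinkInstanceSize(Map* map, void* data) {
  int slack = *reinterpret_cast<int*>(data);
  map->set_unused_property_fields(map->unused_property_fields() - slack);
  map->set_instance_size(map->instance_size() - slack * kPointerSize);
}

Both passes would run as initial_map->TraverseTransitionTree(pass, &slack).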
// [instance class name]: class name for instances.
DECL_ACCESSORS(instance_class_name, Object)
@@ -3542,6 +3680,10 @@ class SharedFunctionInfo: public HeapObject {
// prototype.
bool CanGenerateInlineConstructor(Object* prototype);
+ // Prevents further attempts to generate inline constructors.
+ // To be called if generation failed for any reason.
+ void ForbidInlineConstructor();
+
// For functions which only contains this property assignments this provides
// access to the names for the properties assigned.
DECL_ACCESSORS(this_property_assignments, Object)
@@ -3589,8 +3731,10 @@ class SharedFunctionInfo: public HeapObject {
static const int kScriptOffset = kFunctionDataOffset + kPointerSize;
static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
- static const int kThisPropertyAssignmentsOffset =
+ static const int kInitialMapOffset =
kInferredNameOffset + kPointerSize;
+ static const int kThisPropertyAssignmentsOffset =
+ kInitialMapOffset + kPointerSize;
#if V8_HOST_ARCH_32_BIT
// Smi fields.
static const int kLengthOffset =
@@ -3614,7 +3758,7 @@ class SharedFunctionInfo: public HeapObject {
static const int kSize = kThisPropertyAssignmentsCountOffset + kPointerSize;
#else
// The only reason to use smi fields instead of int fields
- // is to allow interation without maps decoding during
+ // is to allow iteration without maps decoding during
// garbage collections.
// To avoid wasting space on 64-bit architectures we use
// the following trick: we group integer fields into pairs
@@ -3649,6 +3793,18 @@ class SharedFunctionInfo: public HeapObject {
static const int kSize = kThisPropertyAssignmentsCountOffset + kIntSize;
#endif
+
+ // The construction counter for inobject slack tracking is stored in the
+ // most significant byte of compiler_hints which is otherwise unused.
+ // Its offset depends on the endian-ness of the architecture.
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ static const int kConstructionCountOffset = kCompilerHintsOffset + 3;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+ static const int kConstructionCountOffset = kCompilerHintsOffset + 0;
+#else
+#error Unknown byte ordering
+#endif
+
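
Why the offset depends on byte order: the counter lives in the most significant byte of the 32-bit compiler_hints word, and that byte sits at the highest byte address on little-endian hosts. A standalone sketch:

#include <stdint.h>
#include <string.h>

// Returns the in-memory offset of the most significant byte of a 32-bit
// word: 3 on little-endian hosts, 0 on big-endian hosts, matching the
// kConstructionCountOffset selection above.
static int MostSignificantByteOffset() {
  uint32_t word = 0xFF000000u;  // Only the most significant byte is set.
  unsigned char bytes[4];
  memcpy(bytes, &word, sizeof(word));
  for (int i = 0; i < 4; i++) {
    if (bytes[i] != 0) return i;
  }
  return -1;  // Unreachable for this word.
}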
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
typedef FixedBodyDescriptor<kNameOffset,
@@ -3668,7 +3824,8 @@ class SharedFunctionInfo: public HeapObject {
static const int kHasOnlySimpleThisPropertyAssignments = 0;
static const int kTryFullCodegen = 1;
static const int kAllowLazyCompilation = 2;
- static const int kCodeAgeShift = 3;
+ static const int kLiveObjectsMayExist = 3;
+ static const int kCodeAgeShift = 4;
static const int kCodeAgeMask = 7;
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 856c47406..a386848d3 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -115,11 +115,7 @@ class Parser {
// Returns NULL if parsing failed.
FunctionLiteral* ParseProgram(Handle<String> source,
bool in_global_context);
- FunctionLiteral* ParseLazy(Handle<String> source,
- Handle<String> name,
- int start_position,
- int end_position,
- bool is_expression);
+ FunctionLiteral* ParseLazy(Handle<SharedFunctionInfo> info);
FunctionLiteral* ParseJson(Handle<String> source);
// The minimum number of contiguous assignments that will
@@ -156,12 +152,12 @@ class Parser {
ScriptDataImpl* pre_data_;
FuncNameInferrer* fni_;
- bool inside_with() const { return with_nesting_level_ > 0; }
- ParserFactory* factory() const { return factory_; }
+ bool inside_with() const { return with_nesting_level_ > 0; }
+ ParserFactory* factory() const { return factory_; }
ParserLog* log() const { return log_; }
Scanner& scanner() { return scanner_; }
- Mode mode() const { return mode_; }
- ScriptDataImpl* pre_data() const { return pre_data_; }
+ Mode mode() const { return mode_; }
+ ScriptDataImpl* pre_data() const { return pre_data_; }
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
@@ -877,12 +873,30 @@ class ParserLog BASE_EMBEDDED {
virtual int function_position() { return 0; }
virtual int symbol_position() { return 0; }
virtual int symbol_ids() { return 0; }
+ virtual void PauseRecording() {}
+ virtual void ResumeRecording() {}
virtual Vector<unsigned> ExtractData() {
return Vector<unsigned>();
};
};
+
+class ConditionalLogPauseScope {
+ public:
+ ConditionalLogPauseScope(bool pause, ParserLog* log)
+ : log_(log), pause_(pause) {
+ if (pause) log->PauseRecording();
+ }
+ ~ConditionalLogPauseScope() {
+ if (pause_) log_->ResumeRecording();
+ }
+ private:
+ ParserLog* log_;
+ bool pause_;
+};
+
+
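
The scope pairs PauseRecording/ResumeRecording through RAII; its intended use, taken from the ParseFunctionLiteral change later in this patch, is to suppress duplicate log entries while re-parsing a function the preparser already recorded:

{
  // Pause recording while parsing the body of a lazily compiled function;
  // recording resumes automatically when the scope is left.
  ConditionalLogPauseScope pause_if(is_lazily_compiled, log());
  ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
}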
class AstBuildingParserFactory : public ParserFactory {
public:
explicit AstBuildingParserFactory(int expected_symbols)
@@ -970,15 +984,31 @@ class PartialParserRecorder: public ParserLog {
return data;
}
+ virtual void PauseRecording() {
+ pause_count_++;
+ is_recording_ = false;
+ }
+
+ virtual void ResumeRecording() {
+ ASSERT(pause_count_ > 0);
+ if (--pause_count_ == 0) is_recording_ = !has_error();
+ }
+
protected:
bool has_error() {
return static_cast<bool>(preamble_[ScriptDataImpl::kHasErrorOffset]);
}
+ bool is_recording() {
+ return is_recording_;
+ }
void WriteString(Vector<const char> str);
Collector<unsigned> function_store_;
unsigned preamble_[ScriptDataImpl::kHeaderSize];
+ bool is_recording_;
+ int pause_count_;
+
#ifdef DEBUG
int prev_start;
#endif
@@ -991,6 +1021,7 @@ class CompleteParserRecorder: public PartialParserRecorder {
CompleteParserRecorder();
virtual void LogSymbol(int start, Vector<const char> literal) {
+ if (!is_recording_) return;
int hash = vector_hash(literal);
HashMap::Entry* entry = symbol_table_.Lookup(&literal, hash, true);
int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
@@ -1001,7 +1032,7 @@ class CompleteParserRecorder: public PartialParserRecorder {
Vector<Vector<const char> > symbol = symbol_entries_.AddBlock(1, literal);
entry->key = &symbol[0];
}
- symbol_store_.Add(id - 1);
+ WriteNumber(id - 1);
}
virtual Vector<unsigned> ExtractData() {
@@ -1061,13 +1092,6 @@ class CompleteParserRecorder: public PartialParserRecorder {
};
-void ScriptDataImpl::SkipFunctionEntry(int start) {
- ASSERT(function_index_ + FunctionEntry::kSize <= store_.length());
- ASSERT(static_cast<int>(store_[function_index_]) == start);
- function_index_ += FunctionEntry::kSize;
-}
-
-
FunctionEntry ScriptDataImpl::GetFunctionEntry(int start) {
// The current pre-data entry must be a FunctionEntry with the given
// start position.
@@ -1126,7 +1150,10 @@ bool ScriptDataImpl::SanityCheck() {
-PartialParserRecorder::PartialParserRecorder() : function_store_(0) {
+PartialParserRecorder::PartialParserRecorder()
+ : function_store_(0),
+ is_recording_(true),
+ pause_count_(0) {
preamble_[ScriptDataImpl::kMagicOffset] = ScriptDataImpl::kMagicNumber;
preamble_[ScriptDataImpl::kVersionOffset] = ScriptDataImpl::kCurrentVersion;
preamble_[ScriptDataImpl::kHasErrorOffset] = false;
@@ -1202,6 +1229,7 @@ void PartialParserRecorder::LogMessage(Scanner::Location loc,
for (int i = 0; i < args.length(); i++) {
WriteString(CStrVector(args[i]));
}
+ is_recording_ = false;
}
@@ -1248,7 +1276,7 @@ FunctionEntry PartialParserRecorder::LogFunction(int start) {
ASSERT(start > prev_start);
prev_start = start;
#endif
- if (has_error()) return FunctionEntry();
+ if (!is_recording_) return FunctionEntry();
FunctionEntry result(function_store_.AddBlock(FunctionEntry::kSize, 0));
result.set_start_pos(start);
return result;
@@ -1343,6 +1371,8 @@ Scope* ParserFactory::NewScope(Scope* parent, Scope::Type type,
bool inside_with) {
ASSERT(parent != NULL);
parent->type_ = type;
+ // Initialize() is hijacked by DummyScope to increment the scope depth.
+ parent->Initialize(inside_with);
return parent;
}
@@ -1415,6 +1445,7 @@ class LexicalScope BASE_EMBEDDED {
}
~LexicalScope() {
+ parser_->top_scope_->Leave();
parser_->top_scope_ = prev_scope_;
parser_->with_nesting_level_ = prev_level_;
}
@@ -1457,7 +1488,7 @@ Parser::Parser(Handle<Script> script,
ParserLog* log,
ScriptDataImpl* pre_data)
: script_(script),
- scanner_(is_pre_parsing),
+ scanner_(),
top_scope_(NULL),
with_nesting_level_(0),
temp_scope_(NULL),
@@ -1480,7 +1511,8 @@ bool Parser::PreParseProgram(Handle<String> source,
NoHandleAllocation no_handle_allocation;
scanner_.Initialize(source, stream, JAVASCRIPT);
ASSERT(target_stack_ == NULL);
- mode_ = PARSE_EAGERLY;
+ mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY;
+ if (allow_natives_syntax_ || extension_ != NULL) mode_ = PARSE_EAGERLY;
DummyScope top_scope;
LexicalScope scope(this, &top_scope);
TemporaryScope temp_scope(this);
@@ -1503,6 +1535,7 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
source->TryFlatten();
scanner_.Initialize(source, JAVASCRIPT);
ASSERT(target_stack_ == NULL);
+ if (pre_data_ != NULL) pre_data_->Initialize();
// Compute the parsing mode.
mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY;
@@ -1550,21 +1583,20 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
}
-FunctionLiteral* Parser::ParseLazy(Handle<String> source,
- Handle<String> name,
- int start_position,
- int end_position,
- bool is_expression) {
+FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info) {
CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(&Counters::parse_lazy);
+ Handle<String> source(String::cast(script_->source()));
Counters::total_parse_size.Increment(source->length());
+ Handle<String> name(String::cast(info->name()));
fni_ = new FuncNameInferrer();
fni_->PushEnclosingName(name);
// Initialize parser state.
source->TryFlatten();
- scanner_.Initialize(source, start_position, end_position, JAVASCRIPT);
+ scanner_.Initialize(source, info->start_position(), info->end_position(),
+ JAVASCRIPT);
ASSERT(target_stack_ == NULL);
mode_ = PARSE_EAGERLY;
@@ -1579,7 +1611,8 @@ FunctionLiteral* Parser::ParseLazy(Handle<String> source,
LexicalScope lexical_scope(this, scope);
TemporaryScope temp_scope(this);
- FunctionLiteralType type = is_expression ? EXPRESSION : DECLARATION;
+ FunctionLiteralType type =
+ info->is_expression() ? EXPRESSION : DECLARATION;
bool ok = true;
result = ParseFunctionLiteral(name, RelocInfo::kNoPosition, type, &ok);
// Make sure the results agree.
@@ -1600,6 +1633,7 @@ FunctionLiteral* Parser::ParseLazy(Handle<String> source,
return result;
}
+
FunctionLiteral* Parser::ParseJson(Handle<String> source) {
CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
@@ -1657,7 +1691,10 @@ void Parser::ReportMessage(const char* type, Vector<const char*> args) {
Handle<String> Parser::GetSymbol(bool* ok) {
- log()->LogSymbol(scanner_.location().beg_pos, scanner_.literal());
+ if (is_pre_parsing_) {
+ log()->LogSymbol(scanner_.location().beg_pos, scanner_.literal());
+ return Handle<String>::null();
+ }
int symbol_id = -1;
if (pre_data() != NULL) {
symbol_id = pre_data()->GetSymbolIdentifier();
@@ -1970,7 +2007,7 @@ void* Parser::ParseSourceElements(ZoneListWrapper<Statement>* processor,
}
// Propagate the collected information on this property assignments.
- if (top_scope_->is_function_scope()) {
+ if (!is_pre_parsing_ && top_scope_->is_function_scope()) {
bool only_simple_this_property_assignments =
this_property_assignment_finder.only_simple_this_property_assignments()
&& top_scope_->declarations()->length() == 0;
@@ -4122,8 +4159,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
int num_parameters = 0;
// Parse function body.
- { Scope::Type type = Scope::FUNCTION_SCOPE;
- Scope* scope = factory()->NewScope(top_scope_, type, inside_with());
+ { Scope* scope =
+ factory()->NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
LexicalScope lexical_scope(this, scope);
TemporaryScope temp_scope(this);
top_scope_->SetScopeName(name);
@@ -4154,7 +4191,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
// NOTE: We create a proxy and resolve it here so that in the
// future we can change the AST to only refer to VariableProxies
// instead of Variables and Proxies, as is the case now.
- if (!function_name.is_null() && function_name->length() > 0) {
+ if (!is_pre_parsing_
+ && !function_name.is_null()
+ && function_name->length() > 0) {
Variable* fvar = top_scope_->DeclareFunctionVar(function_name);
VariableProxy* fproxy =
top_scope_->NewUnresolved(function_name, inside_with());
@@ -4188,22 +4227,18 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
}
Counters::total_preparse_skipped.Increment(end_pos - function_block_pos);
scanner_.SeekForward(end_pos);
- pre_data()->Skip(entry.predata_function_skip(),
- entry.predata_symbol_skip());
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
only_simple_this_property_assignments = false;
this_property_assignments = Factory::empty_fixed_array();
Expect(Token::RBRACE, CHECK_OK);
} else {
- if (pre_data() != NULL) {
- // Skip pre-data entry for non-lazily compiled function.
- pre_data()->SkipFunctionEntry(function_block_pos);
+ FunctionEntry entry;
+ if (is_lazily_compiled) entry = log()->LogFunction(function_block_pos);
+ {
+ ConditionalLogPauseScope pause_if(is_lazily_compiled, log());
+ ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
}
- FunctionEntry entry = log()->LogFunction(function_block_pos);
- int predata_function_position_before = log()->function_position();
- int predata_symbol_position_before = log()->symbol_position();
- ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
materialized_literal_count = temp_scope.materialized_literal_count();
expected_property_count = temp_scope.expected_property_count();
only_simple_this_property_assignments =
@@ -4213,13 +4248,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
Expect(Token::RBRACE, CHECK_OK);
end_pos = scanner_.location().end_pos;
if (entry.is_valid()) {
+ ASSERT(is_lazily_compiled);
+ ASSERT(is_pre_parsing_);
entry.set_end_pos(end_pos);
entry.set_literal_count(materialized_literal_count);
entry.set_property_count(expected_property_count);
- entry.set_predata_function_skip(
- log()->function_position() - predata_function_position_before);
- entry.set_predata_symbol_skip(
- log()->symbol_position() - predata_symbol_position_before);
}
}
@@ -5439,12 +5472,6 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
// ----------------------------------------------------------------------------
// The Parser interface.
-// MakeAST() is just a wrapper for the corresponding Parser calls
-// so we don't have to expose the entire Parser class in the .h file.
-
-static bool always_allow_natives_syntax = false;
-
-
ParserMessage::~ParserMessage() {
for (int i = 0; i < args().length(); i++)
DeleteArray(args()[i]);
@@ -5479,9 +5506,7 @@ ScriptDataImpl* PartialPreParse(Handle<String> source,
v8::Extension* extension) {
Handle<Script> no_script;
bool allow_natives_syntax =
- always_allow_natives_syntax ||
- FLAG_allow_natives_syntax ||
- Bootstrapper::IsActive();
+ FLAG_allow_natives_syntax || Bootstrapper::IsActive();
PartialPreParser parser(no_script, allow_natives_syntax, extension);
if (!parser.PreParseProgram(source, stream)) return NULL;
// Extract the accumulated data from the recorder as a single
@@ -5492,7 +5517,9 @@ ScriptDataImpl* PartialPreParse(Handle<String> source,
void ScriptDataImpl::Initialize() {
+ // Prepares state for use.
if (store_.length() >= kHeaderSize) {
+ function_index_ = kHeaderSize;
int symbol_data_offset = kHeaderSize + store_[kFunctionsSizeOffset];
if (store_.length() > symbol_data_offset) {
symbol_data_ = reinterpret_cast<byte*>(&store_[symbol_data_offset]);
@@ -5537,9 +5564,7 @@ ScriptDataImpl* PreParse(Handle<String> source,
v8::Extension* extension) {
Handle<Script> no_script;
bool allow_natives_syntax =
- always_allow_natives_syntax ||
- FLAG_allow_natives_syntax ||
- Bootstrapper::IsActive();
+ FLAG_allow_natives_syntax || Bootstrapper::IsActive();
CompletePreParser parser(no_script, allow_natives_syntax, extension);
if (!parser.PreParseProgram(source, stream)) return NULL;
// Extract the accumulated data from the recorder as a single
@@ -5571,15 +5596,15 @@ bool ParseRegExp(FlatStringReader* input,
}
+// MakeAST is just a wrapper for the corresponding Parser calls so we don't
+// have to expose the entire Parser class in the .h file.
FunctionLiteral* MakeAST(bool compile_in_global_context,
Handle<Script> script,
v8::Extension* extension,
ScriptDataImpl* pre_data,
bool is_json) {
bool allow_natives_syntax =
- always_allow_natives_syntax ||
- FLAG_allow_natives_syntax ||
- Bootstrapper::IsActive();
+ FLAG_allow_natives_syntax || Bootstrapper::IsActive();
AstBuildingParser parser(script, allow_natives_syntax, extension, pre_data);
if (pre_data != NULL && pre_data->has_error()) {
Scanner::Location loc = pre_data->MessageLocation();
@@ -5605,25 +5630,13 @@ FunctionLiteral* MakeAST(bool compile_in_global_context,
}
-FunctionLiteral* MakeLazyAST(Handle<Script> script,
- Handle<String> name,
- int start_position,
- int end_position,
- bool is_expression) {
- bool allow_natives_syntax_before = always_allow_natives_syntax;
- always_allow_natives_syntax = true;
- AstBuildingParser parser(script, true, NULL, NULL); // always allow
- always_allow_natives_syntax = allow_natives_syntax_before;
- // Parse the function by pointing to the function source in the script source.
- Handle<String> script_source(String::cast(script->source()));
- FunctionLiteral* result =
- parser.ParseLazy(script_source, name,
- start_position, end_position, is_expression);
+FunctionLiteral* MakeLazyAST(Handle<SharedFunctionInfo> info) {
+ Handle<Script> script(Script::cast(info->script()));
+ AstBuildingParser parser(script, true, NULL, NULL);
+ FunctionLiteral* result = parser.ParseLazy(info);
return result;
}
-
#undef NEW
-
} } // namespace v8::internal
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 8bab92d5a..9a8288926 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -72,19 +72,9 @@ class FunctionEntry BASE_EMBEDDED {
backing_[kPropertyCountOffset] = value;
}
- int predata_function_skip() { return backing_[kPredataFunctionSkipOffset]; }
- void set_predata_function_skip(int value) {
- backing_[kPredataFunctionSkipOffset] = value;
- }
-
- int predata_symbol_skip() { return backing_[kPredataSymbolSkipOffset]; }
- void set_predata_symbol_skip(int value) {
- backing_[kPredataSymbolSkipOffset] = value;
- }
-
bool is_valid() { return backing_.length() > 0; }
- static const int kSize = 6;
+ static const int kSize = 4;
private:
Vector<unsigned> backing_;
@@ -92,8 +82,6 @@ class FunctionEntry BASE_EMBEDDED {
static const int kEndPosOffset = 1;
static const int kLiteralCountOffset = 2;
static const int kPropertyCountOffset = 3;
- static const int kPredataFunctionSkipOffset = 4;
- static const int kPredataSymbolSkipOffset = 5;
};
@@ -101,10 +89,7 @@ class ScriptDataImpl : public ScriptData {
public:
explicit ScriptDataImpl(Vector<unsigned> store)
: store_(store),
- function_index_(kHeaderSize),
- owns_store_(true) {
- Initialize();
- }
+ owns_store_(true) { }
// Create an empty ScriptDataImpl that is guaranteed to not satisfy
// a SanityCheck.
@@ -120,7 +105,6 @@ class ScriptDataImpl : public ScriptData {
FunctionEntry GetFunctionEntry(int start);
int GetSymbolIdentifier();
- void SkipFunctionEntry(int start);
bool SanityCheck();
Scanner::Location MessageLocation();
@@ -136,28 +120,8 @@ class ScriptDataImpl : public ScriptData {
unsigned magic() { return store_[kMagicOffset]; }
unsigned version() { return store_[kVersionOffset]; }
- // Skip forward in the preparser data by the given number
- // of unsigned ints of function entries and the given number of bytes of
- // symbol id encoding.
- void Skip(int function_entries, int symbol_entries) {
- ASSERT(function_entries >= 0);
- ASSERT(function_entries
- <= (static_cast<int>(store_[kFunctionsSizeOffset])
- - (function_index_ - kHeaderSize)));
- ASSERT(symbol_entries >= 0);
- ASSERT(symbol_entries <= symbol_data_end_ - symbol_data_);
-
- unsigned max_function_skip = store_[kFunctionsSizeOffset] -
- static_cast<unsigned>(function_index_ - kHeaderSize);
- function_index_ +=
- Min(static_cast<unsigned>(function_entries), max_function_skip);
- symbol_data_ +=
- Min(static_cast<unsigned>(symbol_entries),
- static_cast<unsigned>(symbol_data_end_ - symbol_data_));
- }
-
static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 3;
+ static const unsigned kCurrentVersion = 4;
static const int kMagicOffset = 0;
static const int kVersionOffset = 1;
@@ -189,11 +153,10 @@ class ScriptDataImpl : public ScriptData {
ScriptDataImpl(const char* backing_store, int length)
: store_(reinterpret_cast<unsigned*>(const_cast<char*>(backing_store)),
- length / sizeof(unsigned)),
- function_index_(kHeaderSize),
+ length / static_cast<int>(sizeof(unsigned))),
owns_store_(false) {
- ASSERT_EQ(0, reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned));
- Initialize();
+ ASSERT_EQ(0, static_cast<int>(
+ reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned)));
}
// Read strings written by ParserRecorder::WriteString.
@@ -229,20 +192,8 @@ bool ParseRegExp(FlatStringReader* input,
RegExpCompileData* result);
-// Support for doing lazy compilation. The script is the script containing full
-// source of the script where the function is declared. The start_position and
-// end_position specifies the part of the script source which has the source
-// for the function declaration in the form:
-//
-// (<formal parameters>) { <function body> }
-//
-// without any function keyword or name.
-//
-FunctionLiteral* MakeLazyAST(Handle<Script> script,
- Handle<String> name,
- int start_position,
- int end_position,
- bool is_expression);
+// Support for doing lazy compilation.
+FunctionLiteral* MakeLazyAST(Handle<SharedFunctionInfo> info);
// Support for handling complex values (array and object literals) that
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index cef825da8..cdfa9e2d7 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -46,8 +46,7 @@ const char* StringsStorage::GetFunctionName(const char* name) {
CodeEntry::CodeEntry(int security_token_id)
- : call_uid_(0),
- tag_(Logger::FUNCTION_TAG),
+ : tag_(Logger::FUNCTION_TAG),
name_prefix_(kEmptyNamePrefix),
name_(""),
resource_name_(""),
@@ -62,8 +61,7 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
const char* resource_name,
int line_number,
int security_token_id)
- : call_uid_(next_call_uid_++),
- tag_(tag),
+ : tag_(tag),
name_prefix_(name_prefix),
name_(name),
resource_name_(resource_name),
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index f8fa23de4..525dea2fb 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -121,11 +121,9 @@ const char* StringsStorage::GetName(String* name) {
const char* CodeEntry::kEmptyNamePrefix = "";
-unsigned CodeEntry::next_call_uid_ = 1;
void CodeEntry::CopyData(const CodeEntry& source) {
- call_uid_ = source.call_uid_;
tag_ = source.tag_;
name_prefix_ = source.name_prefix_;
name_ = source.name_;
@@ -134,6 +132,29 @@ void CodeEntry::CopyData(const CodeEntry& source) {
}
+uint32_t CodeEntry::GetCallUid() const {
+ uint32_t hash = ComputeIntegerHash(tag_);
+ hash ^= ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)));
+ hash ^= ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
+ hash ^= ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
+ hash ^= ComputeIntegerHash(line_number_);
+ return hash;
+}
+
+
+bool CodeEntry::IsSameAs(CodeEntry* entry) const {
+ return this == entry
+ || (tag_ == entry->tag_
+ && name_prefix_ == entry->name_prefix_
+ && name_ == entry->name_
+ && resource_name_ == entry->resource_name_
+ && line_number_ == entry->line_number_);
+}
+
+
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
HashMap::Entry* map_entry =
children_.Lookup(entry, CodeEntryHash(entry), false);
@@ -424,9 +445,10 @@ void CodeMap::AddAlias(Address start, CodeEntry* entry, Address code_start) {
CodeTree::Locator locator;
if (tree_.Find(code_start, &locator)) {
const CodeEntryInfo& code_info = locator.value();
- entry->CopyData(*code_info.entry);
- tree_.Insert(start, &locator);
- locator.set_value(CodeEntryInfo(entry, code_info.size));
+ if (tree_.Insert(start, &locator)) {
+ entry->CopyData(*code_info.entry);
+ locator.set_value(CodeEntryInfo(entry, code_info.size));
+ }
}
}
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index c2bc4ce28..1e949a2cf 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -100,17 +100,17 @@ class CodeEntry {
INLINE(const char* name() const) { return name_; }
INLINE(const char* resource_name() const) { return resource_name_; }
INLINE(int line_number() const) { return line_number_; }
- INLINE(unsigned call_uid() const) { return call_uid_; }
INLINE(int security_token_id() const) { return security_token_id_; }
INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
void CopyData(const CodeEntry& source);
+ uint32_t GetCallUid() const;
+ bool IsSameAs(CodeEntry* entry) const;
static const char* kEmptyNamePrefix;
private:
- unsigned call_uid_;
Logger::LogEventsAndTags tag_;
const char* name_prefix_;
const char* name_;
@@ -118,8 +118,6 @@ class CodeEntry {
int line_number_;
int security_token_id_;
- static unsigned next_call_uid_;
-
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
@@ -147,11 +145,12 @@ class ProfileNode {
private:
INLINE(static bool CodeEntriesMatch(void* entry1, void* entry2)) {
- return entry1 == entry2;
+ return reinterpret_cast<CodeEntry*>(entry1)->IsSameAs(
+ reinterpret_cast<CodeEntry*>(entry2));
}
INLINE(static uint32_t CodeEntryHash(CodeEntry* entry)) {
- return static_cast<int32_t>(reinterpret_cast<intptr_t>(entry));
+ return entry->GetCallUid();
}
ProfileTree* tree_;
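
With pointer identity gone, the hash and the equality predicate must agree: GetCallUid and IsSameAs consume the same five fields (tag, name prefix, name, resource name, line number), so equal entries always hash to the same uid. A sketch of the invariant the HashMap relies on (entry_a and entry_b are hypothetical):

// a->IsSameAs(b) implies a->GetCallUid() == b->GetCallUid(), since both
// are derived from the same five CodeEntry fields.
ASSERT(!entry_a->IsSameAs(entry_b) ||
       entry_a->GetCallUid() == entry_b->GetCallUid());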
@@ -746,7 +745,8 @@ class HeapObjectsMap {
}
static uint32_t AddressHash(Address addr) {
- return static_cast<int32_t>(reinterpret_cast<intptr_t>(addr));
+ return ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)));
}
bool initial_fill_mode_;
@@ -889,7 +889,8 @@ class HeapEntriesMap {
};
uint32_t Hash(HeapObject* object) {
- return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
+ return ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(object)));
}
static bool HeapObjectsMatch(void* key1, void* key2) { return key1 == key2; }
@@ -996,7 +997,8 @@ class HeapSnapshotJSONSerializer {
}
INLINE(static uint32_t ObjectHash(const void* key)) {
- return static_cast<int32_t>(reinterpret_cast<intptr_t>(key));
+ return ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)));
}
void EnumerateNodes();
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index c08602eb1..41c674b2a 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -47,8 +47,15 @@ RegExpMacroAssemblerTracer::~RegExpMacroAssemblerTracer() {
}
+// This is used for printing out debugging information. It makes an
+// integer from the address of a label so it can be identified in traces.
+static int LabelToInt(Label* label) {
+ return static_cast<int>(reinterpret_cast<intptr_t>(label));
+}
+
+
void RegExpMacroAssemblerTracer::Bind(Label* label) {
- PrintF("label[%08x]: (Bind)\n", label, label);
+ PrintF("label[%08x]: (Bind)\n", LabelToInt(label));
assembler_->Bind(label);
}
@@ -60,7 +67,7 @@ void RegExpMacroAssemblerTracer::AdvanceCurrentPosition(int by) {
void RegExpMacroAssemblerTracer::CheckGreedyLoop(Label* label) {
- PrintF(" CheckGreedyLoop(label[%08x]);\n\n", label);
+ PrintF(" CheckGreedyLoop(label[%08x]);\n\n", LabelToInt(label));
assembler_->CheckGreedyLoop(label);
}
@@ -84,14 +91,13 @@ void RegExpMacroAssemblerTracer::Backtrack() {
void RegExpMacroAssemblerTracer::GoTo(Label* label) {
- PrintF(" GoTo(label[%08x]);\n\n", label);
+ PrintF(" GoTo(label[%08x]);\n\n", LabelToInt(label));
assembler_->GoTo(label);
}
void RegExpMacroAssemblerTracer::PushBacktrack(Label* label) {
- PrintF(" PushBacktrack(label[%08x]);\n",
- label);
+ PrintF(" PushBacktrack(label[%08x]);\n", LabelToInt(label));
assembler_->PushBacktrack(label);
}
@@ -176,7 +182,7 @@ void RegExpMacroAssemblerTracer::LoadCurrentCharacter(int cp_offset,
const char* check_msg = check_bounds ? "" : " (unchecked)";
PrintF(" LoadCurrentCharacter(cp_offset=%d, label[%08x]%s (%d chars));\n",
cp_offset,
- on_end_of_input,
+ LabelToInt(on_end_of_input),
check_msg,
characters);
assembler_->LoadCurrentCharacter(cp_offset,
@@ -187,39 +193,43 @@ void RegExpMacroAssemblerTracer::LoadCurrentCharacter(int cp_offset,
void RegExpMacroAssemblerTracer::CheckCharacterLT(uc16 limit, Label* on_less) {
- PrintF(" CheckCharacterLT(c='u%04x', label[%08x]);\n", limit, on_less);
+ PrintF(" CheckCharacterLT(c='u%04x', label[%08x]);\n",
+ limit, LabelToInt(on_less));
assembler_->CheckCharacterLT(limit, on_less);
}
void RegExpMacroAssemblerTracer::CheckCharacterGT(uc16 limit,
Label* on_greater) {
- PrintF(" CheckCharacterGT(c='u%04x', label[%08x]);\n", limit, on_greater);
+ PrintF(" CheckCharacterGT(c='u%04x', label[%08x]);\n",
+ limit, LabelToInt(on_greater));
assembler_->CheckCharacterGT(limit, on_greater);
}
void RegExpMacroAssemblerTracer::CheckCharacter(uint32_t c, Label* on_equal) {
- PrintF(" CheckCharacter(c='u%04x', label[%08x]);\n", c, on_equal);
+ PrintF(" CheckCharacter(c='u%04x', label[%08x]);\n",
+ c, LabelToInt(on_equal));
assembler_->CheckCharacter(c, on_equal);
}
void RegExpMacroAssemblerTracer::CheckAtStart(Label* on_at_start) {
- PrintF(" CheckAtStart(label[%08x]);\n", on_at_start);
+ PrintF(" CheckAtStart(label[%08x]);\n", LabelToInt(on_at_start));
assembler_->CheckAtStart(on_at_start);
}
void RegExpMacroAssemblerTracer::CheckNotAtStart(Label* on_not_at_start) {
- PrintF(" CheckNotAtStart(label[%08x]);\n", on_not_at_start);
+ PrintF(" CheckNotAtStart(label[%08x]);\n", LabelToInt(on_not_at_start));
assembler_->CheckNotAtStart(on_not_at_start);
}
void RegExpMacroAssemblerTracer::CheckNotCharacter(uint32_t c,
Label* on_not_equal) {
- PrintF(" CheckNotCharacter(c='u%04x', label[%08x]);\n", c, on_not_equal);
+ PrintF(" CheckNotCharacter(c='u%04x', label[%08x]);\n",
+ c, LabelToInt(on_not_equal));
assembler_->CheckNotCharacter(c, on_not_equal);
}
@@ -231,7 +241,7 @@ void RegExpMacroAssemblerTracer::CheckCharacterAfterAnd(
PrintF(" CheckCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
c,
mask,
- on_equal);
+ LabelToInt(on_equal));
assembler_->CheckCharacterAfterAnd(c, mask, on_equal);
}
@@ -243,7 +253,7 @@ void RegExpMacroAssemblerTracer::CheckNotCharacterAfterAnd(
PrintF(" CheckNotCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
c,
mask,
- on_not_equal);
+ LabelToInt(on_not_equal));
assembler_->CheckNotCharacterAfterAnd(c, mask, on_not_equal);
}
@@ -258,7 +268,7 @@ void RegExpMacroAssemblerTracer::CheckNotCharacterAfterMinusAnd(
c,
minus,
mask,
- on_not_equal);
+ LabelToInt(on_not_equal));
assembler_->CheckNotCharacterAfterMinusAnd(c, minus, mask, on_not_equal);
}
@@ -266,7 +276,7 @@ void RegExpMacroAssemblerTracer::CheckNotCharacterAfterMinusAnd(
void RegExpMacroAssemblerTracer::CheckNotBackReference(int start_reg,
Label* on_no_match) {
PrintF(" CheckNotBackReference(register=%d, label[%08x]);\n", start_reg,
- on_no_match);
+ LabelToInt(on_no_match));
assembler_->CheckNotBackReference(start_reg, on_no_match);
}
@@ -275,7 +285,7 @@ void RegExpMacroAssemblerTracer::CheckNotBackReferenceIgnoreCase(
int start_reg,
Label* on_no_match) {
PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, label[%08x]);\n",
- start_reg, on_no_match);
+ start_reg, LabelToInt(on_no_match));
assembler_->CheckNotBackReferenceIgnoreCase(start_reg, on_no_match);
}
@@ -286,7 +296,7 @@ void RegExpMacroAssemblerTracer::CheckNotRegistersEqual(int reg1,
PrintF(" CheckNotRegistersEqual(reg1=%d, reg2=%d, label[%08x]);\n",
reg1,
reg2,
- on_not_equal);
+ LabelToInt(on_not_equal));
assembler_->CheckNotRegistersEqual(reg1, reg2, on_not_equal);
}
@@ -300,7 +310,8 @@ void RegExpMacroAssemblerTracer::CheckCharacters(Vector<const uc16> str,
for (int i = 0; i < str.length(); i++) {
PrintF("u%04x", str[i]);
}
- PrintF("\", cp_offset=%d, label[%08x])\n", cp_offset, on_failure);
+ PrintF("\", cp_offset=%d, label[%08x])\n",
+ cp_offset, LabelToInt(on_failure));
assembler_->CheckCharacters(str, cp_offset, on_failure, check_end_of_string);
}
@@ -312,7 +323,7 @@ bool RegExpMacroAssemblerTracer::CheckSpecialCharacterClass(
on_no_match);
PrintF(" CheckSpecialCharacterClass(type='%c', label[%08x]): %s;\n",
type,
- on_no_match,
+ LabelToInt(on_no_match),
supported ? "true" : "false");
return supported;
}
@@ -321,7 +332,7 @@ bool RegExpMacroAssemblerTracer::CheckSpecialCharacterClass(
void RegExpMacroAssemblerTracer::IfRegisterLT(int register_index,
int comparand, Label* if_lt) {
PrintF(" IfRegisterLT(register=%d, number=%d, label[%08x]);\n",
- register_index, comparand, if_lt);
+ register_index, comparand, LabelToInt(if_lt));
assembler_->IfRegisterLT(register_index, comparand, if_lt);
}
@@ -329,7 +340,7 @@ void RegExpMacroAssemblerTracer::IfRegisterLT(int register_index,
void RegExpMacroAssemblerTracer::IfRegisterEqPos(int register_index,
Label* if_eq) {
PrintF(" IfRegisterEqPos(register=%d, label[%08x]);\n",
- register_index, if_eq);
+ register_index, LabelToInt(if_eq));
assembler_->IfRegisterEqPos(register_index, if_eq);
}
@@ -337,7 +348,7 @@ void RegExpMacroAssemblerTracer::IfRegisterEqPos(int register_index,
void RegExpMacroAssemblerTracer::IfRegisterGE(int register_index,
int comparand, Label* if_ge) {
PrintF(" IfRegisterGE(register=%d, number=%d, label[%08x]);\n",
- register_index, comparand, if_ge);
+ register_index, comparand, LabelToInt(if_ge));
assembler_->IfRegisterGE(register_index, comparand, if_ge);
}
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index 566a96c34..faa525d62 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -186,6 +186,10 @@ function RegExpExec(string) {
%_IsRegExpEquivalent(cache.regExp, this) &&
%_ObjectEquals(cache.subject, string)) {
if (cache.answerSaved) {
+ // If this regexp is not global, cache.lastIndex is zero, so we only get
+ // here if this.lastIndex is zero, and the resulting this.lastIndex
+ // must be zero too, so no change is necessary.
+ if (this.global) this.lastIndex = lastMatchInfo[CAPTURE1];
return %_RegExpCloneResult(cache.answer);
} else {
saveAnswer = true;
@@ -282,6 +286,10 @@ function RegExpTest(string) {
%_IsRegExpEquivalent(cache.regExp, this) &&
%_ObjectEquals(cache.subject, string) &&
%_ObjectEquals(cache.lastIndex, lastIndex)) {
+ // If this regexp is not global, cache.lastIndex is zero, so we only get
+ // here if this.lastIndex is zero, and the resulting this.lastIndex
+ // must be zero too, so no change is necessary.
+ if (this.global) this.lastIndex = lastMatchInfo[CAPTURE1];
return cache.answer;
}
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index 4ddf1bf6f..f253ec533 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -525,8 +525,8 @@ void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
Variable* rvar = rvar_proxy->AsVariable();
if (lvar != NULL && rvar != NULL) {
if (lvar->mode() == Variable::VAR && rvar->mode() == Variable::VAR) {
- Slot* lslot = lvar->slot();
- Slot* rslot = rvar->slot();
+ Slot* lslot = lvar->AsSlot();
+ Slot* rslot = rvar->AsSlot();
if (lslot->type() == rslot->type() &&
(lslot->type() == Slot::PARAMETER ||
lslot->type() == Slot::LOCAL) &&
@@ -692,7 +692,7 @@ class Processor: public AstVisitor {
}
void Process(ZoneList<Statement*>* statements);
- bool result_assigned() const { return result_assigned_; }
+ bool result_assigned() const { return result_assigned_; }
private:
VariableProxy* result_;
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 627ea1217..c80f1fc34 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -638,56 +638,78 @@ static Object* Runtime_GetOwnProperty(Arguments args) {
Handle<FixedArray> elms = Factory::NewFixedArray(DESCRIPTOR_SIZE);
Handle<JSArray> desc = Factory::NewJSArrayWithElements(elms);
LookupResult result;
- CONVERT_CHECKED(JSObject, obj, args[0]);
- CONVERT_CHECKED(String, name, args[1]);
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+ CONVERT_ARG_CHECKED(String, name, 1);
// This could be an element.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- if (!obj->HasLocalElement(index)) {
- return Heap::undefined_value();
- }
+ switch (obj->HasLocalElement(index)) {
+ case JSObject::UNDEFINED_ELEMENT:
+ return Heap::undefined_value();
- // Special handling of string objects according to ECMAScript 5 15.5.5.2.
- // Note that this might be a string object with elements other than the
- // actual string value. This is covered by the subsequent cases.
- if (obj->IsStringObjectWithCharacterAt(index)) {
- JSValue* js_value = JSValue::cast(obj);
- String* str = String::cast(js_value->value());
- elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
- elms->set(VALUE_INDEX, str->SubString(index, index+1));
- elms->set(WRITABLE_INDEX, Heap::false_value());
- elms->set(ENUMERABLE_INDEX, Heap::false_value());
- elms->set(CONFIGURABLE_INDEX, Heap::false_value());
- return *desc;
- }
-
- // This can potentially be an element in the elements dictionary or
- // a fast element.
- if (obj->HasDictionaryElements()) {
- NumberDictionary* dictionary = obj->element_dictionary();
- int entry = dictionary->FindEntry(index);
- PropertyDetails details = dictionary->DetailsAt(entry);
- elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
- elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
- elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
- elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!details.IsDontEnum()));
- elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
- return *desc;
- } else {
- // Elements that are stored as array elements always has:
- // writable: true, configurable: true, enumerable: true.
- elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
- elms->set(VALUE_INDEX, obj->GetElement(index));
- elms->set(WRITABLE_INDEX, Heap::true_value());
- elms->set(ENUMERABLE_INDEX, Heap::true_value());
- elms->set(CONFIGURABLE_INDEX, Heap::true_value());
- return *desc;
+ case JSObject::STRING_CHARACTER_ELEMENT: {
+ // Special handling of string objects according to ECMAScript 5
+ // 15.5.5.2. Note that this might be a string object with elements
+ // other than the actual string value. This is covered by the
+ // subsequent cases.
+ Handle<JSValue> js_value = Handle<JSValue>::cast(obj);
+ Handle<String> str(String::cast(js_value->value()));
+ Handle<String> substr = SubString(str, index, index+1, NOT_TENURED);
+
+ elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(VALUE_INDEX, *substr);
+ elms->set(WRITABLE_INDEX, Heap::false_value());
+ elms->set(ENUMERABLE_INDEX, Heap::false_value());
+ elms->set(CONFIGURABLE_INDEX, Heap::false_value());
+ return *desc;
+ }
+
+ case JSObject::INTERCEPTED_ELEMENT:
+ case JSObject::FAST_ELEMENT: {
+ elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ Handle<Object> element = GetElement(Handle<Object>(obj), index);
+ elms->set(VALUE_INDEX, *element);
+ elms->set(WRITABLE_INDEX, Heap::true_value());
+ elms->set(ENUMERABLE_INDEX, Heap::true_value());
+ elms->set(CONFIGURABLE_INDEX, Heap::true_value());
+ return *desc;
+ }
+
+ case JSObject::DICTIONARY_ELEMENT: {
+ NumberDictionary* dictionary = obj->element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ ASSERT(entry != NumberDictionary::kNotFound);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ switch (details.type()) {
+ case CALLBACKS: {
+ // This is an accessor property with getter and/or setter.
+ FixedArray* callbacks =
+ FixedArray::cast(dictionary->ValueAt(entry));
+ elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
+ elms->set(GETTER_INDEX, callbacks->get(0));
+ elms->set(SETTER_INDEX, callbacks->get(1));
+ break;
+ }
+ case NORMAL:
+ // This is a data property.
+ elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
+ elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!details.IsDontEnum()));
+ elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
+ return *desc;
+ }
}
}
// Use recursive implementation to also traverse hidden prototypes
- GetOwnPropertyImplementation(obj, name, &result);
+ GetOwnPropertyImplementation(*obj, *name, &result);
if (!result.IsProperty()) {
return Heap::undefined_value();
@@ -698,7 +720,8 @@ static Object* Runtime_GetOwnProperty(Arguments args) {
// Property that is internally implemented as a callback or
// an API defined callback.
Object* value = obj->GetPropertyWithCallback(
- obj, structure, name, result.holder());
+ *obj, structure, *name, result.holder());
+ if (value->IsFailure()) return value;
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
elms->set(VALUE_INDEX, value);
elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
@@ -946,7 +969,7 @@ static Object* Runtime_DeclareContextSlot(Arguments args) {
Handle<String> name(String::cast(args[1]));
PropertyAttributes mode =
static_cast<PropertyAttributes>(Smi::cast(args[2])->value());
- ASSERT(mode == READ_ONLY || mode == NONE);
+ RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
Handle<Object> initial_value(args[3]);
// Declarations are always done in the function context.
@@ -2601,15 +2624,15 @@ int Runtime::StringMatch(Handle<String> sub,
if (seq_pat->IsAsciiRepresentation()) {
Vector<const char> pat_vector = seq_pat->ToAsciiVector();
if (seq_sub->IsAsciiRepresentation()) {
- return StringSearch(seq_sub->ToAsciiVector(), pat_vector, start_index);
+ return SearchString(seq_sub->ToAsciiVector(), pat_vector, start_index);
}
- return StringSearch(seq_sub->ToUC16Vector(), pat_vector, start_index);
+ return SearchString(seq_sub->ToUC16Vector(), pat_vector, start_index);
}
Vector<const uc16> pat_vector = seq_pat->ToUC16Vector();
if (seq_sub->IsAsciiRepresentation()) {
- return StringSearch(seq_sub->ToAsciiVector(), pat_vector, start_index);
+ return SearchString(seq_sub->ToAsciiVector(), pat_vector, start_index);
}
- return StringSearch(seq_sub->ToUC16Vector(), pat_vector, start_index);
+ return SearchString(seq_sub->ToUC16Vector(), pat_vector, start_index);
}
@@ -2837,7 +2860,8 @@ static Object* Runtime_StringMatch(Arguments args) {
for (int i = 0; i < matches ; i++) {
int from = offsets.at(i * 2);
int to = offsets.at(i * 2 + 1);
- elements->set(i, *Factory::NewSubString(subject, from, to));
+ Handle<String> match = Factory::NewSubString(subject, from, to);
+ elements->set(i, *match);
}
Handle<JSArray> result = Factory::NewJSArrayWithElements(elements);
result->set_length(Smi::FromInt(matches));
@@ -2865,67 +2889,39 @@ static void SetLastMatchInfoNoCaptures(Handle<String> subject,
}
-template <typename schar, typename pchar>
-static bool SearchStringMultiple(Vector<schar> subject,
- String* pattern,
- Vector<pchar> pattern_string,
+template <typename SubjectChar, typename PatternChar>
+static bool SearchStringMultiple(Vector<const SubjectChar> subject,
+ Vector<const PatternChar> pattern,
+ String* pattern_string,
FixedArrayBuilder* builder,
int* match_pos) {
int pos = *match_pos;
int subject_length = subject.length();
- int pattern_length = pattern_string.length();
+ int pattern_length = pattern.length();
int max_search_start = subject_length - pattern_length;
- bool is_ascii = (sizeof(schar) == 1);
- StringSearchStrategy strategy =
- InitializeStringSearch(pattern_string, is_ascii);
- switch (strategy) {
- case SEARCH_FAIL: break;
- case SEARCH_SHORT:
- while (pos <= max_search_start) {
- if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
- *match_pos = pos;
- return false;
- }
- // Position of end of previous match.
- int match_end = pos + pattern_length;
- int new_pos = SimpleIndexOf(subject, pattern_string, match_end);
- if (new_pos >= 0) {
- // A match.
- if (new_pos > match_end) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- new_pos);
- }
- pos = new_pos;
- builder->Add(pattern);
- } else {
- break;
- }
- }
- break;
- case SEARCH_LONG:
- while (pos <= max_search_start) {
- if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
- *match_pos = pos;
- return false;
- }
- int match_end = pos + pattern_length;
- int new_pos = ComplexIndexOf(subject, pattern_string, match_end);
- if (new_pos >= 0) {
- // A match has been found.
- if (new_pos > match_end) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- new_pos);
- }
- pos = new_pos;
- builder->Add(pattern);
- } else {
- break;
- }
+ StringSearch<PatternChar, SubjectChar> search(pattern);
+ while (pos <= max_search_start) {
+ if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
+ *match_pos = pos;
+ return false;
+ }
+ // Position of end of previous match.
+ int match_end = pos + pattern_length;
+ int new_pos = search.Search(subject, match_end);
+ if (new_pos >= 0) {
+ // A match.
+ if (new_pos > match_end) {
+ ReplacementStringBuilder::AddSubjectSlice(builder,
+ match_end,
+ new_pos);
}
+ pos = new_pos;
+ builder->Add(pattern_string);
+ } else {
break;
+ }
}
+
if (pos < max_search_start) {
ReplacementStringBuilder::AddSubjectSlice(builder,
pos + pattern_length,
@@ -2953,14 +2949,14 @@ static bool SearchStringMultiple(Handle<String> subject,
Vector<const char> subject_vector = subject->ToAsciiVector();
if (pattern->IsAsciiRepresentation()) {
if (SearchStringMultiple(subject_vector,
- *pattern,
pattern->ToAsciiVector(),
+ *pattern,
builder,
&match_pos)) break;
} else {
if (SearchStringMultiple(subject_vector,
- *pattern,
pattern->ToUC16Vector(),
+ *pattern,
builder,
&match_pos)) break;
}
@@ -2968,14 +2964,14 @@ static bool SearchStringMultiple(Handle<String> subject,
Vector<const uc16> subject_vector = subject->ToUC16Vector();
if (pattern->IsAsciiRepresentation()) {
if (SearchStringMultiple(subject_vector,
- *pattern,
pattern->ToAsciiVector(),
+ *pattern,
builder,
&match_pos)) break;
} else {
if (SearchStringMultiple(subject_vector,
- *pattern,
pattern->ToUC16Vector(),
+ *pattern,
builder,
&match_pos)) break;
}
@@ -3105,9 +3101,10 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple(
// Arguments array to replace function is match, captures, index and
// subject, i.e., 3 + capture count in total.
Handle<FixedArray> elements = Factory::NewFixedArray(3 + capture_count);
- elements->set(0, *Factory::NewSubString(subject,
- match_start,
- match_end));
+ Handle<String> match = Factory::NewSubString(subject,
+ match_start,
+ match_end);
+ elements->set(0, *match);
for (int i = 1; i <= capture_count; i++) {
int start = register_vector[i * 2];
if (start >= 0) {
@@ -4756,51 +4753,23 @@ static Object* Runtime_StringTrim(Arguments args) {
}
-// Define storage for buffers declared in header file.
-// TODO(lrn): Remove these when rewriting search code.
-int BMBuffers::bad_char_occurrence[kBMAlphabetSize];
-BMGoodSuffixBuffers BMBuffers::bmgs_buffers;
-
-
-template <typename schar, typename pchar>
-void FindStringIndices(Vector<const schar> subject,
- Vector<const pchar> pattern,
+template <typename SubjectChar, typename PatternChar>
+void FindStringIndices(Vector<const SubjectChar> subject,
+ Vector<const PatternChar> pattern,
ZoneList<int>* indices,
unsigned int limit) {
ASSERT(limit > 0);
// Collect indices of pattern in subject, and the end-of-string index.
// Stop after finding at most limit values.
- StringSearchStrategy strategy =
- InitializeStringSearch(pattern, sizeof(schar) == 1);
- switch (strategy) {
- case SEARCH_FAIL: return;
- case SEARCH_SHORT: {
- int pattern_length = pattern.length();
- int index = 0;
- while (limit > 0) {
- index = SimpleIndexOf(subject, pattern, index);
- if (index < 0) return;
- indices->Add(index);
- index += pattern_length;
- limit--;
- }
- return;
- }
- case SEARCH_LONG: {
- int pattern_length = pattern.length();
- int index = 0;
- while (limit > 0) {
- index = ComplexIndexOf(subject, pattern, index);
- if (index < 0) return;
- indices->Add(index);
- index += pattern_length;
- limit--;
- }
- return;
- }
- default:
- UNREACHABLE();
- return;
+ StringSearch<PatternChar, SubjectChar> search(pattern);
+ int pattern_length = pattern.length();
+ int index = 0;
+ while (limit > 0) {
+ index = search.Search(subject, index);
+ if (index < 0) return;
+ indices->Add(index);
+ index += pattern_length;
+ limit--;
}
}
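
Editor's note: for orientation, the rewritten loop has the same shape as this self-contained analogue built on std::string::find (illustration only; the patch itself uses StringSearch and a ZoneList):

#include <string>
#include <vector>

// Collect the start indices of non-overlapping occurrences of pattern
// in subject, stopping after at most limit matches.
static std::vector<size_t> CollectIndices(const std::string& subject,
                                          const std::string& pattern,
                                          unsigned limit) {
  std::vector<size_t> indices;
  size_t index = 0;
  while (limit > 0) {
    index = subject.find(pattern, index);
    if (index == std::string::npos) break;
    indices.push_back(index);
    index += pattern.length();  // Skip past this match.
    limit--;
  }
  return indices;
}
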
@@ -4953,12 +4922,14 @@ static Object* Runtime_StringToArray(Arguments args) {
length);
for (int i = num_copied_from_cache; i < length; ++i) {
- elements->set(i, *LookupSingleCharacterStringFromCode(chars[i]));
+ Handle<Object> str = LookupSingleCharacterStringFromCode(chars[i]);
+ elements->set(i, *str);
}
} else {
elements = Factory::NewFixedArray(length);
for (int i = 0; i < length; ++i) {
- elements->set(i, *LookupSingleCharacterStringFromCode(s->Get(i)));
+ Handle<Object> str = LookupSingleCharacterStringFromCode(s->Get(i));
+ elements->set(i, *str);
}
}
@@ -6279,7 +6250,7 @@ static Object* Runtime_NewObjectFromBound(Arguments args) {
}
-static Code* ComputeConstructStub(Handle<JSFunction> function) {
+static void TrySettingInlineConstructStub(Handle<JSFunction> function) {
Handle<Object> prototype = Factory::null_value();
if (function->has_instance_prototype()) {
prototype = Handle<Object>(function->instance_prototype());
@@ -6287,13 +6258,10 @@ static Code* ComputeConstructStub(Handle<JSFunction> function) {
if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
ConstructStubCompiler compiler;
Object* code = compiler.CompileConstructStub(function->shared());
- if (code->IsFailure()) {
- return Builtins::builtin(Builtins::JSConstructStubGeneric);
+ if (!code->IsFailure()) {
+ function->shared()->set_construct_stub(Code::cast(code));
}
- return Code::cast(code);
}
-
- return function->shared()->construct_stub();
}
@@ -6350,12 +6318,20 @@ static Object* Runtime_NewObject(Arguments args) {
Handle<SharedFunctionInfo> shared(function->shared());
EnsureCompiled(shared, CLEAR_EXCEPTION);
- bool first_allocation = !function->has_initial_map();
+ if (!function->has_initial_map() &&
+ shared->IsInobjectSlackTrackingInProgress()) {
+ // The tracking is already in progress for another function. We can only
+ // track one initial_map at a time, so we force the completion before the
+ // function is called as a constructor for the first time.
+ shared->CompleteInobjectSlackTracking();
+ TrySettingInlineConstructStub(function);
+ }
+
+ bool first_allocation = !shared->live_objects_may_exist();
Handle<JSObject> result = Factory::NewJSObject(function);
- if (first_allocation) {
- Handle<Code> stub = Handle<Code>(
- ComputeConstructStub(Handle<JSFunction>(function)));
- shared->set_construct_stub(*stub);
+ // Delay setting the stub if inobject slack tracking is in progress.
+ if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) {
+ TrySettingInlineConstructStub(function);
}
Counters::constructed_objects.Increment();
@@ -6365,6 +6341,18 @@ static Object* Runtime_NewObject(Arguments args) {
}
+static Object* Runtime_FinalizeInstanceSize(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ function->shared()->CompleteInobjectSlackTracking();
+ TrySettingInlineConstructStub(function);
+
+ return Heap::undefined_value();
+}
+
+
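Editor's note: taken together, the hunks above enforce that only one shared function info has its initial map tracked at a time, and that the inline construct stub is installed only once tracking has finished. A toy model of the "one tracker at a time" rule (assumptions only, not V8 code):

#include <cassert>

// Starting to track a new function first force-completes any tracking
// already in progress, mirroring the CompleteInobjectSlackTracking()
// call in Runtime_NewObject above.
struct SlackTracker {
  const void* tracked = nullptr;
  bool InProgressFor(const void* f) const { return tracked == f; }
  void Complete() { tracked = nullptr; }
  void Start(const void* f) {
    if (tracked != nullptr) Complete();
    tracked = f;
  }
};

int main() {
  SlackTracker t;
  int f1 = 0, f2 = 0;
  t.Start(&f1);
  t.Start(&f2);  // implicitly completes tracking of f1
  assert(!t.InProgressFor(&f1) && t.InProgressFor(&f2));
}
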
static Object* Runtime_LazyCompile(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
@@ -6386,7 +6374,7 @@ static Object* Runtime_LazyCompile(Arguments args) {
// this means that things called through constructors are never known to
// be in loops. We compile them as if they are in loops here just in case.
ASSERT(!function->is_compiled());
- if (!CompileLazyInLoop(function, Handle<Object>::null(), KEEP_EXCEPTION)) {
+ if (!CompileLazyInLoop(function, KEEP_EXCEPTION)) {
return Failure::Exception();
}
@@ -6757,7 +6745,7 @@ static void PrintObject(Object* obj) {
} else if (obj->IsFalse()) {
PrintF("<false>");
} else {
- PrintF("%p", obj);
+ PrintF("%p", reinterpret_cast<void*>(obj));
}
}
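
Editor's note: the cast added above is a correctness fix, not just warning hygiene: the "%p" conversion is only defined for void* arguments, so passing an Object* directly is undefined behavior. Standalone illustration:

#include <cstdio>

int main() {
  int x = 0;
  // "%p" is specified for void* only; cast explicitly, as the patch does.
  std::printf("%p\n", static_cast<void*>(&x));
  return 0;
}
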
@@ -7209,15 +7197,15 @@ static uint32_t IterateExternalArrayElements(Handle<JSObject> receiver,
Handle<Smi> e(Smi::FromInt(static_cast<int>(val)));
visitor->visit(j, e);
} else {
- Handle<Object> e(
- Heap::AllocateHeapNumber(static_cast<ElementType>(val)));
+ Handle<Object> e =
+ Factory::NewNumber(static_cast<ElementType>(val));
visitor->visit(j, e);
}
}
}
} else {
for (uint32_t j = 0; j < len; j++) {
- Handle<Object> e(Heap::AllocateHeapNumber(array->get(j)));
+ Handle<Object> e = Factory::NewNumber(array->get(j));
visitor->visit(j, e);
}
}
@@ -7498,14 +7486,18 @@ static Object* Runtime_ArrayConcat(Arguments args) {
// The backing storage array must have non-existing elements to
// preserve holes across concat operations.
storage = Factory::NewFixedArrayWithHoles(result_length);
- result->set_map(*Factory::GetFastElementsMap(Handle<Map>(result->map())));
+ Handle<Map> fast_map =
+ Factory::GetFastElementsMap(Handle<Map>(result->map()));
+ result->set_map(*fast_map);
} else {
// TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
uint32_t at_least_space_for = estimate_nof_elements +
(estimate_nof_elements >> 2);
storage = Handle<FixedArray>::cast(
Factory::NewNumberDictionary(at_least_space_for));
- result->set_map(*Factory::GetSlowElementsMap(Handle<Map>(result->map())));
+ Handle<Map> slow_map =
+ Factory::GetSlowElementsMap(Handle<Map>(result->map()));
+ result->set_map(*slow_map);
}
Handle<Object> len = Factory::NewNumber(static_cast<double>(result_length));
@@ -7826,7 +7818,8 @@ static Object* Runtime_DebugGetPropertyDetails(Arguments args) {
uint32_t index;
if (name->AsArrayIndex(&index)) {
Handle<FixedArray> details = Factory::NewFixedArray(2);
- details->set(0, Runtime::GetElementOrCharAt(obj, index));
+ Object* element_or_char = Runtime::GetElementOrCharAt(obj, index);
+ details->set(0, element_or_char);
details->set(1, PropertyDetails(NONE, NORMAL).AsSmi());
return *Factory::NewJSArrayWithElements(details);
}
@@ -8628,7 +8621,8 @@ static Object* Runtime_GetScopeDetails(Arguments args) {
// Fill in scope details.
details->set(kScopeDetailsTypeIndex, Smi::FromInt(it.Type()));
- details->set(kScopeDetailsObjectIndex, *it.ScopeObject());
+ Handle<JSObject> scope_object = it.ScopeObject();
+ details->set(kScopeDetailsObjectIndex, *scope_object);
return *Factory::NewJSArrayWithElements(details);
}
@@ -8673,10 +8667,10 @@ static Object* Runtime_GetCFrames(Arguments args) {
Handle<FixedArray> frames_array = Factory::NewFixedArray(frames_count);
for (int i = 0; i < frames_count; i++) {
Handle<JSObject> frame_value = Factory::NewJSObject(Top::object_function());
- frame_value->SetProperty(
- *address_str,
- *Factory::NewNumberFromInt(reinterpret_cast<int>(frames[i].address)),
- NONE);
+ Handle<Object> frame_address =
+ Factory::NewNumberFromInt(reinterpret_cast<int>(frames[i].address));
+
+ frame_value->SetProperty(*address_str, *frame_address, NONE);
// Get the stack walk text for this frame.
Handle<String> frame_text;
@@ -8944,24 +8938,39 @@ static Object* Runtime_ClearBreakPoint(Arguments args) {
}
-// Change the state of break on exceptions
-// args[0]: boolean indicating uncaught exceptions
-// args[1]: boolean indicating on/off
+// Change the state of break on exceptions.
+// args[0]: Enum value indicating whether to affect caught/uncaught exceptions.
+// args[1]: Boolean indicating on/off.
static Object* Runtime_ChangeBreakOnException(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 2);
- ASSERT(args[0]->IsNumber());
- ASSERT(args[1]->IsBoolean());
+ RUNTIME_ASSERT(args[0]->IsNumber());
+ CONVERT_BOOLEAN_CHECKED(enable, args[1]);
- // Update break point state
+ // If the number doesn't match an enum value, the ChangeBreakOnException
+ // function will default to affecting caught exceptions.
ExceptionBreakType type =
static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
- bool enable = args[1]->ToBoolean()->IsTrue();
+ // Update break point state.
Debug::ChangeBreakOnException(type, enable);
return Heap::undefined_value();
}
+// Returns the state of break on exceptions.
+// args[0]: Enum value indicating whether to check caught/uncaught exceptions.
+static Object* Runtime_IsBreakOnException(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ RUNTIME_ASSERT(args[0]->IsNumber());
+
+ ExceptionBreakType type =
+ static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
+ bool result = Debug::IsBreakOnException(type);
+ return Smi::FromInt(result);
+}
+
+
// Prepare for stepping
// args[0]: break id for checking execution state
// args[1]: step action from the enumeration StepAction
@@ -9023,10 +9032,10 @@ static Handle<Context> CopyWithContextChain(Handle<Context> context_chain,
// Recursively copy the with contexts.
Handle<Context> previous(context_chain->previous());
Handle<JSObject> extension(JSObject::cast(context_chain->extension()));
- return Factory::NewWithContext(
- CopyWithContextChain(function_context, previous),
- extension,
- context_chain->IsCatchContext());
+ Handle<Context> context = CopyWithContextChain(function_context, previous);
+ return Factory::NewWithContext(context,
+ extension,
+ context_chain->IsCatchContext());
}
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 8a3671a54..19f41441c 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -263,6 +263,7 @@ namespace internal {
F(NewClosure, 2, 1) \
F(NewObject, 1, 1) \
F(NewObjectFromBound, 2, 1) \
+ F(FinalizeInstanceSize, 1, 1) \
F(Throw, 1, 1) \
F(ReThrow, 1, 1) \
F(ThrowReferenceError, 1, 1) \
@@ -332,6 +333,7 @@ namespace internal {
F(SetScriptBreakPoint, 3, 1) \
F(ClearBreakPoint, 1, 1) \
F(ChangeBreakOnException, 2, 1) \
+ F(IsBreakOnException, 1, 1) \
F(PrepareStep, 3, 1) \
F(ClearStepping, 0, 1) \
F(DebugEvaluate, 4, 1) \
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index 15b1d4420..79d63f177 100755
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -342,8 +342,11 @@ void Scanner::LiteralScope::Complete() {
// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner(ParserMode pre)
- : is_pre_parsing_(pre == PREPARSE), stack_overflow_(false) { }
+Scanner::Scanner()
+ : has_line_terminator_before_next_(false),
+ is_parsing_json_(false),
+ source_(NULL),
+ stack_overflow_(false) {}
void Scanner::Initialize(Handle<String> source,
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index 8d6184697..6e5333bce 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -281,8 +281,7 @@ class Scanner {
bool complete_;
};
- // Construction
- explicit Scanner(ParserMode parse_mode);
+ Scanner();
// Initialize the Scanner to scan source.
void Initialize(Handle<String> source,
@@ -298,7 +297,7 @@ class Scanner {
Token::Value Next();
// One token look-ahead (past the token returned by Next()).
- Token::Value peek() const { return next_.token; }
+ Token::Value peek() const { return next_.token; }
// Returns true if there was a line terminator before the peek'ed token.
bool has_line_terminator_before_next() const {
@@ -314,8 +313,8 @@ class Scanner {
// Returns the location information for the current token
// (the token returned by Next()).
- Location location() const { return current_.location; }
- Location peek_location() const { return next_.location; }
+ Location location() const { return current_.location; }
+ Location peek_location() const { return next_.location; }
// Returns the literal string, if any, for the current token (the
// token returned by Next()). The string is 0-terminated and in
@@ -488,7 +487,6 @@ class Scanner {
TokenDesc current_; // desc for current token (as returned by Next())
TokenDesc next_; // desc for next token (one token look-ahead)
bool has_line_terminator_before_next_;
- bool is_pre_parsing_;
bool is_parsing_json_;
// Different UTF16 buffers used to pull characters from. Based on input one of
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index 7e7f15259..e054d7de5 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -37,8 +37,8 @@ namespace internal {
static int CompareLocal(Variable* const* v, Variable* const* w) {
- Slot* s = (*v)->slot();
- Slot* t = (*w)->slot();
+ Slot* s = (*v)->AsSlot();
+ Slot* t = (*w)->AsSlot();
// We may have rewritten parameters (that are in the arguments object)
// and which may have a NULL slot... - find a better solution...
int x = (s != NULL ? s->index() : 0);
@@ -83,7 +83,7 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
for (int i = 0; i < locals.length(); i++) {
Variable* var = locals[i];
if (var->is_used()) {
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
if (slot != NULL) {
switch (slot->type()) {
case Slot::PARAMETER:
@@ -112,9 +112,9 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
if (scope->num_heap_slots() > 0) {
// Add user-defined slots.
for (int i = 0; i < heap_locals.length(); i++) {
- ASSERT(heap_locals[i]->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+ ASSERT(heap_locals[i]->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
context_slots_.length());
- ASSERT(heap_locals[i]->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+ ASSERT(heap_locals[i]->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
context_modes_.length());
context_slots_.Add(heap_locals[i]->name());
context_modes_.Add(heap_locals[i]->mode());
@@ -131,15 +131,15 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
Variable* var = scope->function();
if (var != NULL &&
var->is_used() &&
- var->slot()->type() == Slot::CONTEXT) {
+ var->AsSlot()->type() == Slot::CONTEXT) {
function_name_ = var->name();
// Note that we must not find the function name in the context slot
// list - instead it must be handled separately in the
// Contexts::Lookup() function. Thus record an empty symbol here so we
// get the correct number of context slots.
- ASSERT(var->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+ ASSERT(var->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
context_slots_.length());
- ASSERT(var->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+ ASSERT(var->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
context_modes_.length());
context_slots_.Add(Factory::empty_symbol());
context_modes_.Add(Variable::INTERNAL);
diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h
index 0fdab56db..b210ae76a 100644
--- a/deps/v8/src/scopeinfo.h
+++ b/deps/v8/src/scopeinfo.h
@@ -63,13 +63,13 @@ class ScopeInfo BASE_EMBEDDED {
// --------------------------------------------------------------------------
// Lookup
- Handle<String> function_name() const { return function_name_; }
+ Handle<String> function_name() const { return function_name_; }
- Handle<String> parameter_name(int i) const { return parameters_[i]; }
- int number_of_parameters() const { return parameters_.length(); }
+ Handle<String> parameter_name(int i) const { return parameters_[i]; }
+ int number_of_parameters() const { return parameters_.length(); }
- Handle<String> stack_slot_name(int i) const { return stack_slots_[i]; }
- int number_of_stack_slots() const { return stack_slots_.length(); }
+ Handle<String> stack_slot_name(int i) const { return stack_slots_[i]; }
+ int number_of_stack_slots() const { return stack_slots_.length(); }
Handle<String> context_slot_name(int i) const {
return context_slots_[i - Context::MIN_CONTEXT_SLOTS];
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index b55e5d5d7..7f1987e84 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -201,7 +201,6 @@ void Scope::Initialize(bool inside_with) {
}
-
Variable* Scope::LocalLookup(Handle<String> name) {
return variables_.Lookup(name);
}
@@ -810,8 +809,7 @@ void Scope::AllocateParameterLocals() {
    // We are using 'arguments'. Tell the code generator that it needs to
    // allocate the arguments object by setting 'arguments_'.
- arguments_ = new VariableProxy(Factory::arguments_symbol(), false, false);
- arguments_->BindTo(arguments);
+ arguments_ = arguments;
// We also need the '.arguments' shadow variable. Declare it and create
// and bind the corresponding proxy. It's ok to declare it only now
@@ -822,13 +820,13 @@ void Scope::AllocateParameterLocals() {
// NewTemporary() because the mode needs to be INTERNAL since this
// variable may be allocated in the heap-allocated context (temporaries
// are never allocated in the context).
- Variable* arguments_shadow =
- new Variable(this, Factory::arguments_shadow_symbol(),
- Variable::INTERNAL, true, Variable::ARGUMENTS);
- arguments_shadow_ =
- new VariableProxy(Factory::arguments_shadow_symbol(), false, false);
- arguments_shadow_->BindTo(arguments_shadow);
- temps_.Add(arguments_shadow);
+ arguments_shadow_ = new Variable(this,
+ Factory::arguments_shadow_symbol(),
+ Variable::INTERNAL,
+ true,
+ Variable::ARGUMENTS);
+ arguments_shadow_->set_is_used(true);
+ temps_.Add(arguments_shadow_);
// Allocate the parameters by rewriting them into '.arguments[i]' accesses.
for (int i = 0; i < params_.length(); i++) {
@@ -839,14 +837,13 @@ void Scope::AllocateParameterLocals() {
// It is ok to set this only now, because arguments is a local
// variable that is allocated after the parameters have been
// allocated.
- arguments_shadow->is_accessed_from_inner_scope_ = true;
+ arguments_shadow_->is_accessed_from_inner_scope_ = true;
}
var->rewrite_ =
- new Property(arguments_shadow_,
- new Literal(Handle<Object>(Smi::FromInt(i))),
- RelocInfo::kNoPosition,
- Property::SYNTHETIC);
- if (var->is_used()) arguments_shadow->set_is_used(true);
+ new Property(new VariableProxy(arguments_shadow_),
+ new Literal(Handle<Object>(Smi::FromInt(i))),
+ RelocInfo::kNoPosition,
+ Property::SYNTHETIC);
}
}
@@ -862,7 +859,8 @@ void Scope::AllocateParameterLocals() {
if (MustAllocate(var)) {
if (MustAllocateInContext(var)) {
ASSERT(var->rewrite_ == NULL ||
- (var->slot() != NULL && var->slot()->type() == Slot::CONTEXT));
+ (var->AsSlot() != NULL &&
+ var->AsSlot()->type() == Slot::CONTEXT));
if (var->rewrite_ == NULL) {
// Only set the heap allocation if the parameter has not
// been allocated yet.
@@ -870,8 +868,8 @@ void Scope::AllocateParameterLocals() {
}
} else {
ASSERT(var->rewrite_ == NULL ||
- (var->slot() != NULL &&
- var->slot()->type() == Slot::PARAMETER));
+ (var->AsSlot() != NULL &&
+ var->AsSlot()->type() == Slot::PARAMETER));
// Set the parameter index always, even if the parameter
// was seen before! (We need to access the actual parameter
// supplied for the last occurrence of a multiply declared
@@ -888,7 +886,7 @@ void Scope::AllocateNonParameterLocal(Variable* var) {
ASSERT(var->scope() == this);
ASSERT(var->rewrite_ == NULL ||
(!var->IsVariable(Factory::result_symbol())) ||
- (var->slot() == NULL || var->slot()->type() != Slot::LOCAL));
+ (var->AsSlot() == NULL || var->AsSlot()->type() != Slot::LOCAL));
if (var->rewrite_ == NULL && MustAllocate(var)) {
if (MustAllocateInContext(var)) {
AllocateHeapSlot(var);
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index c2354b205..21040b7c0 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -34,7 +34,6 @@
namespace v8 {
namespace internal {
-
// A hash map to support fast variable declaration and lookup.
class VariableMap: public HashMap {
public:
@@ -100,8 +99,12 @@ class Scope: public ZoneObject {
// The scope name is only used for printing/debugging.
void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
- void Initialize(bool inside_with);
+ virtual void Initialize(bool inside_with);
+ // Called just before leaving a scope.
+ virtual void Leave() {
+ // No cleanup or fixup necessary.
+ }
// ---------------------------------------------------------------------------
// Declarations
@@ -187,21 +190,21 @@ class Scope: public ZoneObject {
// Predicates.
// Specific scope types.
- bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
- bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
- bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
+ bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
+ bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
+ bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
  // Information about which scopes call eval.
- bool calls_eval() const { return scope_calls_eval_; }
- bool outer_scope_calls_eval() const { return outer_scope_calls_eval_; }
+ bool calls_eval() const { return scope_calls_eval_; }
+ bool outer_scope_calls_eval() const { return outer_scope_calls_eval_; }
// Is this scope inside a with statement.
- bool inside_with() const { return scope_inside_with_; }
+ bool inside_with() const { return scope_inside_with_; }
// Does this scope contain a with statement.
- bool contains_with() const { return scope_contains_with_; }
+ bool contains_with() const { return scope_contains_with_; }
// The scope immediately surrounding this scope, or NULL.
- Scope* outer_scope() const { return outer_scope_; }
+ Scope* outer_scope() const { return outer_scope_; }
// ---------------------------------------------------------------------------
// Accessors.
@@ -217,27 +220,27 @@ class Scope: public ZoneObject {
// The variable holding the function literal for named function
// literals, or NULL.
// Only valid for function scopes.
- Variable* function() const {
+ Variable* function() const {
ASSERT(is_function_scope());
return function_;
}
// Parameters. The left-most parameter has index 0.
// Only valid for function scopes.
- Variable* parameter(int index) const {
+ Variable* parameter(int index) const {
ASSERT(is_function_scope());
return params_[index];
}
- int num_parameters() const { return params_.length(); }
+ int num_parameters() const { return params_.length(); }
// The local variable 'arguments' if we need to allocate it; NULL otherwise.
// If arguments() exist, arguments_shadow() exists, too.
- VariableProxy* arguments() const { return arguments_; }
+ Variable* arguments() const { return arguments_; }
// The '.arguments' shadow variable if we need to allocate it; NULL otherwise.
// If arguments_shadow() exist, arguments() exists, too.
- VariableProxy* arguments_shadow() const { return arguments_shadow_; }
+ Variable* arguments_shadow() const { return arguments_shadow_; }
// Declarations list.
ZoneList<Declaration*>* declarations() { return &decls_; }
@@ -262,8 +265,8 @@ class Scope: public ZoneObject {
void AllocateVariables(Handle<Context> context);
// Result of variable allocation.
- int num_stack_slots() const { return num_stack_slots_; }
- int num_heap_slots() const { return num_heap_slots_; }
+ int num_stack_slots() const { return num_stack_slots_; }
+ int num_heap_slots() const { return num_heap_slots_; }
// Make sure this scope and all outer scopes are eagerly compiled.
void ForceEagerCompilation() { force_eager_compilation_ = true; }
@@ -272,7 +275,7 @@ class Scope: public ZoneObject {
bool AllowsLazyCompilation() const;
// True if the outer context of this scope is always the global context.
- bool HasTrivialOuterContext() const;
+ virtual bool HasTrivialOuterContext() const;
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
@@ -322,9 +325,9 @@ class Scope: public ZoneObject {
// Function variable, if any; function scopes only.
Variable* function_;
// Convenience variable; function scopes only.
- VariableProxy* arguments_;
+ Variable* arguments_;
// Convenience variable; function scopes only.
- VariableProxy* arguments_shadow_;
+ Variable* arguments_shadow_;
// Illegal redeclaration.
Expression* illegal_redecl_;
@@ -378,20 +381,53 @@ class Scope: public ZoneObject {
};
+// Scope used during pre-parsing.
class DummyScope : public Scope {
public:
- DummyScope() : Scope(GLOBAL_SCOPE) {
+ DummyScope()
+ : Scope(GLOBAL_SCOPE),
+ nesting_level_(1), // Allows us to Leave the initial scope.
+ inside_with_level_(kNotInsideWith) {
outer_scope_ = this;
+ scope_inside_with_ = false;
}
- virtual Variable* Lookup(Handle<String> name) { return NULL; }
- virtual Variable* Declare(Handle<String> name, Variable::Mode mode) {
- return NULL;
+ virtual void Initialize(bool inside_with) {
+ nesting_level_++;
+ if (inside_with && inside_with_level_ == kNotInsideWith) {
+ inside_with_level_ = nesting_level_;
+ }
+ ASSERT(inside_with_level_ <= nesting_level_);
}
+
+ virtual void Leave() {
+ nesting_level_--;
+ ASSERT(nesting_level_ >= 0);
+ if (nesting_level_ < inside_with_level_) {
+ inside_with_level_ = kNotInsideWith;
+ }
+ ASSERT(inside_with_level_ <= nesting_level_);
+ }
+
+ virtual Variable* Lookup(Handle<String> name) { return NULL; }
+
virtual VariableProxy* NewUnresolved(Handle<String> name, bool inside_with) {
return NULL;
}
+
virtual VariableProxy* NewTemporary(Handle<String> name) { return NULL; }
+
+ virtual bool HasTrivialOuterContext() const {
+ return (nesting_level_ == 0 || inside_with_level_ <= 0);
+ }
+
+ private:
+ static const int kNotInsideWith = -1;
+ // Number of surrounding scopes of the current scope.
+ int nesting_level_;
+ // Nesting level of outermost scope that is contained in a with statement,
+ // or kNotInsideWith if there are no with's around the current scope.
+  // statement, or kNotInsideWith if there are no with statements around
+  // the current scope.
+  int inside_with_level_;
};
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 3d2d42f09..d824c3014 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -270,9 +270,9 @@ void CodeRange::TearDown() {
// -----------------------------------------------------------------------------
// MemoryAllocator
//
-int MemoryAllocator::capacity_ = 0;
-int MemoryAllocator::size_ = 0;
-int MemoryAllocator::size_executable_ = 0;
+intptr_t MemoryAllocator::capacity_ = 0;
+intptr_t MemoryAllocator::size_ = 0;
+intptr_t MemoryAllocator::size_executable_ = 0;
List<MemoryAllocator::MemoryAllocationCallbackRegistration>
MemoryAllocator::memory_allocation_callbacks_;
@@ -302,7 +302,7 @@ int MemoryAllocator::Pop() {
}
-bool MemoryAllocator::Setup(int capacity) {
+bool MemoryAllocator::Setup(intptr_t capacity) {
capacity_ = RoundUp(capacity, Page::kPageSize);
// Over-estimate the size of chunks_ array. It assumes the expansion of old
@@ -314,7 +314,8 @@ bool MemoryAllocator::Setup(int capacity) {
//
// Reserve two chunk ids for semispaces, one for map space, one for old
// space, and one for code space.
- max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
+ max_nof_chunks_ =
+ static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
if (max_nof_chunks_ > kMaxNofChunks) return false;
size_ = 0;
@@ -691,7 +692,9 @@ Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
float pct = static_cast<float>(capacity_ - size_) / capacity_;
- PrintF(" capacity: %d, used: %d, available: %%%d\n\n",
+ PrintF(" capacity: %" V8_PTR_PREFIX "d"
+ ", used: %" V8_PTR_PREFIX "d"
+ ", available: %%%d\n\n",
capacity_, size_, static_cast<int>(pct*100));
}
#endif
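
Editor's note: the new format strings rely on the V8_PTR_PREFIX macro (defined elsewhere in v8) so the same PrintF call works for 32- and 64-bit intptr_t. A portable standalone equivalent uses the standard PRIdPTR macro:

#include <cinttypes>
#include <cstdio>

int main() {
  intptr_t capacity = INTPTR_MAX;
  // Prints an intptr_t without truncation regardless of pointer width.
  std::printf("capacity: %" PRIdPTR "\n", capacity);
  return 0;
}
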
@@ -769,7 +772,7 @@ Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
// -----------------------------------------------------------------------------
// PagedSpace implementation
-PagedSpace::PagedSpace(int max_capacity,
+PagedSpace::PagedSpace(intptr_t max_capacity,
AllocationSpace id,
Executability executable)
: Space(id, executable) {
@@ -797,8 +800,9 @@ bool PagedSpace::Setup(Address start, size_t size) {
Page::kPageSize * pages_in_chunk,
this, &num_pages);
} else {
- int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
- max_capacity_ / Page::kObjectAreaSize);
+ int requested_pages =
+ Min(MemoryAllocator::kPagesPerChunk,
+ static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
first_page_ =
MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
if (!first_page_->is_valid()) return false;
@@ -984,7 +988,8 @@ bool PagedSpace::Expand(Page* last_page) {
// Last page must be valid and its next page is invalid.
ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
- int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
+ int available_pages =
+ static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
if (available_pages <= 0) return false;
int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
@@ -1264,7 +1269,7 @@ void NewSpace::Grow() {
void NewSpace::Shrink() {
- int new_capacity = Max(InitialCapacity(), 2 * Size());
+ int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
int rounded_new_capacity =
RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
if (rounded_new_capacity < Capacity() &&
@@ -1643,7 +1648,8 @@ void NewSpace::ReportStatistics() {
#ifdef DEBUG
if (FLAG_heap_stats) {
float pct = static_cast<float>(Available()) / Capacity();
- PrintF(" capacity: %d, available: %d, %%%d\n",
+ PrintF(" capacity: %" V8_PTR_PREFIX "d"
+ ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Capacity(), Available(), static_cast<int>(pct*100));
PrintF("\n Object Histogram:\n");
for (int i = 0; i <= LAST_TYPE; i++) {
@@ -2401,8 +2407,10 @@ void PagedSpace::CollectCodeStatistics() {
void OldSpace::ReportStatistics() {
- int pct = Available() * 100 / Capacity();
- PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
+ int pct = static_cast<int>(Available() * 100 / Capacity());
+ PrintF(" capacity: %" V8_PTR_PREFIX "d"
+ ", waste: %" V8_PTR_PREFIX "d"
+ ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Capacity(), Waste(), Available(), pct);
ClearHistograms();
@@ -2558,8 +2566,10 @@ void FixedSpace::DeallocateBlock(Address start,
#ifdef DEBUG
void FixedSpace::ReportStatistics() {
- int pct = Available() * 100 / Capacity();
- PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
+ int pct = static_cast<int>(Available() * 100 / Capacity());
+ PrintF(" capacity: %" V8_PTR_PREFIX "d"
+ ", waste: %" V8_PTR_PREFIX "d"
+ ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Capacity(), Waste(), Available(), pct);
ClearHistograms();
@@ -3011,7 +3021,7 @@ void LargeObjectSpace::Print() {
void LargeObjectSpace::ReportStatistics() {
- PrintF(" size: %d\n", size_);
+ PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
int num_objects = 0;
ClearHistograms();
LargeObjectIterator it(this);
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 9ffa94048..2fdb96f3f 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -243,8 +243,10 @@ class Page {
static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
kIntSize + kPointerSize;
- // The start offset of the object area in a page.
- static const int kObjectStartOffset = MAP_POINTER_ALIGN(kPageHeaderSize);
+ // The start offset of the object area in a page. Aligned to both maps and
+ // code alignment to be suitable for both.
+ static const int kObjectStartOffset =
+ CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
// Object area size in bytes.
static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
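
Editor's note: nesting the two round-ups works because the alignments are powers of two; the outer round-up preserves the inner alignment whenever the outer alignment is a multiple of the inner one. A quick standalone check (the RoundUp shape and constants are illustrative; the real MAP_POINTER_ALIGN/CODE_POINTER_ALIGN macros are defined elsewhere in v8):

#include <cassert>
#include <cstddef>

constexpr size_t RoundUp(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t kMapAlign = 8, kCodeAlign = 32, kHeaderSize = 20;
  size_t offset = RoundUp(RoundUp(kHeaderSize, kMapAlign), kCodeAlign);
  assert(offset % kMapAlign == 0 && offset % kCodeAlign == 0);
}
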
@@ -369,7 +371,7 @@ class Space : public Malloced {
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
- virtual int Size() = 0;
+ virtual intptr_t Size() = 0;
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the space by marking it read-only/writable.
@@ -489,7 +491,7 @@ class MemoryAllocator : public AllStatic {
public:
// Initializes its internal bookkeeping structures.
// Max capacity of the total space.
- static bool Setup(int max_capacity);
+ static bool Setup(intptr_t max_capacity);
// Deletes valid chunks.
static void TearDown();
@@ -580,16 +582,18 @@ class MemoryAllocator : public AllStatic {
MemoryAllocationCallback callback);
// Returns the maximum available bytes of heaps.
- static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
+ static intptr_t Available() {
+ return capacity_ < size_ ? 0 : capacity_ - size_;
+ }
// Returns allocated spaces in bytes.
- static int Size() { return size_; }
+ static intptr_t Size() { return size_; }
// Returns allocated executable spaces in bytes.
- static int SizeExecutable() { return size_executable_; }
+ static intptr_t SizeExecutable() { return size_executable_; }
// Returns maximum available bytes that the old space can have.
- static int MaxAvailable() {
+ static intptr_t MaxAvailable() {
return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
}
@@ -647,12 +651,12 @@ class MemoryAllocator : public AllStatic {
private:
// Maximum space size in bytes.
- static int capacity_;
+ static intptr_t capacity_;
// Allocated space size in bytes.
- static int size_;
+ static intptr_t size_;
// Allocated executable space size in bytes.
- static int size_executable_;
+ static intptr_t size_executable_;
struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -925,10 +929,10 @@ class AllocationStats BASE_EMBEDDED {
}
// Accessors for the allocation statistics.
- int Capacity() { return capacity_; }
- int Available() { return available_; }
- int Size() { return size_; }
- int Waste() { return waste_; }
+ intptr_t Capacity() { return capacity_; }
+ intptr_t Available() { return available_; }
+ intptr_t Size() { return size_; }
+ intptr_t Waste() { return waste_; }
// Grow the space by adding available bytes.
void ExpandSpace(int size_in_bytes) {
@@ -943,13 +947,13 @@ class AllocationStats BASE_EMBEDDED {
}
// Allocate from available bytes (available -> size).
- void AllocateBytes(int size_in_bytes) {
+ void AllocateBytes(intptr_t size_in_bytes) {
available_ -= size_in_bytes;
size_ += size_in_bytes;
}
// Free allocated bytes, making them available (size -> available).
- void DeallocateBytes(int size_in_bytes) {
+ void DeallocateBytes(intptr_t size_in_bytes) {
size_ -= size_in_bytes;
available_ += size_in_bytes;
}
@@ -962,23 +966,25 @@ class AllocationStats BASE_EMBEDDED {
// Consider the wasted bytes to be allocated, as they contain filler
// objects (waste -> size).
- void FillWastedBytes(int size_in_bytes) {
+ void FillWastedBytes(intptr_t size_in_bytes) {
waste_ -= size_in_bytes;
size_ += size_in_bytes;
}
private:
- int capacity_;
- int available_;
- int size_;
- int waste_;
+ intptr_t capacity_;
+ intptr_t available_;
+ intptr_t size_;
+ intptr_t waste_;
};
class PagedSpace : public Space {
public:
// Creates a space with a maximum capacity, and an id.
- PagedSpace(int max_capacity, AllocationSpace id, Executability executable);
+ PagedSpace(intptr_t max_capacity,
+ AllocationSpace id,
+ Executability executable);
virtual ~PagedSpace() {}
@@ -1029,21 +1035,21 @@ class PagedSpace : public Space {
}
// Current capacity without growing (Size() + Available() + Waste()).
- int Capacity() { return accounting_stats_.Capacity(); }
+ intptr_t Capacity() { return accounting_stats_.Capacity(); }
// Total amount of memory committed for this space. For paged
// spaces this equals the capacity.
- int CommittedMemory() { return Capacity(); }
+ intptr_t CommittedMemory() { return Capacity(); }
// Available bytes without growing.
- int Available() { return accounting_stats_.Available(); }
+ intptr_t Available() { return accounting_stats_.Available(); }
// Allocated bytes in this space.
- virtual int Size() { return accounting_stats_.Size(); }
+ virtual intptr_t Size() { return accounting_stats_.Size(); }
// Wasted bytes due to fragmentation and not recoverable until the
// next GC of this space.
- int Waste() { return accounting_stats_.Waste(); }
+ intptr_t Waste() { return accounting_stats_.Waste(); }
// Returns the address of the first object in this space.
Address bottom() { return first_page_->ObjectAreaStart(); }
@@ -1135,7 +1141,7 @@ class PagedSpace : public Space {
protected:
// Maximum capacity of this space.
- int max_capacity_;
+ intptr_t max_capacity_;
// Accounting information for this space.
AllocationStats accounting_stats_;
@@ -1326,7 +1332,7 @@ class SemiSpace : public Space {
// If we don't have these here then SemiSpace will be abstract. However
// they should never be called.
- virtual int Size() {
+ virtual intptr_t Size() {
UNREACHABLE();
return 0;
}
@@ -1469,22 +1475,26 @@ class NewSpace : public Space {
}
// Return the allocated bytes in the active semispace.
- virtual int Size() { return static_cast<int>(top() - bottom()); }
+ virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
+  // The same, but returning an int. The intptr_t version must exist because
+  // it is inherited, but when we know we are dealing with the new space,
+  // which cannot grow as large as the other spaces, an int is sufficient:
+ int SizeAsInt() { return static_cast<int>(Size()); }
// Return the current capacity of a semispace.
- int Capacity() {
+ intptr_t Capacity() {
ASSERT(to_space_.Capacity() == from_space_.Capacity());
return to_space_.Capacity();
}
// Return the total amount of memory committed for new space.
- int CommittedMemory() {
+ intptr_t CommittedMemory() {
if (from_space_.is_committed()) return 2 * Capacity();
return Capacity();
}
// Return the available bytes without growing in the active semispace.
- int Available() { return Capacity() - Size(); }
+ intptr_t Available() { return Capacity() - Size(); }
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
@@ -1679,7 +1689,7 @@ class OldSpaceFreeList BASE_EMBEDDED {
void Reset();
// Return the number of bytes available on the free list.
- int available() { return available_; }
+ intptr_t available() { return available_; }
// Place a node on the free list. The block of size 'size_in_bytes'
// starting at 'start' is placed on the free list. The return value is the
@@ -1781,7 +1791,7 @@ class FixedSizeFreeList BASE_EMBEDDED {
void Reset();
// Return the number of bytes available on the free list.
- int available() { return available_; }
+ intptr_t available() { return available_; }
// Place a node on the free list. The block starting at 'start' (assumed to
// have size object_size_) is placed on the free list. Bookkeeping
@@ -1795,7 +1805,7 @@ class FixedSizeFreeList BASE_EMBEDDED {
private:
// Available bytes on the free list.
- int available_;
+ intptr_t available_;
// The head of the free list.
Address head_;
@@ -1821,7 +1831,7 @@ class OldSpace : public PagedSpace {
public:
// Creates an old space object with a given maximum capacity.
// The constructor does not allocate pages from OS.
- explicit OldSpace(int max_capacity,
+ explicit OldSpace(intptr_t max_capacity,
AllocationSpace id,
Executability executable)
: PagedSpace(max_capacity, id, executable), free_list_(id) {
@@ -1830,7 +1840,7 @@ class OldSpace : public PagedSpace {
  // The bytes available on the free list (i.e., not above the linear
// pointer).
- int AvailableFree() { return free_list_.available(); }
+ intptr_t AvailableFree() { return free_list_.available(); }
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) {
@@ -1891,7 +1901,7 @@ class OldSpace : public PagedSpace {
class FixedSpace : public PagedSpace {
public:
- FixedSpace(int max_capacity,
+ FixedSpace(intptr_t max_capacity,
AllocationSpace id,
int object_size_in_bytes,
const char* name)
@@ -1966,7 +1976,7 @@ class FixedSpace : public PagedSpace {
class MapSpace : public FixedSpace {
public:
// Creates a map space object with a maximum capacity.
- MapSpace(int max_capacity, int max_map_space_pages, AllocationSpace id)
+ MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id)
: FixedSpace(max_capacity, id, Map::kSize, "map"),
max_map_space_pages_(max_map_space_pages) {
ASSERT(max_map_space_pages < kMaxMapPageIndex);
@@ -2071,7 +2081,7 @@ class MapSpace : public FixedSpace {
class CellSpace : public FixedSpace {
public:
// Creates a property cell space object with a maximum capacity.
- CellSpace(int max_capacity, AllocationSpace id)
+ CellSpace(intptr_t max_capacity, AllocationSpace id)
: FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
protected:
@@ -2127,7 +2137,7 @@ class LargeObjectChunk {
// Given a chunk size, returns the object size it can accommodate. Used by
// LargeObjectSpace::Available.
- static int ObjectSizeFor(int chunk_size) {
+ static intptr_t ObjectSizeFor(intptr_t chunk_size) {
if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
}
@@ -2163,11 +2173,11 @@ class LargeObjectSpace : public Space {
Object* AllocateRawFixedArray(int size_in_bytes);
// Available bytes for objects in this space.
- int Available() {
+ intptr_t Available() {
return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
}
- virtual int Size() {
+ virtual intptr_t Size() {
return size_;
}
@@ -2221,7 +2231,7 @@ class LargeObjectSpace : public Space {
private:
// The head of the linked list of large object chunks.
LargeObjectChunk* first_chunk_;
- int size_; // allocated bytes
+ intptr_t size_; // allocated bytes
int page_count_; // number of chunks
diff --git a/deps/v8/src/string-search.cc b/deps/v8/src/string-search.cc
new file mode 100644
index 000000000..56874432f
--- /dev/null
+++ b/deps/v8/src/string-search.cc
@@ -0,0 +1,40 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "string-search.h"
+
+namespace v8 {
+namespace internal {
+
+// Storage for constants used by string-search.
+
+int StringSearchBase::kBadCharShiftTable[kUC16AlphabetSize];
+int StringSearchBase::kGoodSuffixShiftTable[kBMMaxShift + 1];
+int StringSearchBase::kSuffixTable[kBMMaxShift + 1];
+
+}} // namespace v8::internal
diff --git a/deps/v8/src/string-search.h b/deps/v8/src/string-search.h
index d7959c0be..eac84757e 100644
--- a/deps/v8/src/string-search.h
+++ b/deps/v8/src/string-search.h
@@ -32,278 +32,484 @@ namespace v8 {
namespace internal {
-// Cap on the maximal shift in the Boyer-Moore implementation. By setting a
-// limit, we can fix the size of tables. For a needle longer than this limit,
-// search will not be optimal, since we only build tables for a smaller suffix
-// of the string, which is a safe approximation.
-static const int kBMMaxShift = 250;
-// Reduce alphabet to this size.
-// One of the tables used by Boyer-Moore and Boyer-Moore-Horspool has size
-// proportional to the input alphabet. We reduce the alphabet size by
-// equating input characters modulo a smaller alphabet size. This gives
-// a potentially less efficient searching, but is a safe approximation.
-// For needles using only characters in the same Unicode 256-code point page,
-// there is no search speed degradation.
-static const int kBMAlphabetSize = 256;
-// For patterns below this length, the skip length of Boyer-Moore is too short
-// to compensate for the algorithmic overhead compared to simple brute force.
-static const int kBMMinPatternLength = 7;
-
-// Holds the two buffers used by Boyer-Moore string search's Good Suffix
-// shift. Only allows the last kBMMaxShift characters of the needle
-// to be indexed.
-class BMGoodSuffixBuffers {
+//---------------------------------------------------------------------
+// String Search object.
+//---------------------------------------------------------------------
+
+// Class holding constants and methods that apply to all string search variants,
+// independently of subject and pattern char size.
+class StringSearchBase {
+ protected:
+ // Cap on the maximal shift in the Boyer-Moore implementation. By setting a
+ // limit, we can fix the size of tables. For a needle longer than this limit,
+ // search will not be optimal, since we only build tables for a suffix
+ // of the string, but it is a safe approximation.
+ static const int kBMMaxShift = 250;
+
+ // Reduce alphabet to this size.
+ // One of the tables used by Boyer-Moore and Boyer-Moore-Horspool has size
+ // proportional to the input alphabet. We reduce the alphabet size by
+ // equating input characters modulo a smaller alphabet size. This gives
+  // potentially less efficient searching, but it is a safe approximation.
+ // For needles using only characters in the same Unicode 256-code point page,
+ // there is no search speed degradation.
+ static const int kAsciiAlphabetSize = 128;
+ static const int kUC16AlphabetSize = 256;
+
+  // The bad-char shift table is stored in the search state; its length is
+  // the alphabet size.
+  // For patterns shorter than kBMMinPatternLength, the skip length of
+  // Boyer-Moore is too short to compensate for the algorithmic overhead
+  // compared to simple brute force.
+ static const int kBMMinPatternLength = 7;
+
+ static inline bool IsAsciiString(Vector<const char>) {
+ return true;
+ }
+
+ static inline bool IsAsciiString(Vector<const uc16> string) {
+ for (int i = 0, n = string.length(); i < n; i++) {
+ if (static_cast<unsigned>(string[i]) > String::kMaxAsciiCharCodeU) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // The following tables are shared by all searches.
+ // TODO(lrn): Introduce a way for a pattern to keep its tables
+ // between searches (e.g., for an Atom RegExp).
+
+ // Store for the BoyerMoore(Horspool) bad char shift table.
+ static int kBadCharShiftTable[kUC16AlphabetSize];
+ // Store for the BoyerMoore good suffix shift table.
+ static int kGoodSuffixShiftTable[kBMMaxShift + 1];
+ // Table used temporarily while building the BoyerMoore good suffix
+ // shift table.
+ static int kSuffixTable[kBMMaxShift + 1];
+};
+
+
+template <typename PatternChar, typename SubjectChar>
+class StringSearch : private StringSearchBase {
public:
- BMGoodSuffixBuffers() {}
- inline void Initialize(int needle_length) {
- ASSERT(needle_length > 1);
- int start = needle_length < kBMMaxShift ? 0 : needle_length - kBMMaxShift;
- int len = needle_length - start;
- biased_suffixes_ = suffixes_ - start;
- biased_good_suffix_shift_ = good_suffix_shift_ - start;
- for (int i = 0; i <= len; i++) {
- good_suffix_shift_[i] = len;
+ explicit StringSearch(Vector<const PatternChar> pattern)
+ : pattern_(pattern),
+ start_(Max(0, pattern.length() - kBMMaxShift)) {
+ if (sizeof(PatternChar) > sizeof(SubjectChar)) {
+ if (!IsAsciiString(pattern_)) {
+ strategy_ = &FailSearch;
+ return;
+ }
+ }
+ int pattern_length = pattern_.length();
+ if (pattern_length < kBMMinPatternLength) {
+ if (pattern_length == 1) {
+ strategy_ = &SingleCharSearch;
+ return;
+ }
+ strategy_ = &LinearSearch;
+ return;
}
+ strategy_ = &InitialSearch;
}
- inline int& suffix(int index) {
- ASSERT(biased_suffixes_ + index >= suffixes_);
- return biased_suffixes_[index];
+
+ int Search(Vector<const SubjectChar> subject, int index) {
+ return strategy_(this, subject, index);
}
- inline int& shift(int index) {
- ASSERT(biased_good_suffix_shift_ + index >= good_suffix_shift_);
- return biased_good_suffix_shift_[index];
+
+ static inline int AlphabetSize() {
+ if (sizeof(PatternChar) == 1) {
+ // ASCII needle.
+ return kAsciiAlphabetSize;
+ } else {
+ ASSERT(sizeof(PatternChar) == 2);
+ // UC16 needle.
+ return kUC16AlphabetSize;
+ }
}
+
private:
- int suffixes_[kBMMaxShift + 1];
- int good_suffix_shift_[kBMMaxShift + 1];
- int* biased_suffixes_;
- int* biased_good_suffix_shift_;
- DISALLOW_COPY_AND_ASSIGN(BMGoodSuffixBuffers);
-};
+ typedef int (*SearchFunction)( // NOLINT - it's not a cast!
+ StringSearch<PatternChar, SubjectChar>*,
+ Vector<const SubjectChar>,
+ int);
+
+ static int FailSearch(StringSearch<PatternChar, SubjectChar>*,
+ Vector<const SubjectChar>,
+ int) {
+ return -1;
+ }
-// buffers reused by BoyerMoore
-struct BMBuffers {
- public:
- static int bad_char_occurrence[kBMAlphabetSize];
- static BMGoodSuffixBuffers bmgs_buffers;
+ static int SingleCharSearch(StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int start_index);
+
+ static int LinearSearch(StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int start_index);
+
+ static int InitialSearch(StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int start_index);
+
+ static int BoyerMooreHorspoolSearch(
+ StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int start_index);
+
+ static int BoyerMooreSearch(StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int start_index);
+
+ void PopulateBoyerMooreHorspoolTable();
+
+ void PopulateBoyerMooreTable();
+
+ static inline int CharOccurrence(int* bad_char_occurrence,
+ SubjectChar char_code) {
+ if (sizeof(SubjectChar) == 1) {
+ return bad_char_occurrence[static_cast<int>(char_code)];
+ }
+ if (sizeof(PatternChar) == 1) {
+ if (static_cast<unsigned int>(char_code) > String::kMaxAsciiCharCodeU) {
+ return -1;
+ }
+ return bad_char_occurrence[static_cast<unsigned int>(char_code)];
+ }
+ // Both pattern and subject are UC16. Reduce character to equivalence class.
+ int equiv_class = char_code % kUC16AlphabetSize;
+ return bad_char_occurrence[equiv_class];
+ }
+
+ // Return a table covering the last kBMMaxShift+1 positions of
+ // pattern.
+ int* bad_char_table() {
+ return kBadCharShiftTable;
+ }
+
+ int* good_suffix_shift_table() {
+    // Return biased pointer that maps the range [start_..pattern_.length()]
+ // to the kGoodSuffixShiftTable array.
+ return kGoodSuffixShiftTable - start_;
+ }
+
+ int* suffix_table() {
+    // Return biased pointer that maps the range [start_..pattern_.length()]
+ // to the kSuffixTable array.
+ return kSuffixTable - start_;
+ }
+
+ // The pattern to search for.
+ Vector<const PatternChar> pattern_;
+ // Pointer to implementation of the search.
+ SearchFunction strategy_;
+ // Cache value of Max(0, pattern_length() - kBMMaxShift)
+ int start_;
};
-// State of the string match tables.
-// SIMPLE: No usable content in the buffers.
-// BOYER_MOORE_HORSPOOL: The bad_char_occurence table has been populated.
-// BOYER_MOORE: The bmgs_buffers tables have also been populated.
-// Whenever starting with a new needle, one should call InitializeStringSearch
-// to determine which search strategy to use, and in the case of a long-needle
-// strategy, the call also initializes the algorithm to SIMPLE.
-enum StringSearchAlgorithm { SIMPLE_SEARCH, BOYER_MOORE_HORSPOOL, BOYER_MOORE };
-static StringSearchAlgorithm algorithm;
+//---------------------------------------------------------------------
+// Single Character Pattern Search Strategy
+//---------------------------------------------------------------------
-// Compute the bad-char table for Boyer-Moore in the static buffer.
-template <typename PatternChar>
-static void BoyerMoorePopulateBadCharTable(Vector<const PatternChar> pattern) {
- // Only preprocess at most kBMMaxShift last characters of pattern.
- int start = Max(pattern.length() - kBMMaxShift, 0);
- // Run forwards to populate bad_char_table, so that *last* instance
- // of character equivalence class is the one registered.
- // Notice: Doesn't include the last character.
- int table_size = (sizeof(PatternChar) == 1) ? String::kMaxAsciiCharCode + 1
- : kBMAlphabetSize;
- if (start == 0) { // All patterns less than kBMMaxShift in length.
- memset(BMBuffers::bad_char_occurrence,
- -1,
- table_size * sizeof(*BMBuffers::bad_char_occurrence));
+template <typename PatternChar, typename SubjectChar>
+int StringSearch<PatternChar, SubjectChar>::SingleCharSearch(
+ StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int index) {
+ ASSERT_EQ(1, search->pattern_.length());
+ PatternChar pattern_first_char = search->pattern_[0];
+ int i = index;
+ if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
+ const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
+ memchr(subject.start() + i,
+ pattern_first_char,
+ subject.length() - i));
+ if (pos == NULL) return -1;
+ return static_cast<int>(pos - subject.start());
} else {
- for (int i = 0; i < table_size; i++) {
- BMBuffers::bad_char_occurrence[i] = start - 1;
+ if (sizeof(PatternChar) > sizeof(SubjectChar)) {
+ if (static_cast<uc16>(pattern_first_char) > String::kMaxAsciiCharCodeU) {
+ return -1;
+ }
}
+ SubjectChar search_char = static_cast<SubjectChar>(pattern_first_char);
+ int n = subject.length();
+ while (i < n) {
+ if (subject[i++] == search_char) return i - 1;
+ }
+ return -1;
}
- for (int i = start; i < pattern.length() - 1; i++) {
- PatternChar c = pattern[i];
- int bucket = (sizeof(PatternChar) ==1) ? c : c % kBMAlphabetSize;
- BMBuffers::bad_char_occurrence[bucket] = i;
+}
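
Editor's note: when both sides are single-byte, the search above degenerates to memchr, which libc typically vectorizes. A self-contained analogue of that fast path:

#include <cassert>
#include <cstring>

static int IndexOfChar(const char* subject, int length, char c, int from) {
  const void* pos = memchr(subject + from, c, length - from);
  if (pos == NULL) return -1;
  return static_cast<int>(static_cast<const char*>(pos) - subject);
}

int main() {
  assert(IndexOfChar("abcabc", 6, 'b', 2) == 4);
  assert(IndexOfChar("abcabc", 6, 'z', 0) == -1);
}
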
+
+//---------------------------------------------------------------------
+// Linear Search Strategy
+//---------------------------------------------------------------------
+
+
+template <typename PatternChar, typename SubjectChar>
+static inline bool CharCompare(const PatternChar* pattern,
+ const SubjectChar* subject,
+ int length) {
+ ASSERT(length > 0);
+ int pos = 0;
+ do {
+ if (pattern[pos] != subject[pos]) {
+ return false;
+ }
+ pos++;
+ } while (pos < length);
+ return true;
+}
+
+
+// Simple linear search for short patterns. Never bails out.
+template <typename PatternChar, typename SubjectChar>
+int StringSearch<PatternChar, SubjectChar>::LinearSearch(
+ StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int index) {
+ Vector<const PatternChar> pattern = search->pattern_;
+ ASSERT(pattern.length() > 1);
+ int pattern_length = pattern.length();
+ PatternChar pattern_first_char = pattern[0];
+ int i = index;
+ int n = subject.length() - pattern_length;
+ while (i <= n) {
+ if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
+ const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
+ memchr(subject.start() + i,
+ pattern_first_char,
+ n - i + 1));
+ if (pos == NULL) return -1;
+ i = static_cast<int>(pos - subject.start()) + 1;
+ } else {
+ if (subject[i++] != pattern_first_char) continue;
+ }
+ // Loop extracted to separate function to allow using return to do
+ // a deeper break.
+ if (CharCompare(pattern.start() + 1,
+ subject.start() + i,
+ pattern_length - 1)) {
+ return i - 1;
+ }
+ }
+ return -1;
+}
+
+//---------------------------------------------------------------------
+// Boyer-Moore string search
+//---------------------------------------------------------------------
+
+template <typename PatternChar, typename SubjectChar>
+int StringSearch<PatternChar, SubjectChar>::BoyerMooreSearch(
+ StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int start_index) {
+ Vector<const PatternChar> pattern = search->pattern_;
+ int subject_length = subject.length();
+ int pattern_length = pattern.length();
+ // Only preprocess at most the last kBMMaxShift characters of the pattern.
+ int start = search->start_;
+
+ int* bad_char_occurence = search->bad_char_table();
+ int* good_suffix_shift = search->good_suffix_shift_table();
+
+ PatternChar last_char = pattern[pattern_length - 1];
+ int index = start_index;
+ // Continue the search from index.
+ while (index <= subject_length - pattern_length) {
+ int j = pattern_length - 1;
+ int c;
+ while (last_char != (c = subject[index + j])) {
+ int shift =
+ j - CharOccurrence(bad_char_occurence, c);
+ index += shift;
+ if (index > subject_length - pattern_length) {
+ return -1;
+ }
+ }
+ while (j >= 0 && pattern[j] == (c = subject[index + j])) j--;
+ if (j < 0) {
+ return index;
+ } else if (j < start) {
+ // We have matched more than our tables allow us to be smart about.
+ // Fall back on BMH shift.
+ index += pattern_length - 1
+ - CharOccurrence(bad_char_occurence,
+ static_cast<SubjectChar>(last_char));
+ } else {
+ int gs_shift = good_suffix_shift[j + 1];
+ int bc_occ =
+ CharOccurrence(bad_char_occurence, c);
+ int shift = j - bc_occ;
+ if (gs_shift > shift) {
+ shift = gs_shift;
+ }
+ index += shift;
+ }
}
+
+ return -1;
}
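
CharOccurrence is used above but not defined in this hunk; judging from the removed free-function version further down, it plausibly behaves like the sketch below. The constants and exact signature are assumptions, and the character types are taken to be unsigned:

template <typename PatternChar, typename SubjectChar>
static inline int CharOccurrenceSketch(int* bad_char_table, SubjectChar c) {
  int code = static_cast<int>(c);
  if (sizeof(SubjectChar) == 1) return bad_char_table[code];
  if (sizeof(PatternChar) == 1) {
    // A one-byte pattern cannot contain characters above the ASCII range.
    if (code > 0x7F) return -1;  // 0x7F: String::kMaxAsciiCharCode, assumed.
    return bad_char_table[code];
  }
  // Two-byte pattern and subject: fold into the table's alphabet.
  return bad_char_table[code % 256];  // 256: assumed kBMAlphabetSize.
}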
-template <typename PatternChar>
-static void BoyerMoorePopulateGoodSuffixTable(
- Vector<const PatternChar> pattern) {
- int m = pattern.length();
- int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
- int len = m - start;
- // Compute Good Suffix tables.
- BMBuffers::bmgs_buffers.Initialize(m);
+template <typename PatternChar, typename SubjectChar>
+void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreTable() {
+ int pattern_length = pattern_.length();
+ const PatternChar* pattern = pattern_.start();
+ // Only look at the last kBMMaxShift characters of pattern (from start_
+ // to pattern_length).
+ int start = start_;
+ int length = pattern_length - start;
+
+ // Biased tables so that we can use pattern indices as table indices,
+ // even if we only cover the part of the pattern from offset start.
+ int* shift_table = good_suffix_shift_table();
+ int* suffix_table = this->suffix_table();
+
+ // Initialize table.
+ for (int i = start; i < pattern_length; i++) {
+ shift_table[i] = length;
+ }
+ shift_table[pattern_length] = 1;
+ suffix_table[pattern_length] = pattern_length + 1;
- BMBuffers::bmgs_buffers.shift(m-1) = 1;
- BMBuffers::bmgs_buffers.suffix(m) = m + 1;
- PatternChar last_char = pattern[m - 1];
- int suffix = m + 1;
+ // Find suffixes.
+ PatternChar last_char = pattern[pattern_length - 1];
+ int suffix = pattern_length + 1;
{
- int i = m;
+ int i = pattern_length;
while (i > start) {
PatternChar c = pattern[i - 1];
- while (suffix <= m && c != pattern[suffix - 1]) {
- if (BMBuffers::bmgs_buffers.shift(suffix) == len) {
- BMBuffers::bmgs_buffers.shift(suffix) = suffix - i;
+ while (suffix <= pattern_length && c != pattern[suffix - 1]) {
+ if (shift_table[suffix] == length) {
+ shift_table[suffix] = suffix - i;
}
- suffix = BMBuffers::bmgs_buffers.suffix(suffix);
+ suffix = suffix_table[suffix];
}
- BMBuffers::bmgs_buffers.suffix(--i) = --suffix;
- if (suffix == m) {
+ suffix_table[--i] = --suffix;
+ if (suffix == pattern_length) {
// No suffix to extend, so we check against last_char only.
while ((i > start) && (pattern[i - 1] != last_char)) {
- if (BMBuffers::bmgs_buffers.shift(m) == len) {
- BMBuffers::bmgs_buffers.shift(m) = m - i;
+ if (shift_table[pattern_length] == length) {
+ shift_table[pattern_length] = pattern_length - i;
}
- BMBuffers::bmgs_buffers.suffix(--i) = m;
+ suffix_table[--i] = pattern_length;
}
if (i > start) {
- BMBuffers::bmgs_buffers.suffix(--i) = --suffix;
+ suffix_table[--i] = --suffix;
}
}
}
}
- if (suffix < m) {
- for (int i = start; i <= m; i++) {
- if (BMBuffers::bmgs_buffers.shift(i) == len) {
- BMBuffers::bmgs_buffers.shift(i) = suffix - start;
+ // Build shift table using suffixes.
+ if (suffix < pattern_length) {
+ for (int i = start; i <= pattern_length; i++) {
+ if (shift_table[i] == length) {
+ shift_table[i] = suffix - start;
}
if (i == suffix) {
- suffix = BMBuffers::bmgs_buffers.suffix(suffix);
+ suffix = suffix_table[suffix];
}
}
}
}
+//---------------------------------------------------------------------
+// Boyer-Moore-Horspool string search.
+//---------------------------------------------------------------------
-template <typename SubjectChar, typename PatternChar>
-static inline int CharOccurrence(int char_code) {
- if (sizeof(SubjectChar) == 1) {
- return BMBuffers::bad_char_occurrence[char_code];
- }
- if (sizeof(PatternChar) == 1) {
- if (char_code > String::kMaxAsciiCharCode) {
- return -1;
- }
- return BMBuffers::bad_char_occurrence[char_code];
- }
- return BMBuffers::bad_char_occurrence[char_code % kBMAlphabetSize];
-}
-
-
-// Restricted simplified Boyer-Moore string matching.
-// Uses only the bad-shift table of Boyer-Moore and only uses it
-// for the character compared to the last character of the needle.
-template <typename SubjectChar, typename PatternChar>
-static int BoyerMooreHorspool(Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- int start_index,
- bool* complete) {
- ASSERT(algorithm <= BOYER_MOORE_HORSPOOL);
- int n = subject.length();
- int m = pattern.length();
-
- int badness = -m;
+template <typename PatternChar, typename SubjectChar>
+int StringSearch<PatternChar, SubjectChar>::BoyerMooreHorspoolSearch(
+ StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int start_index) {
+ Vector<const PatternChar> pattern = search->pattern_;
+ int subject_length = subject.length();
+ int pattern_length = pattern.length();
+ int* char_occurrences = search->bad_char_table();
+ int badness = -pattern_length;
// How bad we are doing without a good-suffix table.
- int idx; // No matches found prior to this index.
- PatternChar last_char = pattern[m - 1];
- int last_char_shift =
- m - 1 - CharOccurrence<SubjectChar, PatternChar>(last_char);
+ PatternChar last_char = pattern[pattern_length - 1];
+ int last_char_shift = pattern_length - 1 -
+ CharOccurrence(char_occurrences, static_cast<SubjectChar>(last_char));
// Perform search
- for (idx = start_index; idx <= n - m;) {
- int j = m - 1;
- int c;
- while (last_char != (c = subject[idx + j])) {
- int bc_occ = CharOccurrence<SubjectChar, PatternChar>(c);
+ int index = start_index; // No matches found prior to this index.
+ while (index <= subject_length - pattern_length) {
+ int j = pattern_length - 1;
+ int subject_char;
+ while (last_char != (subject_char = subject[index + j])) {
+ int bc_occ = CharOccurrence(char_occurrences, subject_char);
int shift = j - bc_occ;
- idx += shift;
+ index += shift;
badness += 1 - shift; // at most zero, so badness cannot increase.
- if (idx > n - m) {
- *complete = true;
+ if (index > subject_length - pattern_length) {
return -1;
}
}
j--;
- while (j >= 0 && pattern[j] == (subject[idx + j])) j--;
+ while (j >= 0 && pattern[j] == (subject[index + j])) j--;
if (j < 0) {
- *complete = true;
- return idx;
+ return index;
} else {
- idx += last_char_shift;
+ index += last_char_shift;
// Badness increases by the number of characters we have
// checked, and decreases by the number of characters we
// can skip by shifting. It's a measure of how we are doing
// compared to reading each character exactly once.
- badness += (m - j) - last_char_shift;
+ badness += (pattern_length - j) - last_char_shift;
if (badness > 0) {
- *complete = false;
- return idx;
+ search->PopulateBoyerMooreTable();
+ search->strategy_ = &BoyerMooreSearch;
+ return BoyerMooreSearch(search, subject, index);
}
}
}
- *complete = true;
return -1;
}
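
The badness counter above implements a simple cost model: work done minus characters skipped, starting at -pattern_length so that cheap early matches never trigger an upgrade. A stand-alone sketch of that bookkeeping (struct and names are illustrative):

struct BadnessMeter {
  int badness;
  explicit BadnessMeter(int pattern_length) : badness(-pattern_length) {}
  // Record one round: `examined` characters looked at, `shift` skipped.
  // Returns true once we are doing worse than reading each character once.
  bool ShouldUpgrade(int examined, int shift) {
    badness += examined - shift;
    return badness > 0;
  }
};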
-template <typename SubjectChar, typename PatternChar>
-static int BoyerMooreIndexOf(Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- int idx) {
- ASSERT(algorithm <= BOYER_MOORE);
- int n = subject.length();
- int m = pattern.length();
- // Only preprocess at most kBMMaxShift last characters of pattern.
- int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
+template <typename PatternChar, typename SubjectChar>
+void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreHorspoolTable() {
+ int pattern_length = pattern_.length();
- PatternChar last_char = pattern[m - 1];
- // Continue search from i.
- while (idx <= n - m) {
- int j = m - 1;
- SubjectChar c;
- while (last_char != (c = subject[idx + j])) {
- int shift = j - CharOccurrence<SubjectChar, PatternChar>(c);
- idx += shift;
- if (idx > n - m) {
- return -1;
- }
- }
- while (j >= 0 && pattern[j] == (c = subject[idx + j])) j--;
- if (j < 0) {
- return idx;
- } else if (j < start) {
- // we have matched more than our tables allow us to be smart about.
- // Fall back on BMH shift.
- idx += m - 1 - CharOccurrence<SubjectChar, PatternChar>(last_char);
- } else {
- int gs_shift = BMBuffers::bmgs_buffers.shift(j + 1);
- int bc_occ = CharOccurrence<SubjectChar, PatternChar>(c);
- int shift = j - bc_occ;
- if (gs_shift > shift) {
- shift = gs_shift;
- }
- idx += shift;
+ int* bad_char_occurrence = bad_char_table();
+
+ // Only preprocess at most the last kBMMaxShift characters of the pattern.
+ int start = start_;
+ // Run forwards to populate bad_char_table, so that the *last* instance
+ // of each character equivalence class is the one registered.
+ // Notice: Doesn't include the last character.
+ int table_size = AlphabetSize();
+ if (start == 0) { // All patterns less than kBMMaxShift in length.
+ memset(bad_char_occurrence,
+ -1,
+ table_size * sizeof(*bad_char_occurrence));
+ } else {
+ for (int i = 0; i < table_size; i++) {
+ bad_char_occurrence[i] = start - 1;
}
}
-
- return -1;
+ for (int i = start; i < pattern_length - 1; i++) {
+ PatternChar c = pattern_[i];
+ int bucket = (sizeof(PatternChar) == 1) ? c : c % AlphabetSize();
+ bad_char_occurrence[bucket] = i;
+ }
}
+//---------------------------------------------------------------------
+// Linear string search with bailout to BMH.
+//---------------------------------------------------------------------
-// Trivial string search for shorter strings.
-// On return, if "complete" is set to true, the return value is the
-// final result of searching for the patter in the subject.
-// If "complete" is set to false, the return value is the index where
-// further checking should start, i.e., it's guaranteed that the pattern
-// does not occur at a position prior to the returned index.
+// Simple linear search for short patterns, which bails out if the string
+// isn't found very early in the subject. Upgrades to BoyerMooreHorspool.
template <typename PatternChar, typename SubjectChar>
-static int SimpleIndexOf(Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- int idx,
- bool* complete) {
- ASSERT(pattern.length() > 1);
+int StringSearch<PatternChar, SubjectChar>::InitialSearch(
+ StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int index) {
+ Vector<const PatternChar> pattern = search->pattern_;
int pattern_length = pattern.length();
// Badness is a count of how much work we have done. When we have
// done enough work we decide it's probably worth switching to a better
@@ -313,149 +519,52 @@ static int SimpleIndexOf(Vector<const SubjectChar> subject,
// We know our pattern is at least 2 characters, we cache the first so
// the common case of the first character not matching is faster.
PatternChar pattern_first_char = pattern[0];
- for (int i = idx, n = subject.length() - pattern_length; i <= n; i++) {
+ for (int i = index, n = subject.length() - pattern_length; i <= n; i++) {
badness++;
- if (badness > 0) {
- *complete = false;
- return i;
- }
- if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
- const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
- memchr(subject.start() + i,
- pattern_first_char,
- n - i + 1));
- if (pos == NULL) {
- *complete = true;
- return -1;
+ if (badness <= 0) {
+ if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
+ const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
+ memchr(subject.start() + i,
+ pattern_first_char,
+ n - i + 1));
+ if (pos == NULL) {
+ return -1;
+ }
+ i = static_cast<int>(pos - subject.start());
+ } else {
+ if (subject[i] != pattern_first_char) continue;
}
- i = static_cast<int>(pos - subject.start());
- } else {
- if (subject[i] != pattern_first_char) continue;
- }
- int j = 1;
- do {
- if (pattern[j] != subject[i+j]) {
- break;
+ int j = 1;
+ do {
+ if (pattern[j] != subject[i + j]) {
+ break;
+ }
+ j++;
+ } while (j < pattern_length);
+ if (j == pattern_length) {
+ return i;
}
- j++;
- } while (j < pattern_length);
- if (j == pattern_length) {
- *complete = true;
- return i;
- }
- badness += j;
- }
- *complete = true;
- return -1;
-}
-
-// Simple indexOf that never bails out. For short patterns only.
-template <typename PatternChar, typename SubjectChar>
-static int SimpleIndexOf(Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- int idx) {
- int pattern_length = pattern.length();
- PatternChar pattern_first_char = pattern[0];
- for (int i = idx, n = subject.length() - pattern_length; i <= n; i++) {
- if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
- const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
- memchr(subject.start() + i,
- pattern_first_char,
- n - i + 1));
- if (pos == NULL) return -1;
- i = static_cast<int>(pos - subject.start());
+ badness += j;
} else {
- if (subject[i] != pattern_first_char) continue;
- }
- int j = 1;
- while (j < pattern_length) {
- if (pattern[j] != subject[i+j]) {
- break;
- }
- j++;
- }
- if (j == pattern_length) {
- return i;
+ search->PopulateBoyerMooreHorspoolTable();
+ search->strategy_ = &BoyerMooreHorspoolSearch;
+ return BoyerMooreHorspoolSearch(search, subject, i);
}
}
return -1;
}
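
InitialSearch, like BoyerMooreHorspoolSearch above, retires itself by overwriting strategy_ with the next algorithm. A minimal stand-alone sketch of that self-upgrading function-pointer dispatch (all names invented, the heuristic reduced to a counter):

struct UpgradingSearch;
typedef int (*Strategy)(UpgradingSearch*, const char*, int);

struct UpgradingSearch {
  Strategy strategy;  // Mutated as the search learns about its input.
  int trial_budget;
};

static int Thorough(UpgradingSearch*, const char*, int from) {
  return from;  // Stand-in for a table-driven search.
}

static int Trial(UpgradingSearch* s, const char* subject, int from) {
  if (--s->trial_budget < 0) {   // Stand-in for the badness heuristic.
    s->strategy = &Thorough;     // Later calls bypass the trial phase.
    return Thorough(s, subject, from);
  }
  return from;  // Stand-in for the cheap linear scan.
}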
-// Strategy for searching for a string in another string.
-enum StringSearchStrategy { SEARCH_FAIL, SEARCH_SHORT, SEARCH_LONG };
-
-
-template <typename PatternChar>
-static inline StringSearchStrategy InitializeStringSearch(
- Vector<const PatternChar> pat, bool ascii_subject) {
- // We have an ASCII haystack and a non-ASCII needle. Check if there
- // really is a non-ASCII character in the needle and bail out if there
- // is.
- if (ascii_subject && sizeof(PatternChar) > 1) {
- for (int i = 0; i < pat.length(); i++) {
- uc16 c = pat[i];
- if (c > String::kMaxAsciiCharCode) {
- return SEARCH_FAIL;
- }
- }
- }
- if (pat.length() < kBMMinPatternLength) {
- return SEARCH_SHORT;
- }
- algorithm = SIMPLE_SEARCH;
- return SEARCH_LONG;
-}
-
-
-// Dispatch long needle searches to different algorithms.
+// Perform a single stand-alone search.
+// If searching multiple times for the same pattern, a search
+// object should be constructed once and the Search function then called
+// for each search.
template <typename SubjectChar, typename PatternChar>
-static int ComplexIndexOf(Vector<const SubjectChar> sub,
- Vector<const PatternChar> pat,
- int start_index) {
- ASSERT(pat.length() >= kBMMinPatternLength);
- // Try algorithms in order of increasing setup cost and expected performance.
- bool complete;
- int idx = start_index;
- switch (algorithm) {
- case SIMPLE_SEARCH:
- idx = SimpleIndexOf(sub, pat, idx, &complete);
- if (complete) return idx;
- BoyerMoorePopulateBadCharTable(pat);
- algorithm = BOYER_MOORE_HORSPOOL;
- // FALLTHROUGH.
- case BOYER_MOORE_HORSPOOL:
- idx = BoyerMooreHorspool(sub, pat, idx, &complete);
- if (complete) return idx;
- // Build the Good Suffix table and continue searching.
- BoyerMoorePopulateGoodSuffixTable(pat);
- algorithm = BOYER_MOORE;
- // FALLTHROUGH.
- case BOYER_MOORE:
- return BoyerMooreIndexOf(sub, pat, idx);
- }
- UNREACHABLE();
- return -1;
-}
-
-
-// Dispatch to different search strategies for a single search.
-// If searching multiple times on the same needle, the search
-// strategy should only be computed once and then dispatch to different
-// loops.
-template <typename SubjectChar, typename PatternChar>
-static int StringSearch(Vector<const SubjectChar> sub,
- Vector<const PatternChar> pat,
+static int SearchString(Vector<const SubjectChar> subject,
+ Vector<const PatternChar> pattern,
int start_index) {
- bool ascii_subject = (sizeof(SubjectChar) == 1);
- StringSearchStrategy strategy = InitializeStringSearch(pat, ascii_subject);
- switch (strategy) {
- case SEARCH_FAIL: return -1;
- case SEARCH_SHORT: return SimpleIndexOf(sub, pat, start_index);
- case SEARCH_LONG: return ComplexIndexOf(sub, pat, start_index);
- }
- UNREACHABLE();
- return -1;
+ StringSearch<PatternChar, SubjectChar> search(pattern);
+ return search.Search(subject, start_index);
}
}} // namespace v8::internal
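
For contrast with the construct-once search object, the stand-alone SearchString wrapper corresponds to the familiar one-shot standard-library call; a sketch (OneShotFind is an illustrative name):

#include <string>

// One-shot search: fine for a single query, but repeated calls rebuild
// nothing reusable, which is exactly what StringSearch avoids.
int OneShotFind(const std::string& subject, const std::string& pattern,
                int start_index) {
  std::string::size_type pos = subject.find(pattern, start_index);
  return pos == std::string::npos ? -1 : static_cast<int>(pos);
}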
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 34989d371..e6df1b49e 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -988,6 +988,7 @@ Object* StoreInterceptorProperty(Arguments args) {
Object* KeyedLoadPropertyWithInterceptor(Arguments args) {
JSObject* receiver = JSObject::cast(args[0]);
+ ASSERT(Smi::cast(args[1])->value() >= 0);
uint32_t index = Smi::cast(args[1])->value();
return receiver->GetElementWithInterceptor(receiver, index);
}
@@ -1186,25 +1187,43 @@ void StubCompiler::LookupPostInterceptor(JSObject* holder,
Object* LoadStubCompiler::GetCode(PropertyType type, String* name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
- return GetCodeWithFlags(flags, name);
+ Object* result = GetCodeWithFlags(flags, name);
+ if (!result->IsFailure()) {
+ PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(result), name));
+ }
+ return result;
}
Object* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, type);
- return GetCodeWithFlags(flags, name);
+ Object* result = GetCodeWithFlags(flags, name);
+ if (!result->IsFailure()) {
+ PROFILE(
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(result), name));
+ }
+ return result;
}
Object* StoreStubCompiler::GetCode(PropertyType type, String* name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, type);
- return GetCodeWithFlags(flags, name);
+ Object* result = GetCodeWithFlags(flags, name);
+ if (!result->IsFailure()) {
+ PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(result), name));
+ }
+ return result;
}
Object* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, type);
- return GetCodeWithFlags(flags, name);
+ Object* result = GetCodeWithFlags(flags, name);
+ if (!result->IsFailure()) {
+ PROFILE(
+ CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(result), name));
+ }
+ return result;
}
@@ -1227,7 +1246,7 @@ Object* CallStubCompiler::CompileCustomCall(int generator_id,
String* fname) {
ASSERT(generator_id >= 0 && generator_id < kNumCallGenerators);
switch (generator_id) {
-#define CALL_GENERATOR_CASE(ignored1, ignored2, ignored3, name) \
+#define CALL_GENERATOR_CASE(ignored1, ignored2, name) \
case k##name##CallGenerator: \
return CallStubCompiler::Compile##name##Call(object, \
holder, \
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 388bb52ca..e4a9e955f 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -370,13 +370,15 @@ class StubCompiler BASE_EMBEDDED {
Register prototype);
// Generates prototype loading code that uses the objects from the
- // context we were in when this function was called. This ties the
- // generated code to a particular context and so must not be used in
- // cases where the generated code is not allowed to have references
- // to objects from a context.
+ // context we were in when this function was called. If the context
+ // has changed, a jump to miss is performed. This ties the generated
+ // code to a particular context and so must not be used in cases
+ // where the generated code is not allowed to have references to
+ // objects from a context.
static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
- Register prototype);
+ Register prototype,
+ Label* miss);
static void GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst, Register src,
@@ -612,29 +614,26 @@ class KeyedStoreStubCompiler: public StubCompiler {
// Installation of custom call generators for the selected builtins is
// handled by the bootstrapper.
//
-// Each entry has a name of a global function (lowercased), a flag
-// controlling whether the generator is set on the function itself or
-// on its instance prototype, a name of a builtin function on the
-// function or its instance prototype (the one the generator is set
-// for), and a name of a generator itself (used to build ids and
-// generator function names).
-#define CUSTOM_CALL_IC_GENERATORS(V) \
- V(array, INSTANCE_PROTOTYPE, push, ArrayPush) \
- V(array, INSTANCE_PROTOTYPE, pop, ArrayPop) \
- V(string, INSTANCE_PROTOTYPE, charCodeAt, StringCharCodeAt) \
- V(string, INSTANCE_PROTOTYPE, charAt, StringCharAt) \
- V(string, FUNCTION, fromCharCode, StringFromCharCode)
+// Each entry has a name of a global object property holding an object
+// optionally followed by ".prototype" (this controls whether the
+// generator is set on the object itself or, in case it's a function,
+// on its instance prototype), a name of a builtin function on the
+// object (the one the generator is set for), and a name of the
+// generator (used to build ids and generator function names).
+#define CUSTOM_CALL_IC_GENERATORS(V) \
+ V(Array.prototype, push, ArrayPush) \
+ V(Array.prototype, pop, ArrayPop) \
+ V(String.prototype, charCodeAt, StringCharCodeAt) \
+ V(String.prototype, charAt, StringCharAt) \
+ V(String, fromCharCode, StringFromCharCode) \
+ V(Math, floor, MathFloor) \
+ V(Math, abs, MathAbs)
class CallStubCompiler: public StubCompiler {
public:
- enum CustomGeneratorOwner {
- FUNCTION,
- INSTANCE_PROTOTYPE
- };
-
enum {
-#define DECLARE_CALL_GENERATOR_ID(ignored1, ignore2, ignored3, name) \
+#define DECLARE_CALL_GENERATOR_ID(ignored1, ignore2, name) \
k##name##CallGenerator,
CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR_ID)
#undef DECLARE_CALL_GENERATOR_ID
@@ -673,11 +672,11 @@ class CallStubCompiler: public StubCompiler {
JSFunction* function,
String* name);
-#define DECLARE_CALL_GENERATOR(ignored1, ignored2, ignored3, name) \
- Object* Compile##name##Call(Object* object, \
- JSObject* holder, \
- JSGlobalPropertyCell* cell, \
- JSFunction* function, \
+#define DECLARE_CALL_GENERATOR(ignored1, ignored2, name) \
+ Object* Compile##name##Call(Object* object, \
+ JSObject* holder, \
+ JSGlobalPropertyCell* cell, \
+ JSFunction* function, \
String* fname);
CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
#undef DECLARE_CALL_GENERATOR
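
CUSTOM_CALL_IC_GENERATORS is an X-macro: one list drives the enum of generator ids, the method declarations, and the dispatch switch, so dropping a column only touches the list and its expanders. A minimal stand-alone illustration of the technique (names invented):

#define FRUIT_LIST(V) \
  V(Apple)            \
  V(Banana)

enum FruitId {
#define DECLARE_ID(name) k##name##Id,
  FRUIT_LIST(DECLARE_ID)
#undef DECLARE_ID
  kFruitCount  // Automatically tracks the list length.
};

#define DEFINE_NAME_FN(name) \
  const char* NameOf##name() { return #name; }
FRUIT_LIST(DEFINE_NAME_FN)
#undef DEFINE_NAME_FN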
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index daf267fe6..052176778 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -222,11 +222,21 @@ uint32_t ComputeIntegerHash(uint32_t key);
// ----------------------------------------------------------------------------
// I/O support.
-// Our version of printf(). Avoids compilation errors that we get
-// with standard printf when attempting to print pointers, etc.
-// (the errors are due to the extra compilation flags, which we
-// want elsewhere).
-void PrintF(const char* format, ...);
+#if __GNUC__ >= 4
+// On gcc we can ask the compiler to check the types of %d-style format
+// specifiers and their associated arguments. TODO(erikcorry) fix this
+// so it works on MacOSX.
+#if defined(__MACH__) && defined(__APPLE__)
+#define PRINTF_CHECKING
+#else // MacOSX.
+#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2)))
+#endif
+#else
+#define PRINTF_CHECKING
+#endif
+
+// Our version of printf().
+void PRINTF_CHECKING PrintF(const char* format, ...);
// Our version of fflush.
void Flush();
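
With the attribute added above, gcc checks printf-style format strings against their arguments at compile time. A self-contained sketch of the same idea (MY_PRINTF_CHECKING and LogF are illustrative names):

#include <cstdarg>
#include <cstdio>

#if defined(__GNUC__)
#define MY_PRINTF_CHECKING __attribute__((format(printf, 1, 2)))
#else
#define MY_PRINTF_CHECKING
#endif

void MY_PRINTF_CHECKING LogF(const char* format, ...);

void LogF(const char* format, ...) {
  va_list args;
  va_start(args, format);
  vfprintf(stderr, format, args);  // gcc warns on e.g. LogF("%s", 42).
  va_end(args);
}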
diff --git a/deps/v8/src/utils.h.orig b/deps/v8/src/utils.h.orig
deleted file mode 100644
index fefbfe9af..000000000
--- a/deps/v8/src/utils.h.orig
+++ /dev/null
@@ -1,962 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_UTILS_H_
-#define V8_UTILS_H_
-
-#include <stdlib.h>
-#include <string.h>
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// General helper functions
-
-#define IS_POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
-
-// Returns true iff x is a power of 2 (or zero). Cannot be used with the
-// maximally negative value of the type T (the -1 overflows).
-template <typename T>
-static inline bool IsPowerOf2(T x) {
- return IS_POWER_OF_TWO(x);
-}
-
-
-// X must be a power of 2. Returns the number of trailing zeros.
-template <typename T>
-static inline int WhichPowerOf2(T x) {
- ASSERT(IsPowerOf2(x));
- ASSERT(x != 0);
- if (x < 0) return 31;
- int bits = 0;
-#ifdef DEBUG
- int original_x = x;
-#endif
- if (x >= 0x10000) {
- bits += 16;
- x >>= 16;
- }
- if (x >= 0x100) {
- bits += 8;
- x >>= 8;
- }
- if (x >= 0x10) {
- bits += 4;
- x >>= 4;
- }
- switch (x) {
- default: UNREACHABLE();
- case 8: bits++; // Fall through.
- case 4: bits++; // Fall through.
- case 2: bits++; // Fall through.
- case 1: break;
- }
- ASSERT_EQ(1 << bits, original_x);
- return bits;
- return 0;
-}
-
-
-// The C++ standard leaves the semantics of '>>' undefined for
-// negative signed operands. Most implementations do the right thing,
-// though.
-static inline int ArithmeticShiftRight(int x, int s) {
- return x >> s;
-}
-
-
-// Compute the 0-relative offset of some absolute value x of type T.
-// This allows conversion of Addresses and integral types into
-// 0-relative int offsets.
-template <typename T>
-static inline intptr_t OffsetFrom(T x) {
- return x - static_cast<T>(0);
-}
-
-
-// Compute the absolute value of type T for some 0-relative offset x.
-// This allows conversion of 0-relative int offsets into Addresses and
-// integral types.
-template <typename T>
-static inline T AddressFrom(intptr_t x) {
- return static_cast<T>(static_cast<T>(0) + x);
-}
-
-
-// Return the largest multiple of m which is <= x.
-template <typename T>
-static inline T RoundDown(T x, int m) {
- ASSERT(IsPowerOf2(m));
- return AddressFrom<T>(OffsetFrom(x) & -m);
-}
-
-
-// Return the smallest multiple of m which is >= x.
-template <typename T>
-static inline T RoundUp(T x, int m) {
- return RoundDown(x + m - 1, m);
-}
-
-
-template <typename T>
-static int Compare(const T& a, const T& b) {
- if (a == b)
- return 0;
- else if (a < b)
- return -1;
- else
- return 1;
-}
-
-
-template <typename T>
-static int PointerValueCompare(const T* a, const T* b) {
- return Compare<T>(*a, *b);
-}
-
-
-// Returns the smallest power of two which is >= x. If you pass in a
-// number that is already a power of two, it is returned as is.
-uint32_t RoundUpToPowerOf2(uint32_t x);
-
-
-template <typename T>
-static inline bool IsAligned(T value, T alignment) {
- ASSERT(IsPowerOf2(alignment));
- return (value & (alignment - 1)) == 0;
-}
-
-
-// Returns true if (addr + offset) is aligned.
-static inline bool IsAddressAligned(Address addr,
- intptr_t alignment,
- int offset) {
- intptr_t offs = OffsetFrom(addr + offset);
- return IsAligned(offs, alignment);
-}
-
-
-// Returns the maximum of the two parameters.
-template <typename T>
-static T Max(T a, T b) {
- return a < b ? b : a;
-}
-
-
-// Returns the minimum of the two parameters.
-template <typename T>
-static T Min(T a, T b) {
- return a < b ? a : b;
-}
-
-
-inline int StrLength(const char* string) {
- size_t length = strlen(string);
- ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
- return static_cast<int>(length);
-}
-
-
-// ----------------------------------------------------------------------------
-// BitField is a help template for encoding and decode bitfield with
-// unsigned content.
-template<class T, int shift, int size>
-class BitField {
- public:
- // Tells whether the provided value fits into the bit field.
- static bool is_valid(T value) {
- return (static_cast<uint32_t>(value) & ~((1U << (size)) - 1)) == 0;
- }
-
- // Returns a uint32_t mask of bit field.
- static uint32_t mask() {
- // To use all bits of a uint32 in a bitfield without compiler warnings we
- // have to compute 2^32 without using a shift count of 32.
- return ((1U << shift) << size) - (1U << shift);
- }
-
- // Returns a uint32_t with the bit field value encoded.
- static uint32_t encode(T value) {
- ASSERT(is_valid(value));
- return static_cast<uint32_t>(value) << shift;
- }
-
- // Extracts the bit field from the value.
- static T decode(uint32_t value) {
- return static_cast<T>((value & mask()) >> shift);
- }
-};
-
-
-// ----------------------------------------------------------------------------
-// Hash function.
-
-uint32_t ComputeIntegerHash(uint32_t key);
-
-
-// ----------------------------------------------------------------------------
-// I/O support.
-
-// Our version of printf(). Avoids compilation errors that we get
-// with standard printf when attempting to print pointers, etc.
-// (the errors are due to the extra compilation flags, which we
-// want elsewhere).
-void PrintF(const char* format, ...);
-
-// Our version of fflush.
-void Flush();
-
-
-// Read a line of characters after printing the prompt to stdout. The resulting
-// char* needs to be disposed off with DeleteArray by the caller.
-char* ReadLine(const char* prompt);
-
-
-// Read and return the raw bytes in a file. the size of the buffer is returned
-// in size.
-// The returned buffer must be freed by the caller.
-byte* ReadBytes(const char* filename, int* size, bool verbose = true);
-
-
-// Write size chars from str to the file given by filename.
-// The file is overwritten. Returns the number of chars written.
-int WriteChars(const char* filename,
- const char* str,
- int size,
- bool verbose = true);
-
-
-// Write size bytes to the file given by filename.
-// The file is overwritten. Returns the number of bytes written.
-int WriteBytes(const char* filename,
- const byte* bytes,
- int size,
- bool verbose = true);
-
-
-// Write the C code
-// const char* <varname> = "<str>";
-// const int <varname>_len = <len>;
-// to the file given by filename. Only the first len chars are written.
-int WriteAsCFile(const char* filename, const char* varname,
- const char* str, int size, bool verbose = true);
-
-
-// ----------------------------------------------------------------------------
-// Miscellaneous
-
-// A static resource holds a static instance that can be reserved in
-// a local scope using an instance of Access. Attempts to re-reserve
-// the instance will cause an error.
-template <typename T>
-class StaticResource {
- public:
- StaticResource() : is_reserved_(false) {}
-
- private:
- template <typename S> friend class Access;
- T instance_;
- bool is_reserved_;
-};
-
-
-// Locally scoped access to a static resource.
-template <typename T>
-class Access {
- public:
- explicit Access(StaticResource<T>* resource)
- : resource_(resource)
- , instance_(&resource->instance_) {
- ASSERT(!resource->is_reserved_);
- resource->is_reserved_ = true;
- }
-
- ~Access() {
- resource_->is_reserved_ = false;
- resource_ = NULL;
- instance_ = NULL;
- }
-
- T* value() { return instance_; }
- T* operator -> () { return instance_; }
-
- private:
- StaticResource<T>* resource_;
- T* instance_;
-};
-
-
-template <typename T>
-class Vector {
- public:
- Vector() : start_(NULL), length_(0) {}
- Vector(T* data, int length) : start_(data), length_(length) {
- ASSERT(length == 0 || (length > 0 && data != NULL));
- }
-
- static Vector<T> New(int length) {
- return Vector<T>(NewArray<T>(length), length);
- }
-
- // Returns a vector using the same backing storage as this one,
- // spanning from and including 'from', to but not including 'to'.
- Vector<T> SubVector(int from, int to) {
- ASSERT(to <= length_);
- ASSERT(from < to);
- ASSERT(0 <= from);
- return Vector<T>(start() + from, to - from);
- }
-
- // Returns the length of the vector.
- int length() const { return length_; }
-
- // Returns whether or not the vector is empty.
- bool is_empty() const { return length_ == 0; }
-
- // Returns the pointer to the start of the data in the vector.
- T* start() const { return start_; }
-
- // Access individual vector elements - checks bounds in debug mode.
- T& operator[](int index) const {
- ASSERT(0 <= index && index < length_);
- return start_[index];
- }
-
- T& first() { return start_[0]; }
-
- T& last() { return start_[length_ - 1]; }
-
- // Returns a clone of this vector with a new backing store.
- Vector<T> Clone() const {
- T* result = NewArray<T>(length_);
- for (int i = 0; i < length_; i++) result[i] = start_[i];
- return Vector<T>(result, length_);
- }
-
- void Sort(int (*cmp)(const T*, const T*)) {
- typedef int (*RawComparer)(const void*, const void*);
- qsort(start(),
- length(),
- sizeof(T),
- reinterpret_cast<RawComparer>(cmp));
- }
-
- void Sort() {
- Sort(PointerValueCompare<T>);
- }
-
- void Truncate(int length) {
- ASSERT(length <= length_);
- length_ = length;
- }
-
- // Releases the array underlying this vector. Once disposed the
- // vector is empty.
- void Dispose() {
- DeleteArray(start_);
- start_ = NULL;
- length_ = 0;
- }
-
- inline Vector<T> operator+(int offset) {
- ASSERT(offset < length_);
- return Vector<T>(start_ + offset, length_ - offset);
- }
-
- // Factory method for creating empty vectors.
- static Vector<T> empty() { return Vector<T>(NULL, 0); }
-
- template<typename S>
- static Vector<T> cast(Vector<S> input) {
- return Vector<T>(reinterpret_cast<T*>(input.start()),
- input.length() * sizeof(S) / sizeof(T));
- }
-
- protected:
- void set_start(T* start) { start_ = start; }
-
- private:
- T* start_;
- int length_;
-};
-
-
-// A temporary assignment sets a (non-local) variable to a value on
-// construction and resets it the value on destruction.
-template <typename T>
-class TempAssign {
- public:
- TempAssign(T* var, T value): var_(var), old_value_(*var) {
- *var = value;
- }
-
- ~TempAssign() { *var_ = old_value_; }
-
- private:
- T* var_;
- T old_value_;
-};
-
-
-template <typename T, int kSize>
-class EmbeddedVector : public Vector<T> {
- public:
- EmbeddedVector() : Vector<T>(buffer_, kSize) { }
-
- // When copying, make underlying Vector to reference our buffer.
- EmbeddedVector(const EmbeddedVector& rhs)
- : Vector<T>(rhs) {
- memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
- set_start(buffer_);
- }
-
- EmbeddedVector& operator=(const EmbeddedVector& rhs) {
- if (this == &rhs) return *this;
- Vector<T>::operator=(rhs);
- memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
- this->set_start(buffer_);
- return *this;
- }
-
- private:
- T buffer_[kSize];
-};
-
-
-template <typename T>
-class ScopedVector : public Vector<T> {
- public:
- explicit ScopedVector(int length) : Vector<T>(NewArray<T>(length), length) { }
- ~ScopedVector() {
- DeleteArray(this->start());
- }
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
-};
-
-
-inline Vector<const char> CStrVector(const char* data) {
- return Vector<const char>(data, StrLength(data));
-}
-
-inline Vector<char> MutableCStrVector(char* data) {
- return Vector<char>(data, StrLength(data));
-}
-
-inline Vector<char> MutableCStrVector(char* data, int max) {
- int length = StrLength(data);
- return Vector<char>(data, (length < max) ? length : max);
-}
-
-template <typename T>
-inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
- int length) {
- return Vector< Handle<Object> >(
- reinterpret_cast<v8::internal::Handle<Object>*>(elms), length);
-}
-
-
-/*
- * A class that collects values into a backing store.
- * Specialized versions of the class can allow access to the backing store
- * in different ways.
- * There is no guarantee that the backing store is contiguous (and, as a
- * consequence, no guarantees that consecutively added elements are adjacent
- * in memory). The collector may move elements unless it has guaranteed not
- * to.
- */
-template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
-class Collector {
- public:
- explicit Collector(int initial_capacity = kMinCapacity)
- : index_(0), size_(0) {
- if (initial_capacity < kMinCapacity) {
- initial_capacity = kMinCapacity;
- }
- current_chunk_ = Vector<T>::New(initial_capacity);
- }
-
- virtual ~Collector() {
- // Free backing store (in reverse allocation order).
- current_chunk_.Dispose();
- for (int i = chunks_.length() - 1; i >= 0; i--) {
- chunks_.at(i).Dispose();
- }
- }
-
- // Add a single element.
- inline void Add(T value) {
- if (index_ >= current_chunk_.length()) {
- Grow(1);
- }
- current_chunk_[index_] = value;
- index_++;
- size_++;
- }
-
- // Add a block of contiguous elements and return a Vector backed by the
- // memory area.
- // A basic Collector will keep this vector valid as long as the Collector
- // is alive.
- inline Vector<T> AddBlock(int size, T initial_value) {
- ASSERT(size > 0);
- if (size > current_chunk_.length() - index_) {
- Grow(size);
- }
- T* position = current_chunk_.start() + index_;
- index_ += size;
- size_ += size;
- for (int i = 0; i < size; i++) {
- position[i] = initial_value;
- }
- return Vector<T>(position, size);
- }
-
-
- // Write the contents of the collector into the provided vector.
- void WriteTo(Vector<T> destination) {
- ASSERT(size_ <= destination.length());
- int position = 0;
- for (int i = 0; i < chunks_.length(); i++) {
- Vector<T> chunk = chunks_.at(i);
- for (int j = 0; j < chunk.length(); j++) {
- destination[position] = chunk[j];
- position++;
- }
- }
- for (int i = 0; i < index_; i++) {
- destination[position] = current_chunk_[i];
- position++;
- }
- }
-
- // Allocate a single contiguous vector, copy all the collected
- // elements to the vector, and return it.
- // The caller is responsible for freeing the memory of the returned
- // vector (e.g., using Vector::Dispose).
- Vector<T> ToVector() {
- Vector<T> new_store = Vector<T>::New(size_);
- WriteTo(new_store);
- return new_store;
- }
-
- // Resets the collector to be empty.
- virtual void Reset() {
- for (int i = chunks_.length() - 1; i >= 0; i--) {
- chunks_.at(i).Dispose();
- }
- chunks_.Rewind(0);
- index_ = 0;
- size_ = 0;
- }
-
- // Total number of elements added to collector so far.
- inline int size() { return size_; }
-
- protected:
- static const int kMinCapacity = 16;
- List<Vector<T> > chunks_;
- Vector<T> current_chunk_; // Block of memory currently being written into.
- int index_; // Current index in current chunk.
- int size_; // Total number of elements in collector.
-
- // Creates a new current chunk, and stores the old chunk in the chunks_ list.
- void Grow(int min_capacity) {
- ASSERT(growth_factor > 1);
- int growth = current_chunk_.length() * (growth_factor - 1);
- if (growth > max_growth) {
- growth = max_growth;
- }
- int new_capacity = current_chunk_.length() + growth;
- if (new_capacity < min_capacity) {
- new_capacity = min_capacity + growth;
- }
- Vector<T> new_chunk = Vector<T>::New(new_capacity);
- int new_index = PrepareGrow(new_chunk);
- if (index_ > 0) {
- chunks_.Add(current_chunk_.SubVector(0, index_));
- } else {
- // Can happen if the call to PrepareGrow moves everything into
- // the new chunk.
- current_chunk_.Dispose();
- }
- current_chunk_ = new_chunk;
- index_ = new_index;
- ASSERT(index_ + min_capacity <= current_chunk_.length());
- }
-
- // Before replacing the current chunk, give a subclass the option to move
- // some of the current data into the new chunk. The function may update
- // the current index_ value to represent data no longer in the current chunk.
- // Returns the initial index of the new chunk (after copied data).
- virtual int PrepareGrow(Vector<T> new_chunk) {
- return 0;
- }
-};
-
-
-/*
- * A collector that allows sequences of values to be guaranteed to
- * stay consecutive.
- * If the backing store grows while a sequence is active, the current
- * sequence might be moved, but after the sequence is ended, it will
- * not move again.
- * NOTICE: Blocks allocated using Collector::AddBlock(int) can move
- * as well, if inside an active sequence where another element is added.
- */
-template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
-class SequenceCollector : public Collector<T, growth_factor, max_growth> {
- public:
- explicit SequenceCollector(int initial_capacity)
- : Collector<T, growth_factor, max_growth>(initial_capacity),
- sequence_start_(kNoSequence) { }
-
- virtual ~SequenceCollector() {}
-
- void StartSequence() {
- ASSERT(sequence_start_ == kNoSequence);
- sequence_start_ = this->index_;
- }
-
- Vector<T> EndSequence() {
- ASSERT(sequence_start_ != kNoSequence);
- int sequence_start = sequence_start_;
- sequence_start_ = kNoSequence;
- if (sequence_start == this->index_) return Vector<T>();
- return this->current_chunk_.SubVector(sequence_start, this->index_);
- }
-
- // Drops the currently added sequence, and all collected elements in it.
- void DropSequence() {
- ASSERT(sequence_start_ != kNoSequence);
- int sequence_length = this->index_ - sequence_start_;
- this->index_ = sequence_start_;
- this->size_ -= sequence_length;
- sequence_start_ = kNoSequence;
- }
-
- virtual void Reset() {
- sequence_start_ = kNoSequence;
- this->Collector<T, growth_factor, max_growth>::Reset();
- }
-
- private:
- static const int kNoSequence = -1;
- int sequence_start_;
-
- // Move the currently active sequence to the new chunk.
- virtual int PrepareGrow(Vector<T> new_chunk) {
- if (sequence_start_ != kNoSequence) {
- int sequence_length = this->index_ - sequence_start_;
- // The new chunk is always larger than the current chunk, so there
- // is room for the copy.
- ASSERT(sequence_length < new_chunk.length());
- for (int i = 0; i < sequence_length; i++) {
- new_chunk[i] = this->current_chunk_[sequence_start_ + i];
- }
- this->index_ = sequence_start_;
- sequence_start_ = 0;
- return sequence_length;
- }
- return 0;
- }
-};
-
-
-// Simple support to read a file into a 0-terminated C-string.
-// The returned buffer must be freed by the caller.
-// On return, *exits tells whether the file existed.
-Vector<const char> ReadFile(const char* filename,
- bool* exists,
- bool verbose = true);
-
-
-// Simple wrapper that allows an ExternalString to refer to a
-// Vector<const char>. Doesn't assume ownership of the data.
-class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
- public:
- explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {}
-
- virtual const char* data() const { return data_.start(); }
-
- virtual size_t length() const { return data_.length(); }
-
- private:
- Vector<const char> data_;
-};
-
-
-// Helper class for building result strings in a character buffer. The
-// purpose of the class is to use safe operations that checks the
-// buffer bounds on all operations in debug mode.
-class StringBuilder {
- public:
- // Create a string builder with a buffer of the given size. The
- // buffer is allocated through NewArray<char> and must be
- // deallocated by the caller of Finalize().
- explicit StringBuilder(int size);
-
- StringBuilder(char* buffer, int size)
- : buffer_(buffer, size), position_(0) { }
-
- ~StringBuilder() { if (!is_finalized()) Finalize(); }
-
- int size() const { return buffer_.length(); }
-
- // Get the current position in the builder.
- int position() const {
- ASSERT(!is_finalized());
- return position_;
- }
-
- // Reset the position.
- void Reset() { position_ = 0; }
-
- // Add a single character to the builder. It is not allowed to add
- // 0-characters; use the Finalize() method to terminate the string
- // instead.
- void AddCharacter(char c) {
- ASSERT(c != '\0');
- ASSERT(!is_finalized() && position_ < buffer_.length());
- buffer_[position_++] = c;
- }
-
- // Add an entire string to the builder. Uses strlen() internally to
- // compute the length of the input string.
- void AddString(const char* s);
-
- // Add the first 'n' characters of the given string 's' to the
- // builder. The input string must have enough characters.
- void AddSubstring(const char* s, int n);
-
- // Add formatted contents to the builder just like printf().
- void AddFormatted(const char* format, ...);
-
- // Add character padding to the builder. If count is non-positive,
- // nothing is added to the builder.
- void AddPadding(char c, int count);
-
- // Finalize the string by 0-terminating it and returning the buffer.
- char* Finalize();
-
- private:
- Vector<char> buffer_;
- int position_;
-
- bool is_finalized() const { return position_ < 0; }
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
-};
-
-
-// Custom memcpy implementation for platforms where the standard version
-// may not be good enough.
-// TODO(lrn): Check whether some IA32 platforms should be excluded.
-#if defined(V8_TARGET_ARCH_IA32)
-
-// TODO(lrn): Extend to other platforms as needed.
-
-typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
-
-// Implemented in codegen-<arch>.cc.
-MemCopyFunction CreateMemCopyFunction();
-
-// Copy memory area to disjoint memory area.
-static inline void MemCopy(void* dest, const void* src, size_t size) {
- static MemCopyFunction memcopy = CreateMemCopyFunction();
- (*memcopy)(dest, src, size);
-#ifdef DEBUG
- CHECK_EQ(0, memcmp(dest, src, size));
-#endif
-}
-
-
-// Limit below which the extra overhead of the MemCopy function is likely
-// to outweigh the benefits of faster copying.
-// TODO(lrn): Try to find a more precise value.
-static const int kMinComplexMemCopy = 64;
-
-#else // V8_TARGET_ARCH_IA32
-
-static inline void MemCopy(void* dest, const void* src, size_t size) {
- memcpy(dest, src, size);
-}
-
-static const int kMinComplexMemCopy = 256;
-
-#endif // V8_TARGET_ARCH_IA32
-
-
-// Copy from ASCII/16bit chars to ASCII/16bit chars.
-template <typename sourcechar, typename sinkchar>
-static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
- sinkchar* limit = dest + chars;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- if (sizeof(*dest) == sizeof(*src)) {
- if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) {
- MemCopy(dest, src, chars * sizeof(*dest));
- return;
- }
- // Number of characters in a uintptr_t.
- static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT
- while (dest <= limit - kStepSize) {
- *reinterpret_cast<uintptr_t*>(dest) =
- *reinterpret_cast<const uintptr_t*>(src);
- dest += kStepSize;
- src += kStepSize;
- }
- }
-#endif
- while (dest < limit) {
- *dest++ = static_cast<sinkchar>(*src++);
- }
-}
-
-
-// Compare ASCII/16bit chars to ASCII/16bit chars.
-template <typename lchar, typename rchar>
-static inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
- const lchar* limit = lhs + chars;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- if (sizeof(*lhs) == sizeof(*rhs)) {
- // Number of characters in a uintptr_t.
- static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs); // NOLINT
- while (lhs <= limit - kStepSize) {
- if (*reinterpret_cast<const uintptr_t*>(lhs) !=
- *reinterpret_cast<const uintptr_t*>(rhs)) {
- break;
- }
- lhs += kStepSize;
- rhs += kStepSize;
- }
- }
-#endif
- while (lhs < limit) {
- int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
- if (r != 0) return r;
- ++lhs;
- ++rhs;
- }
- return 0;
-}
-
-
-template <typename T>
-static inline void MemsetPointer(T** dest, T* value, int counter) {
-#if defined(V8_HOST_ARCH_IA32)
-#define STOS "stosl"
-#elif defined(V8_HOST_ARCH_X64)
-#define STOS "stosq"
-#endif
-
-#if defined(__GNUC__) && defined(STOS)
- asm volatile(
- "cld;"
- "rep ; " STOS
- : "+&c" (counter), "+&D" (dest)
- : "a" (value)
- : "memory", "cc");
-#else
- for (int i = 0; i < counter; i++) {
- dest[i] = value;
- }
-#endif
-
-#undef STOS
-}
-
-
-// Copies data from |src| to |dst|. The data spans MUST not overlap.
-inline void CopyWords(Object** dst, Object** src, int num_words) {
- ASSERT(Min(dst, src) + num_words <= Max(dst, src));
- ASSERT(num_words > 0);
-
- // Use block copying memcpy if the segment we're copying is
- // enough to justify the extra call/setup overhead.
- static const int kBlockCopyLimit = 16;
-
- if (num_words >= kBlockCopyLimit) {
- memcpy(dst, src, num_words * kPointerSize);
- } else {
- int remaining = num_words;
- do {
- remaining--;
- *dst++ = *src++;
- } while (remaining > 0);
- }
-}
-
-
-// Calculate 10^exponent.
-int TenToThe(int exponent);
-
-
-// The type-based aliasing rule allows the compiler to assume that pointers of
-// different types (for some definition of different) never alias each other.
-// Thus the following code does not work:
-//
-// float f = foo();
-// int fbits = *(int*)(&f);
-//
-// The compiler 'knows' that the int pointer can't refer to f since the types
-// don't match, so the compiler may cache f in a register, leaving random data
-// in fbits. Using C++ style casts makes no difference, however a pointer to
-// char data is assumed to alias any other pointer. This is the 'memcpy
-// exception'.
-//
-// Bit_cast uses the memcpy exception to move the bits from a variable of one
-// type of a variable of another type. Of course the end result is likely to
-// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
-// will completely optimize BitCast away.
-//
-// There is an additional use for BitCast.
-// Recent gccs will warn when they see casts that may result in breakage due to
-// the type-based aliasing rule. If you have checked that there is no breakage
-// you can use BitCast to cast one pointer type to another. This confuses gcc
-// enough that it can no longer see that you have cast one pointer type to
-// another thus avoiding the warning.
-template <class Dest, class Source>
-inline Dest BitCast(const Source& source) {
- // Compile time assertion: sizeof(Dest) == sizeof(Source)
- // A compile error here means your Dest and Source have different sizes.
- typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
-
- Dest dest;
- memcpy(&dest, &source, sizeof(dest));
- return dest;
-}
-
-template <class Dest, class Source>
-inline Dest BitCast(Source* source) {
- return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
-}
-
-} } // namespace v8::internal
-
-#endif // V8_UTILS_H_
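
The deleted utils.h.orig above documents the type-based-aliasing rationale behind BitCast; the memcpy idiom it describes remains the portable way to reinterpret bits. A compilable sketch (BitCastSketch is an illustrative name):

#include <cstring>

template <class Dest, class Source>
Dest BitCastSketch(const Source& source) {
  // Compile-time size check: array of negative size if the sizes differ.
  typedef char SizesMatch[sizeof(Dest) == sizeof(Source) ? 1 : -1];
  (void)sizeof(SizesMatch);
  Dest dest;
  memcpy(&dest, &source, sizeof(dest));  // The "memcpy exception" to aliasing.
  return dest;
}
// Usage: unsigned bits = BitCastSketch<unsigned>(1.0f);  // 0x3F800000.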
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 8c948cc7b..a8eb9d2a6 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -161,6 +161,8 @@ namespace internal {
SC(named_load_inline_miss, V8.NamedLoadInlineMiss) \
SC(named_load_global_inline, V8.NamedLoadGlobalInline) \
SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss) \
+ SC(named_load_global_stub, V8.NamedLoadGlobalStub) \
+ SC(named_load_global_stub_miss, V8.NamedLoadGlobalStubMiss) \
SC(keyed_store_field, V8.KeyedStoreField) \
SC(keyed_store_inline, V8.KeyedStoreInline) \
SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index f46a54d6e..504e22442 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -70,24 +70,19 @@ const char* Variable::Mode2String(Mode mode) {
}
-Property* Variable::AsProperty() {
+Property* Variable::AsProperty() const {
return rewrite_ == NULL ? NULL : rewrite_->AsProperty();
}
-Variable* Variable::AsVariable() {
- return rewrite_ == NULL || rewrite_->AsSlot() != NULL ? this : NULL;
-}
-
-
-Slot* Variable::slot() const {
- return rewrite_ != NULL ? rewrite_->AsSlot() : NULL;
+Slot* Variable::AsSlot() const {
+ return rewrite_ == NULL ? NULL : rewrite_->AsSlot();
}
bool Variable::IsStackAllocated() const {
- Slot* s = slot();
- return s != NULL && s->IsStackAllocated();
+ Slot* slot = AsSlot();
+ return slot != NULL && slot->IsStackAllocated();
}
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index 618f6ace7..ec76fee4f 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -122,19 +122,20 @@ class Variable: public ZoneObject {
static const char* Mode2String(Mode mode);
// Type testing & conversion
- Property* AsProperty();
- Variable* AsVariable();
+ Property* AsProperty() const;
+ Slot* AsSlot() const;
+
bool IsValidLeftHandSide() { return is_valid_LHS_; }
// The source code for an eval() call may refer to a variable that is
// in an outer scope about which we don't know anything (it may not
// be the global scope). scope() is NULL in that case. Currently the
// scope is only used to follow the context chain length.
- Scope* scope() const { return scope_; }
+ Scope* scope() const { return scope_; }
- Handle<String> name() const { return name_; }
- Mode mode() const { return mode_; }
- bool is_accessed_from_inner_scope() const {
+ Handle<String> name() const { return name_; }
+ Mode mode() const { return mode_; }
+ bool is_accessed_from_inner_scope() const {
return is_accessed_from_inner_scope_;
}
bool is_used() { return is_used_; }
@@ -171,8 +172,7 @@ class Variable: public ZoneObject {
local_if_not_shadowed_ = local;
}
- Expression* rewrite() const { return rewrite_; }
- Slot* slot() const;
+ Expression* rewrite() const { return rewrite_; }
StaticType* type() { return &type_; }
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index c144aded8..33874aa75 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 4
-#define BUILD_NUMBER 4
-#define PATCH_LEVEL 0
+#define BUILD_NUMBER 7
+#define PATCH_LEVEL 1
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 9318bb855..6e29b7a5a 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -418,6 +418,20 @@ void Assembler::bind(Label* L) {
}
+void Assembler::bind(NearLabel* L) {
+ ASSERT(!L->is_bound());
+ last_pc_ = NULL;
+ while (L->unresolved_branches_ > 0) {
+ int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
+ int disp = pc_offset() - branch_pos;
+ ASSERT(is_int8(disp));
+ set_byte_at(branch_pos - sizeof(int8_t), disp);
+ L->unresolved_branches_--;
+ }
+ L->bind_to(pc_offset());
+}
+
+
void Assembler::GrowBuffer() {
ASSERT(buffer_overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
@@ -1227,6 +1241,27 @@ void Assembler::j(Condition cc,
}
+void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(0 <= cc && cc < 16);
+ if (FLAG_emit_branch_hints && hint != no_hint) emit(hint);
+ if (L->is_bound()) {
+ const int short_size = 2;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ ASSERT(is_int8(offs - short_size));
+ // 0111 tttn #8-bit disp
+ emit(0x70 | cc);
+ emit((offs - short_size) & 0xFF);
+ } else {
+ emit(0x70 | cc);
+ emit(0x00); // The displacement will be resolved later.
+ L->link_to(pc_offset());
+ }
+}
+
+
void Assembler::jmp(Label* L) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1269,6 +1304,25 @@ void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
}
+void Assembler::jmp(NearLabel* L) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (L->is_bound()) {
+ const int short_size = sizeof(int8_t);
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ ASSERT(is_int8(offs - short_size));
+ // 1110 1011 #8-bit disp.
+ emit(0xEB);
+ emit((offs - short_size) & 0xFF);
+ } else {
+ emit(0xEB);
+ emit(0x00); // The displacement will be resolved later.
+ L->link_to(pc_offset());
+ }
+}
+
+
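
Both emitters use the same two-byte encodings: 0x70|cc plus a disp8 for conditional jumps, 0xEB plus a disp8 for unconditional ones, with the displacement measured from the end of the instruction. A standalone sketch of the bound-label case (illustrative, not V8 code):

#include <cassert>
#include <cstdint>
#include <vector>

// Sketch only: emit a short jcc/jmp to an already-bound target. "cc" is the
// x86 condition code (0..15); disp8 is relative to the next instruction.
void EmitShortJcc(std::vector<uint8_t>* code, int cc, int target) {
  assert(0 <= cc && cc < 16);
  int disp = target - (static_cast<int>(code->size()) + 2);
  assert(disp >= -128 && disp <= 127);
  code->push_back(0x70 | cc);                   // 0111 tttn
  code->push_back(static_cast<uint8_t>(disp));  // #8-bit disp
}

void EmitShortJmp(std::vector<uint8_t>* code, int target) {
  int disp = target - (static_cast<int>(code->size()) + 2);
  assert(disp >= -128 && disp <= 127);
  code->push_back(0xEB);                        // 1110 1011
  code->push_back(static_cast<uint8_t>(disp));  // #8-bit disp
}
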
void Assembler::jmp(Register target) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 7082af7fe..bbc101062 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -92,13 +92,13 @@ struct Register {
Register r = { code };
return r;
}
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(Register reg) const { return code_ == reg.code_; }
- int code() const {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(Register reg) const { return code_ == reg.code_; }
+ int code() const {
ASSERT(is_valid());
return code_;
}
- int bit() const {
+ int bit() const {
return 1 << code_;
}
@@ -138,8 +138,8 @@ const Register no_reg = { -1 };
struct XMMRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- int code() const {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ int code() const {
ASSERT(is_valid());
return code_;
}
@@ -1005,6 +1005,7 @@ class Assembler : public Malloced {
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
+ void bind(NearLabel* L);
// Calls
// Call near relative 32-bit displacement, relative to next instruction.
@@ -1029,10 +1030,16 @@ class Assembler : public Malloced {
// Jump near absolute indirect (m64)
void jmp(const Operand& src);
+ // Short jump
+ void jmp(NearLabel* L);
+
// Conditional jumps
void j(Condition cc, Label* L);
void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
+ // Conditional short jump
+ void j(Condition cc, NearLabel* L, Hint hint = no_hint);
+
// Floating-point operations
void fld(int i);
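
Usage mirrors Label, but callers promise the target is within int8 range so the assembler may pick the short forms. A hypothetical helper in the style of the stubs below, using only calls that appear in this diff:

#define __ masm->

// Hypothetical helper: replace null with undefined using short branches.
static void LoadUndefinedIfNull(MacroAssembler* masm, Register value) {
  NearLabel done;                               // guaranteed-close target
  __ CompareRoot(value, Heap::kNullValueRootIndex);
  __ j(not_equal, &done);                       // 2-byte short jcc
  __ LoadRoot(value, Heap::kUndefinedValueRootIndex);
  __ bind(&done);                               // backpatches the disp8
}

#undef __
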
@@ -1171,9 +1178,9 @@ class Assembler : public Malloced {
void RecordStatementPosition(int pos);
bool WriteRecordedPositions();
- int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
+ int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
int current_statement_position() const { return current_statement_position_; }
- int current_position() const { return current_position_; }
+ int current_position() const { return current_position_; }
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
@@ -1196,6 +1203,7 @@ class Assembler : public Malloced {
private:
byte* addr_at(int pos) { return buffer_ + pos; }
byte byte_at(int pos) { return buffer_[pos]; }
+ void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
uint32_t long_at(int pos) {
return *reinterpret_cast<uint32_t*>(addr_at(pos));
}
@@ -1371,7 +1379,6 @@ class Assembler : public Malloced {
// labels
// void print(Label* L);
void bind_to(Label* L, int pos);
- void link_to(Label* L, Label* appendix);
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 85ad63719..08c19ba6d 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -913,7 +913,11 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool count_constructions) {
+ // Should never count constructions for api objects.
+ ASSERT(!is_api_function || !count_constructions);
+
// Enter a construct frame.
__ EnterConstructFrame();
@@ -958,6 +962,26 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CmpInstanceType(rax, JS_FUNCTION_TYPE);
__ j(equal, &rt_call);
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ decb(FieldOperand(rcx, SharedFunctionInfo::kConstructionCountOffset));
+ __ j(not_zero, &allocate);
+
+ __ push(rax);
+ __ push(rdi);
+
+ __ push(rdi); // constructor
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ pop(rdi);
+ __ pop(rax);
+
+ __ bind(&allocate);
+ }
+
// Now allocate the JSObject on the heap.
__ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shl(rdi, Immediate(kPointerSizeLog2));
@@ -981,7 +1005,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rbx: JSObject
// rdi: start of next object
{ Label loop, entry;
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ // To allow for truncation.
+ if (count_constructions) {
+ __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
+ } else {
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ }
__ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
@@ -1164,13 +1193,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true);
+}
+
+
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, false);
}
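
The countdown variant gives each function a small budget of "generous" allocations: every construct decrements the byte at SharedFunctionInfo::kConstructionCountOffset, and when it reaches zero Runtime::kFinalizeInstanceSize shrinks the instance size to what was actually used; fields are pre-filled with the one-pointer filler map so the object can later be truncated safely. A sketch of the policy in plain C++, with invented names (not the runtime implementation):

// Sketch of the slack-tracking policy, with invented types.
struct SharedInfoSketch {
  uint8_t construction_count;  // generous-allocation budget
  int instance_size;           // current (generous) size in words
  int used_size;               // high-water mark of initialized fields
};

int SizeForNextInstance(SharedInfoSketch* shared) {
  if (shared->construction_count > 0 &&
      --shared->construction_count == 0) {
    // Budget exhausted: shrink future instances to the observed size and
    // switch back to the generic construct stub (done only once).
    shared->instance_size = shared->used_size;
  }
  return shared->instance_size;
}
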
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index b480412aa..9d82e0e31 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -203,7 +203,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
void ToBooleanStub::Generate(MacroAssembler* masm) {
- Label false_result, true_result, not_string;
+ NearLabel false_result, true_result, not_string;
__ movq(rax, Operand(rsp, 1 * kPointerSize));
// 'null' => false.
@@ -989,7 +989,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
Label runtime_call;
Label runtime_call_clear_stack;
Label input_not_smi;
- Label loaded;
+ NearLabel loaded;
// Test that rax is a number.
__ movq(rax, Operand(rsp, kPointerSize));
__ JumpIfNotSmi(rax, &input_not_smi);
@@ -1069,7 +1069,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ addl(rcx, rcx);
__ lea(rcx, Operand(rax, rcx, times_8, 0));
// Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
+ NearLabel cache_miss;
__ cmpq(rbx, Operand(rcx, 0));
__ j(not_equal, &cache_miss);
// Cache hit!
@@ -1160,7 +1160,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
// Compute st(0) % st(1)
{
- Label partial_remainder_loop;
+ NearLabel partial_remainder_loop;
__ bind(&partial_remainder_loop);
__ fprem1();
__ fwait();
@@ -1202,7 +1202,7 @@ void IntegerConvert(MacroAssembler* masm,
// cvttsd2si (32-bit version) directly.
Register double_exponent = rbx;
Register double_value = rdi;
- Label done, exponent_63_plus;
+ NearLabel done, exponent_63_plus;
// Get double and extract exponent.
__ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
// Clear result preemptively, in case we need to return zero.
@@ -1771,7 +1771,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rcx: RegExp data (FixedArray)
// Check the representation and encoding of the subject string.
- Label seq_ascii_string, seq_two_byte_string, check_code;
+ NearLabel seq_ascii_string, seq_two_byte_string, check_code;
__ movq(rax, Operand(rsp, kSubjectOffset));
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
@@ -1896,7 +1896,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 4: End of string data
// Argument 3: Start of string data
- Label setup_two_byte, setup_rest;
+ NearLabel setup_two_byte, setup_rest;
__ testb(rdi, rdi);
__ j(zero, &setup_two_byte);
__ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
@@ -1923,10 +1923,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ pop(rsi);
// Check the result.
- Label success;
+ NearLabel success;
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
__ j(equal, &success);
- Label failure;
+ NearLabel failure;
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
__ j(equal, &failure);
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
@@ -1981,7 +1981,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rbx: last_match_info backing store (FixedArray)
// rcx: offsets vector
// rdx: number of capture registers
- Label next_capture, done;
+ NearLabel next_capture, done;
// Capture register counter starts from the number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
@@ -1989,7 +1989,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(negative, &done);
// Read the value from the static offsets vector buffer and make it a smi.
__ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
- __ Integer32ToSmi(rdi, rdi, &runtime);
+ __ Integer32ToSmi(rdi, rdi);
// Store the smi value in the last match info.
__ movq(FieldOperand(rbx,
rdx,
@@ -2155,14 +2155,14 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Two identical objects are equal unless they are both NaN or undefined.
{
- Label not_identical;
+ NearLabel not_identical;
__ cmpq(rax, rdx);
__ j(not_equal, &not_identical);
if (cc_ != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
- Label check_for_nan;
+ NearLabel check_for_nan;
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &check_for_nan);
__ Set(rax, NegativeComparisonResult(cc_));
@@ -2180,7 +2180,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ Set(rax, EQUAL);
__ ret(0);
} else {
- Label heap_number;
+ NearLabel heap_number;
// If it's not a heap number, then return equal for (in)equality operator.
__ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
Factory::heap_number_map());
@@ -2244,7 +2244,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// If the first object is a JS object, we have done pointer comparison.
STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- Label first_non_object;
+ NearLabel first_non_object;
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
__ j(below, &first_non_object);
// Return non-zero (eax (not rax) is not zero)
@@ -2273,7 +2273,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Generate the number comparison code.
if (include_number_compare_) {
Label non_number_comparison;
- Label unordered;
+ NearLabel unordered;
FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
__ xorl(rax, rax);
__ xorl(rcx, rcx);
@@ -2337,7 +2337,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Not strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
- Label not_both_objects, return_unequal;
+ NearLabel not_both_objects, return_unequal;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
@@ -2495,7 +2495,7 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// Before returning we restore the context from the frame pointer if not NULL.
// The frame pointer is NULL in the exception handler of a JS entry frame.
__ xor_(rsi, rsi); // tentatively set context pointer to NULL
- Label skip;
+ NearLabel skip;
__ cmpq(rbp, Immediate(0));
__ j(equal, &skip);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2655,7 +2655,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Handling of failure.
__ bind(&failure_returned);
- Label retry;
+ NearLabel retry;
// If the returned exception is RETRY_AFTER_GC, continue at the retry label.
STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
__ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
@@ -2695,7 +2695,7 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
__ movq(rsp, Operand(kScratchRegister, 0));
// Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
+ NearLabel loop, done;
__ bind(&loop);
// Load the type of the current stack handler.
const int kStateOffset = StackHandlerConstants::kStateOffset;
@@ -2965,7 +2965,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// rdx is function, rax is map.
// Look up the function and the map in the instanceof cache.
- Label miss;
+ NearLabel miss;
__ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
__ j(not_equal, &miss);
__ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
@@ -2993,7 +2993,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
// Loop through the prototype chain looking for the function prototype.
- Label loop, is_instance, is_not_instance;
+ NearLabel loop, is_instance, is_not_instance;
__ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
__ bind(&loop);
__ cmpq(rcx, rbx);
@@ -3305,7 +3305,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rax: first string
// rdx: second string
// Check if either of the strings are empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
+ NearLabel second_not_zero_length, both_not_zero_length;
__ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
__ SmiTest(rcx);
__ j(not_zero, &second_not_zero_length);
@@ -3343,7 +3343,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Look at the length of the result of adding the two strings.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
- __ SmiAdd(rbx, rbx, rcx, NULL);
+ __ SmiAdd(rbx, rbx, rcx);
// Use the runtime system when adding two one-character strings, as it
// contains optimizations for this specific case using the symbol table.
__ SmiCompare(rbx, Smi::FromInt(2));
@@ -3561,7 +3561,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
ASSERT(count.is(rcx)); // rep movs count
// Nothing to do for zero characters.
- Label done;
+ NearLabel done;
__ testl(count, count);
__ j(zero, &done);
@@ -3572,7 +3572,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
}
// Don't enter the rep movs if there are fewer than 8 bytes to copy.
- Label last_bytes;
+ NearLabel last_bytes;
__ testl(count, Immediate(~7));
__ j(zero, &last_bytes);
@@ -3616,7 +3616,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Make sure that both characters are not digits, as such strings have a
// different hash algorithm. Don't try to look for these in the symbol table.
- Label not_array_index;
+ NearLabel not_array_index;
__ leal(scratch, Operand(c1, -'0'));
__ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
__ j(above, &not_array_index);
@@ -3803,7 +3803,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movq(rdx, Operand(rsp, kFromOffset));
__ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
- __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
+ __ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
__ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
Label return_rax;
__ j(equal, &return_rax);
@@ -3936,21 +3936,20 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ movq(scratch4, scratch1);
__ SmiSub(scratch4,
scratch4,
- FieldOperand(right, String::kLengthOffset),
- NULL);
+ FieldOperand(right, String::kLengthOffset));
// Register scratch4 now holds left.length - right.length.
const Register length_difference = scratch4;
- Label left_shorter;
+ NearLabel left_shorter;
__ j(less, &left_shorter);
// The right string isn't longer than the left one.
// Get the right string's length by subtracting the (non-negative) difference
// from the left string's length.
- __ SmiSub(scratch1, scratch1, length_difference, NULL);
+ __ SmiSub(scratch1, scratch1, length_difference);
__ bind(&left_shorter);
// Register scratch1 now holds Min(left.length, right.length).
const Register min_length = scratch1;
- Label compare_lengths;
+ NearLabel compare_lengths;
// If min-length is zero, go directly to comparing lengths.
__ SmiTest(min_length);
__ j(zero, &compare_lengths);
@@ -3958,7 +3957,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ SmiToInteger32(min_length, min_length);
// Registers scratch2 and scratch3 are free.
- Label result_not_equal;
+ NearLabel result_not_equal;
Label loop;
{
// Check characters 0 .. min_length - 1 in a loop.
@@ -3994,7 +3993,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ Move(rax, Smi::FromInt(EQUAL));
__ ret(0);
- Label result_greater;
+ NearLabel result_greater;
__ bind(&result_not_equal);
// Unequal comparison of left to right, either character or length.
__ j(greater, &result_greater);
@@ -4022,7 +4021,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
// Check for identity.
- Label not_same;
+ NearLabel not_same;
__ cmpq(rdx, rax);
__ j(not_equal, &not_same);
__ Move(rax, Smi::FromInt(EQUAL));
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 91d9ff027..0d8b827d8 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -248,7 +248,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// the function.
for (int i = 0; i < scope()->num_parameters(); i++) {
Variable* par = scope()->parameter(i);
- Slot* slot = par->slot();
+ Slot* slot = par->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
// The use of SlotOperand below is safe in unspilled code
// because the slot is guaranteed to be a context slot.
@@ -284,7 +284,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Initialize ThisFunction reference if present.
if (scope()->is_function_scope() && scope()->function() != NULL) {
frame_->Push(Factory::the_hole_value());
- StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
+ StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
}
// Initialize the function return target after the locals are set
@@ -601,10 +601,10 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
Property property(&global, &key, RelocInfo::kNoPosition);
Reference ref(this, &property);
ref.GetValue();
- } else if (variable != NULL && variable->slot() != NULL) {
+ } else if (variable != NULL && variable->AsSlot() != NULL) {
// For a variable that rewrites to a slot, we signal it is the immediate
// subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+ LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
} else {
// Anything else can be handled normally.
Load(expr);
@@ -643,17 +643,17 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
frame_->Push(&result);
}
- Variable* arguments = scope()->arguments()->var();
- Variable* shadow = scope()->arguments_shadow()->var();
- ASSERT(arguments != NULL && arguments->slot() != NULL);
- ASSERT(shadow != NULL && shadow->slot() != NULL);
+ Variable* arguments = scope()->arguments();
+ Variable* shadow = scope()->arguments_shadow();
+ ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
+ ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
JumpTarget done;
bool skip_arguments = false;
if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
// We have to skip storing into the arguments slot if it has
// already been written to. This can happen if a function
// has a local variable named 'arguments'.
- LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
+ LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
Result probe = frame_->Pop();
if (probe.is_constant()) {
// We have to skip updating the arguments object if it has
@@ -666,10 +666,10 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
}
}
if (!skip_arguments) {
- StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
}
- StoreToSlot(shadow->slot(), NOT_CONST_INIT);
+ StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
return frame_->Pop();
}
@@ -726,7 +726,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
LoadGlobal();
ref->set_type(Reference::NAMED);
} else {
- ASSERT(var->slot() != NULL);
+ ASSERT(var->AsSlot() != NULL);
ref->set_type(Reference::SLOT);
}
} else {
@@ -1350,11 +1350,14 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
overwrite_mode);
Label do_op;
+ // Left operand must be unchanged in left->reg() for deferred code.
+ // Left operand is in answer.reg(), possibly converted to int32, for
+ // inline code.
+ __ movq(answer.reg(), left->reg());
if (right_type_info.IsSmi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(right->reg());
}
- __ movq(answer.reg(), left->reg());
// If left is not known to be a smi, check if it is.
// If left is not known to be a number, and it isn't a smi, check if
// it is a HeapNumber.
@@ -1371,7 +1374,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
FieldOperand(answer.reg(), HeapNumber::kValueOffset));
// Branch if we might have overflowed.
// (False negative for Smi::kMinValue)
- __ cmpq(answer.reg(), Immediate(0x80000000));
+ __ cmpl(answer.reg(), Immediate(0x80000000));
deferred->Branch(equal);
// TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
__ Integer32ToSmi(answer.reg(), answer.reg());
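
The cmpq→cmpl change matters because cvttsd2si signals overflow by producing 0x80000000: cmpq sign-extends the 32-bit immediate to 0xFFFFFFFF80000000 while the register holds the zero-extended value, so the equality check could never fire; cmpl compares only the low 32 bits. A reasoning sketch, not V8 source:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t reg = 0x80000000u;  // zero-extended 32-bit cvttsd2si result
  int64_t imm64 = static_cast<int32_t>(0x80000000u);  // cmpq's view of imm32
  assert(static_cast<int64_t>(reg) != imm64);          // 64-bit compare misses
  assert(static_cast<uint32_t>(reg) == 0x80000000u);   // 32-bit compare hits
  return 0;
}
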
@@ -1390,18 +1393,18 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
// Perform the operation.
switch (op) {
case Token::SAR:
- __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
+ __ SmiShiftArithmeticRight(answer.reg(), answer.reg(), rcx);
break;
case Token::SHR: {
__ SmiShiftLogicalRight(answer.reg(),
- left->reg(),
- rcx,
- deferred->entry_label());
+ answer.reg(),
+ rcx,
+ deferred->entry_label());
break;
}
case Token::SHL: {
__ SmiShiftLeft(answer.reg(),
- left->reg(),
+ answer.reg(),
rcx);
break;
}
@@ -2496,7 +2499,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
Load(receiver);
- LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
// Emit the source position information after having loaded the
// receiver and the arguments.
@@ -2757,7 +2760,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
// If it was not possible to allocate the variable at compile time,
// we need to "declare" it at runtime to make sure it actually
@@ -3435,7 +3438,7 @@ void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
// Set number type of the loop variable to smi.
CheckStack(); // TODO(1222600): ignore if body contains calls.
- SetTypeForStackSlot(loop_var->slot(), TypeInfo::Smi());
+ SetTypeForStackSlot(loop_var->AsSlot(), TypeInfo::Smi());
Visit(node->body());
if (node->continue_target()->is_linked()) {
@@ -3444,7 +3447,7 @@ void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
if (has_valid_frame()) {
CodeForStatementPosition(node);
- Slot* loop_var_slot = loop_var->slot();
+ Slot* loop_var_slot = loop_var->AsSlot();
if (loop_var_slot->type() == Slot::LOCAL) {
frame_->TakeLocalAt(loop_var_slot->index());
} else {
@@ -3918,8 +3921,8 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
// Store the caught exception in the catch variable.
Variable* catch_var = node->catch_var()->var();
- ASSERT(catch_var != NULL && catch_var->slot() != NULL);
- StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
+ ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
+ StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
// Remove the exception from the stack.
frame_->Drop();
@@ -4517,7 +4520,7 @@ void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
done->Jump(result);
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
if (potential_slot != NULL) {
// Generate fast case for locals that rewrite to slots.
@@ -4552,7 +4555,7 @@ void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
Result arguments = allocator()->Allocate();
ASSERT(arguments.is_valid());
__ movq(arguments.reg(),
- ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+ ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
arguments,
slow));
frame_->Push(&arguments);
@@ -5018,7 +5021,7 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
Comment cmnt(masm(), "[ Variable Assignment");
Variable* var = node->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
ASSERT(slot != NULL);
// Evaluate the right-hand side.
@@ -5363,14 +5366,14 @@ void CodeGenerator::VisitCall(Call* node) {
// in generated code. If we succeed, there is no need to perform a
// context lookup in the runtime system.
JumpTarget done;
- if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- ASSERT(var->slot()->type() == Slot::LOOKUP);
+ if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
JumpTarget slow;
// Prepare the stack for the call to
// ResolvePossiblyDirectEvalNoLookup by pushing the loaded
// function, the first argument to the eval call and the
// receiver.
- Result fun = LoadFromGlobalSlotCheckExtensions(var->slot(),
+ Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
NOT_INSIDE_TYPEOF,
&slow);
frame_->Push(&fun);
@@ -5454,8 +5457,8 @@ void CodeGenerator::VisitCall(Call* node) {
// Replace the function on the stack with the result.
frame_->Push(&result);
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
+ } else if (var != NULL && var->AsSlot() != NULL &&
+ var->AsSlot()->type() == Slot::LOOKUP) {
// ----------------------------------
// JavaScript examples:
//
@@ -5474,7 +5477,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Generate fast case for loading functions from slots that
// correspond to local/global variables or arguments unless they
// are shadowed by eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(var->slot(),
+ EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
NOT_INSIDE_TYPEOF,
&function,
&slow,
@@ -7337,7 +7340,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (variable != NULL) {
- Slot* slot = variable->slot();
+ Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
frame_->Push(variable->name());
@@ -8693,7 +8696,7 @@ void Reference::GetValue() {
switch (type_) {
case SLOT: {
Comment cmnt(masm, "[ Load from Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
ASSERT(slot != NULL);
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
break;
@@ -8746,7 +8749,7 @@ void Reference::TakeValue() {
return;
}
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
ASSERT(slot != NULL);
if (slot->type() == Slot::LOOKUP ||
slot->type() == Slot::CONTEXT ||
@@ -8779,7 +8782,7 @@ void Reference::SetValue(InitState init_state) {
switch (type_) {
case SLOT: {
Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
ASSERT(slot != NULL);
cgen_->StoreToSlot(slot, init_state);
set_unloaded();
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 7c9dfc120..4213912b9 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -891,6 +891,10 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
case 0xD9:
switch (modrm_byte & 0xF8) {
+ case 0xC0:
+ mnem = "fld";
+ has_register = true;
+ break;
case 0xC8:
mnem = "fxch";
has_register = true;
@@ -901,6 +905,7 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
case 0xE1: mnem = "fabs"; break;
case 0xE4: mnem = "ftst"; break;
case 0xE8: mnem = "fld1"; break;
+ case 0xEB: mnem = "fldpi"; break;
case 0xEE: mnem = "fldz"; break;
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
@@ -1059,6 +1064,21 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%sd %s,", mnemonic, NameOfXMMRegister(regop));
current += PrintRightOperand(current);
+ } else if (opcode == 0x2C) {
+ // CVTTSD2SI:
+ // Convert with truncation scalar double-precision FP to integer.
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("cvttsd2si%c %s,",
+ operand_size_code(), NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x2D) {
+ // CVTSD2SI: Convert scalar double-precision FP to integer.
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("cvtsd2si%c %s,",
+ operand_size_code(), NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
} else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
// XMM arithmetic. Mnemonic was retrieved at the start of this function.
int mod, regop, rm;
@@ -1089,11 +1109,14 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer("%ss %s,", mnemonic, NameOfXMMRegister(regop));
current += PrintRightOperand(current);
} else if (opcode == 0x2C) {
- // CVTTSS2SI: Convert scalar single-precision FP to dword integer.
+ // CVTTSS2SI:
+ // Convert with truncation scalar single-precision FP to dword integer.
// Assert that mod is not 3, so source is memory, not an XMM register.
ASSERT_NE(0xC0, *current & 0xC0);
current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
} else if (opcode == 0x5A) {
+ // CVTSS2SD:
+ // Convert scalar single-precision FP to scalar double-precision FP.
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
@@ -1450,12 +1473,12 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- if (mod == 3 && regop == 1) {
- AppendToBuffer("decb %s", NameOfCPURegister(rm));
+ if (regop == 1) {
+ AppendToBuffer("decb ");
+ data += PrintRightOperand(data);
} else {
UnimplementedInstruction();
}
- data++;
}
break;
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index fd2653515..9c960478a 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -35,18 +35,8 @@ namespace v8 {
namespace internal {
-
-
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
- if (fp == 0) return NONE;
- // Compute the stack pointer.
- Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
- // Fill in the state.
- state->fp = fp;
- state->sp = sp;
- state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
- ASSERT(*state->pc_address != NULL);
- return EXIT;
+Address ExitFrame::ComputeStackPointer(Address fp) {
+ return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
}
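
The deleted body moved to platform-independent code; each architecture now supplies only the stack-pointer computation. A plausible reconstruction of the shared caller, assumed from the deleted lines rather than quoted from frames.cc:

// Assumed shape of the shared code (hypothetical): fp == 0 still means no
// frame, and the platform hook replaces the per-architecture duplication.
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
  if (fp == 0) return NONE;
  Address sp = ComputeStackPointer(fp);  // the only platform-specific step
  state->fp = fp;
  state->sp = sp;
  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
  return EXIT;
}
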
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index e4faafc65..c15860c7d 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -100,7 +100,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
- Slot* slot = scope()->parameter(i)->slot();
+ Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -119,7 +119,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
// Possibly allocate an arguments object.
- Variable* arguments = scope()->arguments()->AsVariable();
+ Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Arguments object must be allocated after the context object, in
// case the "arguments" or ".arguments" variables are in the context.
@@ -143,9 +143,8 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ CallStub(&stub);
// Store new arguments object in both "arguments" and ".arguments" slots.
__ movq(rcx, rax);
- Move(arguments->slot(), rax, rbx, rdx);
- Slot* dot_arguments_slot =
- scope()->arguments_shadow()->AsVariable()->slot();
+ Move(arguments->AsSlot(), rax, rbx, rdx);
+ Slot* dot_arguments_slot = scope()->arguments_shadow()->AsSlot();
Move(dot_arguments_slot, rcx, rbx, rdx);
}
@@ -165,7 +164,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
{ Comment cmnt(masm_, "[ Stack check");
- Label ok;
+ NearLabel ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok);
StackCheckStub stub;
@@ -237,221 +236,196 @@ FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
}
-void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
+void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
+}
- case Expression::kEffect:
- // Nothing to do.
- break;
- case Expression::kValue:
- // Move value into place.
- switch (location_) {
- case kAccumulator:
- if (!reg.is(result_register())) __ movq(result_register(), reg);
- break;
- case kStack:
- __ push(reg);
- break;
- }
- break;
+void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
+ MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
+ __ movq(result_register(), slot_operand);
+}
- case Expression::kTest:
- // For simplicity we always test the accumulator register.
- if (!reg.is(result_register())) __ movq(result_register(), reg);
- DoTest(true_label_, false_label_, fall_through_);
- break;
- }
+
+void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
+ MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
+ __ push(slot_operand);
}
-void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Nothing to do.
- break;
- case Expression::kValue: {
- MemOperand slot_operand = EmitSlotSearch(slot, result_register());
- switch (location_) {
- case kAccumulator:
- __ movq(result_register(), slot_operand);
- break;
- case kStack:
- // Memory operands can be pushed directly.
- __ push(slot_operand);
- break;
- }
- break;
- }
+void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
+ codegen()->Move(result_register(), slot);
+ codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
- case Expression::kTest:
- Move(result_register(), slot);
- DoTest(true_label_, false_label_, fall_through_);
- break;
- }
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
}
-void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Nothing to do.
- break;
- case Expression::kValue:
- switch (location_) {
- case kAccumulator:
- __ Move(result_register(), lit->handle());
- break;
- case kStack:
- __ Push(lit->handle());
- break;
- }
- break;
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+}
- case Expression::kTest:
- __ Move(result_register(), lit->handle());
- DoTest(true_label_, false_label_, fall_through_);
- break;
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ PushRoot(index);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ if (index == Heap::kUndefinedValueRootIndex ||
+ index == Heap::kNullValueRootIndex ||
+ index == Heap::kFalseValueRootIndex) {
+ __ jmp(false_label_);
+ } else if (index == Heap::kTrueValueRootIndex) {
+ __ jmp(true_label_);
+ } else {
+ __ LoadRoot(result_register(), index);
+ codegen()->DoTest(true_label_, false_label_, fall_through_);
}
}
-void FullCodeGenerator::ApplyTOS(Expression::Context context) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
- case Expression::kEffect:
- __ Drop(1);
- break;
- case Expression::kValue:
- switch (location_) {
- case kAccumulator:
- __ pop(result_register());
- break;
- case kStack:
- break;
- }
- break;
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ __ Move(result_register(), lit);
+}
- case Expression::kTest:
- __ pop(result_register());
- DoTest(true_label_, false_label_, fall_through_);
- break;
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ __ Push(lit);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ __ jmp(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ __ jmp(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ __ jmp(false_label_);
+ } else {
+ __ jmp(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ __ jmp(false_label_);
+ } else {
+ __ jmp(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ Move(result_register(), lit);
+ codegen()->DoTest(true_label_, false_label_, fall_through_);
}
}
-void FullCodeGenerator::DropAndApply(int count,
- Expression::Context context,
- Register reg) {
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
ASSERT(count > 0);
- ASSERT(!reg.is(rsp));
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
+ __ Drop(count);
+}
- case Expression::kEffect:
- __ Drop(count);
- break;
- case Expression::kValue:
- switch (location_) {
- case kAccumulator:
- __ Drop(count);
- if (!reg.is(result_register())) __ movq(result_register(), reg);
- break;
- case kStack:
- if (count > 1) __ Drop(count - 1);
- __ movq(Operand(rsp, 0), reg);
- break;
- }
- break;
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+ __ Move(result_register(), reg);
+}
- case Expression::kTest:
- __ Drop(count);
- if (!reg.is(result_register())) __ movq(result_register(), reg);
- DoTest(true_label_, false_label_, fall_through_);
- break;
- }
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ if (count > 1) __ Drop(count - 1);
+ __ movq(Operand(rsp, 0), reg);
}
-void FullCodeGenerator::Apply(Expression::Context context,
- Label* materialize_true,
- Label* materialize_false) {
- switch (context) {
- case Expression::kUninitialized:
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ __ Move(result_register(), reg);
+ codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
- case Expression::kEffect:
- ASSERT_EQ(materialize_true, materialize_false);
- __ bind(materialize_true);
- break;
- case Expression::kValue: {
- Label done;
- switch (location_) {
- case kAccumulator:
- __ bind(materialize_true);
- __ Move(result_register(), Factory::true_value());
- __ jmp(&done);
- __ bind(materialize_false);
- __ Move(result_register(), Factory::false_value());
- break;
- case kStack:
- __ bind(materialize_true);
- __ Push(Factory::true_value());
- __ jmp(&done);
- __ bind(materialize_false);
- __ Push(Factory::false_value());
- break;
- }
- __ bind(&done);
- break;
- }
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT_EQ(materialize_true, materialize_false);
+ __ bind(materialize_true);
+}
- case Expression::kTest:
- break;
- }
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ NearLabel done;
+ __ bind(materialize_true);
+ __ Move(result_register(), Factory::true_value());
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ Move(result_register(), Factory::false_value());
+ __ bind(&done);
}
-// Convert constant control flow (true or false) to the result expected for
-// a given expression context.
-void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- break;
- case Expression::kValue: {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- switch (location_) {
- case kAccumulator:
- __ LoadRoot(result_register(), value_root_index);
- break;
- case kStack:
- __ PushRoot(value_root_index);
- break;
- }
- break;
- }
- case Expression::kTest:
- if (flag) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- }
- break;
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ NearLabel done;
+ __ bind(materialize_true);
+ __ Push(Factory::true_value());
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ Push(Factory::false_value());
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_false == false_label_);
+ ASSERT(materialize_true == true_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(result_register(), value_root_index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ PushRoot(value_root_index);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ if (flag) {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
}
}
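
The rewritten section replaces switches over Expression::Context with per-context objects: each subclass states once how a freshly produced value is consumed (discarded, left in rax, pushed, or branched on). A minimal sketch of the pattern, with invented names:

// Strategy-object sketch: one virtual per way a value can be produced.
class ContextSketch {
 public:
  virtual ~ContextSketch() {}
  virtual void Plug(bool flag) const = 0;
};

class EffectSketch : public ContextSketch {
 public:
  virtual void Plug(bool) const {}  // value unused: emit nothing
};

class TestSketch : public ContextSketch {
 public:
  virtual void Plug(bool flag) const {
    // Constant control flow folds to one unconditional jump, as in
    // TestContext::Plug(bool) above.
  }
};
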
@@ -544,7 +518,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
FunctionLiteral* function) {
Comment cmnt(masm_, "[ Declaration");
ASSERT(variable != NULL); // Must have been resolved.
- Slot* slot = variable->slot();
+ Slot* slot = variable->AsSlot();
Property* prop = variable->AsProperty();
if (slot != NULL) {
@@ -555,7 +529,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movq(Operand(rbp, SlotOffset(slot)), kScratchRegister);
} else if (function != NULL) {
- VisitForValue(function, kAccumulator);
+ VisitForAccumulatorValue(function);
__ movq(Operand(rbp, SlotOffset(slot)), result_register());
}
break;
@@ -577,7 +551,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
__ movq(ContextOperand(rsi, slot->index()), kScratchRegister);
// No write barrier since the hole value is in old space.
} else if (function != NULL) {
- VisitForValue(function, kAccumulator);
+ VisitForAccumulatorValue(function);
__ movq(ContextOperand(rsi, slot->index()), result_register());
int offset = Context::SlotOffset(slot->index());
__ movq(rbx, rsi);
@@ -599,7 +573,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
if (mode == Variable::CONST) {
__ PushRoot(Heap::kTheHoleValueRootIndex);
} else if (function != NULL) {
- VisitForValue(function, kStack);
+ VisitForStackValue(function);
} else {
__ Push(Smi::FromInt(0)); // no initial value!
}
@@ -612,23 +586,20 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
if (function != NULL || mode == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
- VisitForValue(prop->obj(), kStack);
+ VisitForStackValue(prop->obj());
if (function != NULL) {
- VisitForValue(prop->key(), kStack);
- VisitForValue(function, kAccumulator);
+ VisitForStackValue(prop->key());
+ VisitForAccumulatorValue(function);
__ pop(rcx);
} else {
- VisitForValue(prop->key(), kAccumulator);
+ VisitForAccumulatorValue(prop->key());
__ movq(rcx, result_register());
__ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
}
__ pop(rdx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Absence of a test rax instruction following the call
- // indicates that none of the load was inlined.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
}
}
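
EmitCallIC centralizes the call-plus-signalling-nop pattern that the deleted lines emitted by hand (the inline caches inspect the instruction following the call site). A sketch of what the helper is assumed to do; the real body lives elsewhere in this file and may differ:

void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
  __ call(ic, mode);
  // A test-rax instruction after the call would tell the IC that a load
  // was inlined here; emitting nop() consistently signals that none was.
  __ nop();
}
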
@@ -654,7 +625,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
// Keep the switch value on the stack until a case matches.
- VisitForValue(stmt->tag(), kStack);
+ VisitForStackValue(stmt->tag());
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -674,7 +645,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
next_test.Unuse();
// Compile the label expression.
- VisitForValue(clause->label(), kAccumulator);
+ VisitForAccumulatorValue(clause->label());
// Perform the comparison as if via '==='.
__ movq(rdx, Operand(rsp, 0)); // Switch value.
@@ -733,7 +704,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the object to enumerate over. Both SpiderMonkey and JSC
// ignore null and undefined in contrast to the specification; see
// ECMA-262 section 12.6.4.
- VisitForValue(stmt->enumerable(), kAccumulator);
+ VisitForAccumulatorValue(stmt->enumerable());
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, &exit);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
@@ -762,7 +733,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
- Label fixed_array;
+ NearLabel fixed_array;
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
__ j(not_equal, &fixed_array);
@@ -808,7 +779,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check if the expected map still matches that of the enumerable.
// If not, we have to filter the key.
- Label update_each;
+ NearLabel update_each;
__ movq(rcx, Operand(rsp, 4 * kPointerSize));
__ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each);
@@ -871,13 +842,13 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info) {
__ Push(info);
__ CallRuntime(Runtime::kNewClosure, 2);
}
- Apply(context_, rax);
+ context()->Plug(rax);
}
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var(), context_);
+ EmitVariableLoad(expr->var());
}
@@ -913,7 +884,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
if (s != NULL && s->is_eval_scope()) {
// Loop up the context chain. There is no frame effect so it is
// safe to use raw labels here.
- Label next, fast;
+ NearLabel next, fast;
if (!context.is(temp)) {
__ movq(temp, context);
}
@@ -941,8 +912,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- __ call(ic, mode);
- __ nop(); // Signal no inlined code.
+ EmitCallIC(ic, mode);
}
@@ -989,7 +959,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
__ jmp(done);
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
if (potential_slot != NULL) {
// Generate fast case for locals that rewrite to slots.
@@ -1015,11 +985,11 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
// variables. Then load the argument from the arguments
// object using keyed load.
__ movq(rdx,
- ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+ ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
slow));
__ Move(rax, key_literal->handle());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
__ jmp(done);
}
}
@@ -1028,12 +998,11 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var,
- Expression::Context context) {
+void FullCodeGenerator::EmitVariableLoad(Variable* var) {
// Four cases: non-this global variables, lookup slots, all other
// types of slots, and parameters that rewrite to explicit property
// accesses on the arguments object.
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
Property* property = var->AsProperty();
if (var->is_global() && !var->is_this()) {
@@ -1043,12 +1012,8 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
__ Move(rcx, var->name());
__ movq(rax, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- // A test rax instruction following the call is used by the IC to
- // indicate that the inobject property case was inlined. Ensure there
- // is no test rax instruction here.
- __ nop();
- Apply(context, rax);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ context()->Plug(rax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
Label done, slow;
@@ -1064,7 +1029,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
__ CallRuntime(Runtime::kLoadContextSlot, 2);
__ bind(&done);
- Apply(context, rax);
+ context()->Plug(rax);
} else if (slot != NULL) {
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
@@ -1073,16 +1038,16 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
if (var->mode() == Variable::CONST) {
// Constants may be the hole value if they have not been initialized.
// Unhole them.
- Label done;
+ NearLabel done;
MemOperand slot_operand = EmitSlotSearch(slot, rax);
__ movq(rax, slot_operand);
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &done);
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
__ bind(&done);
- Apply(context, rax);
+ context()->Plug(rax);
} else {
- Apply(context, slot);
+ context()->Plug(slot);
}
} else {
@@ -1093,7 +1058,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->slot();
+ Slot* object_slot = object_var->AsSlot();
ASSERT_NOT_NULL(object_slot);
// Load the object.
@@ -1110,11 +1075,8 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Notice: We must not have a "test rax, ..." instruction after the
- // call. It is treated specially by the LoadIC code.
- __ nop();
- Apply(context, rax);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ context()->Plug(rax);
}
}
@@ -1169,7 +1131,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ movq(rdx, FieldOperand(rbx, size - kPointerSize));
__ movq(FieldOperand(rax, size - kPointerSize), rdx);
}
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -1208,38 +1170,37 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Fall through.
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
- VisitForValue(value, kAccumulator);
+ VisitForAccumulatorValue(value);
__ Move(rcx, key->handle());
__ movq(rdx, Operand(rsp, 0));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
// Fall through.
case ObjectLiteral::Property::PROTOTYPE:
__ push(Operand(rsp, 0)); // Duplicate receiver.
- VisitForValue(key, kStack);
- VisitForValue(value, kStack);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
__ CallRuntime(Runtime::kSetProperty, 3);
break;
case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
__ push(Operand(rsp, 0)); // Duplicate receiver.
- VisitForValue(key, kStack);
+ VisitForStackValue(key);
__ Push(property->kind() == ObjectLiteral::Property::SETTER ?
Smi::FromInt(1) :
Smi::FromInt(0));
- VisitForValue(value, kStack);
+ VisitForStackValue(value);
__ CallRuntime(Runtime::kDefineAccessor, 4);
break;
}
}
if (result_saved) {
- ApplyTOS(context_);
+ context()->PlugTOS();
} else {
- Apply(context_, rax);
+ context()->Plug(rax);
}
}
@@ -1286,7 +1247,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ push(rax);
result_saved = true;
}
- VisitForValue(subexpr, kAccumulator);
+ VisitForAccumulatorValue(subexpr);
// Store the subexpression value in the array's elements.
__ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
@@ -1299,9 +1260,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
- ApplyTOS(context_);
+ context()->PlugTOS();
} else {
- Apply(context_, rax);
+ context()->Plug(rax);
}
}
@@ -1334,39 +1295,38 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
- VisitForValue(property->obj(), kAccumulator);
+ VisitForAccumulatorValue(property->obj());
__ push(result_register());
} else {
- VisitForValue(property->obj(), kStack);
+ VisitForStackValue(property->obj());
}
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
- VisitForValue(property->obj(), kStack);
- VisitForValue(property->key(), kAccumulator);
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
__ movq(rdx, Operand(rsp, 0));
__ push(rax);
} else {
- VisitForValue(property->obj(), kStack);
- VisitForValue(property->key(), kStack);
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
}
break;
}
if (expr->is_compound()) {
- Location saved_location = location_;
- location_ = kAccumulator;
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
- Expression::kValue);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- break;
+ { AccumulatorValueContext context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ break;
+ }
}
Token::Value op = expr->binary_op();
@@ -1376,28 +1336,26 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
ASSERT(constant == kRightConstant || constant == kNoConstants);
if (constant == kNoConstants) {
__ push(rax); // Left operand goes on the stack.
- VisitForValue(expr->value(), kAccumulator);
+ VisitForAccumulatorValue(expr->value());
}
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
? OVERWRITE_RIGHT
: NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
+ AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr,
op,
- Expression::kValue,
mode,
expr->target(),
expr->value(),
constant);
} else {
- EmitBinaryOp(op, Expression::kValue, mode);
+ EmitBinaryOp(op, mode);
}
- location_ = saved_location;
-
} else {
- VisitForValue(expr->value(), kAccumulator);
+ VisitForAccumulatorValue(expr->value());
}
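// A minimal sketch of the RAII expression-context pattern this function
// now uses (assumptions: set_new_context is a hypothetical accessor name,
// and the real context classes also define per-context Plug() and
// PrepareTest() behaviour). Constructing a context installs it as the
// current one; the destructor restores the previous one, replacing the
// old manual save/restore of location_.
class ExpressionContextSketch {
 public:
  explicit ExpressionContextSketch(FullCodeGenerator* codegen)
      : codegen_(codegen), old_(codegen->context()) {
    codegen_->set_new_context(this);  // install as the current context
  }
  ~ExpressionContextSketch() {
    codegen_->set_new_context(old_);  // restore on scope exit
  }
 private:
  FullCodeGenerator* codegen_;
  const ExpressionContextSketch* old_;
};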
// Record source position before possible IC call.
@@ -1407,8 +1365,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op(),
- context_);
+ expr->op());
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
@@ -1425,22 +1382,19 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op,
- Expression::Context context,
OverwriteMode mode,
Expression* left,
Expression* right,
@@ -1502,12 +1456,11 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
}
__ bind(&done);
- Apply(context, rax);
+ context()->Plug(rax);
}
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- Expression::Context context,
OverwriteMode mode) {
GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS);
if (stub.ArgsInRegistersSupported()) {
@@ -1517,7 +1470,7 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
__ push(result_register());
__ CallStub(&stub);
}
- Apply(context, rax);
+ context()->Plug(rax);
}
@@ -1543,30 +1496,29 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
- EmitVariableAssignment(var, Token::ASSIGN, Expression::kEffect);
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN);
break;
}
case NAMED_PROPERTY: {
__ push(rax); // Preserve value.
- VisitForValue(prop->obj(), kAccumulator);
+ VisitForAccumulatorValue(prop->obj());
__ movq(rdx, rax);
__ pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop(); // Signal no inlined code.
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
case KEYED_PROPERTY: {
__ push(rax); // Preserve value.
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ movq(rcx, rax);
__ pop(rdx);
__ pop(rax);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop(); // Signal no inlined code.
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
}
@@ -1574,12 +1526,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op,
- Expression::Context context) {
+ Token::Value op) {
// Left-hand sides that rewrite to explicit property accesses do not reach
// here.
ASSERT(var != NULL);
- ASSERT(var->is_global() || var->slot() != NULL);
+ ASSERT(var->is_global() || var->AsSlot() != NULL);
if (var->is_global()) {
ASSERT(!var->is_this());
@@ -1589,14 +1540,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ Move(rcx, var->name());
__ movq(rdx, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization
// of const variables. Const assignments are simply skipped.
Label done;
- Slot* slot = var->slot();
+ Slot* slot = var->AsSlot();
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
@@ -1645,7 +1595,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ bind(&done);
}
- Apply(context, rax);
+ context()->Plug(rax);
}
@@ -1674,8 +1624,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ pop(rdx);
}
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1683,9 +1632,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ push(Operand(rsp, kPointerSize)); // Receiver is under value.
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(rax);
- DropAndApply(1, context_, rax);
+ context()->DropAndPlug(1, rax);
} else {
- Apply(context_, rax);
+ context()->Plug(rax);
}
}
@@ -1713,10 +1662,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1727,7 +1673,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(rax);
}
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -1736,16 +1682,15 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Expression* key = expr->key();
if (key->IsPropertyName()) {
- VisitForValue(expr->obj(), kAccumulator);
+ VisitForAccumulatorValue(expr->obj());
EmitNamedPropertyLoad(expr);
- Apply(context_, rax);
} else {
- VisitForValue(expr->obj(), kStack);
- VisitForValue(expr->key(), kAccumulator);
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
__ pop(rdx);
EmitKeyedPropertyLoad(expr);
- Apply(context_, rax);
}
+ context()->Plug(rax);
}
@@ -1756,7 +1701,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
__ Move(rcx, name);
// Record source position for debugger.
@@ -1765,10 +1710,10 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
in_loop);
- __ Call(ic, mode);
+ EmitCallIC(ic, mode);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -1779,9 +1724,9 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
- VisitForValue(key, kAccumulator);
+ VisitForAccumulatorValue(key);
__ movq(rcx, rax);
// Record source position for debugger.
SetSourcePosition(expr->position());
@@ -1789,10 +1734,10 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
in_loop);
- __ Call(ic, mode);
+ EmitCallIC(ic, mode);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -1801,7 +1746,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
// Record source position for debugger.
SetSourcePosition(expr->position());
@@ -1811,7 +1756,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- DropAndApply(1, context_, rax);
+ context()->DropAndPlug(1, rax);
}
@@ -1825,14 +1770,14 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
- VisitForValue(fun, kStack);
+ VisitForStackValue(fun);
__ PushRoot(Heap::kUndefinedValueRootIndex); // Reserved receiver slot.
// Push the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
// Push copy of the function - found below the arguments.
@@ -1861,20 +1806,20 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ CallStub(&stub);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- DropAndApply(1, context_, rax);
+ context()->DropAndPlug(1, rax);
} else if (var != NULL && !var->is_this() && var->is_global()) {
// Call to a global variable.
// Push global object as receiver for the call IC lookup.
__ push(CodeGenerator::GlobalObject());
EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
+ } else if (var != NULL && var->AsSlot() != NULL &&
+ var->AsSlot()->type() == Slot::LOOKUP) {
// Call to a lookup slot (dynamically introduced variable).
Label slow, done;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLoadFromSlotFastCase(var->slot(),
+ EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
NOT_INSIDE_TYPEOF,
&slow,
&done);
@@ -1892,7 +1837,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// function and receiver and have the slow path jump around this
// code.
if (done.is_linked()) {
- Label call;
+ NearLabel call;
__ jmp(&call);
__ bind(&done);
// Push function.
@@ -1911,24 +1856,20 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsSymbol()) {
// Call to a named property, use call IC.
- VisitForValue(prop->obj(), kStack);
+ VisitForStackValue(prop->obj());
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
// for a regular property use KeyedCallIC.
- VisitForValue(prop->obj(), kStack);
+ VisitForStackValue(prop->obj());
if (prop->is_synthetic()) {
- VisitForValue(prop->key(), kAccumulator);
+ VisitForAccumulatorValue(prop->key());
__ movq(rdx, Operand(rsp, 0));
// Record source code position for IC call.
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // By emitting a nop we make sure that we do not have a "test rax,..."
- // instruction after the call as it is treated specially
- // by the LoadIC code.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Pop receiver.
__ pop(rbx);
// Push result (function).
@@ -1951,7 +1892,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
loop_depth() == 0) {
lit->set_try_full_codegen(true);
}
- VisitForValue(fun, kStack);
+ VisitForStackValue(fun);
// Load global receiver object.
__ movq(rbx, CodeGenerator::GlobalObject());
__ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
@@ -1970,13 +1911,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- VisitForValue(expr->expression(), kStack);
+ VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
// Call the construct call builtin that handles allocation and
@@ -1989,59 +1930,59 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
- Apply(context_, rax);
+ context()->Plug(rax);
}
void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_true);
__ jmp(if_false);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
Condition positive_smi = __ CheckPositiveSmi(rax);
Split(positive_smi, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
@@ -2057,41 +1998,41 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmpq(rbx, Immediate(LAST_JS_OBJECT_TYPE));
Split(below_equal, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
Split(above_equal, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
@@ -2099,7 +2040,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -2107,80 +2048,80 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
// used in a few functions in runtime.js which should not normally be hit by
// this compiler.
__ jmp(if_false);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
Split(equal, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
Split(equal, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
Split(equal, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -2192,8 +2133,8 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Get the frame pointer for the calling frame.
__ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
@@ -2211,7 +2152,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
Smi::FromInt(StackFrame::CONSTRUCT));
Split(equal, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -2219,21 +2160,21 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kAccumulator);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ pop(rbx);
__ cmpq(rax, rbx);
Split(equal, if_true, if_false, fall_through);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -2242,19 +2183,19 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
// ArgumentsAccessStub expects the key in rdx and the formal
// parameter count in rax.
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(scope()->num_parameters()));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
- Apply(context_, rax);
+ context()->Plug(rax);
}
void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
- Label exit;
+ NearLabel exit;
// Get the number of formal parameters.
__ Move(rax, Smi::FromInt(scope()->num_parameters()));
@@ -2270,7 +2211,7 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
__ bind(&exit);
if (FLAG_debug_code) __ AbortIfNotSmi(rax);
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -2278,7 +2219,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Label done, null, function, non_function_constructor;
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
// If the object is a smi, we return null.
__ JumpIfSmi(rax, &null);
@@ -2324,7 +2265,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// All done.
__ bind(&done);
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -2339,14 +2280,14 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
}
#endif
// Finally, we're expected to leave a value on the top of the stack.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -2383,7 +2324,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rbx);
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -2391,11 +2332,11 @@ void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
ASSERT(args->length() == 3);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
__ CallStub(&stub);
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -2403,19 +2344,19 @@ void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub;
ASSERT(args->length() == 4);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
- VisitForValue(args->at(3), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ VisitForStackValue(args->at(3));
__ CallStub(&stub);
- Apply(context_, rax);
+ context()->Plug(rax);
}
void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator); // Load the object.
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
Label done;
// If the object is a smi return the object.
@@ -2426,25 +2367,25 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
__ movq(rax, FieldOperand(rax, JSValue::kValueOffset));
__ bind(&done);
- Apply(context_, rax);
+ context()->Plug(rax);
}
void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
// Load the arguments on the stack and call the runtime function.
ASSERT(args->length() == 2);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
__ CallRuntime(Runtime::kMath_pow, 2);
- Apply(context_, rax);
+ context()->Plug(rax);
}
void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
- VisitForValue(args->at(0), kStack); // Load the object.
- VisitForValue(args->at(1), kAccumulator); // Load the value.
+ VisitForStackValue(args->at(0)); // Load the object.
+ VisitForAccumulatorValue(args->at(1)); // Load the value.
__ pop(rbx); // rax = value. rbx = object.
Label done;
@@ -2463,7 +2404,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx);
__ bind(&done);
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -2471,18 +2412,18 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
// Load the argument on the stack and call the stub.
- VisitForValue(args->at(0), kStack);
+ VisitForStackValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
- Apply(context_, rax);
+ context()->Plug(rax);
}
void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label done;
StringCharFromCodeGenerator generator(rax, rbx);
@@ -2493,15 +2434,15 @@ void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, rbx);
+ context()->Plug(rbx);
}
void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kAccumulator);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
Register object = rbx;
Register index = rax;
@@ -2540,15 +2481,15 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, result);
+ context()->Plug(result);
}
void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kAccumulator);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
Register object = rbx;
Register index = rax;
@@ -2589,31 +2530,31 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, result);
+ context()->Plug(result);
}
void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
StringAddStub stub(NO_STRING_ADD_FLAGS);
__ CallStub(&stub);
- Apply(context_, rax);
+ context()->Plug(rax);
}
void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
StringCompareStub stub;
__ CallStub(&stub);
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -2621,9 +2562,9 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::SIN);
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
+ VisitForStackValue(args->at(0));
__ CallStub(&stub);
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -2631,18 +2572,18 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::COS);
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
+ VisitForStackValue(args->at(0));
__ CallStub(&stub);
- Apply(context_, rax);
+ context()->Plug(rax);
}
void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the runtime function.
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
+ VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -2650,38 +2591,38 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() >= 2);
int arg_count = args->length() - 2; // For receiver and function.
- VisitForValue(args->at(0), kStack); // Receiver.
+ VisitForStackValue(args->at(0)); // Receiver.
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i + 1), kStack);
+ VisitForStackValue(args->at(i + 1));
}
- VisitForValue(args->at(arg_count + 1), kAccumulator); // Function.
+ VisitForAccumulatorValue(args->at(arg_count + 1)); // Function.
// InvokeFunction requires the function to be in rdi, so move it there.
if (!result_register().is(rdi)) __ movq(rdi, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(rdi, count, CALL_FUNCTION);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- Apply(context_, rax);
+ context()->Plug(rax);
}
void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
ASSERT(args->length() == 3);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kRegExpConstructResult, 3);
- Apply(context_, rax);
+ context()->Plug(rax);
}
void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
ASSERT(args->length() == 3);
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kStack);
- VisitForValue(args->at(2), kStack);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kSwapElements, 3);
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -2696,11 +2637,11 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- Apply(context_, rax);
+ context()->Plug(rax);
return;
}
- VisitForValue(args->at(1), kAccumulator);
+ VisitForAccumulatorValue(args->at(1));
Register key = rax;
Register cache = rbx;
@@ -2713,7 +2654,7 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
__ movq(cache,
FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
- Label done, not_found;
+ NearLabel done, not_found;
// tmp now holds finger offset as a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
@@ -2737,7 +2678,7 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
__ CallRuntime(Runtime::kGetFromCache, 2);
__ bind(&done);
- Apply(context_, rax);
+ context()->Plug(rax);
}
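// Cache layout assumed by the probe above (a sketch inferred from the
// offsets used, not an authoritative description): the cache is a
// FixedArray of (key, value) pairs following a small header, and
// kFingerOffset holds a smi index of the most recently hit key, so a
// lookup re-checks the finger entry first and only falls back to the
// runtime on a miss.
//
//   cache:  [ header..., k0, v0, k1, v1, ... ]
//   hit:    key == cache[finger]  ->  result is cache[finger + 1]
//   miss:   Runtime::kGetFromCache(cache_id, key)  // fills, moves finger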
@@ -2748,11 +2689,11 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
Register left = rbx;
Register tmp = rcx;
- VisitForValue(args->at(0), kStack);
- VisitForValue(args->at(1), kAccumulator);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
__ pop(left);
- Label done, fail, ok;
+ NearLabel done, fail, ok;
__ cmpq(left, right);
__ j(equal, &ok);
// Fail if either is a non-HeapObject.
@@ -2775,41 +2716,41 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
__ Move(rax, Factory::true_value());
__ bind(&done);
- Apply(context_, rax);
+ context()->Plug(rax);
}
void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ testl(FieldOperand(rax, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
__ j(zero, if_true);
__ jmp(if_false);
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kAccumulator);
+ VisitForAccumulatorValue(args->at(0));
__ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
ASSERT(String::kHashShift >= kSmiTagSize);
__ IndexFromHash(rax, rax);
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -2833,7 +2774,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- VisitForValue(args->at(i), kStack);
+ VisitForStackValue(args->at(i));
}
if (expr->is_jsruntime()) {
@@ -2841,13 +2782,13 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ Move(rcx, expr->name());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
- __ call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
__ CallRuntime(expr->function(), arg_count);
}
- Apply(context_, rax);
+ context()->Plug(rax);
}
@@ -2861,20 +2802,20 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Result of deleting non-property, non-variable reference is true.
// The subexpression may have side effects.
VisitForEffect(expr->expression());
- Apply(context_, true);
+ context()->Plug(true);
} else if (var != NULL &&
!var->is_global() &&
- var->slot() != NULL &&
- var->slot()->type() != Slot::LOOKUP) {
+ var->AsSlot() != NULL &&
+ var->AsSlot()->type() != Slot::LOOKUP) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
- Apply(context_, false);
+ context()->Plug(false);
} else {
// Property or variable reference. Call the delete builtin with
// object and property name as arguments.
if (prop != NULL) {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ VisitForStackValue(prop->obj());
+ VisitForStackValue(prop->key());
} else if (var->is_global()) {
__ push(CodeGenerator::GlobalObject());
__ Push(var->name());
@@ -2888,7 +2829,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ Push(var->name());
}
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- Apply(context_, rax);
+ context()->Plug(rax);
}
break;
}
@@ -2896,26 +2837,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
VisitForEffect(expr->expression());
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- break;
- case Expression::kValue:
- switch (location_) {
- case kAccumulator:
- __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
- break;
- case kStack:
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- break;
- }
- break;
- case Expression::kTest:
- __ jmp(false_label_);
- break;
- }
+ context()->Plug(Heap::kUndefinedValueRootIndex);
break;
}
@@ -2926,31 +2848,33 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Label* if_false = NULL;
Label* fall_through = NULL;
// Notice that the labels are swapped.
- PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
VisitForControl(expr->expression(), if_true, if_false, fall_through);
- Apply(context_, if_false, if_true); // Labels swapped.
+ context()->Plug(if_false, if_true); // Labels swapped.
break;
}
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- VisitForTypeofValue(expr->expression(), kStack);
+ { StackValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
__ CallRuntime(Runtime::kTypeof, 1);
- Apply(context_, rax);
+ context()->Plug(rax);
break;
}
case Token::ADD: {
Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForValue(expr->expression(), kAccumulator);
- Label no_conversion;
+ VisitForAccumulatorValue(expr->expression());
+ NearLabel no_conversion;
Condition is_smi = masm_->CheckSmi(result_register());
__ j(is_smi, &no_conversion);
__ push(result_register());
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
__ bind(&no_conversion);
- Apply(context_, result_register());
+ context()->Plug(result_register());
break;
}
@@ -2962,9 +2886,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register rax.
- VisitForValue(expr->expression(), kAccumulator);
+ VisitForAccumulatorValue(expr->expression());
__ CallStub(&stub);
- Apply(context_, rax);
+ context()->Plug(rax);
break;
}
@@ -2972,7 +2896,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
// The generic unary operation stub expects the argument to be
// in the accumulator register rax.
- VisitForValue(expr->expression(), kAccumulator);
+ VisitForAccumulatorValue(expr->expression());
Label done;
bool inline_smi_case = ShouldInlineSmiCase(expr->op());
if (inline_smi_case) {
@@ -2991,7 +2915,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
__ CallStub(&stub);
__ bind(&done);
- Apply(context_, rax);
+ context()->Plug(rax);
break;
}
@@ -3027,23 +2951,20 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- Location saved_location = location_;
- location_ = kAccumulator;
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
- Expression::kValue);
- location_ = saved_location;
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
} else {
// Reserve space for result of postfix operation.
- if (expr->is_postfix() && context_ != Expression::kEffect) {
+ if (expr->is_postfix() && !context()->IsEffect()) {
__ Push(Smi::FromInt(0));
}
if (assign_type == NAMED_PROPERTY) {
- VisitForValue(prop->obj(), kAccumulator);
+ VisitForAccumulatorValue(prop->obj());
__ push(rax); // Copy of receiver, needed for later store.
EmitNamedPropertyLoad(prop);
} else {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
      __ movq(rdx, Operand(rsp, 0)); // Leave receiver on stack.
__ push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
@@ -3051,7 +2972,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Call ToNumber only if operand is not a smi.
- Label no_conversion;
+ NearLabel no_conversion;
Condition is_smi;
is_smi = masm_->CheckSmi(rax);
__ j(is_smi, &no_conversion);
@@ -3061,29 +2982,21 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Save result for postfix expressions.
if (expr->is_postfix()) {
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Do not save result.
- break;
- case Expression::kValue:
- case Expression::kTest:
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(rax);
- break;
- case NAMED_PROPERTY:
- __ movq(Operand(rsp, kPointerSize), rax);
- break;
- case KEYED_PROPERTY:
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
- break;
- }
- break;
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(rax);
+ break;
+ case NAMED_PROPERTY:
+ __ movq(Operand(rsp, kPointerSize), rax);
+ break;
+ case KEYED_PROPERTY:
+ __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ break;
+ }
}
}
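// Stack shape while the postfix result is saved above (a sketch; offsets
// assume x64's kPointerSize of 8, and the reserved slot is the Smi 0
// pushed earlier when the context is not an effect context):
//
//   VARIABLE:        result simply pushed on top
//   NAMED_PROPERTY:  [rsp]     receiver
//                    [rsp+8]   reserved slot  <- result stored here
//   KEYED_PROPERTY:  [rsp]     key
//                    [rsp+8]   receiver
//                    [rsp+16]  reserved slot  <- result stored here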
@@ -3120,35 +3033,32 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case VARIABLE:
if (expr->is_postfix()) {
// Perform the assignment as if via '='.
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN,
- Expression::kEffect);
+ { EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ }
      // For all contexts except the effect context, the result is now on
      // top of the stack.
- if (context_ != Expression::kEffect) {
- ApplyTOS(context_);
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
}
} else {
// Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN,
- context_);
+ Token::ASSIGN);
}
break;
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->handle());
__ pop(rdx);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
if (expr->is_postfix()) {
- if (context_ != Expression::kEffect) {
- ApplyTOS(context_);
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
}
} else {
- Apply(context_, rax);
+ context()->Plug(rax);
}
break;
}
@@ -3156,16 +3066,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(rcx);
__ pop(rdx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
if (expr->is_postfix()) {
- if (context_ != Expression::kEffect) {
- ApplyTOS(context_);
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
}
} else {
- Apply(context_, rax);
+ context()->Plug(rax);
}
break;
}
@@ -3173,8 +3080,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
+ ASSERT(!context()->IsEffect());
+ ASSERT(!context()->IsTest());
+
if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
Comment cmnt(masm_, "Global variable");
__ Move(rcx, proxy->name());
@@ -3182,17 +3092,16 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ nop(); // Signal no inlined code.
- if (where == kStack) __ push(rax);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ context()->Plug(rax);
} else if (proxy != NULL &&
- proxy->var()->slot() != NULL &&
- proxy->var()->slot()->type() == Slot::LOOKUP) {
+ proxy->var()->AsSlot() != NULL &&
+ proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- Slot* slot = proxy->var()->slot();
+ Slot* slot = proxy->var()->AsSlot();
EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
@@ -3201,10 +3110,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
__ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
__ bind(&done);
- if (where == kStack) __ push(rax);
+ context()->Plug(rax);
} else {
// This expression cannot throw a reference error at the top level.
- VisitForValue(expr, where);
+ Visit(expr);
}
}
@@ -3226,7 +3135,10 @@ bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
Handle<String> check = Handle<String>::cast(right_literal_value);
- VisitForTypeofValue(left_unary->expression(), kAccumulator);
+ { AccumulatorValueContext context(this);
+ VisitForTypeofValue(left_unary->expression());
+ }
+
if (check->Equals(Heap::number_symbol())) {
Condition is_smi = masm_->CheckSmi(rax);
__ j(is_smi, if_true);
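// The fast path above compiles a comparison such as typeof x == "number"
// directly into type checks on the value, so the typeof result string is
// never materialized: a smi jumps straight to if_true, and the emitted
// sequence continues (beyond the lines shown) with a heap-number map
// check before falling through to if_false.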
@@ -3301,8 +3213,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -3310,21 +3222,21 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Expression* left = expr->left();
Expression* right = expr->right();
if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
return;
}
- VisitForValue(expr->left(), kStack);
+ VisitForStackValue(expr->left());
switch (op) {
case Token::IN:
- VisitForValue(expr->right(), kStack);
+ VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
- VisitForValue(expr->right(), kStack);
+ VisitForStackValue(expr->right());
InstanceofStub stub;
__ CallStub(&stub);
__ testq(rax, rax);
@@ -3334,7 +3246,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
default: {
- VisitForValue(expr->right(), kAccumulator);
+ VisitForAccumulatorValue(expr->right());
Condition cc = no_condition;
bool strict = false;
switch (op) {
@@ -3392,7 +3304,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Convert the result of the comparison into one expected for this
// expression's context.
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
@@ -3402,10 +3314,10 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
- VisitForValue(expr->expression(), kAccumulator);
+ VisitForAccumulatorValue(expr->expression());
__ CompareRoot(rax, Heap::kNullValueRootIndex);
if (expr->is_strict()) {
Split(equal, if_true, if_false, fall_through);
@@ -3421,20 +3333,46 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
}
- Apply(context_, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- Apply(context_, rax);
+ context()->Plug(rax);
}
-Register FullCodeGenerator::result_register() { return rax; }
+Register FullCodeGenerator::result_register() {
+ return rax;
+}
-Register FullCodeGenerator::context_register() { return rsi; }
+Register FullCodeGenerator::context_register() {
+ return rsi;
+}
+
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+ ASSERT(mode == RelocInfo::CODE_TARGET ||
+ mode == RelocInfo::CODE_TARGET_CONTEXT);
+ __ call(ic, mode);
+
+ // If we're calling a (keyed) load or store stub, we have to mark
+ // the call as containing no inlined code so we will not attempt to
+ // patch it.
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ case Code::KEYED_LOAD_IC:
+ case Code::STORE_IC:
+ case Code::KEYED_STORE_IC:
+ __ nop(); // Signals no inlined code.
+ break;
+ default:
+ // Do nothing.
+ break;
+ }
+}
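// Typical call-site usage after this change, as in the rewritten sites
// above; the helper centralizes the marker nop that used to be written
// by hand after every load/store IC call:
//
//   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
//   EmitCallIC(ic, RelocInfo::CODE_TARGET);  // emits the call, then a nop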
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index a74e621e1..814da760c 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -730,7 +730,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- rsp[0] : return address
// -----------------------------------
Label miss;
- Label index_out_of_range;
Register receiver = rdx;
Register index = rax;
@@ -745,7 +744,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
result,
&miss, // When not a string.
&miss, // When not a number.
- &index_out_of_range,
+ &miss, // When index out of range.
STRING_INDEX_IS_ARRAY_INDEX);
char_at_generator.GenerateFast(masm);
__ ret(0);
@@ -753,10 +752,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
ICRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ ret(0);
-
__ bind(&miss);
GenerateMiss(masm);
}
@@ -847,7 +842,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// For the UnsignedInt array type, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
- Label box_int;
+ NearLabel box_int;
__ JumpIfUIntNotValidSmiValue(rcx, &box_int);
@@ -898,8 +893,9 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &slow);
- // Check that the key is a smi.
- __ JumpIfNotSmi(rax, &slow);
+ // Check that the key is an array index, that is Uint32.
+ STATIC_ASSERT(kSmiValueSize <= 32);
+ __ JumpIfNotPositiveSmi(rax, &slow);
// Get the map of the receiver.
__ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
@@ -1032,7 +1028,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// No more bailouts to slow case on this path, so key not needed.
__ SmiToInteger32(rdi, rax);
{ // Clamp the value to [0..255].
- Label done;
+ NearLabel done;
__ testl(rdi, Immediate(0xFFFFFF00));
__ j(zero, &done);
__ setcc(negative, rdi); // 1 if negative, 0 if positive.
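// An equivalent C++ sketch of the clamp being built here (assuming the
// sequence finishes with the usual set/decrement/zero-extend pattern not
// shown in this hunk): in-range values take the early exit, negatives
// clamp to 0, and anything above 255 clamps to 255.
static inline unsigned char ClampToUint8(int value) {
  if ((value & 0xFFFFFF00) == 0) return static_cast<unsigned char>(value);
  return value < 0 ? 0 : 255;
}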
@@ -1082,7 +1078,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rax: value
// rbx: receiver's elements array (a FixedArray)
// rcx: index
- Label non_smi_value;
+ NearLabel non_smi_value;
__ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
__ JumpIfNotSmi(rax, &non_smi_value);
@@ -1104,7 +1100,7 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, check_heap_number;
+ Label slow;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow);
@@ -1145,6 +1141,7 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// rdx: receiver (a JSObject)
// rbx: elements array
// rdi: untagged key
+ NearLabel check_heap_number;
__ JumpIfNotSmi(rax, &check_heap_number);
// No more branches to slow case on this path. Key and receiver not needed.
__ SmiToInteger32(rdx, rax);
@@ -1488,7 +1485,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- Label do_call, slow_call, slow_load, slow_reload_receiver;
+ Label do_call, slow_call, slow_load;
Label check_number_dictionary, check_string, lookup_monomorphic_cache;
Label index_smi, index_string;
@@ -1730,6 +1727,14 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
}
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+ Object* map,
+ Object* cell) {
+ // TODO(<bug#>): implement this.
+ return false;
+}
+
+
// The offset from the inlined patch site to the start of the inlined
// store instruction.
const int StoreIC::kOffsetToStoreInstruction = 20;
@@ -1880,7 +1885,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
// -- rsp[0] : return address
// -----------------------------------
- Label miss, restore_miss;
+ Label miss;
GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 165c51dd2..869986ebc 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -85,7 +85,7 @@ void MacroAssembler::RecordWriteHelper(Register object,
Register scratch) {
if (FLAG_debug_code) {
// Check that the object is not in new space.
- Label not_in_new_space;
+ NearLabel not_in_new_space;
InNewSpace(object, scratch, not_equal, &not_in_new_space);
Abort("new-space object passed to RecordWriteHelper");
bind(&not_in_new_space);
@@ -171,7 +171,7 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
Label done;
if (FLAG_debug_code) {
- Label okay;
+ NearLabel okay;
JumpIfNotSmi(object, &okay);
Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
bind(&okay);
@@ -221,42 +221,6 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
}
}
-
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch) {
- if (Serializer::enabled()) {
- // Can't do arithmetic on external references if it might get serialized.
- // The mask isn't really an address. We load it as an external reference in
- // case the size of the new space is different between the snapshot maker
- // and the running system.
- if (scratch.is(object)) {
- movq(kScratchRegister, ExternalReference::new_space_mask());
- and_(scratch, kScratchRegister);
- } else {
- movq(scratch, ExternalReference::new_space_mask());
- and_(scratch, object);
- }
- movq(kScratchRegister, ExternalReference::new_space_start());
- cmpq(scratch, kScratchRegister);
- j(cc, branch);
- } else {
- ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
- intptr_t new_space_start =
- reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
- movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
- if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
- } else {
- lea(scratch, Operand(object, kScratchRegister, times_1, 0));
- }
- and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
- j(cc, branch);
- }
-}
-
-
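// The deleted helper's test, restated as plain arithmetic (a sketch; it
// relies on new space being a mask-aligned region, which is what lets
// the non-serializer path fold the comparison into a single and):
static inline bool InNewSpaceSketch(unsigned long long addr,
                                    unsigned long long new_space_start,
                                    unsigned long long new_space_mask) {
  // (addr & mask) == start  is equivalent to  ((addr - start) & mask) == 0
  return ((addr - new_space_start) & new_space_mask) == 0;
}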
void MacroAssembler::Assert(Condition cc, const char* msg) {
if (FLAG_debug_code) Check(cc, msg);
}
@@ -264,7 +228,7 @@ void MacroAssembler::Assert(Condition cc, const char* msg) {
void MacroAssembler::AssertFastElements(Register elements) {
if (FLAG_debug_code) {
- Label ok;
+ NearLabel ok;
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
j(equal, &ok);
@@ -278,7 +242,7 @@ void MacroAssembler::AssertFastElements(Register elements) {
void MacroAssembler::Check(Condition cc, const char* msg) {
- Label L;
+ NearLabel L;
j(cc, &L);
Abort(msg);
// will not return here
@@ -291,7 +255,7 @@ void MacroAssembler::CheckStackAlignment() {
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
- Label alignment_as_expected;
+ NearLabel alignment_as_expected;
testq(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected);
// Abort if stack is not aligned.
@@ -304,7 +268,7 @@ void MacroAssembler::CheckStackAlignment() {
void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {
- Label ok;
+ NearLabel ok;
testl(result, result);
j(not_zero, &ok);
testl(op, op);
@@ -642,8 +606,6 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
-static int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
Register MacroAssembler::GetSmiConstant(Smi* source) {
int value = source->value();
if (value == 0) {
@@ -666,7 +628,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (allow_stub_calls()) {
Assert(equal, "Uninitialized kSmiConstantRegister");
} else {
- Label ok;
+ NearLabel ok;
j(equal, &ok);
int3();
bind(&ok);
@@ -716,20 +678,9 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
}
}
-void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
- ASSERT_EQ(0, kSmiTag);
- if (!dst.is(src)) {
- movl(dst, src);
- }
- shl(dst, Immediate(kSmiShift));
-}
-
-void MacroAssembler::Integer32ToSmi(Register dst,
- Register src,
- Label* on_overflow) {
+void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
ASSERT_EQ(0, kSmiTag);
- // 32-bit integer always fits in a long smi.
if (!dst.is(src)) {
movl(dst, src);
}
@@ -740,7 +691,7 @@ void MacroAssembler::Integer32ToSmi(Register dst,
void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
if (FLAG_debug_code) {
testb(dst, Immediate(0x01));
- Label ok;
+ NearLabel ok;
j(zero, &ok);
if (allow_stub_calls()) {
Abort("Integer32ToSmiField writing to non-smi location");
@@ -949,180 +900,6 @@ Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
}
-void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
- if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- movq(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
- // Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
- j(not_equal, on_smi_result);
- movq(src, kScratchRegister);
- } else {
- movq(dst, src);
- neg(dst);
- cmpq(dst, src);
- // If the result is zero or Smi::kMinValue, negation failed to create a smi.
- j(not_equal, on_smi_result);
- }
-}
-
-
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result) {
- ASSERT(!dst.is(src2));
- if (on_not_smi_result == NULL) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible.
- if (dst.is(src1)) {
- addq(dst, src2);
- } else {
- movq(dst, src1);
- addq(dst, src2);
- }
- Assert(no_overflow, "Smi addition overflow");
- } else if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- } else {
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result) {
- ASSERT(!dst.is(src2));
- if (on_not_smi_result == NULL) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
- if (dst.is(src1)) {
- subq(dst, src2);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- }
- Assert(no_overflow, "Smi subtraction overflow");
- } else if (dst.is(src1)) {
- cmpq(dst, src2);
- j(overflow, on_not_smi_result);
- subq(dst, src2);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result) {
- if (on_not_smi_result == NULL) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
- if (dst.is(src1)) {
- subq(dst, src2);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- }
- Assert(no_overflow, "Smi subtraction overflow");
- } else if (dst.is(src1)) {
- movq(kScratchRegister, src2);
- cmpq(src1, kScratchRegister);
- j(overflow, on_not_smi_result);
- subq(src1, kScratchRegister);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-void MacroAssembler::SmiMul(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result) {
- ASSERT(!dst.is(src2));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
-
- if (dst.is(src1)) {
- Label failure, zero_correct_result;
- movq(kScratchRegister, src1); // Create backup for later testing.
- SmiToInteger64(dst, src1);
- imul(dst, src2);
- j(overflow, &failure);
-
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case.
- Label correct_result;
- testq(dst, dst);
- j(not_zero, &correct_result);
-
- movq(dst, kScratchRegister);
- xor_(dst, src2);
- j(positive, &zero_correct_result); // Result was positive zero.
-
- bind(&failure); // Reused failure exit, restores src1.
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
-
- bind(&zero_correct_result);
- xor_(dst, dst);
-
- bind(&correct_result);
- } else {
- SmiToInteger64(dst, src1);
- imul(dst, src2);
- j(overflow, on_not_smi_result);
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case.
- Label correct_result;
- testq(dst, dst);
- j(not_zero, &correct_result);
- // One of src1 and src2 is zero, the check whether the other is
- // negative.
- movq(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
- j(negative, on_not_smi_result);
- bind(&correct_result);
- }
-}
-
-
-void MacroAssembler::SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result) {
- // Does not assume that src is a smi.
- ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
- ASSERT_EQ(0, kSmiTag);
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
-
- JumpIfNotSmi(src, on_not_smi_result);
- Register tmp = (dst.is(src) ? kScratchRegister : dst);
- LoadSmiConstant(tmp, constant);
- addq(tmp, src);
- j(overflow, on_not_smi_result);
- if (dst.is(src)) {
- movq(dst, tmp);
- }
-}
-
-
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1179,29 +956,6 @@ void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
}
-void MacroAssembler::SmiAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
-
- LoadSmiConstant(kScratchRegister, constant);
- addq(kScratchRegister, src);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- } else {
- LoadSmiConstant(dst, constant);
- addq(dst, src);
- j(overflow, on_not_smi_result);
- }
-}
-
-
void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1226,165 +980,48 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
}
-void MacroAssembler::SmiSubConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result);
- LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
- addq(kScratchRegister, dst);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- }
+void MacroAssembler::SmiAdd(Register dst,
+ Register src1,
+ Register src2) {
+ // No overflow checking. Use only when it's known that
+ // overflowing is impossible.
+ ASSERT(!dst.is(src2));
+ if (dst.is(src1)) {
+ addq(dst, src2);
} else {
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result);
- LoadSmiConstant(dst, constant);
- // Adding and subtracting the min-value gives the same result, it only
- // differs on the overflow bit, which we don't check here.
- addq(dst, src);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
- j(overflow, on_not_smi_result);
- }
+ movq(dst, src1);
+ addq(dst, src2);
}
+ Assert(no_overflow, "Smi addition overflow");
}
-void MacroAssembler::SmiDiv(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
-
- // Check for 0 divisor (result is +/-Infinity).
- Label positive_divisor;
- testq(src2, src2);
- j(zero, on_not_smi_result);
-
- if (src1.is(rax)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(rax, src1);
- // We need to rule out dividing Smi::kMinValue by -1, since that would
- // overflow in idiv and raise an exception.
- // We combine this with negative zero test (negative zero only happens
- // when dividing zero by a negative number).
-
- // We overshoot a little and go to slow case if we divide min-value
- // by any negative value, not just -1.
- Label safe_div;
- testl(rax, Immediate(0x7fffffff));
- j(not_zero, &safe_div);
- testq(src2, src2);
- if (src1.is(rax)) {
- j(positive, &safe_div);
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
- } else {
- j(negative, on_not_smi_result);
- }
- bind(&safe_div);
-
- SmiToInteger32(src2, src2);
- // Sign extend src1 into edx:eax.
- cdq();
- idivl(src2);
- Integer32ToSmi(src2, src2);
- // Check that the remainder is zero.
- testl(rdx, rdx);
- if (src1.is(rax)) {
- Label smi_result;
- j(zero, &smi_result);
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
- bind(&smi_result);
+void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
+ // No overflow checking. Use only when it's known that
+ // overflowing is impossible (e.g., subtracting two positive smis).
+ ASSERT(!dst.is(src2));
+ if (dst.is(src1)) {
+ subq(dst, src2);
} else {
- j(not_zero, on_not_smi_result);
- }
- if (!dst.is(src1) && src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movq(dst, src1);
+ subq(dst, src2);
}
- Integer32ToSmi(dst, rax);
+ Assert(no_overflow, "Smi subtraction overflow");
}
-void MacroAssembler::SmiMod(Register dst,
+void MacroAssembler::SmiSub(Register dst,
Register src1,
- Register src2,
- Label* on_not_smi_result) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
- ASSERT(!src1.is(src2));
-
- testq(src2, src2);
- j(zero, on_not_smi_result);
-
- if (src1.is(rax)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(rax, src1);
- SmiToInteger32(src2, src2);
-
- // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
- Label safe_div;
- cmpl(rax, Immediate(Smi::kMinValue));
- j(not_equal, &safe_div);
- cmpl(src2, Immediate(-1));
- j(not_equal, &safe_div);
- // Retag inputs and go slow case.
- Integer32ToSmi(src2, src2);
- if (src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- jmp(on_not_smi_result);
- bind(&safe_div);
-
- // Sign extend eax into edx:eax.
- cdq();
- idivl(src2);
- // Restore smi tags on inputs.
- Integer32ToSmi(src2, src2);
- if (src1.is(rax)) {
- movq(src1, kScratchRegister);
+ const Operand& src2) {
+ // No overflow checking. Use only when it's known that
+ // overflowing is impossible (e.g., subtracting two positive smis).
+ if (dst.is(src1)) {
+ subq(dst, src2);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
}
- // Check for a negative zero result. If the result is zero, and the
- // dividend is negative, go slow to return a floating point negative zero.
- Label smi_result;
- testl(rdx, rdx);
- j(not_zero, &smi_result);
- testq(src1, src1);
- j(negative, on_not_smi_result);
- bind(&smi_result);
- Integer32ToSmi(dst, rdx);
+ Assert(no_overflow, "Smi subtraction overflow");
}
@@ -1480,25 +1117,6 @@ void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
}
-void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
- Register src,
- int shift_value,
- Label* on_not_smi_result) {
- // Logic right shift interprets its result as an *unsigned* number.
- if (dst.is(src)) {
- UNIMPLEMENTED(); // Not used.
- } else {
- movq(dst, src);
- if (shift_value == 0) {
- testq(dst, dst);
- j(negative, on_not_smi_result);
- }
- shr(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
- }
-}
-
-
void MacroAssembler::SmiShiftLeftConstant(Register dst,
Register src,
int shift_value) {
@@ -1515,7 +1133,7 @@ void MacroAssembler::SmiShiftLeft(Register dst,
Register src1,
Register src2) {
ASSERT(!dst.is(rcx));
- Label result_ok;
+ NearLabel result_ok;
// Untag shift amount.
if (!dst.is(src1)) {
movq(dst, src1);
@@ -1527,42 +1145,6 @@ void MacroAssembler::SmiShiftLeft(Register dst,
}
-void MacroAssembler::SmiShiftLogicalRight(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- Label result_ok;
- if (src1.is(rcx) || src2.is(rcx)) {
- movq(kScratchRegister, rcx);
- }
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
- shl(dst, Immediate(kSmiShift));
- testq(dst, dst);
- if (src1.is(rcx) || src2.is(rcx)) {
- Label positive_result;
- j(positive, &positive_result);
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else {
- movq(src2, kScratchRegister);
- }
- jmp(on_not_smi_result);
- bind(&positive_result);
- } else {
- j(negative, on_not_smi_result); // src2 was zero and src1 negative.
- }
-}
-
-
void MacroAssembler::SmiShiftArithmeticRight(Register dst,
Register src1,
Register src2) {
@@ -1590,44 +1172,6 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst,
}
-void MacroAssembler::SelectNonSmi(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smis) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(src1));
- ASSERT(!dst.is(src2));
- // Both operands must not be smis.
-#ifdef DEBUG
- if (allow_stub_calls()) { // Check contains a stub call.
- Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
- Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
- }
-#endif
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(0, Smi::FromInt(0));
- movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
- testl(kScratchRegister, src2);
- // If non-zero then both are smis.
- j(not_zero, on_not_smis);
-
- // Exactly one operand is a smi.
- ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
- // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
- subq(kScratchRegister, Immediate(1));
- // If src1 is a smi, then scratch register all 1s, else it is all 0s.
- movq(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
- // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
- // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
-}
-
-
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
@@ -1663,136 +1207,13 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
}
-void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
- ASSERT_EQ(0, kSmiTag);
- Condition smi = CheckSmi(src);
- j(smi, on_smi);
-}
-
-
-void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
- Condition smi = CheckSmi(src);
- j(NegateCondition(smi), on_not_smi);
-}
-
-
-void MacroAssembler::JumpIfNotPositiveSmi(Register src,
- Label* on_not_positive_smi) {
- Condition positive_smi = CheckPositiveSmi(src);
- j(NegateCondition(positive_smi), on_not_positive_smi);
-}
-
-
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
- Smi* constant,
- Label* on_equals) {
- SmiCompare(src, constant);
- j(equal, on_equals);
-}
-
-
-void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
- Condition is_valid = CheckInteger32ValidSmiValue(src);
- j(NegateCondition(is_valid), on_invalid);
-}
-
-
-void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
- Label* on_invalid) {
- Condition is_valid = CheckUInteger32ValidSmiValue(src);
- j(NegateCondition(is_valid), on_invalid);
-}
-
-
-void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
- Label* on_not_both_smi) {
- Condition both_smi = CheckBothSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1, Register src2,
- Label* on_not_both_smi) {
- Condition both_smi = CheckBothPositiveSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- Label* on_fail) {
- // Check that both objects are not smis.
- Condition either_smi = CheckEitherSmi(first_object, second_object);
- j(either_smi, on_fail);
-
- // Load instance type for both strings.
- movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
- movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
- movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
- // Check that both are flat ascii strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
- j(not_equal, on_fail);
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- Label *failure) {
- if (!scratch.is(instance_type)) {
- movl(scratch, instance_type);
+void MacroAssembler::Move(Register dst, Register src) {
+ if (!dst.is(src)) {
+ movq(dst, src);
}
-
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-
- andl(scratch, Immediate(kFlatAsciiStringMask));
- cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
- j(not_equal, failure);
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* on_fail) {
- // Load instance type for both strings.
- movq(scratch1, first_object_instance_type);
- movq(scratch2, second_object_instance_type);
-
- // Check that both are flat ascii strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
- j(not_equal, on_fail);
-}
void MacroAssembler::Move(Register dst, Handle<Object> source) {
@@ -1903,7 +1324,6 @@ void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
- WriteRecordedPositions();
call(code_object, rmode);
}
@@ -1994,7 +1414,7 @@ void MacroAssembler::CheckMap(Register obj,
void MacroAssembler::AbortIfNotNumber(Register object) {
- Label ok;
+ NearLabel ok;
Condition is_smi = CheckSmi(object);
j(is_smi, &ok);
Cmp(FieldOperand(object, HeapObject::kMapOffset),
@@ -2005,14 +1425,14 @@ void MacroAssembler::AbortIfNotNumber(Register object) {
void MacroAssembler::AbortIfSmi(Register object) {
- Label ok;
+ NearLabel ok;
Condition is_smi = CheckSmi(object);
Assert(NegateCondition(is_smi), "Operand is a smi");
}
void MacroAssembler::AbortIfNotSmi(Register object) {
- Label ok;
+ NearLabel ok;
Condition is_smi = CheckSmi(object);
Assert(is_smi, "Operand is not a smi");
}
@@ -2052,7 +1472,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(not_equal, miss);
// Make sure that the function has an instance prototype.
- Label non_instance;
+ NearLabel non_instance;
testb(FieldOperand(result, Map::kBitFieldOffset),
Immediate(1 << Map::kHasNonInstancePrototype));
j(not_zero, &non_instance);
@@ -2068,7 +1488,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(equal, miss);
// If the function does not have an initial map, we're done.
- Label done;
+ NearLabel done;
CmpObjectType(result, MAP_TYPE, kScratchRegister);
j(not_equal, &done);
@@ -2133,76 +1553,11 @@ void MacroAssembler::DebugBreak() {
#endif // ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
- Label* done,
- InvokeFlag flag) {
- bool definitely_matches = false;
- Label invoke;
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- Set(rax, actual.immediate());
- if (expected.immediate() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
- // Don't worry about adapting arguments for built-ins that
- // don't want that done. Skip adaption code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- Set(rbx, expected.immediate());
- }
- }
- } else {
- if (actual.is_immediate()) {
- // Expected is in register, actual is immediate. This is the
- // case when we invoke function values without going through the
- // IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
- j(equal, &invoke);
- ASSERT(expected.reg().is(rbx));
- Set(rax, actual.immediate());
- } else if (!expected.reg().is(actual.reg())) {
- // Both expected and actual are in (different) registers. This
- // is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
- j(equal, &invoke);
- ASSERT(actual.reg().is(rax));
- ASSERT(expected.reg().is(rbx));
- }
- }
-
- if (!definitely_matches) {
- Handle<Code> adaptor =
- Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
- if (!code_constant.is_null()) {
- movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_register.is(rdx)) {
- movq(rdx, code_register);
- }
-
- if (flag == CALL_FUNCTION) {
- Call(adaptor, RelocInfo::CODE_TARGET);
- jmp(done);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&invoke);
- }
-}
-
-
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag) {
- Label done;
+ NearLabel done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
if (flag == CALL_FUNCTION) {
call(code);
@@ -2219,7 +1574,7 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag) {
- Label done;
+ NearLabel done;
Register dummy = rax;
InvokePrologue(expected, actual, code, dummy, &done, flag);
if (flag == CALL_FUNCTION) {
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 9f5a74658..a8ffca918 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -91,10 +91,11 @@ class MacroAssembler: public Assembler {
// Check if object is in new space. The condition cc can be equal or
// not_equal. If it is equal, the jump is taken if the object is in new
// space. The register scratch can be the object itself, but it will be clobbered.
+ template <typename LabelType>
void InNewSpace(Register object,
Register scratch,
Condition cc,
- Label* branch);
+ LabelType* branch);
// For page containing |object| mark region covering [object+offset]
// dirty. |object| is the object being stored into, |value| is the
@@ -215,14 +216,9 @@ class MacroAssembler: public Assembler {
// Tag an integer value. The result must be known to be a valid smi value.
// Only uses the low 32 bits of the src register. Sets the N and Z flags
- // based on the value of the resulting integer.
+ // based on the value of the resulting smi.
void Integer32ToSmi(Register dst, Register src);
- // Tag an integer value if possible, or jump the integer value cannot be
- // represented as a smi. Only uses the low 32 bit of the src registers.
- // NOTICE: Destroys the dst register even if unsuccessful!
- void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
-
// Stores an integer32 value into a memory field that already holds a smi.
void Integer32ToSmiField(const Operand& dst, Register src);
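
For readers following the smi helpers below, a standalone model of the x64 smi encoding they manipulate, assuming the x64 values kSmiTagSize == 1 and kSmiShiftSize == 31 so that the 32-bit payload occupies the upper half of the word (the constants here are illustrative mirrors, not the real definitions):

    #include <cassert>
    #include <cstdint>

    const int kSmiShift = 32;  // kSmiTagSize + kSmiShiftSize on x64

    // Tag: any 32-bit integer fits, since the payload fills the upper word.
    uint64_t Integer32ToSmi(int32_t value) {
      return static_cast<uint64_t>(static_cast<uint32_t>(value)) << kSmiShift;
    }

    // Untag: shifting right recovers the 32-bit payload.
    int32_t SmiToInteger32(uint64_t smi) {
      return static_cast<int32_t>(static_cast<uint32_t>(smi >> kSmiShift));
    }

    int main() {
      assert(SmiToInteger32(Integer32ToSmi(-42)) == -42);
      assert((Integer32ToSmi(7) & 1) == 0);  // tag bit (bit 0) stays clear
    }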
@@ -300,30 +296,42 @@ class MacroAssembler: public Assembler {
// above with a conditional jump.
// Jump if the value cannot be represented by a smi.
- void JumpIfNotValidSmiValue(Register src, Label* on_invalid);
+ template <typename LabelType>
+ void JumpIfNotValidSmiValue(Register src, LabelType* on_invalid);
// Jump if the unsigned integer value cannot be represented by a smi.
- void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid);
+ template <typename LabelType>
+ void JumpIfUIntNotValidSmiValue(Register src, LabelType* on_invalid);
// Jump to label if the value is a tagged smi.
- void JumpIfSmi(Register src, Label* on_smi);
+ template <typename LabelType>
+ void JumpIfSmi(Register src, LabelType* on_smi);
// Jump to label if the value is not a tagged smi.
- void JumpIfNotSmi(Register src, Label* on_not_smi);
+ template <typename LabelType>
+ void JumpIfNotSmi(Register src, LabelType* on_not_smi);
// Jump to label if the value is not a positive tagged smi.
- void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);
+ template <typename LabelType>
+ void JumpIfNotPositiveSmi(Register src, LabelType* on_not_smi);
// Jump to label if the value, which must be a tagged smi, has value equal
// to the constant.
- void JumpIfSmiEqualsConstant(Register src, Smi* constant, Label* on_equals);
+ template <typename LabelType>
+ void JumpIfSmiEqualsConstant(Register src,
+ Smi* constant,
+ LabelType* on_equals);
// Jump if either or both register are not smi values.
- void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
+ template <typename LabelType>
+ void JumpIfNotBothSmi(Register src1,
+ Register src2,
+ LabelType* on_not_both_smi);
// Jump if either or both register are not positive smi values.
+ template <typename LabelType>
void JumpIfNotBothPositiveSmi(Register src1, Register src2,
- Label* on_not_both_smi);
+ LabelType* on_not_both_smi);
// Operations on tagged smi values.
@@ -333,10 +341,11 @@ class MacroAssembler: public Assembler {
// Optimistically adds an integer constant to a supposed smi.
// If the src is not a smi, or the result is not a smi, jump to
// the label.
+ template <typename LabelType>
void SmiTryAddConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
// Add an integer constant to a tagged smi, giving a tagged smi as result.
// No overflow testing on the result is done.
@@ -348,10 +357,11 @@ class MacroAssembler: public Assembler {
// Add an integer constant to a tagged smi, giving a tagged smi as result,
// or jumping to a label if the result cannot be represented by a smi.
+ template <typename LabelType>
void SmiAddConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
// Subtract an integer constant from a tagged smi, giving a tagged smi as
// result. No testing on the result is done. Sets the N and Z flags
@@ -360,60 +370,80 @@ class MacroAssembler: public Assembler {
// Subtract an integer constant from a tagged smi, giving a tagged smi as
// result, or jumping to a label if the result cannot be represented by a smi.
+ template <typename LabelType>
void SmiSubConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
// Negating a smi can give a negative zero or a too-large positive value.
// NOTICE: This operation jumps on success, not failure!
+ template <typename LabelType>
void SmiNeg(Register dst,
Register src,
- Label* on_smi_result);
+ LabelType* on_smi_result);
// Adds smi values and returns the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
+ template <typename LabelType>
void SmiAdd(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
+
+ void SmiAdd(Register dst,
+ Register src1,
+ Register src2);
// Subtracts smi values and returns the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
+ template <typename LabelType>
void SmiSub(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
+
+ void SmiSub(Register dst,
+ Register src1,
+ Register src2);
+ template <typename LabelType>
void SmiSub(Register dst,
Register src1,
const Operand& src2,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
+
+ void SmiSub(Register dst,
+ Register src1,
+ const Operand& src2);
// Multiplies smi values and returns the result as a smi,
// if possible.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
+ template <typename LabelType>
void SmiMul(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
// Divides one smi by another and returns the quotient.
// Clobbers rax and rdx registers.
+ template <typename LabelType>
void SmiDiv(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
// Divides one smi by another and returns the remainder.
// Clobbers rax and rdx registers.
+ template <typename LabelType>
void SmiMod(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
// Bitwise operations.
void SmiNot(Register dst, Register src);
@@ -427,10 +457,11 @@ class MacroAssembler: public Assembler {
void SmiShiftLeftConstant(Register dst,
Register src,
int shift_value);
+ template <typename LabelType>
void SmiShiftLogicalRightConstant(Register dst,
Register src,
int shift_value,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
void SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value);
@@ -443,10 +474,11 @@ class MacroAssembler: public Assembler {
// Shifts a smi value to the right, shifting in zero bits at the top, and
// returns the unsigned interpretation of the result if that is a smi.
// Uses and clobbers rcx, so dst may not be rcx.
+ template <typename LabelType>
void SmiShiftLogicalRight(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result);
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result);
// Shifts a smi value to the right, sign extending the top, and
// returns the signed interpretation of the result. That will always
// be a valid smi value, since it's numerically smaller than the
@@ -460,10 +492,11 @@ class MacroAssembler: public Assembler {
// Select the non-smi register of two registers where exactly one is a
// smi. If neither are smis, jump to the failure label.
+ template <typename LabelType>
void SelectNonSmi(Register dst,
Register src1,
Register src2,
- Label* on_not_smis);
+ LabelType* on_not_smis);
// Converts, if necessary, a smi to a combination of number and
// multiplier to be used as a scaled index.
@@ -493,25 +526,29 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String macros.
+ template <typename LabelType>
void JumpIfNotBothSequentialAsciiStrings(Register first_object,
Register second_object,
Register scratch1,
Register scratch2,
- Label* on_not_both_flat_ascii);
+ LabelType* on_not_both_flat_ascii);
// Check whether the instance type represents a flat ascii string. Jump to the
// label if not. If the instance type can be scratched, specify the same
// register for both instance type and scratch.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
- Register scratch,
- Label *on_not_flat_ascii_string);
+ template <typename LabelType>
+ void JumpIfInstanceTypeIsNotSequentialAscii(
+ Register instance_type,
+ Register scratch,
+ LabelType *on_not_flat_ascii_string);
+ template <typename LabelType>
void JumpIfBothInstanceTypesAreNotSequentialAscii(
Register first_object_instance_type,
Register second_object_instance_type,
Register scratch1,
Register scratch2,
- Label* on_fail);
+ LabelType* on_fail);
// ---------------------------------------------------------------------------
// Macro instructions.
@@ -520,6 +557,9 @@ class MacroAssembler: public Assembler {
void Set(Register dst, int64_t x);
void Set(const Operand& dst, int64_t x);
+ // Move if the registers are not identical.
+ void Move(Register target, Register source);
+
// Handle support
void Move(Register dst, Handle<Object> source);
void Move(const Operand& dst, Handle<Object> source);
@@ -865,11 +905,12 @@ class MacroAssembler: public Assembler {
Handle<Object> code_object_;
// Helper functions for generating invokes.
+ template <typename LabelType>
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_register,
- Label* done,
+ LabelType* done,
InvokeFlag flag);
// Activation support.
@@ -961,6 +1002,697 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define ACCESS_MASM(masm) masm->
#endif
+// -----------------------------------------------------------------------------
+// Template implementations.
+
+static int kSmiShift = kSmiTagSize + kSmiShiftSize;
+
+
+template <typename LabelType>
+void MacroAssembler::SmiNeg(Register dst,
+ Register src,
+ LabelType* on_smi_result) {
+ if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ movq(kScratchRegister, src);
+ neg(dst); // Low 32 bits are retained as zero by negation.
+ // Test if result is zero or Smi::kMinValue.
+ cmpq(dst, kScratchRegister);
+ j(not_equal, on_smi_result);
+ movq(src, kScratchRegister);
+ } else {
+ movq(dst, src);
+ neg(dst);
+ cmpq(dst, src);
+ // If the result is zero or Smi::kMinValue, negation failed to create a smi.
+ j(not_equal, on_smi_result);
+ }
+}
+
+
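+// SmiNeg above jumps on success, so it is worth spelling out the two failing
+// inputs: negating 0 would need JavaScript's -0 (a heap number), and negating
+// Smi::kMinValue wraps back to itself. A standalone check using unsigned
+// arithmetic, which is how the neg instruction behaves:
+//
+//     #include <cassert>
+//     #include <cstdint>
+//
+//     // Two's-complement negation, as the neg instruction computes it.
+//     bool NegationGivesSmi(uint64_t smi_bits) {
+//       uint64_t negated = ~smi_bits + 1;
+//       return negated != smi_bits;  // mirrors cmpq + j(not_equal, ...)
+//     }
+//
+//     int main() {
+//       assert(NegationGivesSmi(uint64_t{5} << 32));   // ordinary smi: fine
+//       assert(!NegationGivesSmi(0));                  // result would be -0
+//       assert(!NegationGivesSmi(uint64_t{1} << 63));  // kMinValue: wraps
+//     }
+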
+template <typename LabelType>
+void MacroAssembler::SmiAdd(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT_NOT_NULL(on_not_smi_result);
+ ASSERT(!dst.is(src2));
+ if (dst.is(src1)) {
+ movq(kScratchRegister, src1);
+ addq(kScratchRegister, src2);
+ j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
+ } else {
+ movq(dst, src1);
+ addq(dst, src2);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT_NOT_NULL(on_not_smi_result);
+ ASSERT(!dst.is(src2));
+ if (dst.is(src1)) {
+ cmpq(dst, src2);
+ j(overflow, on_not_smi_result);
+ subq(dst, src2);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ const Operand& src2,
+ LabelType* on_not_smi_result) {
+ ASSERT_NOT_NULL(on_not_smi_result);
+ if (dst.is(src1)) {
+ movq(kScratchRegister, src2);
+ cmpq(src1, kScratchRegister);
+ j(overflow, on_not_smi_result);
+ subq(src1, kScratchRegister);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiMul(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT(!dst.is(src2));
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+
+ if (dst.is(src1)) {
+ NearLabel failure, zero_correct_result;
+ movq(kScratchRegister, src1); // Create backup for later testing.
+ SmiToInteger64(dst, src1);
+ imul(dst, src2);
+ j(overflow, &failure);
+
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case.
+ NearLabel correct_result;
+ testq(dst, dst);
+ j(not_zero, &correct_result);
+
+ movq(dst, kScratchRegister);
+ xor_(dst, src2);
+ j(positive, &zero_correct_result); // Result was positive zero.
+
+ bind(&failure); // Reused failure exit, restores src1.
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
+
+ bind(&zero_correct_result);
+ xor_(dst, dst);
+
+ bind(&correct_result);
+ } else {
+ SmiToInteger64(dst, src1);
+ imul(dst, src2);
+ j(overflow, on_not_smi_result);
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case.
+ NearLabel correct_result;
+ testq(dst, dst);
+ j(not_zero, &correct_result);
+ // One of src1 and src2 is zero, so check whether the other is
+ // negative.
+ movq(kScratchRegister, src1);
+ xor_(kScratchRegister, src2);
+ j(negative, on_not_smi_result);
+ bind(&correct_result);
+ }
+}
+
+
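+// The negative-zero bailout in SmiMul rests on a sign test of src1 ^ src2:
+// when the product is zero, the xor is negative exactly when one factor was
+// negative, and JavaScript then requires -0.0, which no smi can encode.
+// Standalone:
+//
+//     #include <cassert>
+//     #include <cstdint>
+//
+//     // Valid only when a * b == 0: decides whether the JS result is -0.0.
+//     bool ZeroProductIsNegativeZero(int64_t a, int64_t b) {
+//       return (a ^ b) < 0;  // signs differ, so the zero product is negative
+//     }
+//
+//     int main() {
+//       assert(ZeroProductIsNegativeZero(-3, 0));   // -3 * 0 is -0.0 in JS
+//       assert(!ZeroProductIsNegativeZero(3, 0));   //  3 * 0 is  0
+//     }
+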
+template <typename LabelType>
+void MacroAssembler::SmiTryAddConstant(Register dst,
+ Register src,
+ Smi* constant,
+ LabelType* on_not_smi_result) {
+ // Does not assume that src is a smi.
+ ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src.is(kScratchRegister));
+
+ JumpIfNotSmi(src, on_not_smi_result);
+ Register tmp = (dst.is(src) ? kScratchRegister : dst);
+ LoadSmiConstant(tmp, constant);
+ addq(tmp, src);
+ j(overflow, on_not_smi_result);
+ if (dst.is(src)) {
+ movq(dst, tmp);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiAddConstant(Register dst,
+ Register src,
+ Smi* constant,
+ LabelType* on_not_smi_result) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+
+ LoadSmiConstant(kScratchRegister, constant);
+ addq(kScratchRegister, src);
+ j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
+ } else {
+ LoadSmiConstant(dst, constant);
+ addq(dst, src);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSubConstant(Register dst,
+ Register src,
+ Smi* constant,
+ LabelType* on_not_smi_result) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ if (constant->value() == Smi::kMinValue) {
+ // Subtracting min-value from any non-negative value will overflow.
+ // We test the non-negativeness before doing the subtraction.
+ testq(src, src);
+ j(not_sign, on_not_smi_result);
+ LoadSmiConstant(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ } else {
+ // Subtract by adding the negation.
+ LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
+ addq(kScratchRegister, dst);
+ j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
+ }
+ } else {
+ if (constant->value() == Smi::kMinValue) {
+ // Subtracting min-value from any non-negative value will overflow.
+ // We test the non-negativeness before doing the subtraction.
+ testq(src, src);
+ j(not_sign, on_not_smi_result);
+ LoadSmiConstant(dst, constant);
+ // Adding and subtracting the min-value give the same result; they differ
+ // only in the overflow flag, which we don't check here.
+ addq(dst, src);
+ } else {
+ // Subtract by adding the negation.
+ LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
+ addq(dst, src);
+ j(overflow, on_not_smi_result);
+ }
+ }
+}
+
+
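+// The dedicated kMinValue branches exist because the generic path subtracts
+// by adding the negated constant, and the most negative payload has no
+// positive counterpart. A standalone check, with int32_t standing in for the
+// x64 smi payload range:
+//
+//     #include <cassert>
+//     #include <cstdint>
+//     #include <limits>
+//
+//     int main() {
+//       int32_t min = std::numeric_limits<int32_t>::min();
+//       // -min overflows the payload width, so the Smi::FromInt(-value)
+//       // path is unusable for this one constant.
+//       assert(-static_cast<int64_t>(min) >
+//              static_cast<int64_t>(std::numeric_limits<int32_t>::max()));
+//     }
+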
+template <typename LabelType>
+void MacroAssembler::SmiDiv(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src2.is(rax));
+ ASSERT(!src2.is(rdx));
+ ASSERT(!src1.is(rdx));
+
+ // Check for 0 divisor (result is +/-Infinity).
+ NearLabel positive_divisor;
+ testq(src2, src2);
+ j(zero, on_not_smi_result);
+
+ if (src1.is(rax)) {
+ movq(kScratchRegister, src1);
+ }
+ SmiToInteger32(rax, src1);
+ // We need to rule out dividing Smi::kMinValue by -1, since that would
+ // overflow in idiv and raise an exception.
+ // We combine this with negative zero test (negative zero only happens
+ // when dividing zero by a negative number).
+
+ // We overshoot a little and go to slow case if we divide min-value
+ // by any negative value, not just -1.
+ NearLabel safe_div;
+ testl(rax, Immediate(0x7fffffff));
+ j(not_zero, &safe_div);
+ testq(src2, src2);
+ if (src1.is(rax)) {
+ j(positive, &safe_div);
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
+ } else {
+ j(negative, on_not_smi_result);
+ }
+ bind(&safe_div);
+
+ SmiToInteger32(src2, src2);
+ // Sign extend eax (the untagged src1) into edx:eax.
+ cdq();
+ idivl(src2);
+ Integer32ToSmi(src2, src2);
+ // Check that the remainder is zero.
+ testl(rdx, rdx);
+ if (src1.is(rax)) {
+ NearLabel smi_result;
+ j(zero, &smi_result);
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
+ bind(&smi_result);
+ } else {
+ j(not_zero, on_not_smi_result);
+ }
+ if (!dst.is(src1) && src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ Integer32ToSmi(dst, rax);
+}
+
+
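+// The guard in SmiDiv covers the two inputs idivl cannot take: a zero
+// divisor, and INT32_MIN / -1, whose quotient 2^31 is unrepresentable; both
+// raise #DE on x86 instead of wrapping. A standalone statement of the
+// condition (the overshoot to any negative divisor above is a deliberate
+// simplification):
+//
+//     #include <cassert>
+//     #include <cstdint>
+//
+//     // True iff idivl would complete without faulting.
+//     bool SafeForIdiv(int32_t dividend, int32_t divisor) {
+//       if (divisor == 0) return false;                            // #DE
+//       if (dividend == INT32_MIN && divisor == -1) return false;  // #DE
+//       return true;
+//     }
+//
+//     int main() {
+//       assert(!SafeForIdiv(INT32_MIN, -1));
+//       assert(SafeForIdiv(INT32_MIN, 2));
+//     }
+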
+template <typename LabelType>
+void MacroAssembler::SmiMod(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!src2.is(rax));
+ ASSERT(!src2.is(rdx));
+ ASSERT(!src1.is(rdx));
+ ASSERT(!src1.is(src2));
+
+ testq(src2, src2);
+ j(zero, on_not_smi_result);
+
+ if (src1.is(rax)) {
+ movq(kScratchRegister, src1);
+ }
+ SmiToInteger32(rax, src1);
+ SmiToInteger32(src2, src2);
+
+ // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
+ NearLabel safe_div;
+ cmpl(rax, Immediate(Smi::kMinValue));
+ j(not_equal, &safe_div);
+ cmpl(src2, Immediate(-1));
+ j(not_equal, &safe_div);
+ // Retag inputs and go slow case.
+ Integer32ToSmi(src2, src2);
+ if (src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ jmp(on_not_smi_result);
+ bind(&safe_div);
+
+ // Sign extend eax into edx:eax.
+ cdq();
+ idivl(src2);
+ // Restore smi tags on inputs.
+ Integer32ToSmi(src2, src2);
+ if (src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ // Check for a negative zero result. If the result is zero, and the
+ // dividend is negative, go slow to return a floating point negative zero.
+ NearLabel smi_result;
+ testl(rdx, rdx);
+ j(not_zero, &smi_result);
+ testq(src1, src1);
+ j(negative, on_not_smi_result);
+ bind(&smi_result);
+ Integer32ToSmi(dst, rdx);
+}
+
+
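+// SmiMod has the matching negative-zero hazard: a zero remainder from a
+// negative dividend is -0.0 in JavaScript (for example -4 % 2), which again
+// forces the slow path. Standalone:
+//
+//     #include <cassert>
+//
+//     // Valid when remainder == dividend % divisor: is the JS result -0.0?
+//     bool ZeroRemainderIsNegativeZero(int dividend, int remainder) {
+//       return remainder == 0 && dividend < 0;
+//     }
+//
+//     int main() {
+//       assert(ZeroRemainderIsNegativeZero(-4, -4 % 2));
+//       assert(!ZeroRemainderIsNegativeZero(4, 4 % 2));
+//     }
+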
+template <typename LabelType>
+void MacroAssembler::SmiShiftLogicalRightConstant(
+ Register dst, Register src, int shift_value, LabelType* on_not_smi_result) {
+ // Logical right shift interprets its result as an *unsigned* number.
+ if (dst.is(src)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ movq(dst, src);
+ if (shift_value == 0) {
+ testq(dst, dst);
+ j(negative, on_not_smi_result);
+ }
+ shr(dst, Immediate(shift_value + kSmiShift));
+ shl(dst, Immediate(kSmiShift));
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(rcx));
+ NearLabel result_ok;
+ if (src1.is(rcx) || src2.is(rcx)) {
+ movq(kScratchRegister, rcx);
+ }
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
+ SmiToInteger32(rcx, src2);
+ orl(rcx, Immediate(kSmiShift));
+ shr_cl(dst); // Shift amount is (rcx & 0x1f) + 32.
+ shl(dst, Immediate(kSmiShift));
+ testq(dst, dst);
+ if (src1.is(rcx) || src2.is(rcx)) {
+ NearLabel positive_result;
+ j(positive, &positive_result);
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ jmp(on_not_smi_result);
+ bind(&positive_result);
+ } else {
+ j(negative, on_not_smi_result); // src2 was zero and src1 negative.
+ }
+}
+
+
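+// The orl/shr_cl pair above fuses untagging with the shift: or-ing kSmiShift
+// (32) into a count already reduced to 0..31 makes a single 64-bit shr both
+// discard the tag half and shift the payload, and the following shl re-tags.
+// A standalone model under that assumption about the count:
+//
+//     #include <cassert>
+//     #include <cstdint>
+//
+//     const int kSmiShift = 32;  // illustrative, as in the patch
+//
+//     uint64_t ShiftLogicalRightModel(uint64_t smi, int count /* 0..31 */) {
+//       uint64_t v = smi >> (count | kSmiShift);  // untag and shift at once
+//       return v << kSmiShift;                    // re-tag
+//     }
+//
+//     int main() {
+//       uint64_t eight = uint64_t{8} << kSmiShift;
+//       assert(ShiftLogicalRightModel(eight, 1) == uint64_t{4} << kSmiShift);
+//     }
+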
+template <typename LabelType>
+void MacroAssembler::SelectNonSmi(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smis) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(src1));
+ ASSERT(!dst.is(src2));
+ // The operands must not both be smis.
+#ifdef DEBUG
+ if (allow_stub_calls()) { // Check contains a stub call.
+ Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+ Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+ }
+#endif
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ movl(kScratchRegister, Immediate(kSmiTagMask));
+ and_(kScratchRegister, src1);
+ testl(kScratchRegister, src2);
+ // If non-zero, then both tag bits are set, i.e., neither operand is a smi.
+ j(not_zero, on_not_smis);
+
+ // Exactly one operand is a smi.
+ ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+ // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
+ subq(kScratchRegister, Immediate(1));
+ // If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
+ movq(dst, src1);
+ xor_(dst, src2);
+ and_(dst, kScratchRegister);
+ // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+ xor_(dst, src1);
+ // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
+}
+
+
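+// SelectNonSmi's arithmetic deserves a standalone restatement, assuming the
+// tag layout the surrounding asserts establish (bit 0 clear for smis, set for
+// heap object pointers): (src1 & 1) - 1 is all ones exactly when src1 is the
+// smi, so masking src1 ^ src2 with it and xor-ing src1 back in yields
+// whichever operand is not a smi, with no branch:
+//
+//     #include <cassert>
+//     #include <cstdint>
+//
+//     // Exactly one of src1, src2 is a smi (tag bit clear); return the other.
+//     uint64_t SelectNonSmi(uint64_t src1, uint64_t src2) {
+//       uint64_t scratch = (src1 & 1) - 1;        // all 1s iff src1 is the smi
+//       return ((src1 ^ src2) & scratch) ^ src1;  // src2 if src1 is the smi
+//     }
+//
+//     int main() {
+//       uint64_t smi = uint64_t{7} << 32;  // tag bit clear
+//       uint64_t obj = 0x1234561;          // heap pointer: tag bit set
+//       assert(SelectNonSmi(smi, obj) == obj);
+//       assert(SelectNonSmi(obj, smi) == obj);
+//     }
+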
+template <typename LabelType>
+void MacroAssembler::JumpIfSmi(Register src, LabelType* on_smi) {
+ ASSERT_EQ(0, kSmiTag);
+ Condition smi = CheckSmi(src);
+ j(smi, on_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotSmi(Register src, LabelType* on_not_smi) {
+ Condition smi = CheckSmi(src);
+ j(NegateCondition(smi), on_not_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotPositiveSmi(Register src,
+ LabelType* on_not_positive_smi) {
+ Condition positive_smi = CheckPositiveSmi(src);
+ j(NegateCondition(positive_smi), on_not_positive_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+ Smi* constant,
+ LabelType* on_equals) {
+ SmiCompare(src, constant);
+ j(equal, on_equals);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotValidSmiValue(Register src,
+ LabelType* on_invalid) {
+ Condition is_valid = CheckInteger32ValidSmiValue(src);
+ j(NegateCondition(is_valid), on_invalid);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
+ LabelType* on_invalid) {
+ Condition is_valid = CheckUInteger32ValidSmiValue(src);
+ j(NegateCondition(is_valid), on_invalid);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotBothSmi(Register src1,
+ Register src2,
+ LabelType* on_not_both_smi) {
+ Condition both_smi = CheckBothSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1,
+ Register src2,
+ LabelType* on_not_both_smi) {
+ Condition both_smi = CheckBothPositiveSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
+ Register second_object,
+ Register scratch1,
+ Register scratch2,
+ LabelType* on_fail) {
+ // Check that both objects are not smis.
+ Condition either_smi = CheckEitherSmi(first_object, second_object);
+ j(either_smi, on_fail);
+
+ // Load instance type for both strings.
+ movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+ movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+ movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+ andl(scratch1, Immediate(kFlatAsciiStringMask));
+ andl(scratch2, Immediate(kFlatAsciiStringMask));
+ // Interleave the bits to check both scratch1 and scratch2 in one test.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmpl(scratch1,
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ j(not_equal, on_fail);
+}
+
+
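+// The lea with times_8 above folds two type checks into one compare: because
+// the mask fits in three bits (the ASSERT_EQ enforces it), m1 + (m2 << 3)
+// packs both masked type bytes side by side. A standalone model with
+// illustrative mask and tag values standing in for the real string-type
+// constants:
+//
+//     #include <cassert>
+//
+//     // One compare checks two masked type bytes against the same tag.
+//     bool BothMatchTag(int type1, int type2, int mask, int tag) {
+//       assert((mask & (mask << 3)) == 0);  // the non-overlap asserted above
+//       int combined = (type1 & mask) + ((type2 & mask) << 3);  // lea times_8
+//       return combined == tag + (tag << 3);
+//     }
+//
+//     int main() {
+//       const int kMask = 0x07, kTag = 0x04;  // illustrative values only
+//       assert(BothMatchTag(0x04, 0x04, kMask, kTag));
+//       assert(!BothMatchTag(0x04, 0x05, kMask, kTag));
+//     }
+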
+template <typename LabelType>
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+ Register instance_type,
+ Register scratch,
+ LabelType *failure) {
+ if (!scratch.is(instance_type)) {
+ movl(scratch, instance_type);
+ }
+
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+
+ andl(scratch, Immediate(kFlatAsciiStringMask));
+ cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ j(not_equal, failure);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ LabelType* on_fail) {
+ // Load instance type for both strings.
+ movq(scratch1, first_object_instance_type);
+ movq(scratch2, second_object_instance_type);
+
+ // Check that both are flat ascii strings.
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+ andl(scratch1, Immediate(kFlatAsciiStringMask));
+ andl(scratch2, Immediate(kFlatAsciiStringMask));
+ // Interleave the bits to check both scratch1 and scratch2 in one test.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmpl(scratch1,
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ j(not_equal, on_fail);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ LabelType* branch) {
+ if (Serializer::enabled()) {
+ // Can't do arithmetic on external references if it might get serialized.
+ // The mask isn't really an address. We load it as an external reference in
+ // case the size of the new space is different between the snapshot maker
+ // and the running system.
+ if (scratch.is(object)) {
+ movq(kScratchRegister, ExternalReference::new_space_mask());
+ and_(scratch, kScratchRegister);
+ } else {
+ movq(scratch, ExternalReference::new_space_mask());
+ and_(scratch, object);
+ }
+ movq(kScratchRegister, ExternalReference::new_space_start());
+ cmpq(scratch, kScratchRegister);
+ j(cc, branch);
+ } else {
+ ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+ intptr_t new_space_start =
+ reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
+ movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+ if (scratch.is(object)) {
+ addq(scratch, kScratchRegister);
+ } else {
+ lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+ }
+ and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+ j(cc, branch);
+ }
+}
+
+
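+// The fast path of InNewSpace assumes new space starts at a size-aligned
+// address, so (object - start) masked by ~(size - 1) is zero exactly for
+// addresses in [start, start + size). A standalone check with illustrative
+// constants (the real values come from Heap::NewSpaceStart and
+// Heap::NewSpaceMask):
+//
+//     #include <cassert>
+//     #include <cstdint>
+//
+//     int main() {
+//       const uintptr_t kNewSpaceSize  = 4u << 20;   // 4 MB, illustrative
+//       const uintptr_t kNewSpaceStart = 16u << 20;  // size-aligned base
+//       const uintptr_t kNewSpaceMask  = ~(kNewSpaceSize - 1);
+//
+//       uintptr_t inside  = kNewSpaceStart + 123;
+//       uintptr_t outside = kNewSpaceStart + kNewSpaceSize + 123;
+//       assert(((inside  - kNewSpaceStart) & kNewSpaceMask) == 0);
+//       assert(((outside - kNewSpaceStart) & kNewSpaceMask) != 0);
+//     }
+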
+template <typename LabelType>
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_register,
+ LabelType* done,
+ InvokeFlag flag) {
+ bool definitely_matches = false;
+ NearLabel invoke;
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+ } else {
+ Set(rax, actual.immediate());
+ if (expected.immediate() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ // Don't worry about adapting arguments for built-ins that
+ // don't want that done. Skip adaptation code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ Set(rbx, expected.immediate());
+ }
+ }
+ } else {
+ if (actual.is_immediate()) {
+ // Expected is in register, actual is immediate. This is the
+ // case when we invoke function values without going through the
+ // IC mechanism.
+ cmpq(expected.reg(), Immediate(actual.immediate()));
+ j(equal, &invoke);
+ ASSERT(expected.reg().is(rbx));
+ Set(rax, actual.immediate());
+ } else if (!expected.reg().is(actual.reg())) {
+ // Both expected and actual are in (different) registers. This
+ // is the case when we invoke functions using call and apply.
+ cmpq(expected.reg(), actual.reg());
+ j(equal, &invoke);
+ ASSERT(actual.reg().is(rax));
+ ASSERT(expected.reg().is(rbx));
+ }
+ }
+
+ if (!definitely_matches) {
+ Handle<Code> adaptor =
+ Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ if (!code_constant.is_null()) {
+ movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+ addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ } else if (!code_register.is(rdx)) {
+ movq(rdx, code_register);
+ }
+
+ if (flag == CALL_FUNCTION) {
+ Call(adaptor, RelocInfo::CODE_TARGET);
+ jmp(done);
+ } else {
+ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+ bind(&invoke);
+ }
+}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 765a90c02..eb48da9a9 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -216,7 +216,12 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype) {
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ // Check we're still in the same context.
+ __ Move(prototype, Top::global());
+ __ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)),
+ prototype);
+ __ j(not_equal, miss);
// Get the global function with the given index.
JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
@@ -964,7 +969,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ j(above_equal, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, rax);
+ masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, rdi, name, &miss);
}
@@ -983,7 +988,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, rax);
+ masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, rdi, name, &miss);
}
@@ -1004,7 +1009,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, rax);
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, rdi, name, &miss);
}
@@ -1358,7 +1363,8 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
- rax);
+ rax,
+ &miss);
ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, rdi, name, &miss);
@@ -1429,7 +1435,8 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
- rax);
+ rax,
+ &miss);
ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, rdi, name, &miss);
@@ -1541,6 +1548,119 @@ Object* CallStubCompiler::CompileStringFromCharCodeCall(
}
+Object* CallStubCompiler::CompileMathFloorCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // TODO(872): implement this.
+ return Heap::undefined_value();
+}
+
+
+Object* CallStubCompiler::CompileMathAbsCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- rcx : function name
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- ...
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+
+ __ JumpIfSmi(rdx, &miss);
+
+ CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into rax.
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+
+ // Check if the argument is a smi.
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(rax, &not_smi);
+ __ SmiToInteger32(rax, rax);
+
+  // Set rbx to all ones (== -1) if the argument is negative, or to
+  // all zeros otherwise; the arithmetic shift replicates the sign bit.
+  __ movl(rbx, rax);
+  __ sarl(rbx, Immediate(kBitsPerInt - 1));
+
+  // Bitwise-not rax (xor with -1) if the argument is negative, or
+  // leave it unchanged (xor with 0).
+  __ xorl(rax, rbx);
+
+  // Add one (subtract -1) if negative, completing -x == ~x + 1.
+  __ subl(rax, rbx);
+
+ // If the result is still negative, go to the slow case.
+ // This only happens for the most negative smi.
+ Label slow;
+ __ j(negative, &slow);
+
+ // Smi case done.
+ __ Integer32ToSmi(rax, rax);
+ __ ret(2 * kPointerSize);
+
+ // Check if the argument is a heap number and load its value.
+ __ bind(&not_smi);
+ __ CheckMap(rax, Factory::heap_number_map(), &slow, true);
+ __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+
+ // Check the sign of the argument. If the argument is positive,
+ // just return it.
+ Label negative_sign;
+ const int sign_mask_shift =
+ (HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
+ __ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
+ RelocInfo::NONE);
+ __ testq(rbx, rdi);
+ __ j(not_zero, &negative_sign);
+ __ ret(2 * kPointerSize);
+
+ // If the argument is negative, clear the sign, and return a new
+ // number. We still have the sign mask in rdi.
+ __ bind(&negative_sign);
+ __ xor_(rbx, rdi);
+ __ AllocateHeapNumber(rax, rdx, &slow);
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
+ __ ret(2 * kPointerSize);
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+ __ bind(&miss);
+ // rcx: function name.
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+}
+
+
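The fast paths above rely on two bit tricks. For smis, an arithmetic right shift yields a mask that is -1 for negative inputs and 0 otherwise, so (x ^ mask) - mask computes the absolute value without a branch; for heap numbers, the absolute value of an IEEE-754 double is obtained by clearing the sign bit. A self-contained sketch of both in standard C++, assuming bit 63 corresponds to HeapNumber::kSignMask:

#include <cstdint>
#include <cstring>

// Branchless abs for 32-bit integers, as in the smi path: -x == ~x + 1.
int32_t AbsInt32(int32_t x) {
  int32_t mask = x >> 31;    // arithmetic shift: 0 for x >= 0, -1 for x < 0
  return (x ^ mask) - mask;  // still negative only for INT32_MIN (slow case)
}

// Abs for doubles, as in the heap-number path: clear the sign bit.
// (The stub xors the bit only after testing that it is set; clearing it
// unconditionally is equivalent.)
double AbsDouble(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  bits &= ~(uint64_t{1} << 63);  // corresponds to HeapNumber::kSignMask
  std::memcpy(&d, &bits, sizeof d);
  return d;
}

The ret(2 * kPointerSize) on each success path pops the argument and the receiver in addition to the return address, matching the one-argument stack layout in the state comment at the top of the function.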
Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
JSObject* holder,
String* name) {
@@ -1684,7 +1804,10 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx, rdi,
callback, name, &miss, &failure);
- if (!success) return failure;
+ if (!success) {
+ miss.Unuse();
+ return failure;
+ }
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
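The added miss.Unuse() calls keep debug builds happy on the early-failure returns: bind(&miss) is skipped on that path, and an assembler label checks on destruction that it was either bound or never used. An illustrative class with that (assumed) invariant:

#include <cassert>

// Simplified model of an assembler label's debug-mode invariant.
class Label {
 public:
  ~Label() { assert(bound_ || !used_); }  // no dangling branch targets
  void Use() { used_ = true; }     // a jump to this label was emitted
  void Bind() { bound_ = true; }   // the label's code position was emitted
  void Unuse() { used_ = false; }  // cancel pending uses before early return
 private:
  bool used_ = false;
  bool bound_ = false;
};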
@@ -1740,7 +1863,10 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
name,
rdx,
&miss);
- if (cell->IsFailure()) return cell;
+ if (cell->IsFailure()) {
+ miss.Unuse();
+ return cell;
+ }
}
// Return undefined if maps of the full prototype chain are still the
@@ -1845,12 +1971,12 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
- __ IncrementCounter(&Counters::named_load_global_inline, 1);
+ __ IncrementCounter(&Counters::named_load_global_stub, 1);
__ movq(rax, rbx);
__ ret(0);
__ bind(&miss);
- __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+ __ IncrementCounter(&Counters::named_load_global_stub_miss, 1);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
@@ -1878,7 +2004,10 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi,
callback, name, &miss, &failure);
- if (!success) return failure;
+ if (!success) {
+ miss.Unuse();
+ return failure;
+ }
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_callback, 1);
diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc
index 88e7cc881..e88a993b2 100644
--- a/deps/v8/src/x64/virtual-frame-x64.cc
+++ b/deps/v8/src/x64/virtual-frame-x64.cc
@@ -259,7 +259,7 @@ void VirtualFrame::Push(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL) {
- Slot* slot = proxy->var()->slot();
+ Slot* slot = proxy->var()->AsSlot();
if (slot->type() == Slot::LOCAL) {
PushLocalAt(slot->index());
return;
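The rename from slot() to AsSlot() changes only the accessor through which a variable's rewrite is retrieved; the surrounding fast path is unchanged. Its shape, in a simplified sketch with assumed, illustrative types: stack-allocated locals already live in the virtual frame, so pushing one is an index operation rather than a generic expression load.

enum class SlotType { kLocal, kParameter, kContext };

struct Slot {
  SlotType type;
  int index;
};

// Returns true when the push was handled by the local fast path;
// the real frame would call PushLocalAt(slot->index()) here.
bool TryPushLocal(const Slot* slot) {
  if (slot == nullptr || slot->type != SlotType::kLocal) return false;
  return true;
}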