author     Ryan Dahl <ry@tinyclouds.org>   2010-06-15 14:52:00 -0700
committer  Ryan Dahl <ry@tinyclouds.org>   2010-06-15 14:52:00 -0700
commit     bc76624ec72792b23b4e951ef1ee8b95b454aa34 (patch)
tree       c5012b8980ec1b7e00be02b87da720a5e55abf7a
parent     f3cd7bbe778bb69251637d49a33ddacc0973e973 (diff)
download   node-new-bc76624ec72792b23b4e951ef1ee8b95b454aa34.tar.gz
Upgrade V8 to 2.2.17
-rw-r--r--  deps/v8/ChangeLog                            11
-rw-r--r--  deps/v8/SConstruct                            5
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc              131
-rw-r--r--  deps/v8/src/arm/codegen-arm.h                15
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc           6
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc       15
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h        14
-rw-r--r--  deps/v8/src/debug.cc                         34
-rw-r--r--  deps/v8/src/debug.h                           3
-rw-r--r--  deps/v8/src/flag-definitions.h                2
-rw-r--r--  deps/v8/src/full-codegen.cc                   2
-rw-r--r--  deps/v8/src/full-codegen.h                    2
-rw-r--r--  deps/v8/src/heap-inl.h                        9
-rw-r--r--  deps/v8/src/heap.cc                           6
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc            101
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h              10
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc         6
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc                 361
-rw-r--r--  deps/v8/src/ic.cc                            24
-rw-r--r--  deps/v8/src/ic.h                              2
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc         2
-rw-r--r--  deps/v8/src/mirror-debugger.js               50
-rw-r--r--  deps/v8/src/profile-generator-inl.h          11
-rw-r--r--  deps/v8/src/profile-generator.cc            793
-rw-r--r--  deps/v8/src/profile-generator.h             309
-rw-r--r--  deps/v8/src/runtime.cc                       55
-rw-r--r--  deps/v8/src/spaces-inl.h                     27
-rw-r--r--  deps/v8/src/spaces.h                          1
-rw-r--r--  deps/v8/src/v8-counters.h                     8
-rw-r--r--  deps/v8/src/version.cc                        2
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc               79
-rw-r--r--  deps/v8/src/x64/codegen-x64.h                16
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc           6
-rw-r--r--  deps/v8/test/cctest/test-heap.cc              2
-rw-r--r--  deps/v8/test/mjsunit/debug-return-value.js  163
-rw-r--r--  deps/v8/test/mjsunit/keyed-call-generic.js   96
36 files changed, 2142 insertions, 237 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index e277c7a1ad..941c314a7e 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,6 +1,13 @@
-2010-06-07: Version 2.2.16
+2010-06-14: Version 2.2.17
- Remove the SetExternalStringDiposeCallback API. Changed the
+ Improved debugger support for stepping out of functions.
+
+ Incremental performance improvements.
+
+
+2010-06-09: Version 2.2.16
+
+ Removed the SetExternalStringDiposeCallback API. Changed the
disposal of external string resources to call a virtual Dispose
method on the resource.
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index b8de1b81a9..8af97d6870 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -781,6 +781,7 @@ def GetOptions():
result = Options()
result.Add('mode', 'compilation mode (debug, release)', 'release')
result.Add('sample', 'build sample (shell, process, lineprocessor)', '')
+ result.Add('cache', 'directory to use for scons build cache', '')
result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '')
result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '')
for (name, option) in SIMPLE_OPTIONS.iteritems():
@@ -862,6 +863,8 @@ def VerifyOptions(env):
Abort("Shared Object soname not applicable for static library.")
if env['os'] != 'win32' and env['pgo'] != 'off':
Abort("Profile guided optimization only supported on Windows.")
+ if env['cache'] and not os.path.isdir(env['cache']):
+ Abort("The specified cache directory does not exist.")
if not (env['arch'] == 'arm' or env['simulator'] == 'arm') and ('unalignedaccesses' in ARGUMENTS):
print env['arch']
print env['simulator']
@@ -1130,6 +1133,8 @@ def Build():
else:
env.Default('library')
+ if env['cache']:
+ CacheDir(env['cache'])
# We disable deprecation warnings because we need to be able to use
# env.Copy without getting warnings for compatibility with older
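The new cache option plugs SCons' built-in CacheDir() mechanism into the V8 build; VerifyOptions aborts unless the named directory already exists, so it has to be created up front. A plausible invocation (path purely illustrative): scons mode=release cache=/var/tmp/v8-scons-cache, after creating /var/tmp/v8-scons-cache.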
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 8d4bf14c98..1ca236d117 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -5087,18 +5087,28 @@ void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and jump to the runtime.
Load(args->at(0));
- frame_->CallRuntime(Runtime::kMath_sin, 1);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ TranscendentalCacheStub stub(TranscendentalCache::SIN);
+ frame_->SpillAllButCopyTOSToR0();
+ frame_->CallStub(&stub, 1);
+ } else {
+ frame_->CallRuntime(Runtime::kMath_sin, 1);
+ }
frame_->EmitPush(r0);
}
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and jump to the runtime.
Load(args->at(0));
- frame_->CallRuntime(Runtime::kMath_cos, 1);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ TranscendentalCacheStub stub(TranscendentalCache::COS);
+ frame_->SpillAllButCopyTOSToR0();
+ frame_->CallStub(&stub, 1);
+ } else {
+ frame_->CallRuntime(Runtime::kMath_cos, 1);
+ }
frame_->EmitPush(r0);
}
@@ -7090,7 +7100,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
CpuFeatures::Scope scope(VFP3);
__ CheckMap(object,
scratch1,
- Factory::heap_number_map(),
+ Heap::kHeapNumberMapRootIndex,
not_found,
true);
@@ -8236,6 +8246,110 @@ Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
}
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Argument is a number and is on stack and in r0.
+ Label runtime_call;
+ Label input_not_smi;
+ Label loaded;
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Load argument and check if it is a smi.
+ __ BranchOnNotSmi(r0, &input_not_smi);
+
+ CpuFeatures::Scope scope(VFP3);
+ // Input is a smi. Convert to double and load the low and high words
+ // of the double into r2, r3.
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ __ b(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ CheckMap(r0,
+ r1,
+ Heap::kHeapNumberMapRootIndex,
+ &runtime_call,
+ true);
+ // Input is a HeapNumber. Load it to a double register and store the
+ // low and high words into r2, r3.
+ __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
+
+ __ bind(&loaded);
+ // r2 = low 32 bits of double value
+ // r3 = high 32 bits of double value
+ // Compute hash:
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ eor(r1, r2, Operand(r3));
+ __ eor(r1, r1, Operand(r1, LSR, 16));
+ __ eor(r1, r1, Operand(r1, LSR, 8));
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ const int kTranscendentalCacheSizeBits = 9;
+ ASSERT_EQ(1 << kTranscendentalCacheSizeBits,
+ TranscendentalCache::kCacheSize);
+ __ ubfx(r1, r1, 0, kTranscendentalCacheSizeBits);
+ } else {
+ __ and_(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
+ }
+
+ // r2 = low 32 bits of double value.
+ // r3 = high 32 bits of double value.
+ // r1 = TranscendentalCache::hash(double value).
+ __ mov(r0,
+ Operand(ExternalReference::transcendental_cache_array_address()));
+ // r0 points to cache array.
+ __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // r0 points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ cmp(r0, Operand(0));
+ __ b(eq, &runtime_call);
+
+#ifdef DEBUG
+ // Check that the layout of cache elements matches expectations.
+ { TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
+#endif
+
+ // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
+ __ add(r1, r1, Operand(r1, LSL, 1));
+ __ add(r0, r0, Operand(r1, LSL, 2));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit());
+ __ cmp(r2, r4);
+ __ b(ne, &runtime_call);
+ __ cmp(r3, r5);
+ __ b(ne, &runtime_call);
+ // Cache hit. Load result, pop argument and return.
+ __ mov(r0, Operand(r6));
+ __ pop();
+ __ Ret();
+ }
+
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
void StackCheckStub::Generate(MacroAssembler* masm) {
// Do tail-call to runtime routine. Runtime routines expect at least one
// argument, so give it a Smi.
@@ -9550,8 +9664,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Index is not a smi.
__ bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
- __ CheckMap(index_, scratch_,
- Factory::heap_number_map(), index_not_number_, true);
+ __ CheckMap(index_,
+ scratch_,
+ Heap::kHeapNumberMapRootIndex,
+ index_not_number_,
+ true);
call_helper.BeforeCall(masm);
__ Push(object_, index_);
__ push(index_); // Consumed by runtime conversion function.
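The stub above keys its cache lookup on a hash of the double's raw bits, spelled out in the generated-code comment: h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h &= kCacheSize - 1. A minimal JavaScript sketch of the same computation, with kCacheSize = 512 matching the ASSERT_EQ against kTranscendentalCacheSizeBits (the function name and the split of the double into two 32-bit halves are illustrative):

    // Hash of a double value's two 32-bit halves, as in the ARM stub above.
    function transcendentalHash(low, high) {
      var kCacheSize = 512;           // 1 << kTranscendentalCacheSizeBits
      var h = (low ^ high) >>> 0;     // combine low and high words
      h = (h ^ (h >>> 16)) >>> 0;     // fold in the upper half-word
      h = (h ^ (h >>> 8)) >>> 0;      // fold in the next byte
      return h & (kCacheSize - 1);    // mask down to a cache index
    }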
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index cabdf007b9..91adff0f66 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -589,6 +589,21 @@ class CodeGenerator: public AstVisitor {
};
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+ explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+ : type_(type) {}
+ void Generate(MacroAssembler* masm);
+ private:
+ TranscendentalCache::Type type_;
+ Major MajorKey() { return TranscendentalCache; }
+ int MinorKey() { return type_; }
+ Runtime::FunctionId RuntimeFunction();
+};
+
+
class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index e2cd84d60b..e61966399d 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -196,11 +196,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
// body.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}
- EmitReturnSequence(function()->end_position());
+ EmitReturnSequence();
}
-void FullCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ b(&return_label_);
@@ -224,7 +224,7 @@ void FullCodeGenerator::EmitReturnSequence(int position) {
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
- CodeGenerator::RecordPositions(masm_, position);
+ CodeGenerator::RecordPositions(masm_, function()->end_position());
__ RecordJSReturn();
masm_->mov(sp, fp);
masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 2a4f354d9f..6292b58157 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -1199,6 +1199,21 @@ void MacroAssembler::CheckMap(Register obj,
}
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ bool is_heap_object) {
+ if (!is_heap_object) {
+ BranchOnSmi(obj, fail);
+ }
+ ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ LoadRoot(ip, index);
+ cmp(scratch, ip);
+ b(ne, fail);
+}
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 4bfabb30cf..87f7b5feef 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -401,15 +401,23 @@ class MacroAssembler: public Assembler {
InstanceType type);
- // Check if the map of an object is equal to a specified map and
- // branch to label if not. Skip the smi check if not required
- // (object is known to be a heap object)
+ // Check if the map of an object is equal to a specified map (either
+ // given directly or as an index into the root list) and branch to
+ // label if not. Skip the smi check if not required (object is known
+ // to be a heap object)
void CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
bool is_heap_object);
+ void CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ bool is_heap_object);
+
+
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// Returns a condition that will be enabled if the object was a string.
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index ca20a65b65..98e366c7bc 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -1713,6 +1713,40 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
}
+bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
+ HandleScope scope;
+
+ // Get the executing function in which the debug break occurred.
+ Handle<SharedFunctionInfo> shared =
+ Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+ if (!EnsureDebugInfo(shared)) {
+ // Return if we failed to retrieve the debug info.
+ return false;
+ }
+ Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+ Handle<Code> code(debug_info->code());
+#ifdef DEBUG
+ // Get the code which is actually executing.
+ Handle<Code> frame_code(frame->code());
+ ASSERT(frame_code.is_identical_to(code));
+#endif
+
+ // Find the call address in the running code.
+ Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
+
+ // Check if the location is at JS return.
+ RelocIterator it(debug_info->code());
+ while (!it.done()) {
+ if (RelocInfo::IsJSReturn(it.rinfo()->rmode())) {
+ return (it.rinfo()->pc() ==
+ addr - Assembler::kPatchReturnSequenceAddressOffset);
+ }
+ it.next();
+ }
+ return false;
+}
+
+
void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id) {
thread_local_.frames_are_dropped_ = true;
thread_local_.break_frame_id_ = new_break_frame_id;
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 0e12c76f05..1c674711d9 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -270,6 +270,9 @@ class Debug {
// Check whether a global object is the debug global object.
static bool IsDebugGlobal(GlobalObject* global);
+ // Check whether this frame is just about to return.
+ static bool IsBreakAtReturn(JavaScriptFrame* frame);
+
// Fast check to see if any break points are active.
inline static bool has_break_points() { return has_break_points_; }
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index dc8efcc106..91477f9abd 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -191,6 +191,8 @@ DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
+DEFINE_bool(flush_code, false,
+ "flush code that we expect not to use again before full gc")
// v8.cc
DEFINE_bool(use_idle_notification, true,
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 6316158cd9..b64a179094 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -1077,7 +1077,7 @@ void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
}
__ Drop(stack_depth);
- EmitReturnSequence(stmt->statement_pos());
+ EmitReturnSequence();
}
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 0ce600b9f6..3d562324a0 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -389,7 +389,7 @@ class FullCodeGenerator: public AstVisitor {
FunctionLiteral* function);
// Platform-specific return sequence
- void EmitReturnSequence(int position);
+ void EmitReturnSequence();
// Platform-specific code sequences for calls
void EmitCallWithStub(Call* expr);
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index cc558b8248..5cb24eec2f 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -196,12 +196,9 @@ void Heap::RecordWrite(Address address, int offset) {
void Heap::RecordWrites(Address address, int start, int len) {
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
- for (int offset = start;
- offset < start + len * kPointerSize;
- offset += kPointerSize) {
- SLOW_ASSERT(Contains(address + offset));
- Page::FromAddress(address)->MarkRegionDirty(address + offset);
- }
+ Page* page = Page::FromAddress(address);
+ page->SetRegionMarks(page->GetRegionMarks() |
+ page->GetRegionMaskForSpan(address + start, len * kPointerSize));
}
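RecordWrites now replaces the per-pointer loop over MarkRegionDirty with a single read-modify-write of the page's region bitmap. An illustrative JavaScript sketch of the span-to-mask idea, assuming one bit per fixed-size region and at most 32 regions per page (the real mask construction is GetRegionMaskForSpan in spaces-inl.h, also touched by this commit):

    // One bit per dirty-marking region overlapped by [start, start + len).
    function regionMaskForSpan(start, len, kRegionSizeLog2) {
      var first = start >>> kRegionSizeLog2;
      var last = (start + len - 1) >>> kRegionSizeLog2;
      var mask = 0;
      for (var i = first; i <= last; i++) {
        mask |= 1 << i;
      }
      return mask >>> 0;  // marks |= mask is then a single store
    }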
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 3115cb31d6..3fc7d02bc8 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -605,8 +605,10 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
EnsureFromSpaceIsCommitted();
if (collector == MARK_COMPACTOR) {
- // Flush all potentially unused code.
- FlushCode();
+ if (FLAG_flush_code) {
+ // Flush all potentially unused code.
+ FlushCode();
+ }
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
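Code flushing thereby becomes opt-in: the flush_code flag defaults to false, and PerformGarbageCollection only calls FlushCode() when it is set. Re-enabling it should follow the usual V8 flag syntax, for example d8 --flush_code script.js (the d8 invocation is illustrative; the flag name comes from the definition above).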
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 603f9135ff..29b6c69192 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -1446,10 +1446,40 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
}
-static void CheckTwoForSminess(MacroAssembler* masm,
- Register left, Register right, Register scratch,
- TypeInfo left_info, TypeInfo right_info,
- DeferredInlineBinaryOperation* deferred);
+void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
+ Register right,
+ Register scratch,
+ TypeInfo left_info,
+ TypeInfo right_info,
+ DeferredCode* deferred) {
+ if (left.is(right)) {
+ if (!left_info.IsSmi()) {
+ __ test(left, Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(left);
+ }
+ } else if (!left_info.IsSmi()) {
+ if (!right_info.IsSmi()) {
+ __ mov(scratch, left);
+ __ or_(scratch, Operand(right));
+ __ test(scratch, Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ } else {
+ __ test(left, Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ if (FLAG_debug_code) __ AbortIfNotSmi(right);
+ }
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(left);
+ if (!right_info.IsSmi()) {
+ __ test(right, Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(right);
+ }
+ }
+}
// Implements a binary operation using a deferred code object and some
@@ -1539,19 +1569,11 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left_type_info,
right_type_info,
overwrite_mode);
- if (left->reg().is(right->reg())) {
- __ test(left->reg(), Immediate(kSmiTagMask));
- } else {
- // Use the quotient register as a scratch for the tag check.
- if (!left_is_in_eax) __ mov(eax, left->reg());
- left_is_in_eax = false; // About to destroy the value in eax.
- __ or_(eax, Operand(right->reg()));
- ASSERT(kSmiTag == 0); // Adjust test if not the case.
- __ test(eax, Immediate(kSmiTagMask));
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), edx,
+ left_type_info, right_type_info, deferred);
+ if (!left_is_in_eax) {
+ __ mov(eax, left->reg());
}
- deferred->Branch(not_zero);
-
- if (!left_is_in_eax) __ mov(eax, left->reg());
// Sign extend eax into edx:eax.
__ cdq();
// Check for 0 divisor.
@@ -1674,8 +1696,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
__ cmp(answer.reg(), 0xc0000000);
deferred->Branch(negative);
} else {
- CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
- left_type_info, right_type_info, deferred);
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
+ left_type_info, right_type_info, deferred);
// Untag both operands.
__ mov(answer.reg(), left->reg());
@@ -1751,8 +1773,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left_type_info,
right_type_info,
overwrite_mode);
- CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
- left_type_info, right_type_info, deferred);
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
+ left_type_info, right_type_info, deferred);
__ mov(answer.reg(), left->reg());
switch (op) {
@@ -8976,40 +8998,6 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
#define __ ACCESS_MASM(masm)
-static void CheckTwoForSminess(MacroAssembler* masm,
- Register left, Register right, Register scratch,
- TypeInfo left_info, TypeInfo right_info,
- DeferredInlineBinaryOperation* deferred) {
- if (left.is(right)) {
- if (!left_info.IsSmi()) {
- __ test(left, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left);
- }
- } else if (!left_info.IsSmi()) {
- if (!right_info.IsSmi()) {
- __ mov(scratch, left);
- __ or_(scratch, Operand(right));
- __ test(scratch, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- __ test(left, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- if (FLAG_debug_code) __ AbortIfNotSmi(right);
- }
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left);
- if (!right_info.IsSmi()) {
- __ test(right, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right);
- }
- }
-}
-
-
Handle<String> Reference::GetName() {
ASSERT(type_ == NAMED);
Property* property = expression_->AsProperty();
@@ -10302,13 +10290,14 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ mov(ecx, ebx);
__ xor_(ecx, Operand(edx));
__ mov(eax, ecx);
- __ sar(eax, 16);
+ __ shr(eax, 16);
__ xor_(ecx, Operand(eax));
__ mov(eax, ecx);
- __ sar(eax, 8);
+ __ shr(eax, 8);
__ xor_(ecx, Operand(eax));
ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
__ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
+
// ST[0] == double value.
// ebx = low 32 bits of double value.
// edx = high 32 bits of double value.
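The sar-to-shr change in the hash computation above is a correctness fix for the same xor-fold hash used by the ARM stub: sar is an arithmetic shift that replicates the sign bit, so whenever the double's high word has its top bit set (every negative number), the folded-in bits would be all ones rather than the true upper bits. JavaScript has both operators, which makes the difference easy to demonstrate:

    var h = 0x80000004 | 0;           // 32-bit value with the sign bit set
    print((h >> 16).toString(16));    // -8000: arithmetic shift smears the sign
    print((h >>> 16).toString(16));   // 8000: logical shift brings in zeros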
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 180b9a6bdf..a432c13f15 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -519,6 +519,16 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(BinaryOperation* expr,
OverwriteMode overwrite_mode);
+ // Emits code sequence that jumps to deferred code if the inputs
+ // are not both smis. Cannot be in MacroAssembler because it takes
+ // advantage of TypeInfo to skip unneeded checks.
+ void JumpIfNotBothSmiUsingTypeInfo(Register left,
+ Register right,
+ Register scratch,
+ TypeInfo left_info,
+ TypeInfo right_info,
+ DeferredCode* deferred);
+
// If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time.
// Returns true if it succeeds. Otherwise it has no effect.
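The declaration makes the point of the refactor explicit: the emitted smi check shrinks as static type knowledge grows. A JavaScript sketch of the decision table implemented in codegen-ia32.cc above (booleans stand in for TypeInfo::IsSmi()):

    // Which runtime check JumpIfNotBothSmiUsingTypeInfo emits.
    function smiCheckPlan(leftKnownSmi, rightKnownSmi, sameRegister) {
      if (sameRegister) {
        return leftKnownSmi ? 'no test (debug assert only)' : 'test left';
      }
      if (!leftKnownSmi && !rightKnownSmi) return 'or into scratch, test once';
      if (!leftKnownSmi) return 'test left only';
      if (!rightKnownSmi) return 'test right only';
      return 'no test (debug asserts only)';
    }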
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 28b766a572..c75044402c 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -186,12 +186,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
{ Comment cmnt(masm_, "[ return <undefined>;");
// Emit a 'return undefined' in case control fell off the end of the body.
__ mov(eax, Factory::undefined_value());
- EmitReturnSequence(function()->end_position());
+ EmitReturnSequence();
}
}
-void FullCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -207,7 +207,7 @@ void FullCodeGenerator::EmitReturnSequence(int position) {
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
#endif
- CodeGenerator::RecordPositions(masm_, position);
+ CodeGenerator::RecordPositions(masm_, function()->end_position());
__ RecordJSReturn();
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index dc6bfc90de..f339d2e197 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -57,6 +57,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Register r0,
Register r1,
Register r2,
+ Register result,
DictionaryCheck check_dictionary) {
// Register use:
//
@@ -66,9 +67,10 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// r0 - used to hold the property dictionary.
//
// r1 - used for the index into the property dictionary
- // - holds the result on exit.
//
// r2 - used to hold the capacity of the property dictionary.
+ //
+ // result - holds the result on exit.
Label done;
@@ -149,7 +151,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ mov(r1, Operand(r0, r1, times_4, kValueOffset - kHeapObjectTag));
+ __ mov(result, Operand(r0, r1, times_4, kValueOffset - kHeapObjectTag));
}
@@ -159,14 +161,13 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Register key,
Register r0,
Register r1,
- Register r2) {
+ Register r2,
+ Register result) {
// Register use:
//
// elements - holds the slow-case elements of the receiver and is unchanged.
//
- // key - holds the smi key on entry and is unchanged if a branch is
- // performed to the miss label. If the load succeeds and we
- // fall through, key holds the result on exit.
+ // key - holds the smi key on entry and is unchanged.
//
// Scratch registers:
//
@@ -175,6 +176,9 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// r1 - used to hold the capacity mask of the dictionary
//
// r2 - used for the index into the dictionary.
+ //
+ // result - holds the result on exit if the load succeeds and we fall through.
+
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
@@ -246,7 +250,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
- __ mov(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+ __ mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
@@ -298,52 +302,158 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, check_string, index_smi, index_string;
- Label check_pixel_array, probe_dictionary, check_number_dictionary;
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register r0,
+ Label* slow) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // Scratch registers:
+ // r0 - used to hold the map of the receiver.
// Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, slow, not_taken);
// Get the map of the receiver.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), kSlowCaseBitFieldMask);
- __ j(not_zero, &slow, not_taken);
+ __ test_b(FieldOperand(r0, Map::kBitFieldOffset),
+ KeyedLoadIC::kSlowCaseBitFieldMask);
+ __ j(not_zero, slow, not_taken);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing
- // into string objects work as intended.
+ // into string objects works as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ CmpInstanceType(ecx, JS_OBJECT_TYPE);
- __ j(below, &slow, not_taken);
- // Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &check_string, not_taken);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
- __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+
+ __ CmpInstanceType(r0, JS_OBJECT_TYPE);
+ __ j(below, slow, not_taken);
+}
+
+
+// Loads an indexed element from a fast case array.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register scratch,
+ Register result,
+ Label* not_fast_array,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // key - holds the key and is unchanged (must be a smi).
+ // Scratch registers:
+ // scratch - used to hold elements of the receiver and the loaded value.
+ // result - holds the result on exit if the load succeeds and
+ // we fall through.
+
+ __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
- __ CheckMap(ecx, Factory::fixed_array_map(), &check_pixel_array, true);
+ __ CheckMap(scratch, Factory::fixed_array_map(), not_fast_array, true);
// Check that the key (index) is within bounds.
- __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
+ __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
+ __ j(above_equal, out_of_range);
// Fast case: Do the load.
ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
- __ mov(ecx, FieldOperand(ecx, eax, times_2, FixedArray::kHeaderSize));
- __ cmp(Operand(ecx), Immediate(Factory::the_hole_value()));
+ __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
+ __ cmp(Operand(scratch), Immediate(Factory::the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
- __ j(equal, &slow);
- __ mov(eax, ecx);
+ __ j(equal, out_of_range);
+ if (!result.is(scratch)) {
+ __ mov(result, scratch);
+ }
+}
+
+
+// Checks whether a key is an array index string or a symbol string.
+// Falls through if a key is a symbol.
+static void GenerateKeyStringCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_symbol) {
+ // Register use:
+ // key - holds the key and is unchanged. Assumed to be non-smi.
+ // Scratch registers:
+ // map - used to hold the map of the key.
+ // hash - used to hold the hash of the key.
+ __ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
+ __ j(above_equal, not_symbol);
+
+ // Is the string an array index, with cached numeric value?
+ __ mov(hash, FieldOperand(key, String::kHashFieldOffset));
+ __ test(hash, Immediate(String::kContainsCachedArrayIndexMask));
+ __ j(zero, index_string, not_taken);
+
+ // Is the string a symbol?
+ ASSERT(kSymbolTag != 0);
+ __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsSymbolMask);
+ __ j(zero, not_symbol, not_taken);
+}
+
+
+// Picks out an array index from the hash field.
+// The generated code never falls through.
+static void GenerateIndexFromHash(MacroAssembler* masm,
+ Register key,
+ Register hash,
+ Label* index_smi) {
+ // Register use:
+ // key - holds the overwritten key on exit.
+ // hash - holds the key's hash. Clobbered.
+
+ // The assert checks that the constants for the maximum number of digits
+ // for an array index cached in the hash field and the number of bits
+ // reserved for it do not conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
+ // the low kHashShift bits.
+ // key: string key
+ // ebx: hash field.
+ ASSERT(String::kHashShift >= kSmiTagSize);
+ __ and_(hash, String::kArrayIndexValueMask);
+ __ shr(hash, String::kHashShift - kSmiTagSize);
+ // Here we actually clobber the key which will be used if calling into
+ // runtime later. However as the new key is the numeric value of a string key
+ // there is no difference in using either key.
+ __ mov(key, hash);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(index_smi);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, check_string, index_smi, index_string;
+ Label check_pixel_array, probe_dictionary, check_number_dictionary;
+
+ GenerateKeyedLoadReceiverCheck(masm, edx, ecx, &slow);
+
+ // Check that the key is a smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &check_string, not_taken);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from
+ // where a numeric string is converted to a smi.
+
+ GenerateFastArrayLoad(masm,
+ edx,
+ eax,
+ ecx,
+ eax,
+ &check_pixel_array,
+ &slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
__ ret(0);
@@ -379,7 +489,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
eax,
ebx,
edx,
- edi);
+ edi,
+ eax);
// Pop receiver before returning.
__ pop(edx);
__ ret(0);
@@ -396,22 +507,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
- // The key is not a smi.
- // Is it a string?
- // edx: receiver
- // eax: key
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &slow);
- // Is the string an array index, with cached numeric value?
- __ mov(ebx, FieldOperand(eax, String::kHashFieldOffset));
- __ test(ebx, Immediate(String::kContainsCachedArrayIndexMask));
- __ j(zero, &index_string, not_taken);
-
- // Is the string a symbol?
- // ecx: key map.
- ASSERT(kSymbolTag != 0);
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kIsSymbolMask);
- __ j(zero, &slow, not_taken);
+ GenerateKeyStringCheck(masm, eax, ecx, ebx, &index_string, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
@@ -472,32 +568,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ebx,
ecx,
edi,
+ eax,
DICTIONARY_CHECK_DONE);
- __ mov(eax, ecx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
__ bind(&index_string);
- // We want the smi-tagged index in eax. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- // eax: key (string).
- // ebx: hash field.
- // edx: receiver.
- ASSERT(String::kHashShift >= kSmiTagSize);
- __ and_(ebx, String::kArrayIndexValueMask);
- __ shr(ebx, String::kHashShift - kSmiTagSize);
- // Here we actually clobber the key (eax) which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- __ mov(eax, ebx);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
+ GenerateIndexFromHash(masm, eax, ebx, &index_smi);
}
@@ -1115,7 +1192,8 @@ static void GenerateNormalHelper(MacroAssembler* masm,
// Search dictionary - put result in register edi.
__ mov(edi, edx);
- GenerateDictionaryLoad(masm, miss, edx, ecx, eax, edi, ebx, CHECK_DICTIONARY);
+ GenerateDictionaryLoad(
+ masm, miss, edx, ecx, eax, edi, ebx, edi, CHECK_DICTIONARY);
// Check that the result is not a smi.
__ test(edi, Immediate(kSmiTagMask));
@@ -1293,47 +1371,123 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- Label miss, skip_probe;
+ Label do_call, slow_call, slow_load, slow_reload_receiver;
+ Label check_number_dictionary, check_string, lookup_monomorphic_cache;
+ Label index_smi, index_string;
- // Do not probe monomorphic cache if a key is a smi.
+ // Check that the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
- __ j(equal, &skip_probe, taken);
-
- GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC, &skip_probe);
-
- __ bind(&skip_probe);
+ __ j(not_zero, &check_string, not_taken);
- __ mov(eax, ecx);
- __ EnterInternalFrame();
- __ push(ecx);
- __ call(Handle<Code>(Builtins::builtin(Builtins::KeyedLoadIC_Generic)),
- RelocInfo::CODE_TARGET);
- __ pop(ecx);
- __ LeaveInternalFrame();
- __ mov(edi, eax);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from
+ // where a numeric string is converted to a smi.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ GenerateKeyedLoadReceiverCheck(masm, edx, eax, &slow_call);
- // Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ GenerateFastArrayLoad(masm,
+ edx,
+ ecx,
+ eax,
+ edi,
+ &check_number_dictionary,
+ &slow_load);
+ __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1);
- // Check that the receiver is a valid JS object.
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, eax);
- __ j(below, &miss, not_taken);
+ __ bind(&do_call);
+ // receiver in edx is not used after this point.
+ // ecx: key
+ // edi: function
- // Check that the value is a JavaScript function.
+ // Check that the value in edi is a JavaScript function.
__ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, &slow_call, not_taken);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
- __ j(not_equal, &miss, not_taken);
-
+ __ j(not_equal, &slow_call, not_taken);
// Invoke the function.
ParameterCount actual(argc);
__ InvokeFunction(edi, actual, JUMP_FUNCTION);
- __ bind(&miss);
+ __ bind(&check_number_dictionary);
+ // eax: elements
+ // ecx: smi key
+ // Check whether the elements is a number dictionary.
+ __ CheckMap(eax, Factory::hash_table_map(), &slow_load, true);
+ __ mov(ebx, ecx);
+ __ SmiUntag(ebx);
+ // ebx: untagged index
+ // Receiver in edx will be clobbered, need to reload it on miss.
+ GenerateNumberDictionaryLoad(masm,
+ &slow_reload_receiver,
+ eax,
+ ecx,
+ ebx,
+ edx,
+ edi,
+ edi);
+ __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1);
+ __ jmp(&do_call);
+
+ __ bind(&slow_reload_receiver);
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+ __ bind(&slow_load);
+ // This branch is taken when calling KeyedCallIC_Miss is neither required
+ // nor beneficial.
+ __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1);
+ __ EnterInternalFrame();
+ __ push(ecx); // save the key
+ __ push(edx); // pass the receiver
+ __ push(ecx); // pass the key
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(ecx); // restore the key
+ __ LeaveInternalFrame();
+ __ mov(edi, eax);
+ __ jmp(&do_call);
+
+ __ bind(&check_string);
+ GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow_call);
+
+ // The key is known to be a symbol.
+ // If the receiver is a regular JS object with slow properties then do
+ // a quick inline probe of the receiver's dictionary.
+ // Otherwise do the monomorphic cache probe.
+ GenerateKeyedLoadReceiverCheck(masm, edx, eax, &lookup_monomorphic_cache);
+
+ __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(Factory::hash_table_map()));
+ __ j(not_equal, &lookup_monomorphic_cache, not_taken);
+
+ GenerateDictionaryLoad(masm,
+ &slow_load,
+ edx,
+ ecx,
+ ebx,
+ eax,
+ edi,
+ edi,
+ DICTIONARY_CHECK_DONE);
+ __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
+ __ jmp(&do_call);
+
+ __ bind(&lookup_monomorphic_cache);
+ __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1);
+ GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC, &slow_call);
+ // Fall through on miss.
+
+ __ bind(&slow_call);
+ // This branch is taken if:
+ // - the receiver requires boxing or access check,
+ // - the key is neither smi nor symbol,
+ // - the value loaded is not a function,
+ // - there is hope that the runtime will create a monomorphic call stub
+ // that will get fetched next time.
+ __ IncrementCounter(&Counters::keyed_call_generic_slow, 1);
GenerateMiss(masm, argc);
+
+ __ bind(&index_string);
+ GenerateIndexFromHash(masm, ecx, ebx, &index_smi);
}
@@ -1410,6 +1564,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
edx,
edi,
ebx,
+ edi,
CHECK_DICTIONARY);
__ mov(eax, edi);
__ ret(0);
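The rewritten generic paths line up with the call shapes the new mjsunit test keyed-call-generic.js (see the diffstat) presumably exercises: a smi key takes the fast-elements or number-dictionary load, a symbol key takes the property-dictionary or monomorphic-cache probe. Illustrative call forms:

    // Keyed calls: the callee is loaded through a computed property name.
    var obj = { greet: function () { return 'hi'; } };
    var arr = [function () { return 0; }, function () { return 1; }];
    var name = 'greet';
    obj[name]();   // string (symbol) key: dictionary or cache probe
    arr[1]();      // smi key: fast array element load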
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 338f630064..2b77a54e95 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -58,7 +58,7 @@ static char TransitionMarkFromState(IC::State state) {
}
void IC::TraceIC(const char* type,
- Handle<String> name,
+ Handle<Object> name,
State old_state,
Code* new_target,
const char* extra_info) {
@@ -610,15 +610,19 @@ Object* KeyedCallIC::LoadFunction(State state,
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
ReceiverToObject(object);
- } else {
- if (FLAG_use_ic && state != MEGAMORPHIC && !object->IsAccessCheckNeeded()) {
- int argc = target()->arguments_count();
- InLoopFlag in_loop = target()->ic_in_loop();
- Object* code = StubCache::ComputeCallMegamorphic(
- argc, in_loop, Code::KEYED_CALL_IC);
- if (!code->IsFailure()) {
- set_target(Code::cast(code));
- }
+ }
+
+ if (FLAG_use_ic && state != MEGAMORPHIC && !object->IsAccessCheckNeeded()) {
+ int argc = target()->arguments_count();
+ InLoopFlag in_loop = target()->ic_in_loop();
+ Object* code = StubCache::ComputeCallMegamorphic(
+ argc, in_loop, Code::KEYED_CALL_IC);
+ if (!code->IsFailure()) {
+ set_target(Code::cast(code));
+#ifdef DEBUG
+ TraceIC(
+ "KeyedCallIC", key, state, target(), in_loop ? " (in-loop)" : "");
+#endif
}
}
Object* result = Runtime::GetObjectProperty(object, key);
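Note the control-flow change: receivers that need wrapping (strings, numbers, booleans) previously skipped the megamorphic stub installation entirely; now ReceiverToObject runs first and the stub is still installed. A call shape that takes this path (illustrative):

    // String receiver: wrapped by ReceiverToObject, yet the keyed call IC
    // can still go megamorphic for the computed method name.
    var method = 'charAt';
    print('hello'[method](1));  // e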
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index a9ad28b7fa..5fd5078fed 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -140,7 +140,7 @@ class IC {
#ifdef DEBUG
static void TraceIC(const char* type,
- Handle<String> name,
+ Handle<Object> name,
State old_state,
Code* new_target,
const char* extra_info = "");
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index afda2cbb21..17ee531a3e 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -45,7 +45,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
}
-void FullCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence() {
UNIMPLEMENTED_MIPS();
}
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 29d0069437..761b9b317b 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -1240,8 +1240,9 @@ const kFrameDetailsArgumentCountIndex = 3;
const kFrameDetailsLocalCountIndex = 4;
const kFrameDetailsSourcePositionIndex = 5;
const kFrameDetailsConstructCallIndex = 6;
-const kFrameDetailsDebuggerFrameIndex = 7;
-const kFrameDetailsFirstDynamicIndex = 8;
+const kFrameDetailsAtReturnIndex = 7;
+const kFrameDetailsDebuggerFrameIndex = 8;
+const kFrameDetailsFirstDynamicIndex = 9;
const kFrameDetailsNameIndex = 0;
const kFrameDetailsValueIndex = 1;
@@ -1258,8 +1259,11 @@ const kFrameDetailsNameValueSize = 2;
* 4: Local count
* 5: Source position
* 6: Construct call
+ * 7: Is at return
+ * 8: Debugger frame
* Arguments name, value
* Locals name, value
+ * Return value if any
* @param {number} break_id Current break id
* @param {number} index Frame number
* @constructor
@@ -1294,6 +1298,12 @@ FrameDetails.prototype.isConstructCall = function() {
}
+FrameDetails.prototype.isAtReturn = function() {
+ %CheckExecutionState(this.break_id_);
+ return this.details_[kFrameDetailsAtReturnIndex];
+}
+
+
FrameDetails.prototype.isDebuggerFrame = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kFrameDetailsDebuggerFrameIndex];
@@ -1341,7 +1351,8 @@ FrameDetails.prototype.sourcePosition = function() {
FrameDetails.prototype.localName = function(index) {
%CheckExecutionState(this.break_id_);
if (index >= 0 && index < this.localCount()) {
- var locals_offset = kFrameDetailsFirstDynamicIndex + this.argumentCount() * kFrameDetailsNameValueSize
+ var locals_offset = kFrameDetailsFirstDynamicIndex +
+ this.argumentCount() * kFrameDetailsNameValueSize
return this.details_[locals_offset +
index * kFrameDetailsNameValueSize +
kFrameDetailsNameIndex]
@@ -1352,7 +1363,8 @@ FrameDetails.prototype.localName = function(index) {
FrameDetails.prototype.localValue = function(index) {
%CheckExecutionState(this.break_id_);
if (index >= 0 && index < this.localCount()) {
- var locals_offset = kFrameDetailsFirstDynamicIndex + this.argumentCount() * kFrameDetailsNameValueSize
+ var locals_offset = kFrameDetailsFirstDynamicIndex +
+ this.argumentCount() * kFrameDetailsNameValueSize
return this.details_[locals_offset +
index * kFrameDetailsNameValueSize +
kFrameDetailsValueIndex]
@@ -1360,6 +1372,17 @@ FrameDetails.prototype.localValue = function(index) {
}
+FrameDetails.prototype.returnValue = function() {
+ %CheckExecutionState(this.break_id_);
+ var return_value_offset =
+ kFrameDetailsFirstDynamicIndex +
+ (this.argumentCount() + this.localCount()) * kFrameDetailsNameValueSize;
+ if (this.details_[kFrameDetailsAtReturnIndex]) {
+ return this.details_[return_value_offset];
+ }
+}
+
+
FrameDetails.prototype.scopeCount = function() {
return %GetScopeCount(this.break_id_, this.frameId());
}
@@ -1412,6 +1435,11 @@ FrameMirror.prototype.isConstructCall = function() {
};
+FrameMirror.prototype.isAtReturn = function() {
+ return this.details_.isAtReturn();
+};
+
+
FrameMirror.prototype.isDebuggerFrame = function() {
return this.details_.isDebuggerFrame();
};
@@ -1447,6 +1475,11 @@ FrameMirror.prototype.localValue = function(index) {
};
+FrameMirror.prototype.returnValue = function() {
+ return MakeMirror(this.details_.returnValue());
+};
+
+
FrameMirror.prototype.sourcePosition = function() {
return this.details_.sourcePosition();
};
@@ -1574,6 +1607,11 @@ FrameMirror.prototype.invocationText = function() {
result += ')';
}
+ if (this.isAtReturn()) {
+ result += ' returning ';
+ result += this.returnValue().toText();
+ }
+
return result;
}
@@ -2267,6 +2305,10 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
content.script = this.serializeReference(func.script());
}
content.constructCall = mirror.isConstructCall();
+ content.atReturn = mirror.isAtReturn();
+ if (mirror.isAtReturn()) {
+ content.returnValue = this.serializeReference(mirror.returnValue());
+ }
content.debuggerFrame = mirror.isDebuggerFrame();
var x = new Array(mirror.argumentCount());
for (var i = 0; i < mirror.argumentCount(); i++) {
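Together these additions expose break-at-return state to debugger clients. A sketch of how a listener might consume it, assuming the standard debug event listener signature where exec_state is the execution-state mirror:

    // Inside a v8::Debug event listener.
    var frame = exec_state.frame(0);
    if (frame.isAtReturn()) {
      // returnValue() wraps the function's pending return value in a mirror.
      print('returning ' + frame.returnValue().toText());
    }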
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index fecb70b775..ea9bc98767 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -130,6 +130,17 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
}
}
+
+template<class Visitor>
+void HeapEntriesMap::Apply(Visitor* visitor) {
+ for (HashMap::Entry* p = entries_.Start();
+ p != NULL;
+ p = entries_.Next(p)) {
+ if (!IsAlias(p->value))
+ visitor->Apply(reinterpret_cast<HeapEntry*>(p->value));
+ }
+}
+
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
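The heap-snapshot machinery added to profile-generator.cc below computes HeapEntry::TotalSize() with a paint-and-walk pass: clear all paint, then traverse children from the starting entry, summing the self sizes of nodes not yet painted. A compact JavaScript sketch of that walk (field names are illustrative):

    // Reachable size of an entry, as in CalculateTotalSize below.
    function totalSize(entry) {
      var stack = [entry];
      entry.painted = true;          // assumes paint was cleared beforehand
      var total = entry.selfSize;
      while (stack.length > 0) {
        var e = stack.pop();
        for (var i = 0; i < e.children.length; i++) {
          var child = e.children[i].to;
          if (!child.painted) {
            child.painted = true;
            stack.push(child);
            total += child.selfSize;
          }
        }
      }
      return total;
    }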
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 44163a0568..805ed3e674 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -29,11 +29,12 @@
#include "v8.h"
#include "global-handles.h"
+#include "scopeinfo.h"
+#include "top.h"
+#include "zone-inl.h"
#include "profile-generator-inl.h"
-#include "../include/v8-profiler.h"
-
namespace v8 {
namespace internal {
@@ -811,6 +812,794 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
profiles_->AddPathToCurrentProfiles(entries);
}
+
+HeapGraphEdge::HeapGraphEdge(Type type,
+ const char* name,
+ HeapEntry* from,
+ HeapEntry* to)
+ : type_(type), name_(name), from_(from), to_(to) {
+ ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY);
+}
+
+
+HeapGraphEdge::HeapGraphEdge(int index,
+ HeapEntry* from,
+ HeapEntry* to)
+ : type_(ELEMENT), index_(index), from_(from), to_(to) {
+}
+
+
+static void DeleteHeapGraphEdge(HeapGraphEdge** edge_ptr) {
+ delete *edge_ptr;
+}
+
+
+static void DeleteHeapGraphPath(HeapGraphPath** path_ptr) {
+ delete *path_ptr;
+}
+
+
+HeapEntry::~HeapEntry() {
+ children_.Iterate(DeleteHeapGraphEdge);
+ retaining_paths_.Iterate(DeleteHeapGraphPath);
+}
+
+
+void HeapEntry::SetClosureReference(const char* name, HeapEntry* entry) {
+ HeapGraphEdge* edge =
+ new HeapGraphEdge(HeapGraphEdge::CONTEXT_VARIABLE, name, this, entry);
+ children_.Add(edge);
+ entry->retainers_.Add(edge);
+}
+
+
+void HeapEntry::SetElementReference(int index, HeapEntry* entry) {
+ HeapGraphEdge* edge = new HeapGraphEdge(index, this, entry);
+ children_.Add(edge);
+ entry->retainers_.Add(edge);
+}
+
+
+void HeapEntry::SetPropertyReference(const char* name, HeapEntry* entry) {
+ HeapGraphEdge* edge =
+ new HeapGraphEdge(HeapGraphEdge::PROPERTY, name, this, entry);
+ children_.Add(edge);
+ entry->retainers_.Add(edge);
+}
+
+
+void HeapEntry::SetAutoIndexReference(HeapEntry* entry) {
+ SetElementReference(next_auto_index_++, entry);
+}
+
+
+int HeapEntry::TotalSize() {
+ return total_size_ != kUnknownSize ? total_size_ : CalculateTotalSize();
+}
+
+
+int HeapEntry::NonSharedTotalSize() {
+ return non_shared_total_size_ != kUnknownSize ?
+ non_shared_total_size_ : CalculateNonSharedTotalSize();
+}
+
+
+int HeapEntry::CalculateTotalSize() {
+ snapshot_->ClearPaint();
+ List<HeapEntry*> list(10);
+ list.Add(this);
+ total_size_ = self_size_;
+ this->PaintReachable();
+ while (!list.is_empty()) {
+ HeapEntry* entry = list.RemoveLast();
+ const int children_count = entry->children_.length();
+ for (int i = 0; i < children_count; ++i) {
+ HeapEntry* child = entry->children_[i]->to();
+ if (!child->painted_reachable()) {
+ list.Add(child);
+ child->PaintReachable();
+ total_size_ += child->self_size_;
+ }
+ }
+ }
+ return total_size_;
+}
+
+
+namespace {
+
+class NonSharedSizeCalculator {
+ public:
+ NonSharedSizeCalculator()
+ : non_shared_total_size_(0) {
+ }
+
+ int non_shared_total_size() const { return non_shared_total_size_; }
+
+ void Apply(HeapEntry* entry) {
+ if (entry->painted_reachable()) {
+ non_shared_total_size_ += entry->self_size();
+ }
+ }
+
+ private:
+ int non_shared_total_size_;
+};
+
+} // namespace
+
+int HeapEntry::CalculateNonSharedTotalSize() {
+ // To calculate non-shared total size, first we paint all reachable
+ // nodes in one color, then we paint all nodes reachable from other
+ // nodes with a different color. Then we consider only nodes painted
+ // with the first color for calculating the total size.
+ snapshot_->ClearPaint();
+ List<HeapEntry*> list(10);
+ list.Add(this);
+ this->PaintReachable();
+ while (!list.is_empty()) {
+ HeapEntry* entry = list.RemoveLast();
+ const int children_count = entry->children_.length();
+ for (int i = 0; i < children_count; ++i) {
+ HeapEntry* child = entry->children_[i]->to();
+ if (!child->painted_reachable()) {
+ list.Add(child);
+ child->PaintReachable();
+ }
+ }
+ }
+
+ List<HeapEntry*> list2(10);
+ if (this != snapshot_->root()) {
+ list2.Add(snapshot_->root());
+ snapshot_->root()->PaintReachableFromOthers();
+ }
+ while (!list2.is_empty()) {
+ HeapEntry* entry = list2.RemoveLast();
+ const int children_count = entry->children_.length();
+ for (int i = 0; i < children_count; ++i) {
+ HeapEntry* child = entry->children_[i]->to();
+ if (child != this && child->not_painted_reachable_from_others()) {
+ list2.Add(child);
+ child->PaintReachableFromOthers();
+ }
+ }
+ }
+
+ NonSharedSizeCalculator calculator;
+ snapshot_->IterateEntries(&calculator);
+ return calculator.non_shared_total_size();
+}
+
+
+class CachedHeapGraphPath {
+ public:
+ CachedHeapGraphPath()
+ : nodes_(NodesMatch) { }
+ CachedHeapGraphPath(const CachedHeapGraphPath& src)
+ : nodes_(NodesMatch, &HashMap::DefaultAllocator, src.nodes_.capacity()),
+ path_(src.path_.length() + 1) {
+ for (HashMap::Entry* p = src.nodes_.Start();
+ p != NULL;
+ p = src.nodes_.Next(p)) {
+ nodes_.Lookup(p->key, p->hash, true);
+ }
+ path_.AddAll(src.path_);
+ }
+ void Add(HeapGraphEdge* edge) {
+ nodes_.Lookup(edge->to(), Hash(edge->to()), true);
+ path_.Add(edge);
+ }
+ bool ContainsNode(HeapEntry* node) {
+ return nodes_.Lookup(node, Hash(node), false) != NULL;
+ }
+ const List<HeapGraphEdge*>* path() const { return &path_; }
+
+ private:
+ static uint32_t Hash(HeapEntry* entry) {
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry));
+ }
+ static bool NodesMatch(void* key1, void* key2) { return key1 == key2; }
+
+ HashMap nodes_;
+ List<HeapGraphEdge*> path_;
+};
+
+
+const List<HeapGraphPath*>* HeapEntry::GetRetainingPaths() {
+ if (retaining_paths_.length() == 0 && retainers_.length() != 0) {
+ CachedHeapGraphPath path;
+ FindRetainingPaths(this, &path);
+ }
+ return &retaining_paths_;
+}
+
+
+void HeapEntry::FindRetainingPaths(HeapEntry* node,
+ CachedHeapGraphPath* prev_path) {
+ for (int i = 0; i < node->retainers_.length(); ++i) {
+ HeapGraphEdge* ret_edge = node->retainers_[i];
+ if (prev_path->ContainsNode(ret_edge->from())) continue;
+ if (ret_edge->from() != snapshot_->root()) {
+ CachedHeapGraphPath path(*prev_path);
+ path.Add(ret_edge);
+ FindRetainingPaths(ret_edge->from(), &path);
+ } else {
+ HeapGraphPath* ret_path = new HeapGraphPath(*prev_path->path());
+ ret_path->Set(0, ret_edge);
+ retaining_paths_.Add(ret_path);
+ }
+ }
+}
+
+
+static void RemoveEdge(List<HeapGraphEdge*>* list, HeapGraphEdge* edge) {
+ for (int i = 0; i < list->length(); ) {
+ if (list->at(i) == edge) {
+ list->Remove(i);
+ return;
+ } else {
+ ++i;
+ }
+ }
+ UNREACHABLE();
+}
+
+
+void HeapEntry::RemoveChild(HeapGraphEdge* edge) {
+ RemoveEdge(&children_, edge);
+ delete edge;
+}
+
+
+void HeapEntry::RemoveRetainer(HeapGraphEdge* edge) {
+ RemoveEdge(&retainers_, edge);
+}
+
+
+void HeapEntry::CutEdges() {
+ for (int i = 0; i < children_.length(); ++i) {
+ HeapGraphEdge* edge = children_[i];
+ edge->to()->RemoveRetainer(edge);
+ }
+ children_.Iterate(DeleteHeapGraphEdge);
+ children_.Clear();
+
+ for (int i = 0; i < retainers_.length(); ++i) {
+ HeapGraphEdge* edge = retainers_[i];
+ edge->from()->RemoveChild(edge);
+ }
+ retainers_.Clear();
+}
+
+
+void HeapEntry::Print(int max_depth, int indent) {
+ OS::Print("%6d %6d %6d", self_size_, TotalSize(), NonSharedTotalSize());
+ if (type_ != STRING) {
+ OS::Print("%s %.40s\n", TypeAsString(), name_);
+ } else {
+ OS::Print("\"");
+ const char* c = name_;
+ while (*c && (c - name_) <= 40) {
+ if (*c != '\n')
+ OS::Print("%c", *c);
+ else
+ OS::Print("\\n");
+ ++c;
+ }
+ OS::Print("\"\n");
+ }
+ if (--max_depth == 0) return;
+ const int children_count = children_.length();
+ for (int i = 0; i < children_count; ++i) {
+ HeapGraphEdge* edge = children_[i];
+ switch (edge->type()) {
+ case HeapGraphEdge::CONTEXT_VARIABLE:
+ OS::Print(" %*c #%s: ", indent, ' ', edge->name());
+ break;
+ case HeapGraphEdge::ELEMENT:
+ OS::Print(" %*c %d: ", indent, ' ', edge->index());
+ break;
+ case HeapGraphEdge::PROPERTY:
+ OS::Print(" %*c %s: ", indent, ' ', edge->name());
+ break;
+ default:
+ OS::Print("!!! unknown edge type: %d ", edge->type());
+ }
+ edge->to()->Print(max_depth, indent + 2);
+ }
+}
+
+
+const char* HeapEntry::TypeAsString() {
+ switch (type_) {
+ case INTERNAL: return "/internal/";
+ case JS_OBJECT: return "/object/";
+ case CLOSURE: return "/closure/";
+ case STRING: return "/string/";
+ case CODE: return "/code/";
+ case ARRAY: return "/array/";
+ default: return "???";
+ }
+}
+
+
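+// Builds a printable root-to-node path from a cached node-to-root edge list:
+// slot 0 is reserved for the edge leaving the root (filled in later via
+// Set(0, ...)), and the remaining edges are appended in reverse order.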
+HeapGraphPath::HeapGraphPath(const List<HeapGraphEdge*>& path)
+ : path_(path.length() + 1) {
+ Add(NULL);
+ for (int i = path.length() - 1; i >= 0; --i) {
+ Add(path[i]);
+ }
+}
+
+
+void HeapGraphPath::Print() {
+ path_[0]->from()->Print(1, 0);
+ for (int i = 0; i < path_.length(); ++i) {
+ OS::Print(" -> ");
+ HeapGraphEdge* edge = path_[i];
+ switch (edge->type()) {
+ case HeapGraphEdge::CONTEXT_VARIABLE:
+ OS::Print("[#%s] ", edge->name());
+ break;
+ case HeapGraphEdge::ELEMENT:
+ OS::Print("[%d] ", edge->index());
+ break;
+ case HeapGraphEdge::PROPERTY:
+ OS::Print("[%s] ", edge->name());
+ break;
+ default:
+ OS::Print("!!! unknown edge type: %d ", edge->type());
+ }
+ edge->to()->Print(1, 0);
+ }
+ OS::Print("\n");
+}
+
+
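+// Visits raw object fields and registers every heap object the snapshot is
+// interested in as an auto-indexed reference of the parent entry.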
+class IndexedReferencesExtractor : public ObjectVisitor {
+ public:
+ IndexedReferencesExtractor(HeapSnapshot* snapshot, HeapEntry* parent)
+ : snapshot_(snapshot),
+ parent_(parent) {
+ }
+
+ void VisitPointer(Object** o) {
+ if (!(*o)->IsHeapObject()) return;
+ HeapEntry* entry = snapshot_->GetEntry(HeapObject::cast(*o));
+ if (entry != NULL) {
+ parent_->SetAutoIndexReference(entry);
+ }
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) VisitPointer(p);
+ }
+
+ private:
+ HeapSnapshot* snapshot_;
+ HeapEntry* parent_;
+};
+
+
+HeapEntriesMap::HeapEntriesMap()
+ : entries_(HeapObjectsMatch) {
+}
+
+
+HeapEntriesMap::~HeapEntriesMap() {
+ for (HashMap::Entry* p = entries_.Start();
+ p != NULL;
+ p = entries_.Next(p)) {
+ if (!IsAlias(p->value)) delete reinterpret_cast<HeapEntry*>(p->value);
+ }
+}
+
+
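+// Records an alias: the low bit of the stored pointer (kAliasTag) marks
+// entries that are shared with another key and must not be deleted twice.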
+void HeapEntriesMap::Alias(HeapObject* object, HeapEntry* entry) {
+ HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), true);
+ if (cache_entry->value == NULL)
+ cache_entry->value = reinterpret_cast<void*>(
+ reinterpret_cast<intptr_t>(entry) | kAliasTag);
+}
+
+
+void HeapEntriesMap::Apply(void (HeapEntry::*Func)(void)) {
+ for (HashMap::Entry* p = entries_.Start();
+ p != NULL;
+ p = entries_.Next(p)) {
+ if (!IsAlias(p->value)) (reinterpret_cast<HeapEntry*>(p->value)->*Func)();
+ }
+}
+
+
+HeapEntry* HeapEntriesMap::Map(HeapObject* object) {
+ HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), false);
+ return cache_entry != NULL ?
+ reinterpret_cast<HeapEntry*>(
+ reinterpret_cast<intptr_t>(cache_entry->value) & (~kAliasTag)) : NULL;
+}
+
+
+void HeapEntriesMap::Pair(HeapObject* object, HeapEntry* entry) {
+ HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), true);
+ ASSERT(cache_entry->value == NULL);
+ cache_entry->value = entry;
+}
+
+
+HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
+ const char* title,
+ unsigned uid)
+ : collection_(collection),
+ title_(title),
+ uid_(uid),
+ root_(this) {
+}
+
+
+void HeapSnapshot::ClearPaint() {
+ root_.ClearPaint();
+ entries_.Apply(&HeapEntry::ClearPaint);
+}
+
+
+HeapEntry* HeapSnapshot::GetEntry(Object* obj) {
+ if (!obj->IsHeapObject()) return NULL;
+ HeapObject* object = HeapObject::cast(obj);
+
+ {
+ HeapEntry* existing = FindEntry(object);
+ if (existing != NULL) return existing;
+ }
+
+ // Add new entry.
+ if (object->IsJSFunction()) {
+ JSFunction* func = JSFunction::cast(object);
+ SharedFunctionInfo* shared = func->shared();
+ String* name = String::cast(shared->name())->length() > 0 ?
+ String::cast(shared->name()) : shared->inferred_name();
+ return AddEntry(object, HeapEntry::CLOSURE, collection_->GetName(name));
+ } else if (object->IsJSObject()) {
+ return AddEntry(object,
+ HeapEntry::JS_OBJECT,
+ collection_->GetName(
+ JSObject::cast(object)->constructor_name()));
+ } else if (object->IsJSGlobalPropertyCell()) {
+ HeapEntry* value = GetEntry(JSGlobalPropertyCell::cast(object)->value());
+    // If the GPC references an object we are interested in, add that object.
+    // We don't store HeapEntries for GPCs themselves. Instead, the hash map
+    // points to the referenced object's HeapEntry under the GPC's address.
+ if (value != NULL) AddEntryAlias(object, value);
+ return value;
+ } else if (object->IsString()) {
+ return AddEntry(object,
+ HeapEntry::STRING,
+ collection_->GetName(String::cast(object)));
+ } else if (object->IsCode()
+ || object->IsSharedFunctionInfo()
+ || object->IsScript()) {
+ return AddEntry(object, HeapEntry::CODE);
+ } else if (object->IsFixedArray()) {
+ return AddEntry(object, HeapEntry::ARRAY);
+ }
+ // No interest in this object.
+ return NULL;
+}
+
+
+void HeapSnapshot::SetClosureReference(HeapEntry* parent,
+ String* reference_name,
+ Object* child) {
+ HeapEntry* child_entry = GetEntry(child);
+ if (child_entry != NULL) {
+ parent->SetClosureReference(
+ collection_->GetName(reference_name), child_entry);
+ }
+}
+
+
+void HeapSnapshot::SetElementReference(HeapEntry* parent,
+ int index,
+ Object* child) {
+ HeapEntry* child_entry = GetEntry(child);
+ if (child_entry != NULL) {
+ parent->SetElementReference(index, child_entry);
+ }
+}
+
+
+void HeapSnapshot::SetPropertyReference(HeapEntry* parent,
+ String* reference_name,
+ Object* child) {
+ HeapEntry* child_entry = GetEntry(child);
+ if (child_entry != NULL) {
+ parent->SetPropertyReference(
+ collection_->GetName(reference_name), child_entry);
+ }
+}
+
+
+HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
+ HeapEntry::Type type,
+ const char* name) {
+ HeapEntry* entry = new HeapEntry(this,
+ type,
+ name,
+ GetObjectSize(object),
+ GetObjectSecurityToken(object));
+ entries_.Pair(object, entry);
+
+  // Detect whether this is a JS global object of the current context, and
+  // if so, add it to the snapshot's roots. There can be several JS global
+  // objects in a context.
+ if (object->IsJSGlobalProxy()) {
+ int global_security_token = GetGlobalSecurityToken();
+ int object_security_token =
+ collection_->token_enumerator()->GetTokenId(
+ Context::cast(
+ JSGlobalProxy::cast(object)->context())->security_token());
+ if (object_security_token == TokenEnumerator::kNoSecurityToken
+ || object_security_token == global_security_token) {
+ HeapEntry* global_object_entry =
+ GetEntry(HeapObject::cast(object->map()->prototype()));
+ ASSERT(global_object_entry != NULL);
+ root_.SetAutoIndexReference(global_object_entry);
+ }
+ }
+
+ return entry;
+}
+
+
+namespace {
+
+class EdgesCutter {
+ public:
+ explicit EdgesCutter(int global_security_token)
+ : global_security_token_(global_security_token) {
+ }
+
+ void Apply(HeapEntry* entry) {
+ if (entry->security_token_id() != TokenEnumerator::kNoSecurityToken
+ && entry->security_token_id() != global_security_token_) {
+ entry->CutEdges();
+ }
+ }
+
+ private:
+ const int global_security_token_;
+};
+
+}  // namespace
+
+
+void HeapSnapshot::CutObjectsFromForeignSecurityContexts() {
+ EdgesCutter cutter(GetGlobalSecurityToken());
+ entries_.Apply(&cutter);
+}
+
+
+int HeapSnapshot::GetGlobalSecurityToken() {
+ return collection_->token_enumerator()->GetTokenId(
+ Top::context()->global()->global_context()->security_token());
+}
+
+
+int HeapSnapshot::GetObjectSize(HeapObject* obj) {
+ return obj->IsJSObject() ?
+ CalculateNetworkSize(JSObject::cast(obj)) : obj->Size();
+}
+
+
+int HeapSnapshot::GetObjectSecurityToken(HeapObject* obj) {
+ if (obj->IsGlobalContext()) {
+ return collection_->token_enumerator()->GetTokenId(
+ Context::cast(obj)->security_token());
+ } else {
+ return TokenEnumerator::kNoSecurityToken;
+ }
+}
+
+
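+// Approximates the size of a JS object together with the backing stores it
+// owns exclusively: non-shared 'properties' and 'elements' arrays and, for
+// functions, the context and literals arrays.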
+int HeapSnapshot::CalculateNetworkSize(JSObject* obj) {
+ int size = obj->Size();
+ // If 'properties' and 'elements' are non-empty (thus, non-shared),
+ // take their size into account.
+ if (FixedArray::cast(obj->properties())->length() != 0) {
+ size += obj->properties()->Size();
+ }
+ if (FixedArray::cast(obj->elements())->length() != 0) {
+ size += obj->elements()->Size();
+ }
+  // For functions, also account for non-empty context and literals sizes.
+ if (obj->IsJSFunction()) {
+ JSFunction* f = JSFunction::cast(obj);
+ if (f->unchecked_context()->IsContext()) {
+ size += f->context()->Size();
+ }
+ if (f->literals()->length() != 0) {
+ size += f->literals()->Size();
+ }
+ }
+ return size;
+}
+
+
+void HeapSnapshot::Print(int max_depth) {
+ root_.Print(max_depth, 0);
+}
+
+
+HeapSnapshotsCollection::HeapSnapshotsCollection()
+ : snapshots_uids_(HeapSnapshotsMatch),
+ token_enumerator_(new TokenEnumerator()) {
+}
+
+
+static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
+ delete *snapshot_ptr;
+}
+
+
+HeapSnapshotsCollection::~HeapSnapshotsCollection() {
+ delete token_enumerator_;
+ snapshots_.Iterate(DeleteHeapSnapshot);
+}
+
+
+HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
+ unsigned uid) {
+ HeapSnapshot* snapshot = new HeapSnapshot(this, name, uid);
+ snapshots_.Add(snapshot);
+ HashMap::Entry* entry =
+ snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
+ static_cast<uint32_t>(snapshot->uid()),
+ true);
+ ASSERT(entry->value == NULL);
+ entry->value = snapshot;
+ return snapshot;
+}
+
+
+HeapSnapshot* HeapSnapshotsCollection::GetSnapshot(unsigned uid) {
+ HashMap::Entry* entry = snapshots_uids_.Lookup(reinterpret_cast<void*>(uid),
+ static_cast<uint32_t>(uid),
+ false);
+ return entry != NULL ? reinterpret_cast<HeapSnapshot*>(entry->value) : NULL;
+}
+
+
+HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot)
+ : snapshot_(snapshot) {
+}
+
+
+void HeapSnapshotGenerator::GenerateSnapshot() {
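+  // Heap iteration must not trigger allocation: a GC in the middle of the
+  // walk could move objects and distort the snapshot.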
+ AssertNoAllocation no_alloc;
+
+ // Iterate heap contents.
+ HeapIterator iterator;
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ ExtractReferences(obj);
+ }
+
+ snapshot_->CutObjectsFromForeignSecurityContexts();
+}
+
+
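+// Dispatches on the object's type and records the outgoing references that
+// are interesting for the snapshot: closure variables, named properties,
+// indexed elements, cons string halves, and raw pointer fields.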
+void HeapSnapshotGenerator::ExtractReferences(HeapObject* obj) {
+ HeapEntry* entry = snapshot_->GetEntry(obj);
+ if (entry == NULL) return;
+ if (entry->visited()) return;
+
+ if (obj->IsJSObject()) {
+ JSObject* js_obj = JSObject::cast(obj);
+ ExtractClosureReferences(js_obj, entry);
+ ExtractPropertyReferences(js_obj, entry);
+ ExtractElementReferences(js_obj, entry);
+ snapshot_->SetPropertyReference(
+ entry, Heap::prototype_symbol(), js_obj->map()->prototype());
+ } else if (obj->IsJSGlobalPropertyCell()) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(obj);
+ snapshot_->SetElementReference(entry, 0, cell->value());
+ } else if (obj->IsString()) {
+ if (obj->IsConsString()) {
+ ConsString* cs = ConsString::cast(obj);
+ snapshot_->SetElementReference(entry, 0, cs->first());
+ snapshot_->SetElementReference(entry, 1, cs->second());
+ }
+ } else if (obj->IsCode() || obj->IsSharedFunctionInfo() || obj->IsScript()) {
+ IndexedReferencesExtractor refs_extractor(snapshot_, entry);
+ obj->Iterate(&refs_extractor);
+ } else if (obj->IsFixedArray()) {
+ IndexedReferencesExtractor refs_extractor(snapshot_, entry);
+ obj->Iterate(&refs_extractor);
+ }
+ entry->MarkAsVisited();
+}
+
+
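+// For functions, records references to the variables captured in the
+// function's context, using ScopeInfo to map local names to context slots.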
+void HeapSnapshotGenerator::ExtractClosureReferences(JSObject* js_obj,
+ HeapEntry* entry) {
+ if (js_obj->IsJSFunction()) {
+ HandleScope hs;
+ JSFunction* func = JSFunction::cast(js_obj);
+ Context* context = func->context();
+ ZoneScope zscope(DELETE_ON_EXIT);
+ ScopeInfo<ZoneListAllocationPolicy> scope_info(
+ context->closure()->shared()->code());
+ int locals_number = scope_info.NumberOfLocals();
+ for (int i = 0; i < locals_number; ++i) {
+ String* local_name = *scope_info.LocalName(i);
+ int idx = ScopeInfo<>::ContextSlotIndex(
+ context->closure()->shared()->code(), local_name, NULL);
+ if (idx >= 0 && idx < context->length()) {
+ snapshot_->SetClosureReference(entry, local_name, context->get(idx));
+ }
+ }
+ }
+}
+
+
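+// Records named property references, handling both fast (descriptor array)
+// and dictionary-mode property storage.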
+void HeapSnapshotGenerator::ExtractPropertyReferences(JSObject* js_obj,
+ HeapEntry* entry) {
+ if (js_obj->HasFastProperties()) {
+ DescriptorArray* descs = js_obj->map()->instance_descriptors();
+ for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ switch (descs->GetType(i)) {
+ case FIELD: {
+ int index = descs->GetFieldIndex(i);
+ snapshot_->SetPropertyReference(
+ entry, descs->GetKey(i), js_obj->FastPropertyAt(index));
+ break;
+ }
+ case CONSTANT_FUNCTION:
+ snapshot_->SetPropertyReference(
+ entry, descs->GetKey(i), descs->GetConstantFunction(i));
+ break;
+ default: ;
+ }
+ }
+ } else {
+ StringDictionary* dictionary = js_obj->property_dictionary();
+ int length = dictionary->Capacity();
+ for (int i = 0; i < length; ++i) {
+ Object* k = dictionary->KeyAt(i);
+ if (dictionary->IsKey(k)) {
+ snapshot_->SetPropertyReference(
+ entry, String::cast(k), dictionary->ValueAt(i));
+ }
+ }
+ }
+}
+
+
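+// Records indexed element references for both fast and dictionary elements
+// backing stores, skipping holes.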
+void HeapSnapshotGenerator::ExtractElementReferences(JSObject* js_obj,
+ HeapEntry* entry) {
+ if (js_obj->HasFastElements()) {
+ FixedArray* elements = FixedArray::cast(js_obj->elements());
+ int length = js_obj->IsJSArray() ?
+ Smi::cast(JSArray::cast(js_obj)->length())->value() :
+ elements->length();
+ for (int i = 0; i < length; ++i) {
+ if (!elements->get(i)->IsTheHole()) {
+ snapshot_->SetElementReference(entry, i, elements->get(i));
+ }
+ }
+ } else if (js_obj->HasDictionaryElements()) {
+ NumberDictionary* dictionary = js_obj->element_dictionary();
+ int length = dictionary->Capacity();
+ for (int i = 0; i < length; ++i) {
+ Object* k = dictionary->KeyAt(i);
+ if (dictionary->IsKey(k)) {
+ ASSERT(k->IsNumber());
+ uint32_t index = static_cast<uint32_t>(k->Number());
+ snapshot_->SetElementReference(entry, index, dictionary->ValueAt(i));
+ }
+ }
+ }
+}
+
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index be0e94ea19..3f90702bc4 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -31,6 +31,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "hashmap.h"
+#include "../include/v8-profiler.h"
namespace v8 {
namespace internal {
@@ -53,6 +54,8 @@ class TokenEnumerator {
List<bool> token_removed_;
friend class TokenEnumeratorTester;
+
+ DISALLOW_COPY_AND_ASSIGN(TokenEnumerator);
};
@@ -357,6 +360,8 @@ class SampleRateCalculator {
unsigned measurements_count_;
unsigned wall_time_query_countdown_;
double last_wall_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(SampleRateCalculator);
};
@@ -416,6 +421,310 @@ class ProfileGenerator {
DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
};
+
+class HeapSnapshot;
+class HeapEntry;
+
+
+class HeapGraphEdge {
+ public:
+ enum Type {
+ CONTEXT_VARIABLE,
+ ELEMENT,
+ PROPERTY
+ };
+
+ HeapGraphEdge(Type type, const char* name, HeapEntry* from, HeapEntry* to);
+ HeapGraphEdge(int index, HeapEntry* from, HeapEntry* to);
+
+ Type type() const { return type_; }
+ int index() const {
+ ASSERT(type_ == ELEMENT);
+ return index_;
+ }
+ const char* name() const {
+ ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY);
+ return name_;
+ }
+ HeapEntry* from() const { return from_; }
+ HeapEntry* to() const { return to_; }
+
+ private:
+ Type type_;
+ union {
+ int index_;
+ const char* name_;
+ };
+ HeapEntry* from_;
+ HeapEntry* to_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapGraphEdge);
+};
+
+
+class HeapGraphPath;
+class CachedHeapGraphPath;
+
+class HeapEntry {
+ public:
+ enum Type {
+ INTERNAL,
+ ARRAY,
+ STRING,
+ JS_OBJECT,
+ CODE,
+ CLOSURE
+ };
+
+ explicit HeapEntry(HeapSnapshot* snapshot)
+ : snapshot_(snapshot),
+ visited_(false),
+ type_(INTERNAL),
+ name_(""),
+ next_auto_index_(0),
+ self_size_(0),
+ security_token_id_(TokenEnumerator::kNoSecurityToken),
+ children_(1),
+ retainers_(0),
+ retaining_paths_(0),
+ total_size_(kUnknownSize),
+ non_shared_total_size_(kUnknownSize),
+ painted_(kUnpainted) { }
+ HeapEntry(HeapSnapshot* snapshot,
+ Type type,
+ const char* name,
+ int self_size,
+ int security_token_id)
+ : snapshot_(snapshot),
+ visited_(false),
+ type_(type),
+ name_(name),
+ next_auto_index_(1),
+ self_size_(self_size),
+ security_token_id_(security_token_id),
+ children_(4),
+ retainers_(4),
+ retaining_paths_(4),
+ total_size_(kUnknownSize),
+ non_shared_total_size_(kUnknownSize),
+ painted_(kUnpainted) { }
+ ~HeapEntry();
+
+ bool visited() const { return visited_; }
+ Type type() const { return type_; }
+ const char* name() const { return name_; }
+ int self_size() const { return self_size_; }
+ int security_token_id() const { return security_token_id_; }
+ bool painted_reachable() { return painted_ == kPaintReachable; }
+ bool not_painted_reachable_from_others() {
+ return painted_ != kPaintReachableFromOthers;
+ }
+ const List<HeapGraphEdge*>* children() const { return &children_; }
+ const List<HeapGraphEdge*>* retainers() const { return &retainers_; }
+ const List<HeapGraphPath*>* GetRetainingPaths();
+
+ void ClearPaint() { painted_ = kUnpainted; }
+ void CutEdges();
+ void MarkAsVisited() { visited_ = true; }
+ void PaintReachable() {
+ ASSERT(painted_ == kUnpainted);
+ painted_ = kPaintReachable;
+ }
+ void PaintReachableFromOthers() { painted_ = kPaintReachableFromOthers; }
+ void SetClosureReference(const char* name, HeapEntry* entry);
+ void SetElementReference(int index, HeapEntry* entry);
+ void SetPropertyReference(const char* name, HeapEntry* entry);
+ void SetAutoIndexReference(HeapEntry* entry);
+
+ int TotalSize();
+ int NonSharedTotalSize();
+
+ void Print(int max_depth, int indent);
+
+ private:
+ int CalculateTotalSize();
+ int CalculateNonSharedTotalSize();
+ void FindRetainingPaths(HeapEntry* node, CachedHeapGraphPath* prev_path);
+ void RemoveChild(HeapGraphEdge* edge);
+ void RemoveRetainer(HeapGraphEdge* edge);
+
+ const char* TypeAsString();
+
+ HeapSnapshot* snapshot_;
+ bool visited_;
+ Type type_;
+ const char* name_;
+ int next_auto_index_;
+ int self_size_;
+ int security_token_id_;
+ List<HeapGraphEdge*> children_;
+ List<HeapGraphEdge*> retainers_;
+ List<HeapGraphPath*> retaining_paths_;
+ int total_size_;
+ int non_shared_total_size_;
+ int painted_;
+
+ static const int kUnknownSize = -1;
+ static const int kUnpainted = 0;
+ static const int kPaintReachable = 1;
+ static const int kPaintReachableFromOthers = 2;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(HeapEntry);
+};
+
+
+class HeapGraphPath {
+ public:
+ HeapGraphPath()
+ : path_(8) { }
+ explicit HeapGraphPath(const List<HeapGraphEdge*>& path);
+
+ void Add(HeapGraphEdge* edge) { path_.Add(edge); }
+ void Set(int index, HeapGraphEdge* edge) { path_[index] = edge; }
+ const List<HeapGraphEdge*>* path() const { return &path_; }
+
+ void Print();
+
+ private:
+ List<HeapGraphEdge*> path_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapGraphPath);
+};
+
+
+class HeapEntriesMap {
+ public:
+ HeapEntriesMap();
+ ~HeapEntriesMap();
+
+ void Alias(HeapObject* object, HeapEntry* entry);
+ void Apply(void (HeapEntry::*Func)(void));
+ template<class Visitor>
+ void Apply(Visitor* visitor);
+ HeapEntry* Map(HeapObject* object);
+ void Pair(HeapObject* object, HeapEntry* entry);
+
+ private:
+ INLINE(uint32_t Hash(HeapObject* object)) {
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
+ }
+ INLINE(static bool HeapObjectsMatch(void* key1, void* key2)) {
+ return key1 == key2;
+ }
+ INLINE(bool IsAlias(void* ptr)) {
+ return reinterpret_cast<intptr_t>(ptr) & kAliasTag;
+ }
+
+ static const intptr_t kAliasTag = 1;
+
+ HashMap entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
+};
+
+
+class HeapSnapshotsCollection;
+
+// HeapSnapshot represents a single heap snapshot. It is stored in
+// HeapSnapshotsCollection, which is also a factory for
+// HeapSnapshots. All HeapSnapshots share strings copied from the JS heap,
+// so they can still be returned after the originals have been collected.
+// HeapSnapshotGenerator fills in a HeapSnapshot.
+class HeapSnapshot {
+ public:
+ HeapSnapshot(HeapSnapshotsCollection* collection,
+ const char* title,
+ unsigned uid);
+ void ClearPaint();
+ void CutObjectsFromForeignSecurityContexts();
+ HeapEntry* GetEntry(Object* object);
+ void SetClosureReference(
+ HeapEntry* parent, String* reference_name, Object* child);
+ void SetElementReference(HeapEntry* parent, int index, Object* child);
+ void SetPropertyReference(
+ HeapEntry* parent, String* reference_name, Object* child);
+
+ INLINE(const char* title() const) { return title_; }
+ INLINE(unsigned uid() const) { return uid_; }
+ const HeapEntry* const_root() const { return &root_; }
+ HeapEntry* root() { return &root_; }
+ template<class Visitor>
+ void IterateEntries(Visitor* visitor) { entries_.Apply(visitor); }
+
+ void Print(int max_depth);
+
+ private:
+ HeapEntry* AddEntry(HeapObject* object, HeapEntry::Type type) {
+ return AddEntry(object, type, "");
+ }
+ HeapEntry* AddEntry(
+ HeapObject* object, HeapEntry::Type type, const char* name);
+ void AddEntryAlias(HeapObject* object, HeapEntry* entry) {
+ entries_.Alias(object, entry);
+ }
+ HeapEntry* FindEntry(HeapObject* object) {
+ return entries_.Map(object);
+ }
+ int GetGlobalSecurityToken();
+ int GetObjectSecurityToken(HeapObject* obj);
+ static int GetObjectSize(HeapObject* obj);
+ static int CalculateNetworkSize(JSObject* obj);
+
+ HeapSnapshotsCollection* collection_;
+ const char* title_;
+ unsigned uid_;
+ HeapEntry root_;
+ // HeapObject* -> HeapEntry*
+ HeapEntriesMap entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
+};
+
+
+class HeapSnapshotsCollection {
+ public:
+ HeapSnapshotsCollection();
+ ~HeapSnapshotsCollection();
+
+ HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
+ List<HeapSnapshot*>* snapshots() { return &snapshots_; }
+ HeapSnapshot* GetSnapshot(unsigned uid);
+
+ const char* GetName(String* name) { return names_.GetName(name); }
+
+ TokenEnumerator* token_enumerator() { return token_enumerator_; }
+
+ private:
+ INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
+ return key1 == key2;
+ }
+
+ List<HeapSnapshot*> snapshots_;
+ // uid -> HeapSnapshot*
+ HashMap snapshots_uids_;
+ StringsStorage names_;
+ TokenEnumerator* token_enumerator_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
+};
+
+
+class HeapSnapshotGenerator {
+ public:
+ explicit HeapSnapshotGenerator(HeapSnapshot* snapshot);
+ void GenerateSnapshot();
+
+ private:
+ void ExtractReferences(HeapObject* obj);
+ void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
+ void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
+ void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
+
+ HeapSnapshot* snapshot_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
+};
+
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 4c271714cf..88786e8247 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -8243,8 +8243,9 @@ static const int kFrameDetailsArgumentCountIndex = 3;
static const int kFrameDetailsLocalCountIndex = 4;
static const int kFrameDetailsSourcePositionIndex = 5;
static const int kFrameDetailsConstructCallIndex = 6;
-static const int kFrameDetailsDebuggerFrameIndex = 7;
-static const int kFrameDetailsFirstDynamicIndex = 8;
+static const int kFrameDetailsAtReturnIndex = 7;
+static const int kFrameDetailsDebuggerFrameIndex = 8;
+static const int kFrameDetailsFirstDynamicIndex = 9;
// Return an array with frame details
// args[0]: number: break id
@@ -8258,9 +8259,11 @@ static const int kFrameDetailsFirstDynamicIndex = 8;
// 4: Local count
// 5: Source position
// 6: Constructor call
-// 7: Debugger frame
+// 7: Is at return
+// 8: Debugger frame
// Arguments name, value
// Locals name, value
+// Return value if any
static Object* Runtime_GetFrameDetails(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 2);
@@ -8336,8 +8339,39 @@ static Object* Runtime_GetFrameDetails(Arguments args) {
}
}
- // Now advance to the arguments adapter frame (if any). If contains all
- // the provided parameters and
+ // Check whether this frame is positioned at return.
+  bool at_return = (index == 0) ? Debug::IsBreakAtReturn(it.frame()) : false;
+
+  // If positioned just before return, find the value to be returned and add
+  // it to the frame information.
+ Handle<Object> return_value = Factory::undefined_value();
+ if (at_return) {
+ StackFrameIterator it2;
+ Address internal_frame_sp = NULL;
+ while (!it2.done()) {
+ if (it2.frame()->is_internal()) {
+ internal_frame_sp = it2.frame()->sp();
+ } else {
+ if (it2.frame()->is_java_script()) {
+ if (it2.frame()->id() == it.frame()->id()) {
+ // The internal frame just before the JavaScript frame contains the
+ // value to return on top. A debug break at return will create an
+ // internal frame to store the return value (eax/rax/r0) before
+ // entering the debug break exit frame.
+ if (internal_frame_sp != NULL) {
+ return_value =
+ Handle<Object>(Memory::Object_at(internal_frame_sp));
+ break;
+ }
+ }
+ }
+
+ // Indicate that the previous frame was not an internal frame.
+ internal_frame_sp = NULL;
+ }
+ it2.Advance();
+ }
+ }
// Now advance to the arguments adapter frame (if any). It contains all
// the provided parameters whereas the function frame always have the number
@@ -8354,7 +8388,8 @@ static Object* Runtime_GetFrameDetails(Arguments args) {
// Calculate the size of the result.
int details_size = kFrameDetailsFirstDynamicIndex +
- 2 * (argument_count + info.NumberOfLocals());
+ 2 * (argument_count + info.NumberOfLocals()) +
+ (at_return ? 1 : 0);
Handle<FixedArray> details = Factory::NewFixedArray(details_size);
// Add the frame id.
@@ -8380,6 +8415,9 @@ static Object* Runtime_GetFrameDetails(Arguments args) {
// Add the constructor information.
details->set(kFrameDetailsConstructCallIndex, Heap::ToBoolean(constructor));
+ // Add the at return information.
+ details->set(kFrameDetailsAtReturnIndex, Heap::ToBoolean(at_return));
+
// Add information on whether this frame is invoked in the debugger context.
details->set(kFrameDetailsDebuggerFrameIndex,
Heap::ToBoolean(*save->context() == *Debug::debug_context()));
@@ -8409,6 +8447,11 @@ static Object* Runtime_GetFrameDetails(Arguments args) {
details->set(details_index++, locals->get(i));
}
+ // Add the value being returned.
+ if (at_return) {
+ details->set(details_index++, *return_value);
+ }
+
// Add the receiver (same as in function frame).
// THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
// THE FRAME ITERATOR TO WRAP THE RECEIVER.
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index 862d5bee5a..d49c207518 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -152,6 +152,33 @@ uint32_t Page::GetRegionMaskForAddress(Address addr) {
}
+uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
+ uint32_t result = 0;
+ if (length_in_bytes >= kPageSize) {
+ result = kAllRegionsDirtyMarks;
+ } else if (length_in_bytes > 0) {
+ int start_region = GetRegionNumberForAddress(start);
+ int end_region =
+ GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
+ uint32_t start_mask = (~0) << start_region;
+ uint32_t end_mask = ~((~1) << end_region);
+ result = start_mask & end_mask;
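+    // Example: start_region == 2 and end_region == 4 give
+    // start_mask == ...11111100 and end_mask == ...00011111, so the
+    // result has exactly bits 2..4 set.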
+    // If end_region < start_region, the span wraps around, so the two
+    // masks are OR'ed instead of AND'ed.
+ if (result == 0) result = start_mask | end_mask;
+ }
+#ifdef DEBUG
+ if (FLAG_enable_slow_asserts) {
+ uint32_t expected = 0;
+ for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
+ expected |= GetRegionMaskForAddress(a);
+ }
+ ASSERT(expected == result);
+ }
+#endif
+ return result;
+}
+
+
void Page::MarkRegionDirty(Address address) {
SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
}
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 32a3e6cb0e..051ce37cf8 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -220,6 +220,7 @@ class Page {
inline void SetRegionMarks(uint32_t dirty);
inline uint32_t GetRegionMaskForAddress(Address addr);
+ inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
inline int GetRegionNumberForAddress(Address addr);
inline void MarkRegionDirty(Address addr);
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 00e8f439ad..10b8102460 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -126,6 +126,14 @@ namespace internal {
SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
+ /* How is the generic keyed-call stub used? */ \
+ SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast) \
+ SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict) \
+ SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \
+ SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict) \
+ SC(keyed_call_generic_value_type, V8.KeyedCallGenericValueType) \
+ SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow) \
+ SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad) \
/* Count how much the monomorphic keyed-load stubs are hit. */ \
SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype) \
SC(keyed_load_string_length, V8.KeyedLoadStringLength) \
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 2172524f4e..9fcbb86617 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
-#define BUILD_NUMBER 16
+#define BUILD_NUMBER 17
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 701ec6501c..f9692ce43c 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -6894,7 +6894,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
smi_value,
overwrite_mode);
}
- __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
__ SmiAddConstant(operand->reg(),
operand->reg(),
smi_value,
@@ -6915,7 +6916,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
smi_value,
overwrite_mode);
- __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
// A smi currently fits in a 32-bit Immediate.
__ SmiSubConstant(operand->reg(),
operand->reg(),
@@ -6944,7 +6946,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
operand->reg(),
smi_value,
overwrite_mode);
- __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
__ SmiShiftArithmeticRightConstant(operand->reg(),
operand->reg(),
shift_value);
@@ -6971,7 +6974,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
operand->reg(),
smi_value,
overwrite_mode);
- __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
__ SmiShiftLogicalRightConstant(answer.reg(),
operand->reg(),
shift_value,
@@ -7003,12 +7007,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
smi_value,
operand->reg(),
overwrite_mode);
- if (!operand->type_info().IsSmi()) {
- Condition is_smi = masm_->CheckSmi(operand->reg());
- deferred->Branch(NegateCondition(is_smi));
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
__ Move(answer.reg(), smi_value);
__ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
@@ -7029,7 +7029,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
operand->reg(),
smi_value,
overwrite_mode);
- __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
deferred->BindExit();
answer = *operand;
} else {
@@ -7042,7 +7043,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
operand->reg(),
smi_value,
overwrite_mode);
- __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
__ SmiShiftLeftConstant(answer.reg(),
operand->reg(),
shift_value);
@@ -7068,7 +7070,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
operand->reg(),
smi_value,
overwrite_mode);
- __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
if (op == Token::BIT_AND) {
__ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
} else if (op == Token::BIT_XOR) {
@@ -7133,6 +7136,37 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
}
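+// When TypeInfo already proves the operand is a smi, the dynamic check is
+// omitted entirely; debug builds still verify the assumption via
+// AbortIfNotSmi.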
+void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
+ TypeInfo type,
+ DeferredCode* deferred) {
+ if (!type.IsSmi()) {
+ __ JumpIfNotSmi(reg, deferred->entry_label());
+ }
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(reg);
+ }
+}
+
+
+void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
+ Register right,
+ TypeInfo left_info,
+ TypeInfo right_info,
+ DeferredCode* deferred) {
+ if (!left_info.IsSmi() && !right_info.IsSmi()) {
+ __ JumpIfNotBothSmi(left, right, deferred->entry_label());
+ } else if (!left_info.IsSmi()) {
+ __ JumpIfNotSmi(left, deferred->entry_label());
+ } else if (!right_info.IsSmi()) {
+ __ JumpIfNotSmi(right, deferred->entry_label());
+ }
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+}
+
+
// Implements a binary operation using a deferred code object and some
// inline code to operate on smis quickly.
Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
@@ -7142,9 +7176,6 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
// Copy the type info because left and right may be overwritten.
TypeInfo left_type_info = left->type_info();
TypeInfo right_type_info = right->type_info();
- USE(left_type_info);
- USE(right_type_info);
- // TODO(X64): Use type information in calculations.
Token::Value op = expr->op();
Result answer;
// Special handling of div and mod because they use fixed registers.
@@ -7221,7 +7252,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left->reg(),
right->reg(),
overwrite_mode);
- __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
+ left_type_info, right_type_info, deferred);
if (op == Token::DIV) {
__ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
@@ -7303,7 +7335,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
}
}
} else {
- __ JumpIfNotBothSmi(left->reg(), rcx, deferred->entry_label());
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
+ left_type_info, right_type_info, deferred);
}
__ bind(&do_op);
@@ -7351,7 +7384,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left->reg(),
right->reg(),
overwrite_mode);
- __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
+ left_type_info, right_type_info, deferred);
switch (op) {
case Token::ADD:
@@ -8155,14 +8189,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ movl(rcx, rdx);
__ movl(rax, rdx);
__ movl(rdi, rdx);
- __ sarl(rdx, Immediate(8));
- __ sarl(rcx, Immediate(16));
- __ sarl(rax, Immediate(24));
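+  // Use logical right shifts here: arithmetic shifts would smear the sign
+  // bit of the double's upper word into the hash bits.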
+ __ shrl(rdx, Immediate(8));
+ __ shrl(rcx, Immediate(16));
+ __ shrl(rax, Immediate(24));
__ xorl(rcx, rdx);
__ xorl(rax, rdi);
__ xorl(rcx, rax);
ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
__ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
+
// ST[0] == double value.
// rbx = bits of double value.
// rcx = TranscendentalCache::hash(double value).
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 5b1c8af86a..cd03d2acc1 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -477,6 +477,22 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(BinaryOperation* expr,
OverwriteMode overwrite_mode);
+  // Emits a code sequence that jumps to deferred code if the input
+  // is not a smi. Cannot be in MacroAssembler because it takes
+  // advantage of TypeInfo to skip unneeded checks.
+ void JumpIfNotSmiUsingTypeInfo(Register reg,
+ TypeInfo type,
+ DeferredCode* deferred);
+
+  // Emits a code sequence that jumps to deferred code if the inputs
+  // are not both smis. Cannot be in MacroAssembler because it takes
+  // advantage of TypeInfo to skip unneeded checks.
+ void JumpIfNotBothSmiUsingTypeInfo(Register left,
+ Register right,
+ TypeInfo left_info,
+ TypeInfo right_info,
+ DeferredCode* deferred);
+
// If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time.
// Returns true if it succeeds. Otherwise it has no effect.
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 1f29de7bbb..d99ea84a6e 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -187,12 +187,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
{ Comment cmnt(masm_, "[ return <undefined>;");
// Emit a 'return undefined' in case control fell off the end of the body.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- EmitReturnSequence(function()->end_position());
+ EmitReturnSequence();
}
}
-void FullCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -207,7 +207,7 @@ void FullCodeGenerator::EmitReturnSequence(int position) {
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
#endif
- CodeGenerator::RecordPositions(masm_, position);
+ CodeGenerator::RecordPositions(masm_, function()->end_position());
__ RecordJSReturn();
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index d30b5ab9b6..195fef49b8 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -960,6 +960,8 @@ TEST(Regression39128) {
TEST(TestCodeFlushing) {
i::FLAG_allow_natives_syntax = true;
+  // If we do not flush code, this test is invalid.
+ if (!FLAG_flush_code) return;
InitializeVM();
v8::HandleScope scope;
const char* source = "function foo() {"
diff --git a/deps/v8/test/mjsunit/debug-return-value.js b/deps/v8/test/mjsunit/debug-return-value.js
new file mode 100644
index 0000000000..a9ac520482
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-return-value.js
@@ -0,0 +1,163 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug;
+
+
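+// Wraps a JSON debugger response and indexes the 'refs' array by handle so
+// referenced values can be looked up easily.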
+function ParsedResponse(json) {
+ this.response_ = eval('(' + json + ')');
+ this.refs_ = [];
+ if (this.response_.refs) {
+ for (var i = 0; i < this.response_.refs.length; i++) {
+ this.refs_[this.response_.refs[i].handle] = this.response_.refs[i];
+ }
+ }
+}
+
+
+ParsedResponse.prototype.response = function() {
+ return this.response_;
+};
+
+
+ParsedResponse.prototype.body = function() {
+ return this.response_.body;
+};
+
+
+ParsedResponse.prototype.running = function() {
+ return this.response_.running;
+};
+
+
+ParsedResponse.prototype.lookup = function(handle) {
+ return this.refs_[handle];
+};
+
+
+listener_complete = false;
+exception = false;
+break_count = 0;
+expected_return_value = 0;
+debugger_source_position = 0;
+
+// Listener which expects to take four steps to reach the return from the
+// function.
+function listener(event, exec_state, event_data, data) {
+ try {
+    if (event == Debug.DebugEvent.Break) {
+ break_count++;
+ if (break_count < 4) {
+        assertFalse(exec_state.frame(0).isAtReturn());
+ switch (break_count) {
+ case 1:
+ // Collect the position of the debugger statement.
+ debugger_source_position = exec_state.frame(0).sourcePosition();
+ break;
+ case 2:
+ // Position now at the if statement.
+ assertEquals(debugger_source_position + 10,
+ exec_state.frame(0).sourcePosition());
+ break;
+ case 3:
+ // Position now at either of the returns.
+ if (expected_return_value == 1) {
+ assertEquals(debugger_source_position + 19,
+ exec_state.frame(0).sourcePosition());
+ } else {
+ assertEquals(debugger_source_position + 38,
+ exec_state.frame(0).sourcePosition());
+ }
+ break;
+ default:
+ fail("Unexpected");
+ }
+ exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ } else {
+ // Position at the end of the function.
+ assertEquals(debugger_source_position + 51,
+ exec_state.frame(0).sourcePosition());
+
+ // Just about to return from the function.
+      assertTrue(exec_state.frame(0).isAtReturn());
+ assertEquals(expected_return_value,
+ exec_state.frame(0).returnValue().value());
+
+ // Check the same using the JSON commands.
+ var dcp = exec_state.debugCommandProcessor(false);
+ var request = '{"seq":0,"type":"request","command":"backtrace"}';
+ var resp = dcp.processDebugJSONRequest(request);
+ response = new ParsedResponse(resp);
+ frames = response.body().frames;
+ assertTrue(frames[0].atReturn);
+ assertEquals(expected_return_value,
+ response.lookup(frames[0].returnValue.ref).value);
+
+ listener_complete = true;
+ }
+ }
+ } catch (e) {
+    exception = e;
+  }
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Four steps from the debugger statement in this function will position us at
+// the function return.
+// 0 1 2 3 4 5
+// 0123456789012345678901234567890123456789012345678901
+
+function f(x) {debugger; if (x) { return 1; } else { return 2; } };
+
+// Call f expecting different return values.
+break_count = 0;
+expected_return_value = 2;
+listener_complete = false;
+f();
+assertFalse(exception, "exception in listener");
+assertTrue(listener_complete);
+assertEquals(4, break_count);
+
+break_count = 0;
+expected_return_value = 1;
+listener_complete = false;
+f(true);
+assertFalse(exception, "exception in listener");
+assertTrue(listener_complete);
+assertEquals(4, break_count);
+
+break_count = 0;
+expected_return_value = 2;
+listener_complete = false;
+f(false);
+assertFalse(exception, "exception in listener");
+assertTrue(listener_complete);
+assertEquals(4, break_count);
diff --git a/deps/v8/test/mjsunit/keyed-call-generic.js b/deps/v8/test/mjsunit/keyed-call-generic.js
new file mode 100644
index 0000000000..0b49b3e8b3
--- /dev/null
+++ b/deps/v8/test/mjsunit/keyed-call-generic.js
@@ -0,0 +1,96 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// 'AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A test for keyed call ICs with a mix of smi and string keys.
+
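+// Each call is repeated so the call IC is exercised in all of its states,
+// not just the first, uninitialized one.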
+function testOne(receiver, key, result) {
+  for (var i = 0; i != 10; i++) {
+ assertEquals(result, receiver[key]());
+ }
+}
+
+function testMany(receiver, keys, results) {
+ for (var i = 0; i != 10; i++) {
+ for (var k = 0; k != keys.length; k++) {
+ assertEquals(results[k], receiver[keys[k]]());
+ }
+ }
+}
+
+var toStringNonSymbol = 'to';
+toStringNonSymbol += 'String';
+
+function TypeOfThis() { return typeof this; }
+
+Number.prototype.square = function() { return this * this; };
+Number.prototype.power4 = function() { return this.square().square(); };
+
+Number.prototype.type = TypeOfThis;
+String.prototype.type = TypeOfThis;
+Boolean.prototype.type = TypeOfThis;
+
+// Use a non-symbol key to force inline cache to generic case.
+testOne(0, toStringNonSymbol, '0');
+
+testOne(1, 'toString', '1');
+testOne('1', 'toString', '1');
+testOne(1.0, 'toString', '1');
+
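+// Primitive receivers are wrapped on these calls, so typeof this reports
+// 'object' for numbers, strings, and booleans.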
+testOne(1, 'type', 'object');
+testOne(2.3, 'type', 'object');
+testOne('x', 'type', 'object');
+testOne(true, 'type', 'object');
+testOne(false, 'type', 'object');
+
+testOne(2, 'square', 4);
+testOne(2, 'power4', 16);
+
+function zero () { return 0; }
+function one () { return 1; }
+function two () { return 2; }
+
+var fixed_array = [zero, one, two];
+
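+// A huge sparse index forces the array into dictionary (slow) elements mode.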
+var dict_array = [ zero, one, two ];
+dict_array[100000] = 1;
+
+var fast_prop = { zero: zero, one: one, two: two };
+
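+// Adding and then deleting a property takes the object out of fast
+// properties mode and into dictionary mode.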
+var normal_prop = { zero: zero, one: one, two: two };
+normal_prop.x = 0;
+delete normal_prop.x;
+
+var first3num = [0, 1, 2];
+var first3str = ['zero', 'one', 'two'];
+
+// Use a non-symbol key to force inline cache to generic case.
+testMany('123', [toStringNonSymbol, 'charAt', 'charCodeAt'], ['123', '1', 49]);
+
+testMany(fixed_array, first3num, first3num);
+testMany(dict_array, first3num, first3num);
+testMany(fast_prop, first3str, first3num);
+testMany(normal_prop, first3str, first3num);